2 * This file is part of wlcore
4 * Copyright (C) 2008-2010 Nokia Corporation
5 * Copyright (C) 2011-2013 Texas Instruments Inc.
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 #include <linux/module.h>
24 #include <linux/firmware.h>
25 #include <linux/etherdevice.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/irq.h>
32 #include "wl12xx_80211.h"
39 #include "vendor_cmd.h"
/* Number of attempts to boot the firmware before giving up. */
44 #define WL1271_BOOT_RETRIES 3
/*
 * Module parameters (wired up by module_param() elsewhere in the file):
 * fwlog_param selects the FW logger mode by name; the integer knobs use
 * -1 as "not set on the command line, keep the compiled-in default".
 */
46 static char *fwlog_param;
47 static int fwlog_mem_blocks = -1;
48 static int bug_on_recovery = -1;
49 static int no_recovery = -1;
/* Forward declarations for helpers defined later in this file. */
51 static void __wl1271_op_remove_interface(struct wl1271 *wl,
52 struct ieee80211_vif *vif,
53 bool reset_tx_queues);
54 static void wlcore_op_stop_locked(struct wl1271 *wl);
55 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
/*
 * Tell the FW that the station peer has reached the AUTHORIZED state.
 * Only valid for STA-type vifs that are already associated; the
 * STA_STATE_SENT flag guards against sending the command twice.
 * NOTE(review): several lines of this body are not visible in this view
 * (declarations/returns elided) — comments describe only what is shown.
 */
57 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
61 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
64 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
/* test_and_set: only the first caller after association proceeds */
67 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
70 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
74 wl1271_info("Association completed.");
/*
 * cfg80211 regulatory notifier: cache the new DFS region from the
 * regulatory request and push the updated regdomain to the device.
 */
78 static void wl1271_reg_notify(struct wiphy *wiphy,
79 struct regulatory_request *request)
81 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
82 struct wl1271 *wl = hw->priv;
84 /* copy the current dfs region */
86 wl->dfs_region = request->dfs_region;
88 wlcore_regdomain_config(wl);
/*
 * Enable/disable PS RX streaming for @wlvif via ACX and mirror the
 * result in the WLVIF_FLAG_RX_STREAMING_STARTED flag.
 * Caller must hold wl->mutex (asserted by the comment below).
 */
91 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
96 /* we should hold wl->mutex */
97 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
/* keep the driver-side flag in sync with the FW state */
102 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
104 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
/*
110 * this function is being called when the rx_streaming interval
111 * has beed changed or rx_streaming should be disabled
 *
 * Re-evaluates whether RX streaming should stay enabled: it is kept on
 * only for an associated STA while soft-gemini is active (or when the
 * "always" config knob is set) and the configured interval is non-zero.
 */
113 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
116 int period = wl->conf.rx_streaming.interval;
118 /* don't reconfigure if rx_streaming is disabled */
119 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
122 /* reconfigure/disable according to new streaming_period */
124 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
125 (wl->conf.rx_streaming.always ||
126 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
127 ret = wl1271_set_rx_streaming(wl, wlvif, true);
129 ret = wl1271_set_rx_streaming(wl, wlvif, false);
130 /* don't cancel_work_sync since we might deadlock */
131 del_timer_sync(&wlvif->rx_streaming_timer);
/*
 * Deferred work: turn RX streaming on for a vif, then arm the
 * inactivity timer that will turn it off again after
 * conf.rx_streaming.duration ms. Bails out early if streaming is
 * already started, the STA is not associated, or soft-gemini/"always"
 * conditions don't hold.
 */
137 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
140 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
141 rx_streaming_enable_work);
142 struct wl1271 *wl = wlvif->wl;
144 mutex_lock(&wl->mutex);
146 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
147 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
148 (!wl->conf.rx_streaming.always &&
149 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
/* interval == 0 means the feature is disabled in config */
152 if (!wl->conf.rx_streaming.interval)
155 ret = wl1271_ps_elp_wakeup(wl);
159 ret = wl1271_set_rx_streaming(wl, wlvif, true);
163 /* stop it after some time of inactivity */
164 mod_timer(&wlvif->rx_streaming_timer,
165 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
168 wl1271_ps_elp_sleep(wl);
170 mutex_unlock(&wl->mutex);
/*
 * Deferred work: turn RX streaming off for a vif (queued by the
 * inactivity timer below). No-op if streaming was never started.
 */
173 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
176 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
177 rx_streaming_disable_work);
178 struct wl1271 *wl = wlvif->wl;
180 mutex_lock(&wl->mutex);
182 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
185 ret = wl1271_ps_elp_wakeup(wl);
189 ret = wl1271_set_rx_streaming(wl, wlvif, false);
194 wl1271_ps_elp_sleep(wl);
196 mutex_unlock(&wl->mutex);
/*
 * Timer callback (old timer API: opaque 'data' carries the wlvif
 * pointer): queue the disable work — sleeping is not allowed here.
 */
199 static void wl1271_rx_streaming_timer(unsigned long data)
201 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
202 struct wl1271 *wl = wlvif->wl;
203 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
206 /* wl->mutex must be taken */
/*
 * Restart the TX-stuck watchdog. Only armed while blocks are allocated
 * in the FW; cancel+requeue resets the timeout window.
 */
207 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
209 /* if the watchdog is not armed, don't do anything */
210 if (wl->tx_allocated_blocks == 0)
213 cancel_delayed_work(&wl->tx_watchdog_work);
214 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
215 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
/*
 * Deferred rate-control update. For mesh vifs the new HT capabilities
 * are pushed via ACX; for other vif types the chip-specific
 * sta_rc_update hook is invoked. Skipped entirely unless the core is
 * in the ON state.
 */
218 static void wlcore_rc_update_work(struct work_struct *work)
221 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
223 struct wl1271 *wl = wlvif->wl;
224 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
226 mutex_lock(&wl->mutex);
228 if (unlikely(wl->state != WLCORE_STATE_ON))
231 ret = wl1271_ps_elp_wakeup(wl);
235 if (ieee80211_vif_is_mesh(vif)) {
236 ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
237 true, wlvif->sta.hlid);
241 wlcore_hw_sta_rc_update(wl, wlvif);
245 wl1271_ps_elp_sleep(wl);
247 mutex_unlock(&wl->mutex);
/*
 * TX watchdog: fires when no FW TX completion has been seen for
 * tx_watchdog_timeout ms while blocks are still allocated. Legitimate
 * stalls (ROC in progress, scan in progress, AP buffering frames for
 * sleeping stations) just re-arm the timer; otherwise the FW is
 * considered stuck and recovery is queued.
 */
250 static void wl12xx_tx_watchdog_work(struct work_struct *work)
252 struct delayed_work *dwork;
255 dwork = to_delayed_work(work);
256 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
258 mutex_lock(&wl->mutex);
260 if (unlikely(wl->state != WLCORE_STATE_ON))
263 /* Tx went out in the meantime - everything is ok */
264 if (unlikely(wl->tx_allocated_blocks == 0))
268 * if a ROC is in progress, we might not have any Tx for a long
269 * time (e.g. pending Tx on the non-ROC channels)
/* any bit set in roc_map means a remain-on-channel is active */
271 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
272 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
273 wl->conf.tx.tx_watchdog_timeout);
274 wl12xx_rearm_tx_watchdog_locked(wl);
279 * if a scan is in progress, we might not have any Tx for a long
282 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
283 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
284 wl->conf.tx.tx_watchdog_timeout);
285 wl12xx_rearm_tx_watchdog_locked(wl);
290 * AP might cache a frame for a long time for a sleeping station,
291 * so rearm the timer if there's an AP interface with stations. If
292 * Tx is genuinely stuck we will most hopefully discover it when all
293 * stations are removed due to inactivity.
295 if (wl->active_sta_count) {
296 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
298 wl->conf.tx.tx_watchdog_timeout,
299 wl->active_sta_count);
300 wl12xx_rearm_tx_watchdog_locked(wl);
/* no benign explanation found: declare the FW stuck */
304 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
305 wl->conf.tx.tx_watchdog_timeout);
306 wl12xx_queue_recovery_work(wl);
309 mutex_unlock(&wl->mutex);
/*
 * Apply module-parameter overrides to the driver configuration:
 * fwlog mode/output by name, and the recovery knobs when they were
 * explicitly set (-1 means "leave the default alone").
 */
312 static void wlcore_adjust_conf(struct wl1271 *wl)
316 if (!strcmp(fwlog_param, "continuous")) {
317 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
318 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
319 } else if (!strcmp(fwlog_param, "dbgpins")) {
320 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
321 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
322 } else if (!strcmp(fwlog_param, "disable")) {
/* zero memory blocks effectively disables the FW logger */
323 wl->conf.fwlog.mem_blocks = 0;
324 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
326 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
330 if (bug_on_recovery != -1)
331 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
333 if (no_recovery != -1)
334 wl->conf.recovery.no_recovery = (u8) no_recovery;
/*
 * Per-link PS regulation for AP mode: start host-side high-level PS
 * when a sleeping STA has many packets queued in the FW (memory
 * pressure), and end it when the STA wakes or drains its queue.
 */
337 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
338 struct wl12xx_vif *wlvif,
/* fw_ps: FW says this link's STA is in power-save */
343 fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
346 * Wake up from high level PS if the STA is asleep with too little
347 * packets in FW or if the STA is awake.
349 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
350 wl12xx_ps_link_end(wl, wlvif, hlid);
353 * Start high-level PS if the STA is asleep with enough blocks in FW.
354 * Make an exception if this is the only connected link. In this
355 * case FW-memory congestion is less of a problem.
356 * Note that a single connected STA means 2*ap_count + 1 active links,
357 * since we must account for the global and broadcast AP links
358 * for each AP. The "fw_ps" check assures us the other link is a STA
359 * connected to the AP. Otherwise the FW would not set the PSM bit.
361 else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
362 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
363 wl12xx_ps_link_start(wl, wlvif, hlid, true);
/*
 * Refresh per-link PS state for an AP vif from the FW status block:
 * log any change in the FW PS bitmap, then regulate PS for every
 * station link on this vif based on its allocated packet count.
 */
366 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
367 struct wl12xx_vif *wlvif,
368 struct wl_fw_status *status)
370 unsigned long cur_fw_ps_map;
373 cur_fw_ps_map = status->link_ps_bitmap;
374 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
375 wl1271_debug(DEBUG_PSM,
376 "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
377 wl->ap_fw_ps_map, cur_fw_ps_map,
378 wl->ap_fw_ps_map ^ cur_fw_ps_map);
380 wl->ap_fw_ps_map = cur_fw_ps_map;
383 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
384 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
385 wl->links[hlid].allocated_pkts);
/*
 * Read and process the FW status block: update freed-packet and
 * freed-block accounting (all counters are modular, hence the masking
 * against wrap-around), re-arm or cancel the TX watchdog, recompute
 * available TX blocks, refresh AP link PS state, and update the
 * host/chipset time offset.
 */
388 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
390 struct wl12xx_vif *wlvif;
392 u32 old_tx_blk_count = wl->tx_blocks_available;
393 int avail, freed_blocks;
396 struct wl1271_link *lnk;
/* raw read of the FW status area, then chip-specific conversion */
398 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
400 wl->fw_status_len, false);
404 wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
406 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
407 "drv_rx_counter = %d, tx_results_counter = %d)",
409 status->fw_rx_counter,
410 status->drv_rx_counter,
411 status->tx_results_counter);
413 for (i = 0; i < NUM_TX_QUEUES; i++) {
414 /* prevent wrap-around in freed-packets counter */
415 wl->tx_allocated_pkts[i] -=
416 (status->counters.tx_released_pkts[i] -
417 wl->tx_pkts_freed[i]) & 0xff;
419 wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
/* same accounting, per active link */
423 for_each_set_bit(i, wl->links_map, wl->num_links) {
427 /* prevent wrap-around in freed-packets counter */
428 diff = (status->counters.tx_lnk_free_pkts[i] -
429 lnk->prev_freed_pkts) & 0xff;
434 lnk->allocated_pkts -= diff;
435 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
437 /* accumulate the prev_freed_pkts counter */
438 lnk->total_freed_pkts += diff;
441 /* prevent wrap-around in total blocks counter */
442 if (likely(wl->tx_blocks_freed <= status->total_released_blks))
443 freed_blocks = status->total_released_blks -
/* 32-bit counter wrapped: add back the modulus */
446 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
447 status->total_released_blks;
449 wl->tx_blocks_freed = status->total_released_blks;
451 wl->tx_allocated_blocks -= freed_blocks;
454 * If the FW freed some blocks:
455 * If we still have allocated blocks - re-arm the timer, Tx is
456 * not stuck. Otherwise, cancel the timer (no Tx currently).
459 if (wl->tx_allocated_blocks)
460 wl12xx_rearm_tx_watchdog_locked(wl);
462 cancel_delayed_work(&wl->tx_watchdog_work);
465 avail = status->tx_total - wl->tx_allocated_blocks;
468 * The FW might change the total number of TX memblocks before
469 * we get a notification about blocks being released. Thus, the
470 * available blocks calculation might yield a temporary result
471 * which is lower than the actual available blocks. Keeping in
472 * mind that only blocks that were allocated can be moved from
473 * TX to RX, tx_blocks_available should never decrease here.
475 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
478 /* if more blocks are available now, tx work can be scheduled */
479 if (wl->tx_blocks_available > old_tx_blk_count)
480 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
482 /* for AP update num of allocated TX blocks per link and ps status */
483 wl12xx_for_each_wlvif_ap(wl, wlvif) {
484 wl12xx_irq_update_links_status(wl, wlvif, status);
487 /* update the host-chipset time offset */
489 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
490 (s64)(status->fw_localtime);
492 wl->fw_fast_lnk_map = status->link_fast_bitmap;
/*
 * Drain both deferred skb queues into mac80211: received frames go up
 * via ieee80211_rx_ni(), completed TX skbs via ieee80211_tx_status_ni()
 * (the _ni variants are for non-interrupt, non-softirq context).
 */
497 static void wl1271_flush_deferred_work(struct wl1271 *wl)
501 /* Pass all received frames to the network stack */
502 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
503 ieee80211_rx_ni(wl->hw, skb);
505 /* Return sent skbs to the network stack */
506 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
507 ieee80211_tx_status_ni(wl->hw, skb);
/*
 * Work item that flushes the deferred queues, looping until the RX
 * queue stays empty (new frames may be deferred while we flush).
 */
510 static void wl1271_netstack_work(struct work_struct *work)
513 container_of(work, struct wl1271, netstack_work);
516 wl1271_flush_deferred_work(wl);
517 } while (skb_queue_len(&wl->deferred_rx_queue));
/* Upper bound on IRQ-servicing iterations per invocation. */
520 #define WL1271_IRQ_MAX_LOOPS 256
/*
 * Main interrupt servicing loop, called with wl->mutex held. Reads the
 * FW status, then dispatches on the interrupt cause bits: watchdog
 * (HW or SW) triggers recovery, DATA drives RX/TX processing, and the
 * EVENT_A/EVENT_B mailboxes feed the event handler.
 */
522 static int wlcore_irq_locked(struct wl1271 *wl)
526 int loopcount = WL1271_IRQ_MAX_LOOPS;
528 unsigned int defer_count;
532 * In case edge triggered interrupt must be used, we cannot iterate
533 * more than once without introducing race conditions with the hardirq.
535 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
538 wl1271_debug(DEBUG_IRQ, "IRQ work");
540 if (unlikely(wl->state != WLCORE_STATE_ON))
543 ret = wl1271_ps_elp_wakeup(wl);
547 while (!done && loopcount--) {
549 * In order to avoid a race with the hardirq, clear the flag
550 * before acknowledging the chip. Since the mutex is held,
551 * wl1271_ps_elp_wakeup cannot be called concurrently.
553 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
554 smp_mb__after_atomic();
556 ret = wlcore_fw_status(wl, wl->fw_status);
560 wlcore_hw_tx_immediate_compl(wl);
562 intr = wl->fw_status->intr;
563 intr &= WLCORE_ALL_INTR_MASK;
569 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
570 wl1271_error("HW watchdog interrupt received! starting recovery.");
571 wl->watchdog_recovery = true;
574 /* restarting the chip. ignore any other interrupt. */
578 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
579 wl1271_error("SW watchdog interrupt received! "
580 "starting recovery.");
581 wl->watchdog_recovery = true;
584 /* restarting the chip. ignore any other interrupt. */
588 if (likely(intr & WL1271_ACX_INTR_DATA)) {
589 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
591 ret = wlcore_rx(wl, wl->fw_status);
595 /* Check if any tx blocks were freed */
596 spin_lock_irqsave(&wl->wl_lock, flags);
597 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
598 wl1271_tx_total_queue_count(wl) > 0) {
599 spin_unlock_irqrestore(&wl->wl_lock, flags);
601 * In order to avoid starvation of the TX path,
602 * call the work function directly.
604 ret = wlcore_tx_work_locked(wl);
608 spin_unlock_irqrestore(&wl->wl_lock, flags);
611 /* check for tx results */
612 ret = wlcore_hw_tx_delayed_compl(wl);
616 /* Make sure the deferred queues don't get too long */
617 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
618 skb_queue_len(&wl->deferred_rx_queue);
619 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
620 wl1271_flush_deferred_work(wl);
623 if (intr & WL1271_ACX_INTR_EVENT_A) {
624 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
625 ret = wl1271_event_handle(wl, 0);
630 if (intr & WL1271_ACX_INTR_EVENT_B) {
631 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
632 ret = wl1271_event_handle(wl, 1);
637 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
638 wl1271_debug(DEBUG_IRQ,
639 "WL1271_ACX_INTR_INIT_COMPLETE");
641 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
642 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
645 wl1271_ps_elp_sleep(wl);
/*
 * Threaded IRQ handler. Completes any pending ELP wakeup, defers the
 * work when the device is suspended (marks it pending and disables the
 * IRQ), otherwise runs the servicing loop under wl->mutex and queues
 * TX work for anything the loop did not handle inline.
 */
651 static irqreturn_t wlcore_irq(int irq, void *cookie)
655 struct wl1271 *wl = cookie;
657 /* complete the ELP completion */
658 spin_lock_irqsave(&wl->wl_lock, flags);
659 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
661 complete(wl->elp_compl);
662 wl->elp_compl = NULL;
665 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
666 /* don't enqueue a work right now. mark it as pending */
667 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
668 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
669 disable_irq_nosync(wl->irq);
670 pm_wakeup_event(wl->dev, 0);
671 spin_unlock_irqrestore(&wl->wl_lock, flags);
674 spin_unlock_irqrestore(&wl->wl_lock, flags);
676 /* TX might be handled here, avoid redundant work */
677 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
678 cancel_work_sync(&wl->tx_work);
680 mutex_lock(&wl->mutex);
682 ret = wlcore_irq_locked(wl);
/* any error in the locked path escalates to full recovery */
684 wl12xx_queue_recovery_work(wl);
686 spin_lock_irqsave(&wl->wl_lock, flags);
687 /* In case TX was not handled here, queue TX work */
688 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
689 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
690 wl1271_tx_total_queue_count(wl) > 0)
691 ieee80211_queue_work(wl->hw, &wl->tx_work);
692 spin_unlock_irqrestore(&wl->wl_lock, flags);
694 mutex_unlock(&wl->mutex);
/*
 * Accumulator for the active-interface iteration below; tracks whether
 * the vif currently being configured is among the running interfaces.
 * (Some members are not visible in this view.)
 */
699 struct vif_counter_data {
702 struct ieee80211_vif *cur_vif;
703 bool cur_vif_running;
/*
 * Per-interface iterator callback: flags when the iterated vif matches
 * the one we are configuring (counting lines are elided in this view).
 */
706 static void wl12xx_vif_count_iter(void *data, u8 *mac,
707 struct ieee80211_vif *vif)
709 struct vif_counter_data *counter = data;
712 if (counter->cur_vif == vif)
713 counter->cur_vif_running = true;
716 /* caller must not hold wl->mutex, as it might deadlock */
/*
 * Count active interfaces by iterating mac80211's interface list,
 * filling @data (zeroed first; @cur_vif recorded for the iterator).
 */
717 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
718 struct ieee80211_vif *cur_vif,
719 struct vif_counter_data *data)
721 memset(data, 0, sizeof(*data));
722 data->cur_vif = cur_vif;
724 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
725 wl12xx_vif_count_iter, data);
/*
 * Select and load the appropriate firmware image: PLT FW for test
 * mode, multi-role FW when more than one vif was last active (and a
 * multi-role image exists), single-role FW otherwise. Skips the load
 * when the wanted type is already resident; the image is copied into
 * vmalloc'ed memory and the kernel firmware handle released.
 */
728 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
730 const struct firmware *fw;
732 enum wl12xx_fw_type fw_type;
736 fw_type = WL12XX_FW_TYPE_PLT;
737 fw_name = wl->plt_fw_name;
740 * we can't call wl12xx_get_vif_count() here because
741 * wl->mutex is taken, so use the cached last_vif_count value
743 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
744 fw_type = WL12XX_FW_TYPE_MULTI;
745 fw_name = wl->mr_fw_name;
747 fw_type = WL12XX_FW_TYPE_NORMAL;
748 fw_name = wl->sr_fw_name;
/* requested type already loaded: nothing to do */
752 if (wl->fw_type == fw_type)
755 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
757 ret = reject_firmware(&fw, fw_name, wl->dev);
760 wl1271_error("could not get firmware %s: %d", fw_name, ret);
765 wl1271_error("firmware size is not multiple of 32 bits: %zu",
/* invalidate the type until the copy below succeeds */
772 wl->fw_type = WL12XX_FW_TYPE_NONE;
773 wl->fw_len = fw->size;
774 wl->fw = vmalloc(wl->fw_len);
777 wl1271_error("could not allocate memory for the firmware");
782 memcpy(wl->fw, fw->data, wl->fw_len);
784 wl->fw_type = fw_type;
786 release_firmware(fw);
/*
 * Kick off FW recovery: transition to RESTARTING (guarding against
 * re-entry while a recovery is already underway), wake the chip,
 * mask further interrupts and queue the recovery work item.
 */
791 void wl12xx_queue_recovery_work(struct wl1271 *wl)
793 /* Avoid a recursive recovery */
794 if (wl->state == WLCORE_STATE_ON) {
795 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
798 wl->state = WLCORE_STATE_RESTARTING;
799 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
800 wl1271_ps_elp_wakeup(wl);
801 wlcore_disable_interrupts_nosync(wl);
802 ieee80211_queue_work(wl->hw, &wl->recovery_work);
/*
 * Append a FW log memory block to the sysfs-exposed fwlog buffer,
 * clamped so the buffer never exceeds PAGE_SIZE. Returns the number
 * of bytes copied (return statement elided in this view).
 */
806 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
810 /* Make sure we have enough room */
811 len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
813 /* Fill the FW log file, consumed by the sysfs fwlog entry */
814 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
815 wl->fwlog_size += len;
/*
 * Dump the FW panic log after a crash: wake the chip, stop the logger
 * (unless the FW is hung or logging via debug pins), then walk the
 * logger's memory-block list until end-of-log is reported.
 */
820 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
824 if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
827 wl1271_info("Reading FW panic log");
830 * Make sure the chip is awake and the logger isn't active.
831 * Do not send a stop fwlog command if the fw is hanged or if
832 * dbgpins are used (due to some fw bug).
834 if (wl1271_ps_elp_wakeup(wl))
836 if (!wl->watchdog_recovery &&
837 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
838 wl12xx_cmd_stop_fwlog(wl);
840 /* Traverse the memory blocks linked list */
842 end_of_log = wlcore_event_fw_logger(wl);
843 if (end_of_log == 0) {
845 end_of_log = wlcore_event_fw_logger(wl);
847 } while (end_of_log != 0);
/*
 * Preserve a station's TX sequence-number base across recovery: copy
 * the link's freed-packet count into the station's private data, and
 * pad it (GEM ciphers use a larger pad) to cover frames transmitted
 * but not yet reflected in the FW status.
 */
850 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
851 u8 hlid, struct ieee80211_sta *sta)
853 struct wl1271_station *wl_sta;
854 u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
856 wl_sta = (void *)sta->drv_priv;
857 wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
860 * increment the initial seq number on recovery to account for
861 * transmitted packets that we haven't yet got in the FW status
863 if (wlvif->encryption_type == KEY_GEM)
864 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
866 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
867 wl_sta->total_freed_pkts += sqn_recovery_padding;
/*
 * Address-based wrapper around wlcore_save_freed_pkts(): look up the
 * station by MAC on this vif and save its freed-packet count. Guards
 * against an invalid link id or an all-zero address.
 */
870 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
871 struct wl12xx_vif *wlvif,
872 u8 hlid, const u8 *addr)
874 struct ieee80211_sta *sta;
875 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
877 if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
878 is_zero_ether_addr(addr)))
882 sta = ieee80211_find_sta(vif, addr);
884 wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
/*
 * Log recovery diagnostics: FW version, the FW program counter and the
 * raw interrupt status. Requires a momentary switch to the BOOT
 * partition to reach the registers; the WORK partition is restored
 * before returning.
 */
888 static void wlcore_print_recovery(struct wl1271 *wl)
894 wl1271_info("Hardware recovery in progress. FW ver: %s",
895 wl->chip.fw_ver_str);
897 /* change partitions momentarily so we can read the FW pc */
898 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
902 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
906 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
910 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
911 pc, hint_sts, ++wl->recovery_count);
913 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
/*
 * FW recovery work item: dump the panic log and diagnostics (for
 * unintended recoveries), optionally BUG() or refuse recovery per the
 * configured policy, then stop TX, save per-station sequence numbers,
 * tear down every vif, stop the core and ask mac80211 to restart the
 * hardware.
 */
917 static void wl1271_recovery_work(struct work_struct *work)
920 container_of(work, struct wl1271, recovery_work);
921 struct wl12xx_vif *wlvif;
922 struct ieee80211_vif *vif;
924 mutex_lock(&wl->mutex);
926 if (wl->state == WLCORE_STATE_OFF || wl->plt)
929 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
930 if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
931 wl12xx_read_fwlog_panic(wl);
932 wlcore_print_recovery(wl);
/* debugging aid: crash the kernel on unintended recovery if asked */
935 BUG_ON(wl->conf.recovery.bug_on_recovery &&
936 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
938 if (wl->conf.recovery.no_recovery) {
939 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
943 /* Prevent spurious TX during FW restart */
944 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
946 /* reboot the chipset */
947 while (!list_empty(&wl->wlvif_list)) {
948 wlvif = list_first_entry(&wl->wlvif_list,
949 struct wl12xx_vif, list);
950 vif = wl12xx_wlvif_to_vif(wlvif);
952 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
953 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
954 wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
955 vif->bss_conf.bssid);
958 __wl1271_op_remove_interface(wl, vif, false);
961 wlcore_op_stop_locked(wl);
963 ieee80211_restart_hw(wl->hw);
966 * Its safe to enable TX now - the queues are stopped after a request
969 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
972 wl->watchdog_recovery = false;
973 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
974 mutex_unlock(&wl->mutex);
/* Wake the chip out of ELP by writing WAKE_UP to the ELP control reg. */
977 static int wlcore_fw_wakeup(struct wl1271 *wl)
979 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
/*
 * Allocate the per-device status and TX-result buffers. On a later
 * allocation failure the earlier buffers are freed (error path shown
 * at the bottom; intervening labels elided in this view).
 */
982 static int wl1271_setup(struct wl1271 *wl)
984 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
985 if (!wl->raw_fw_status)
988 wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
992 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
998 kfree(wl->fw_status);
999 kfree(wl->raw_fw_status);
/*
 * Power-on sequence: settle delay, power on, post-power-on delay, I/O
 * reset, switch to the BOOT partition and wake the FW from ELP.
 * Powers back off on failure (error path at the bottom).
 */
1003 static int wl12xx_set_power_on(struct wl1271 *wl)
1007 msleep(WL1271_PRE_POWER_ON_SLEEP);
1008 ret = wl1271_power_on(wl);
1011 msleep(WL1271_POWER_ON_SLEEP);
1012 wl1271_io_reset(wl);
1015 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1019 /* ELP module wake up */
1020 ret = wlcore_fw_wakeup(wl);
1028 wl1271_power_off(wl);
/*
 * Bring the chip up far enough to boot firmware: power on, negotiate
 * the SDIO block size, allocate status buffers and fetch the FW image.
 * On fetch failure the buffers allocated by wl1271_setup() are freed.
 */
1032 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1036 ret = wl12xx_set_power_on(wl);
1041 * For wl127x based devices we could use the default block
1042 * size (512 bytes), but due to a bug in the sdio driver, we
1043 * need to set it explicitly after the chip is powered on. To
1044 * simplify the code and since the performance impact is
1045 * negligible, we use the same block size for all different
1048 * Check if the bus supports blocksize alignment and, if it
1049 * doesn't, make sure we don't have the quirk.
1051 if (!wl1271_set_block_size(wl))
1052 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1054 /* TODO: make sure the lower driver has set things up correctly */
1056 ret = wl1271_setup(wl);
1060 ret = wl12xx_fetch_firmware(wl, plt);
1062 kfree(wl->fw_status);
1063 kfree(wl->raw_fw_status);
1064 kfree(wl->tx_res_if);
/*
 * Enter PLT (production-line test) mode. Only valid from the OFF
 * state; retries the chip wakeup up to WL1271_BOOT_RETRIES times
 * (retry loop partly elided in this view). PLT_CHIP_AWAKE skips the
 * full plt_init. Publishes hw/fw versions to wiphy on success and
 * resets plt_mode on failure.
 */
1071 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1073 int retries = WL1271_BOOT_RETRIES;
1074 struct wiphy *wiphy = wl->hw->wiphy;
1076 static const char* const PLT_MODE[] = {
1085 mutex_lock(&wl->mutex);
1087 wl1271_notice("power up");
1089 if (wl->state != WLCORE_STATE_OFF) {
1090 wl1271_error("cannot go into PLT state because not "
1091 "in off state: %d", wl->state);
1096 /* Indicate to lower levels that we are now in PLT mode */
1098 wl->plt_mode = plt_mode;
1102 ret = wl12xx_chip_wakeup(wl, true);
1106 if (plt_mode != PLT_CHIP_AWAKE) {
1107 ret = wl->ops->plt_init(wl);
1112 wl->state = WLCORE_STATE_ON;
1113 wl1271_notice("firmware booted in PLT mode %s (%s)",
1115 wl->chip.fw_ver_str);
1117 /* update hw/fw version info in wiphy struct */
1118 wiphy->hw_version = wl->chip.id;
1119 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1120 sizeof(wiphy->fw_version));
1125 wl1271_power_off(wl);
1129 wl->plt_mode = PLT_OFF;
1131 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1132 WL1271_BOOT_RETRIES);
1134 mutex_unlock(&wl->mutex);
1139 int wl1271_plt_stop(struct wl1271 *wl)
1143 wl1271_notice("power down");
1146 * Interrupts must be disabled before setting the state to OFF.
1147 * Otherwise, the interrupt handler might be called and exit without
1148 * reading the interrupt status.
1150 wlcore_disable_interrupts(wl);
1151 mutex_lock(&wl->mutex);
1153 mutex_unlock(&wl->mutex);
1156 * This will not necessarily enable interrupts as interrupts
1157 * may have been disabled when op_stop was called. It will,
1158 * however, balance the above call to disable_interrupts().
1160 wlcore_enable_interrupts(wl);
1162 wl1271_error("cannot power down because not in PLT "
1163 "state: %d", wl->state);
1168 mutex_unlock(&wl->mutex);
1170 wl1271_flush_deferred_work(wl);
1171 cancel_work_sync(&wl->netstack_work);
1172 cancel_work_sync(&wl->recovery_work);
1173 cancel_delayed_work_sync(&wl->elp_work);
1174 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1176 mutex_lock(&wl->mutex);
1177 wl1271_power_off(wl);
1179 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1180 wl->state = WLCORE_STATE_OFF;
1182 wl->plt_mode = PLT_OFF;
1184 mutex_unlock(&wl->mutex);
/*
 * mac80211 .tx callback. Maps the skb to a HW link id (hlid) and AC
 * queue, drops it when the link is invalid or the queue is hard-
 * stopped (watermark stops are "soft" and let frames through),
 * otherwise enqueues it on the per-link queue, applies the high-
 * watermark back-pressure, and schedules TX work unless the FW is
 * busy or TX is already being handled in the IRQ path.
 */
1190 static void wl1271_op_tx(struct ieee80211_hw *hw,
1191 struct ieee80211_tx_control *control,
1192 struct sk_buff *skb)
1194 struct wl1271 *wl = hw->priv;
1195 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1196 struct ieee80211_vif *vif = info->control.vif;
1197 struct wl12xx_vif *wlvif = NULL;
1198 unsigned long flags;
1203 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1204 ieee80211_free_txskb(hw, skb);
1208 wlvif = wl12xx_vif_to_data(vif);
1209 mapping = skb_get_queue_mapping(skb);
1210 q = wl1271_tx_get_queue(mapping);
1212 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1214 spin_lock_irqsave(&wl->wl_lock, flags);
1217 * drop the packet if the link is invalid or the queue is stopped
1218 * for any reason but watermark. Watermark is a "soft"-stop so we
1219 * allow these packets through.
1221 if (hlid == WL12XX_INVALID_LINK_ID ||
1222 (!test_bit(hlid, wlvif->links_map)) ||
1223 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1224 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1225 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1226 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1227 ieee80211_free_txskb(hw, skb);
1231 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1233 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1235 wl->tx_queue_count[q]++;
1236 wlvif->tx_queue_count[q]++;
1239 * The workqueue is slow to process the tx_queue and we need stop
1240 * the queue here, otherwise the queue will get too long.
1242 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1243 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1244 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1245 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1246 wlcore_stop_queue_locked(wl, wlvif, q,
1247 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1251 * The chip specific setup must run before the first TX packet -
1252 * before that, the tx_work will not be initialized!
1255 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1256 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1257 ieee80211_queue_work(wl->hw, &wl->tx_work);
1260 spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
 * Queue the pre-allocated dummy packet the FW requests when it is low
 * on RX memory blocks. Idempotent while one is already pending; sends
 * it immediately via the locked TX path when the FW is not busy,
 * otherwise relies on the threaded IRQ handler to schedule TX work.
 */
1263 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1265 unsigned long flags;
1268 /* no need to queue a new dummy packet if one is already pending */
1269 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1272 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1274 spin_lock_irqsave(&wl->wl_lock, flags);
1275 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1276 wl->tx_queue_count[q]++;
1277 spin_unlock_irqrestore(&wl->wl_lock, flags);
1279 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1280 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1281 return wlcore_tx_work_locked(wl);
1284 * If the FW TX is busy, TX work will be scheduled by the threaded
1285 * interrupt handler function
1291 * The size of the dummy packet should be at least 1400 bytes. However, in
1292 * order to minimize the number of bus transactions, aligning it to 512 bytes
1293 * boundaries could be beneficial, performance wise
1295 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
/*
 * Build the reusable dummy packet: a zeroed QoS-less null-func data
 * frame (ToDS) padded out to TOTAL_TX_DUMMY_PACKET_SIZE, with room
 * reserved up front for the TX HW descriptor. TID is set to
 * management as the FW requires for dummy packets.
 */
1297 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1299 struct sk_buff *skb;
1300 struct ieee80211_hdr_3addr *hdr;
1301 unsigned int dummy_packet_size;
1303 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1304 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1306 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1308 wl1271_warning("Failed to allocate a dummy packet skb");
1312 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1314 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1315 memset(hdr, 0, sizeof(*hdr));
1316 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1317 IEEE80211_STYPE_NULLFUNC |
1318 IEEE80211_FCTL_TODS);
1320 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1322 /* Dummy packets require the TID to be management */
1323 skb->priority = WL1271_TID_MGMT;
1325 /* Initialize all fields that might be used */
1326 skb_set_queue_mapping(skb, 0);
1327 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
/*
 * Validate a cfg80211 WoWLAN packet pattern against FW RX-filter
 * limits: the pattern's masked byte runs become FW "fields", and both
 * the field count and the total encoded size must fit the FW caps.
 * A run crossing the Ethernet/IP header boundary is split in two.
 */
1335 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1337 int num_fields = 0, in_field = 0, fields_size = 0;
1338 int i, pattern_len = 0;
1341 wl1271_warning("No mask in WoWLAN pattern");
1346 * The pattern is broken up into segments of bytes at different offsets
1347 * that need to be checked by the FW filter. Each segment is called
1348 * a field in the FW API. We verify that the total number of fields
1349 * required for this pattern won't exceed FW limits (8)
1350 * as well as the total fields buffer won't exceed the FW limit.
1351 * Note that if there's a pattern which crosses Ethernet/IP header
1352 * boundary a new field is required.
1354 for (i = 0; i < p->pattern_len; i++) {
1355 if (test_bit(i, (unsigned long *)p->mask)) {
1360 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1362 fields_size += pattern_len +
1363 RX_FILTER_FIELD_OVERHEAD;
1371 fields_size += pattern_len +
1372 RX_FILTER_FIELD_OVERHEAD;
/* account for the final in-progress field after the loop */
1379 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1383 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1384 wl1271_warning("RX Filter too complex. Too many segments");
1388 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1389 wl1271_warning("RX filter pattern is too big");
/*
 * Allocate a zero-initialized RX filter.  The caller owns the result and
 * releases it with wl1271_rx_filter_free().  Returns NULL on allocation
 * failure (kzalloc semantics).
 */
1396 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1398 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
/*
 * Release an RX filter: free every field's pattern buffer.  The kfree of
 * the filter struct itself (and any NULL-filter guard) is elided from
 * this listing — presumably it follows the loop; TODO confirm.
 */
1401 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1408 for (i = 0; i < filter->num_fields; i++)
1409 kfree(filter->fields[i].pattern);
/*
 * Append one field (offset/flags/pattern) to an RX filter.  Fails when
 * the filter already holds WL1271_RX_FILTER_MAX_FIELDS fields or when
 * the pattern buffer cannot be allocated; the specific error-return
 * lines are elided from this listing.  The pattern bytes are copied, so
 * the caller keeps ownership of its own buffer.
 */
1414 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1415 u16 offset, u8 flags,
1416 const u8 *pattern, u8 len)
1418 struct wl12xx_rx_filter_field *field;
1420 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1421 wl1271_warning("Max fields per RX filter. can't alloc another");
1425 field = &filter->fields[filter->num_fields];
1427 field->pattern = kzalloc(len, GFP_KERNEL);
1428 if (!field->pattern) {
1429 wl1271_warning("Failed to allocate RX filter pattern");
/* only count the field once its pattern buffer exists */
1433 filter->num_fields++;
/* offset is stored little-endian as expected by the FW interface */
1435 field->offset = cpu_to_le16(offset);
1436 field->flags = flags;
1438 memcpy(field->pattern, pattern, len);
/*
 * Compute the serialized size of all fields in an RX filter, matching the
 * layout produced by wl1271_rx_filter_flatten_fields(): per-field struct
 * size plus the actual pattern length, presumably minus the size of the
 * in-struct pattern pointer (the subtrahend line is elided here — TODO
 * confirm it is sizeof(u8 *)).
 */
1443 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1445 int i, fields_size = 0;
1447 for (i = 0; i < filter->num_fields; i++)
1448 fields_size += filter->fields[i].len +
1449 sizeof(struct wl12xx_rx_filter_field) -
/*
 * Serialize the filter's fields into a contiguous buffer for the FW:
 * each field struct is written with the pattern bytes copied inline in
 * place of the pattern pointer member.  The buffer must be at least
 * wl1271_rx_filter_get_fields_size() bytes.
 */
1455 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1459 struct wl12xx_rx_filter_field *field;
1461 for (i = 0; i < filter->num_fields; i++) {
1462 field = (struct wl12xx_rx_filter_field *)buf;
1464 field->offset = filter->fields[i].offset;
1465 field->flags = filter->fields[i].flags;
1466 field->len = filter->fields[i].len;
/* copy the pattern bytes inline, overwriting the pointer member's slot */
1468 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
/* advance past the struct (minus the pointer) plus the inline pattern */
1469 buf += sizeof(struct wl12xx_rx_filter_field) -
1470 sizeof(u8 *) + field->len;
/*
 * Convert a cfg80211 WoWLAN pattern into a FW RX filter.
 *
 * Allocates an RX filter returned through f
 * which needs to be freed using rx_filter_free().
 *
 * Walks the pattern mask, turning each run of contiguous masked bytes
 * into one filter field.  A run spanning the Ethernet/IP header boundary
 * is split, and offsets of IP-header fields are rebased relative to the
 * end of the Ethernet header.  On any failure the partially built filter
 * is freed before returning (error paths partially elided here).
 */
1479 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1480 struct wl12xx_rx_filter **f)
1483 struct wl12xx_rx_filter *filter;
1487 filter = wl1271_rx_filter_alloc();
1489 wl1271_warning("Failed to alloc rx filter");
1495 while (i < p->pattern_len) {
/* skip unmasked bytes — they do not participate in any field */
1496 if (!test_bit(i, (unsigned long *)p->mask)) {
/* find the end of the current contiguous masked run */
1501 for (j = i; j < p->pattern_len; j++) {
1502 if (!test_bit(j, (unsigned long *)p->mask))
/* force a field break at the Ethernet/IP header boundary */
1505 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1506 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1510 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1512 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
/* IP-header fields use an offset relative to the end of the eth header */
1514 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1515 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1520 ret = wl1271_rx_filter_alloc_field(filter,
1523 &p->pattern[i], len);
/* matching packets should wake the host rather than be dropped */
1530 filter->action = FILTER_SIGNAL;
1536 wl1271_rx_filter_free(filter);
/*
 * Program the FW RX filtering for WoWLAN suspend.  With no patterns (or
 * wow->any) the default filter is set to FILTER_SIGNAL and all per-slot
 * filters are cleared.  Otherwise every pattern is validated first, the
 * current FW filter state is cleared, each pattern is translated into an
 * RX filter and enabled in its slot, and the default action becomes
 * FILTER_DROP so only pattern matches wake the host.  Error-return lines
 * are elided from this listing.
 */
1542 static int wl1271_configure_wowlan(struct wl1271 *wl,
1543 struct cfg80211_wowlan *wow)
1547 if (!wow || wow->any || !wow->n_patterns) {
1548 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1553 ret = wl1271_rx_filter_clear_all(wl);
1560 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1563 /* Validate all incoming patterns before clearing current FW state */
1564 for (i = 0; i < wow->n_patterns; i++) {
1565 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1567 wl1271_warning("Bad wowlan pattern %d", i);
/* reset FW filter state before installing the new pattern set */
1572 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1576 ret = wl1271_rx_filter_clear_all(wl);
1580 /* Translate WoWLAN patterns into filters */
1581 for (i = 0; i < wow->n_patterns; i++) {
1582 struct cfg80211_pkt_pattern *p;
1583 struct wl12xx_rx_filter *filter = NULL;
1585 p = &wow->patterns[i];
1587 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1589 wl1271_warning("Failed to create an RX filter from "
1590 "wowlan pattern %d", i);
/* the FW takes a copy; the local filter is freed regardless of ret */
1594 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1596 wl1271_rx_filter_free(filter);
/* default-drop: only the installed pattern filters signal the host */
1601 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
/*
 * Prepare an associated STA vif for suspend: install the WoWLAN filters
 * and, when the suspend wake-up conditions differ from the runtime ones,
 * switch the FW wake-up event/listen-interval to the suspend values.
 * Unassociated vifs need no preparation (early-return line elided).
 */
1607 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1608 struct wl12xx_vif *wlvif,
1609 struct cfg80211_wowlan *wow)
1613 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1616 ret = wl1271_configure_wowlan(wl, wow);
/* skip the ACX call when suspend settings equal the runtime settings */
1620 if ((wl->conf.conn.suspend_wake_up_event ==
1621 wl->conf.conn.wake_up_event) &&
1622 (wl->conf.conn.suspend_listen_interval ==
1623 wl->conf.conn.listen_interval))
1626 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1627 wl->conf.conn.suspend_wake_up_event,
1628 wl->conf.conn.suspend_listen_interval);
1631 wl1271_error("suspend: set wake up conditions failed: %d", ret);
/*
 * Prepare a started AP vif for suspend: enable beacon filtering and
 * install the WoWLAN filters.  AP vifs that are not started need no
 * preparation (early-return line elided from this listing).
 */
1637 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1638 struct wl12xx_vif *wlvif,
1639 struct cfg80211_wowlan *wow)
1643 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1646 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1650 ret = wl1271_configure_wowlan(wl, wow);
/*
 * Dispatch suspend preparation by bss type: STA and AP vifs get their
 * dedicated handlers; other types presumably need no preparation (the
 * default return line is elided from this listing).
 */
1659 static int wl1271_configure_suspend(struct wl1271 *wl,
1660 struct wl12xx_vif *wlvif,
1661 struct cfg80211_wowlan *wow)
1663 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1664 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1665 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1666 return wl1271_configure_suspend_ap(wl, wlvif, wow);
/*
 * Undo the suspend-time configuration on resume: remove the WoWLAN
 * filters, restore the runtime wake-up conditions for associated STA
 * vifs, and disable beacon filtering for started AP vifs.  Errors are
 * logged but not propagated (void return).
 */
1670 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1673 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1674 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
/* only STA/AP vifs were configured for suspend; others need no resume work */
1676 if ((!is_ap) && (!is_sta))
/* nothing to undo unless the vif was associated/started at suspend time */
1679 if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1680 (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
/* passing NULL clears the WoWLAN filters and restores default-signal */
1683 wl1271_configure_wowlan(wl, NULL);
/* restore wake-up conditions only if suspend values actually differed */
1686 if ((wl->conf.conn.suspend_wake_up_event ==
1687 wl->conf.conn.wake_up_event) &&
1688 (wl->conf.conn.suspend_listen_interval ==
1689 wl->conf.conn.listen_interval))
1692 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1693 wl->conf.conn.wake_up_event,
1694 wl->conf.conn.listen_interval);
1697 wl1271_error("resume: wake up conditions failed: %d",
1701 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
/*
 * mac80211 suspend callback.  Postpones suspend while a recovery is in
 * progress, flushes TX, wakes the chip, configures per-vif WoWLAN and
 * wake-up settings, disables fast-link notifications, optionally arms
 * the RX BA filter, then quiesces driver work: interrupts are bounced
 * with WL1271_FLAG_SUSPENDED set so no new threaded-irq work is queued,
 * and pending tx/elp work is flushed.
 */
1705 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1706 struct cfg80211_wowlan *wow)
1708 struct wl1271 *wl = hw->priv;
1709 struct wl12xx_vif *wlvif;
1712 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1715 /* we want to perform the recovery before suspending */
1716 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1717 wl1271_warning("postponing suspend to perform recovery");
1721 wl1271_tx_flush(wl);
1723 mutex_lock(&wl->mutex);
1725 ret = wl1271_ps_elp_wakeup(wl);
1727 mutex_unlock(&wl->mutex);
1731 wl->wow_enabled = true;
1732 wl12xx_for_each_wlvif(wl, wlvif) {
/* p2p-mgmt vifs carry no connection state to prepare for suspend */
1733 if (wlcore_is_p2p_mgmt(wlvif))
1736 ret = wl1271_configure_suspend(wl, wlvif, wow);
1738 mutex_unlock(&wl->mutex);
1739 wl1271_warning("couldn't prepare device to suspend");
1744 /* disable fast link flow control notifications from FW */
1745 ret = wlcore_hw_interrupt_notify(wl, false);
1749 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1750 ret = wlcore_hw_rx_ba_filter(wl,
1751 !!wl->conf.conn.suspend_rx_ba_activity);
1756 wl1271_ps_elp_sleep(wl);
1757 mutex_unlock(&wl->mutex);
1760 wl1271_warning("couldn't prepare device to suspend");
1764 /* flush any remaining work */
1765 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1768 * disable and re-enable interrupts in order to flush
1771 wlcore_disable_interrupts(wl);
1774 * set suspended flag to avoid triggering a new threaded_irq
1775 * work. no need for spinlock as interrupts are disabled.
1777 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1779 wlcore_enable_interrupts(wl);
1780 flush_work(&wl->tx_work);
1781 flush_delayed_work(&wl->elp_work);
1784 * Cancel the watchdog even if above tx_flush failed. We will detect
1785 * it on resume anyway.
1787 cancel_delayed_work(&wl->tx_watchdog_work);
/*
 * mac80211 resume callback.  Clears the SUSPENDED flag (under the
 * spinlock) and runs any irq work that was postponed during suspend —
 * unless a recovery is pending, in which case the recovery work is
 * queued instead of touching the HW.  Then restores per-vif runtime
 * configuration, re-enables fast-link notifications, disarms the RX BA
 * filter, and flags the TX watchdog for re-init on the first post-resume
 * TX to avoid spurious recoveries.
 */
1792 static int wl1271_op_resume(struct ieee80211_hw *hw)
1794 struct wl1271 *wl = hw->priv;
1795 struct wl12xx_vif *wlvif;
1796 unsigned long flags;
1797 bool run_irq_work = false, pending_recovery;
1800 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1802 WARN_ON(!wl->wow_enabled);
1805 * re-enable irq_work enqueuing, and call irq_work directly if
1806 * there is a pending work.
1808 spin_lock_irqsave(&wl->wl_lock, flags);
1809 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1810 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1811 run_irq_work = true;
1812 spin_unlock_irqrestore(&wl->wl_lock, flags);
1814 mutex_lock(&wl->mutex);
1816 /* test the recovery flag before calling any SDIO functions */
1817 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1821 wl1271_debug(DEBUG_MAC80211,
1822 "run postponed irq_work directly");
1824 /* don't talk to the HW if recovery is pending */
1825 if (!pending_recovery) {
1826 ret = wlcore_irq_locked(wl);
1828 wl12xx_queue_recovery_work(wl);
/* rebalance the disable done in suspend's interrupt flush */
1831 wlcore_enable_interrupts(wl);
1834 if (pending_recovery) {
1835 wl1271_warning("queuing forgotten recovery on resume");
1836 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1840 ret = wl1271_ps_elp_wakeup(wl);
1844 wl12xx_for_each_wlvif(wl, wlvif) {
/* p2p-mgmt vifs were not configured for suspend; nothing to restore */
1845 if (wlcore_is_p2p_mgmt(wlvif))
1848 wl1271_configure_resume(wl, wlvif);
1851 ret = wlcore_hw_interrupt_notify(wl, true);
1855 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1856 ret = wlcore_hw_rx_ba_filter(wl, false);
1861 wl1271_ps_elp_sleep(wl);
1864 wl->wow_enabled = false;
1867 * Set a flag to re-init the watchdog on the first Tx after resume.
1868 * That way we avoid possible conditions where Tx-complete interrupts
1869 * fail to arrive and we perform a spurious recovery.
1871 set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1872 mutex_unlock(&wl->mutex);
/*
 * mac80211 start callback.  Intentionally does no hardware work: boot is
 * deferred to add_interface because the FW needs the MAC address (see
 * the comment below).
 */
1878 static int wl1271_op_start(struct ieee80211_hw *hw)
1880 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1883 * We have to delay the booting of the hardware because
1884 * we need to know the local MAC address before downloading and
1885 * initializing the firmware. The MAC address cannot be changed
1886 * after boot, and without the proper MAC address, the firmware
1887 * will not function properly.
1889 * The MAC address is first known when the corresponding interface
1890 * is added. That is where we will initialize the hardware.
/*
 * Stop the device with wl->mutex held.  Marks the state OFF first so the
 * work functions bail out, disables/synchronizes interrupts (dropping
 * the mutex around the synchronous parts to avoid deadlock), cancels all
 * pending work, resets TX, powers the chip off, and re-initializes all
 * driver bookkeeping (maps, counters, fw status buffers, reg-domain
 * channel state) back to a pristine pre-boot state.
 */
1896 static void wlcore_op_stop_locked(struct wl1271 *wl)
/* already off: just rebalance the interrupt disable a recovery left behind */
1900 if (wl->state == WLCORE_STATE_OFF) {
1901 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1903 wlcore_enable_interrupts(wl);
1909 * this must be before the cancel_work calls below, so that the work
1910 * functions don't perform further work.
1912 wl->state = WLCORE_STATE_OFF;
1915 * Use the nosync variant to disable interrupts, so the mutex could be
1916 * held while doing so without deadlocking.
1918 wlcore_disable_interrupts_nosync(wl);
/* drop the mutex: the sync/cancel calls below may wait on work that locks it */
1920 mutex_unlock(&wl->mutex);
1922 wlcore_synchronize_interrupts(wl);
1923 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1924 cancel_work_sync(&wl->recovery_work);
1925 wl1271_flush_deferred_work(wl);
1926 cancel_delayed_work_sync(&wl->scan_complete_work);
1927 cancel_work_sync(&wl->netstack_work);
1928 cancel_work_sync(&wl->tx_work);
1929 cancel_delayed_work_sync(&wl->elp_work);
1930 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1932 /* let's notify MAC80211 about the remaining pending TX frames */
1933 mutex_lock(&wl->mutex);
1934 wl12xx_tx_reset(wl);
1936 wl1271_power_off(wl);
1938 * In case a recovery was scheduled, interrupts were disabled to avoid
1939 * an interrupt storm. Now that the power is down, it is safe to
1940 * re-enable interrupts to balance the disable depth
1942 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1943 wlcore_enable_interrupts(wl);
/* reset driver state back to defaults for the next boot */
1945 wl->band = NL80211_BAND_2GHZ;
1948 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1949 wl->channel_type = NL80211_CHAN_NO_HT;
1950 wl->tx_blocks_available = 0;
1951 wl->tx_allocated_blocks = 0;
1952 wl->tx_results_count = 0;
1953 wl->tx_packets_count = 0;
1954 wl->time_offset = 0;
1955 wl->ap_fw_ps_map = 0;
1957 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1958 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1959 memset(wl->links_map, 0, sizeof(wl->links_map));
1960 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1961 memset(wl->session_ids, 0, sizeof(wl->session_ids));
1962 memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1963 wl->active_sta_count = 0;
1964 wl->active_link_count = 0;
1966 /* The system link is always allocated */
1967 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1968 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1969 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1972 * this is performed after the cancel_work calls and the associated
1973 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1974 * get executed before all these vars have been reset.
1978 wl->tx_blocks_freed = 0;
1980 for (i = 0; i < NUM_TX_QUEUES; i++) {
1981 wl->tx_pkts_freed[i] = 0;
1982 wl->tx_allocated_pkts[i] = 0;
1985 wl1271_debugfs_reset(wl);
/* free FW status/result buffers; re-allocated on next boot */
1987 kfree(wl->raw_fw_status);
1988 wl->raw_fw_status = NULL;
1989 kfree(wl->fw_status);
1990 wl->fw_status = NULL;
1991 kfree(wl->tx_res_if);
1992 wl->tx_res_if = NULL;
1993 kfree(wl->target_mem_map);
1994 wl->target_mem_map = NULL;
1997 * FW channels must be re-calibrated after recovery,
1998 * save current Reg-Domain channel configuration and clear it.
2000 memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2001 sizeof(wl->reg_ch_conf_pending));
2002 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
/*
 * mac80211 stop callback: thin wrapper that takes wl->mutex and defers
 * to wlcore_op_stop_locked().
 */
2005 static void wlcore_op_stop(struct ieee80211_hw *hw)
2007 struct wl1271 *wl = hw->priv;
2009 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2011 mutex_lock(&wl->mutex);
2013 wlcore_op_stop_locked(wl);
2015 mutex_unlock(&wl->mutex);
/*
 * Delayed work fired when a channel switch did not complete in time:
 * reports the failed switch to mac80211 and tells the FW to stop the
 * channel-switch operation.  Bails out if the device is not ON or the
 * switch already finished.
 */
2018 static void wlcore_channel_switch_work(struct work_struct *work)
2020 struct delayed_work *dwork;
2022 struct ieee80211_vif *vif;
2023 struct wl12xx_vif *wlvif;
2026 dwork = to_delayed_work(work);
2027 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2030 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2032 mutex_lock(&wl->mutex);
2034 if (unlikely(wl->state != WLCORE_STATE_ON))
2037 /* check the channel switch is still ongoing */
2038 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2041 vif = wl12xx_wlvif_to_vif(wlvif);
/* false = the switch failed; mac80211 will handle the fallout */
2042 ieee80211_chswitch_done(vif, false);
2044 ret = wl1271_ps_elp_wakeup(wl);
2048 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2050 wl1271_ps_elp_sleep(wl);
2052 mutex_unlock(&wl->mutex);
/*
 * Delayed work that reports connection loss to mac80211 for a STA vif.
 * Skips the report if the device is not ON or the vif is no longer
 * associated.
 */
2055 static void wlcore_connection_loss_work(struct work_struct *work)
2057 struct delayed_work *dwork;
2059 struct ieee80211_vif *vif;
2060 struct wl12xx_vif *wlvif;
2062 dwork = to_delayed_work(work);
2063 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2066 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2068 mutex_lock(&wl->mutex);
2070 if (unlikely(wl->state != WLCORE_STATE_ON))
2073 /* Call mac80211 connection loss */
2074 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2077 vif = wl12xx_wlvif_to_vif(wlvif);
2078 ieee80211_connection_loss(vif);
2080 mutex_unlock(&wl->mutex);
/*
 * Delayed work that cancels the pending-auth ROC if no auth reply
 * arrived within the ROC timeout.  Re-checks the timestamp under the
 * mutex in case a fresh auth reply raced with the work.
 */
2083 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2085 struct delayed_work *dwork;
2087 struct wl12xx_vif *wlvif;
2088 unsigned long time_spare;
2091 dwork = to_delayed_work(work);
2092 wlvif = container_of(dwork, struct wl12xx_vif,
2093 pending_auth_complete_work);
2096 mutex_lock(&wl->mutex);
2098 if (unlikely(wl->state != WLCORE_STATE_ON))
2102 * Make sure a second really passed since the last auth reply. Maybe
2103 * a second auth reply arrived while we were stuck on the mutex.
2104 * Check for a little less than the timeout to protect from scheduler
2107 time_spare = jiffies +
2108 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2109 if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2112 ret = wl1271_ps_elp_wakeup(wl);
2116 /* cancel the ROC if active */
2117 wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2119 wl1271_ps_elp_sleep(wl);
2121 mutex_unlock(&wl->mutex);
/*
 * Reserve the first free slot in the rate-policies bitmap and store its
 * index through *idx.  The error return for a full map and the success
 * return are elided from this listing.
 */
2124 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2126 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2127 WL12XX_MAX_RATE_POLICIES);
2128 if (policy >= WL12XX_MAX_RATE_POLICIES)
2131 __set_bit(policy, wl->rate_policies_map);
/*
 * Release a rate-policy slot and mark *idx invalid (set to the
 * out-of-range sentinel WL12XX_MAX_RATE_POLICIES).
 */
2136 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2138 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2141 __clear_bit(*idx, wl->rate_policies_map);
2142 *idx = WL12XX_MAX_RATE_POLICIES;
/*
 * Reserve the first free slot in the keep-alive (KLV) templates bitmap
 * and store its index through *idx.  Mirrors
 * wl12xx_allocate_rate_policy(); return lines are elided here.
 */
2145 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2147 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2148 WLCORE_MAX_KLV_TEMPLATES);
2149 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2152 __set_bit(policy, wl->klv_templates_map);
/*
 * Release a keep-alive template slot and mark *idx invalid (set to the
 * out-of-range sentinel WLCORE_MAX_KLV_TEMPLATES).
 */
2157 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2159 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2162 __clear_bit(*idx, wl->klv_templates_map);
2163 *idx = WLCORE_MAX_KLV_TEMPLATES;
/*
 * Map a vif's bss_type (refined by p2p/mesh attributes, whose condition
 * lines are partially elided here) to the FW role type, or
 * WL12XX_INVALID_ROLE_TYPE for an unknown bss_type.
 */
2166 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2168 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2170 switch (wlvif->bss_type) {
2171 case BSS_TYPE_AP_BSS:
2173 return WL1271_ROLE_P2P_GO;
2174 else if (ieee80211_vif_is_mesh(vif))
2175 return WL1271_ROLE_MESH_POINT;
2177 return WL1271_ROLE_AP;
2179 case BSS_TYPE_STA_BSS:
2181 return WL1271_ROLE_P2P_CL;
2183 return WL1271_ROLE_STA;
2186 return WL1271_ROLE_IBSS;
2189 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2191 return WL12XX_INVALID_ROLE_TYPE;
/*
 * Initialize the per-vif driver state for a newly added interface:
 * derive bss_type from the mac80211 interface type, invalidate role/link
 * ids, allocate rate policies (and the STA keep-alive template), seed
 * rate sets and bitrate masks, copy the global band/channel/power state
 * into the vif, and set up the vif's work items and rx-streaming timer.
 */
2194 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2196 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2199 /* clear everything but the persistent data */
2200 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2202 switch (ieee80211_vif_type_p2p(vif)) {
2203 case NL80211_IFTYPE_P2P_CLIENT:
2206 case NL80211_IFTYPE_STATION:
2207 case NL80211_IFTYPE_P2P_DEVICE:
2208 wlvif->bss_type = BSS_TYPE_STA_BSS;
2210 case NL80211_IFTYPE_ADHOC:
2211 wlvif->bss_type = BSS_TYPE_IBSS;
2213 case NL80211_IFTYPE_P2P_GO:
2216 case NL80211_IFTYPE_AP:
2217 case NL80211_IFTYPE_MESH_POINT:
2218 wlvif->bss_type = BSS_TYPE_AP_BSS;
/* unknown interface type: leave bss_type invalid */
2221 wlvif->bss_type = MAX_BSS_TYPE;
2225 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2226 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2227 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2229 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2230 wlvif->bss_type == BSS_TYPE_IBSS) {
2231 /* init sta/ibss data */
2232 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2233 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2234 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2235 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2236 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2237 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2238 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2239 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
/* AP path: broadcast/global links plus per-AC unicast rate policies */
2242 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2243 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2244 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2245 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2246 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2247 wl12xx_allocate_rate_policy(wl,
2248 &wlvif->ap.ucast_rate_idx[i]);
2249 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2251 * TODO: check if basic_rate shouldn't be
2252 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2253 * instead (the same thing for STA above).
2255 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2256 /* TODO: this seems to be used only for STA, check it */
2257 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2260 wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2261 wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2262 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2265 * mac80211 configures some values globally, while we treat them
2266 * per-interface. thus, on init, we have to copy them from wl
2268 wlvif->band = wl->band;
2269 wlvif->channel = wl->channel;
2270 wlvif->power_level = wl->power_level;
2271 wlvif->channel_type = wl->channel_type;
2273 INIT_WORK(&wlvif->rx_streaming_enable_work,
2274 wl1271_rx_streaming_enable_work);
2275 INIT_WORK(&wlvif->rx_streaming_disable_work,
2276 wl1271_rx_streaming_disable_work);
2277 INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2278 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2279 wlcore_channel_switch_work);
2280 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2281 wlcore_connection_loss_work);
2282 INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2283 wlcore_pending_auth_complete_work);
2284 INIT_LIST_HEAD(&wlvif->list);
2286 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2287 (unsigned long) wlvif);
/*
 * Boot the chip and firmware, retrying up to WL1271_BOOT_RETRIES times.
 * On a failed attempt the mutex is briefly dropped (see inline comment)
 * to drain pending IRQ work before powering off and retrying.  On
 * success, publishes hw/fw version info to wiphy, disables 5 GHz
 * channels when 11a is unsupported, and moves the state to ON.
 */
2291 static int wl12xx_init_fw(struct wl1271 *wl)
2293 int retries = WL1271_BOOT_RETRIES;
2294 bool booted = false;
2295 struct wiphy *wiphy = wl->hw->wiphy;
2300 ret = wl12xx_chip_wakeup(wl, false);
2304 ret = wl->ops->boot(wl);
2308 ret = wl1271_hw_init(wl);
2316 mutex_unlock(&wl->mutex);
2317 /* Unlocking the mutex in the middle of handling is
2318 inherently unsafe. In this case we deem it safe to do,
2319 because we need to let any possibly pending IRQ out of
2320 the system (and while we are WLCORE_STATE_OFF the IRQ
2321 work function will not do anything.) Also, any other
2322 possible concurrent operations will fail due to the
2323 current state, hence the wl1271 struct should be safe. */
2324 wlcore_disable_interrupts(wl);
2325 wl1271_flush_deferred_work(wl);
2326 cancel_work_sync(&wl->netstack_work);
2327 mutex_lock(&wl->mutex);
2329 wl1271_power_off(wl);
2333 wl1271_error("firmware boot failed despite %d retries",
2334 WL1271_BOOT_RETRIES);
2338 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2340 /* update hw/fw version info in wiphy struct */
2341 wiphy->hw_version = wl->chip.id;
/*
 * NOTE(review): strncpy() does not guarantee NUL-termination when
 * fw_ver_str fills the destination — consider strlcpy()/strscpy().
 */
2342 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2343 sizeof(wiphy->fw_version));
2346 * Now we know if 11a is supported (info from the NVS), so disable
2347 * 11a channels if not supported
2349 if (!wl->enable_11a)
2350 wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2352 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2353 wl->enable_11a ? "" : "not ");
2355 wl->state = WLCORE_STATE_ON;
/* True when the vif's device role has a valid (started) device link id. */
2360 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2362 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2366 * Check whether a fw switch (i.e. moving from one loaded
2367 * fw to another) is needed. This function is also responsible
2368 * for updating wl->last_vif_count, so it must be called before
2369 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2372 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2373 struct vif_counter_data vif_counter_data,
2376 enum wl12xx_fw_type current_fw = wl->fw_type;
2377 u8 vif_count = vif_counter_data.counter;
/* a fw change is already mid-flight; don't trigger another */
2379 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2382 /* increase the vif count if this is a new vif */
2383 if (add && !vif_counter_data.cur_vif_running)
2386 wl->last_vif_count = vif_count;
2388 /* no need for fw change if the device is OFF */
2389 if (wl->state == WLCORE_STATE_OFF)
2392 /* no need for fw change if a single fw is used */
2393 if (!wl->mr_fw_name)
/* switch to multi-role fw with >1 vifs, back to single-role with <=1 */
2396 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2398 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2405 * Enter "forced psm". Make sure the sta is in psm against the ap,
2406 * to make the fw switch a bit more disconnection-persistent.
2408 static void wl12xx_force_active_psm(struct wl1271 *wl)
2410 struct wl12xx_vif *wlvif;
/* applies to every STA vif; return values are intentionally ignored */
2412 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2413 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
/*
 * Accumulator for wlcore_hw_queue_iter(): bitmap of hw-queue bases taken
 * by active interfaces, the vif being (re)added, and whether that vif is
 * already running in mac80211.
 */
2417 struct wlcore_hw_queue_iter_data {
2418 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2420 struct ieee80211_vif *vif;
2421 /* is the current vif among those iterated */
/*
 * Active-interface iterator callback: records each vif's hw-queue base
 * in the bitmap, skipping P2P_DEVICE vifs (they have no hw queues) and
 * flagging when the vif being added is itself already active.
 */
2425 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2426 struct ieee80211_vif *vif)
2428 struct wlcore_hw_queue_iter_data *iter_data = data;
2430 if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2431 WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
/* the vif being added keeps its existing base; don't mark it taken */
2434 if (iter_data->cur_running || vif == iter_data->vif) {
2435 iter_data->cur_running = true;
2439 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
/*
 * Assign a block of NUM_TX_QUEUES mac80211 hw queues to a vif.  A vif
 * already running (resume/recovery) keeps its previous base; otherwise
 * the first free base is claimed from the bitmap of active interfaces.
 * AP vifs also get a cab (content-after-beacon) queue from the reserved
 * tail range; P2P_DEVICE vifs get no queues at all.
 */
2442 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2443 struct wl12xx_vif *wlvif)
2445 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2446 struct wlcore_hw_queue_iter_data iter_data = {};
2449 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2450 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2454 iter_data.vif = vif;
2456 /* mark all bits taken by active interfaces */
2457 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2458 IEEE80211_IFACE_ITER_RESUME_ALL,
2459 wlcore_hw_queue_iter, &iter_data);
2461 /* the current vif is already running in mac80211 (resume/recovery) */
2462 if (iter_data.cur_running) {
2463 wlvif->hw_queue_base = vif->hw_queue[0];
2464 wl1271_debug(DEBUG_MAC80211,
2465 "using pre-allocated hw queue base %d",
2466 wlvif->hw_queue_base);
2468 /* interface type might have changed type */
2469 goto adjust_cab_queue;
2472 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2473 WLCORE_NUM_MAC_ADDRESSES);
2474 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2477 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2478 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2479 wlvif->hw_queue_base);
2481 for (i = 0; i < NUM_TX_QUEUES; i++) {
2482 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2483 /* register hw queues in mac80211 */
2484 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2488 /* the last places are reserved for cab queues per interface */
2489 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2490 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2491 wlvif->hw_queue_base / NUM_TX_QUEUES;
2493 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
/*
 * mac80211 add_interface callback.  Rejects interfaces in PLT mode,
 * advertises per-vif capabilities, initializes the vif's driver state,
 * allocates hw queues, triggers a fw switch (via an intended recovery)
 * when the single/multi-role fw must change, boots the fw on the first
 * interface (the MAC address is only known here — see op_start), enables
 * the FW role(s), and finally links the vif into wl->wlvif_list.
 */
2498 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2499 struct ieee80211_vif *vif)
2501 struct wl1271 *wl = hw->priv;
2502 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2503 struct vif_counter_data vif_count;
2508 wl1271_error("Adding Interface not allowed while in PLT mode");
2512 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2513 IEEE80211_VIF_SUPPORTS_UAPSD |
2514 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2516 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2517 ieee80211_vif_type_p2p(vif), vif->addr);
2519 wl12xx_get_vif_count(hw, vif, &vif_count);
2521 mutex_lock(&wl->mutex);
2522 ret = wl1271_ps_elp_wakeup(wl);
2527 * in some very corner case HW recovery scenarios its possible to
2528 * get here before __wl1271_op_remove_interface is complete, so
2529 * opt out if that is the case.
2531 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2532 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2538 ret = wl12xx_init_vif_data(wl, vif);
2543 role_type = wl12xx_get_role_type(wl, wlvif);
2544 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2549 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
/* single-role <-> multi-role fw switch via an intended recovery */
2553 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2554 wl12xx_force_active_psm(wl);
2555 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2556 mutex_unlock(&wl->mutex);
2557 wl1271_recovery_work(&wl->recovery_work);
2562 * TODO: after the nvs issue will be solved, move this block
2563 * to start(), and make sure here the driver is ON.
2565 if (wl->state == WLCORE_STATE_OFF) {
2567 * we still need this in order to configure the fw
2568 * while uploading the nvs
2570 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2572 ret = wl12xx_init_fw(wl);
2577 if (!wlcore_is_p2p_mgmt(wlvif)) {
2578 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2579 role_type, &wlvif->role_id);
2583 ret = wl1271_init_vif_specific(wl, vif);
/* p2p-mgmt vifs use a device role instead of a full role */
2588 ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2589 &wlvif->dev_role_id);
2593 /* needed mainly for configuring rate policies */
2594 ret = wl1271_sta_hw_init(wl, wlvif);
2599 list_add(&wlvif->list, &wl->wlvif_list);
2600 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2602 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2607 wl1271_ps_elp_sleep(wl);
2609 mutex_unlock(&wl->mutex);
/*
 * Tear down a vif with wl->mutex held: abort any scan it owns, detach it
 * from scheduler/roc bookkeeping, disable its FW roles (unless a
 * recovery is in progress), reset its TX state, free its rate policies
 * and link ids, unlink it from wl->wlvif_list, adjust sleep-auth when
 * the last AP goes away, and finally cancel its timers/work items with
 * the mutex temporarily dropped.
 */
2614 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2615 struct ieee80211_vif *vif,
2616 bool reset_tx_queues)
2618 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2620 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2622 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2624 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2627 /* because of hardware recovery, we may get here twice */
2628 if (wl->state == WLCORE_STATE_OFF)
2631 wl1271_info("down");
2633 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2634 wl->scan_wlvif == wlvif) {
2635 struct cfg80211_scan_info info = {
2640 * Rearm the tx watchdog just before idling scan. This
2641 * prevents just-finished scans from triggering the watchdog
2643 wl12xx_rearm_tx_watchdog_locked(wl);
2645 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2646 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2647 wl->scan_wlvif = NULL;
2648 wl->scan.req = NULL;
2649 ieee80211_scan_completed(wl->hw, &info);
2652 if (wl->sched_vif == wlvif)
2653 wl->sched_vif = NULL;
2655 if (wl->roc_vif == vif) {
2657 ieee80211_remain_on_channel_expired(wl->hw);
/* on recovery the FW is gone; skip talking to it */
2660 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2661 /* disable active roles */
2662 ret = wl1271_ps_elp_wakeup(wl);
2666 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2667 wlvif->bss_type == BSS_TYPE_IBSS) {
2668 if (wl12xx_dev_role_started(wlvif))
2669 wl12xx_stop_dev(wl, wlvif);
2672 if (!wlcore_is_p2p_mgmt(wlvif)) {
2673 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2677 ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2682 wl1271_ps_elp_sleep(wl);
2685 wl12xx_tx_reset_wlvif(wl, wlvif);
2687 /* clear all hlids (except system_hlid) */
2688 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2690 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2691 wlvif->bss_type == BSS_TYPE_IBSS) {
2692 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2693 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2694 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2695 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2696 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2698 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2699 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2700 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2701 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2702 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2703 wl12xx_free_rate_policy(wl,
2704 &wlvif->ap.ucast_rate_idx[i]);
2705 wl1271_free_ap_keys(wl, wlvif);
2708 dev_kfree_skb(wlvif->probereq);
2709 wlvif->probereq = NULL;
2710 if (wl->last_wlvif == wlvif)
2711 wl->last_wlvif = NULL;
2712 list_del(&wlvif->list);
2713 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2714 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2715 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2723 * Last AP, have more stations. Configure sleep auth according to STA.
2724 * Don't do this on unintended recovery.
2726 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2727 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2730 if (wl->ap_count == 0 && is_ap) {
2731 /* mask ap events */
2732 wl->event_mask &= ~wl->ap_event_mask;
2733 wl1271_event_unmask(wl);
2736 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2737 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2738 /* Configure for power according to debugfs */
2739 if (sta_auth != WL1271_PSM_ILLEGAL)
2740 wl1271_acx_sleep_auth(wl, sta_auth);
2741 /* Configure for ELP power saving */
2743 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
/* drop the mutex: the vif's work items may themselves take it */
2747 mutex_unlock(&wl->mutex);
2749 del_timer_sync(&wlvif->rx_streaming_timer);
2750 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2751 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2752 cancel_work_sync(&wlvif->rc_update_work);
2753 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2754 cancel_delayed_work_sync(&wlvif->channel_switch_work);
2755 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2757 mutex_lock(&wl->mutex);
/*
 * mac80211 .remove_interface callback: tear down @vif under wl->mutex.
 * If removing this vif means a different firmware flavour is needed
 * (wl12xx_need_fw_change), trigger an intended recovery so the right
 * firmware gets reloaded.
 */
2760 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2761 struct ieee80211_vif *vif)
2763 struct wl1271 *wl = hw->priv;
2764 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2765 struct wl12xx_vif *iter;
2766 struct vif_counter_data vif_count;
2768 wl12xx_get_vif_count(hw, vif, &vif_count);
2769 mutex_lock(&wl->mutex);
/* nothing to do if the hw is already off or the vif never came up */
2771 if (wl->state == WLCORE_STATE_OFF ||
2772 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2776 * wl->vif can be null here if someone shuts down the interface
2777 * just when hardware recovery has been started.
2779 wl12xx_for_each_wlvif(wl, iter) {
2783 __wl1271_op_remove_interface(wl, vif, true);
2786 WARN_ON(iter != wlvif);
/* firmware type change required: force PSM and queue a recovery */
2787 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2788 wl12xx_force_active_psm(wl);
2789 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2790 wl12xx_queue_recovery_work(wl);
2793 mutex_unlock(&wl->mutex);
/*
 * mac80211 .change_interface callback, implemented as remove + re-add of
 * the vif with the new nl80211 type.  The VIF_CHANGE_IN_PROGRESS flag
 * lets the rest of the driver distinguish this from a real teardown.
 */
2796 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2797 struct ieee80211_vif *vif,
2798 enum nl80211_iftype new_type, bool p2p)
2800 struct wl1271 *wl = hw->priv;
2803 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2804 wl1271_op_remove_interface(hw, vif);
2806 vif->type = new_type;
2808 ret = wl1271_op_add_interface(hw, vif);
2810 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
/*
 * Start the firmware role for a station or IBSS vif (the "JOIN").
 * Resets wlvif->encryption_type, since JOIN wipes the chip's keys.
 */
2814 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2817 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2820 * One of the side effects of the JOIN command is that it clears
2821 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2822 * to a WPA/WPA2 access point will therefore kill the data-path.
2823 * Currently the only valid scenario for JOIN during association
2824 * is on roaming, in which case we will also be given new keys.
2825 * Keep the below message for now, unless it starts bothering
2826 * users who really like to roam a lot :)
2828 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2829 wl1271_info("JOIN while associated.");
2831 /* clear encryption type */
2832 wlvif->encryption_type = KEY_NONE;
2835 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2837 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
/*
 * Extract the SSID IE from @skb (IEs start at @offset) and copy it into
 * wlvif->ssid / wlvif->ssid_len.  Errors out when the IE is missing or
 * longer than IEEE80211_MAX_SSID_LEN.
 */
2842 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2846 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2850 wl1271_error("No SSID in IEs!");
2855 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2856 wl1271_error("SSID is too long!");
2860 wlvif->ssid_len = ssid_len;
/* ptr + 2 skips the two-byte IE header (id, length) */
2861 memcpy(wlvif->ssid, ptr+2, ssid_len);
/*
 * Refresh wlvif->ssid for a station vif from the IEs of the AP probe
 * request template that mac80211 built for us.
 */
2865 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2867 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2868 struct sk_buff *skb;
2871 /* we currently only support setting the ssid from the ap probe req */
2872 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2875 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2879 ieoffset = offsetof(struct ieee80211_mgmt,
2880 u.probe_req.variable);
2881 wl1271_ssid_set(wlvif, skb, ieoffset);
/*
 * Apply post-association state for a station vif: cache the association
 * parameters from @bss_conf, build the ps-poll / probe-req templates the
 * firmware uses for connection maintenance, enable connection monitoring
 * and keep-alive, and sync rate policies and the PSM mode with mac80211.
 */
2887 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2888 struct ieee80211_bss_conf *bss_conf,
2894 wlvif->aid = bss_conf->aid;
2895 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2896 wlvif->beacon_int = bss_conf->beacon_int;
2897 wlvif->wmm_enabled = bss_conf->qos;
2899 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2902 * with wl1271, we don't need to update the
2903 * beacon_int and dtim_period, because the firmware
2904 * updates it by itself when the first beacon is
2905 * received after a join.
2907 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2912 * Get a template for hardware connection maintenance
2914 dev_kfree_skb(wlvif->probereq);
2915 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2918 ieoffset = offsetof(struct ieee80211_mgmt,
2919 u.probe_req.variable);
/* cache the SSID out of the freshly built probe request */
2920 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2922 /* enable the connection monitoring feature */
2923 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2928 * The join command disable the keep-alive mode, shut down its process,
2929 * and also clear the template config, so we need to reset it all after
2930 * the join. The acx_aid starts the keep-alive process, and the order
2931 * of the commands below is relevant.
2933 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2937 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2941 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2945 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2946 wlvif->sta.klv_template_id,
2947 ACX_KEEP_ALIVE_TPL_VALID);
2952 * The default fw psm configuration is AUTO, while mac80211 default
2953 * setting is off (ACTIVE), so sync the fw with the correct value.
2955 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2961 wl1271_tx_enabled_rates_get(wl,
2964 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
/*
 * Revert association state for a station (or joined IBSS) vif: free the
 * probe-request template, disable connection monitoring, keep-alive and
 * beacon filtering, abort any channel switch in progress and invalidate
 * the keep-alive template.
 */
2972 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2975 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2977 /* make sure we are connected (sta) joined */
2979 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2982 /* make sure we are joined (ibss) */
2984 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
2988 /* use defaults when not associated */
2991 /* free probe-request template */
2992 dev_kfree_skb(wlvif->probereq);
2993 wlvif->probereq = NULL;
2995 /* disable connection monitor features */
2996 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3000 /* Disable the keep-alive feature */
3001 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3005 /* disable beacon filtering */
3006 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
/* abort an in-flight channel switch and tell mac80211 it failed */
3011 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3012 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3014 wl12xx_cmd_stop_channel_switch(wl, wlvif);
3015 ieee80211_chswitch_done(vif, false);
3016 cancel_delayed_work(&wlvif->channel_switch_work);
3019 /* invalidate keep-alive template */
3020 wl1271_acx_keep_alive_config(wl, wlvif,
3021 wlvif->sta.klv_template_id,
3022 ACX_KEEP_ALIVE_TPL_INVALID);
/* Reset the basic and full rate sets to the defaults of the current band. */
3027 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3029 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3030 wlvif->rate_set = wlvif->basic_rate_set;
/*
 * Track mac80211's idle notification for a station vif via the
 * WLVIF_FLAG_ACTIVE bit (set = in use, clear = idle).
 */
3033 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3036 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
/* no state change, nothing to do */
3038 if (idle == cur_idle)
3042 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3044 /* The current firmware only supports sched_scan in idle */
3045 if (wl->sched_vif == wlvif)
3046 wl->ops->sched_scan_stop(wl, wlvif);
3048 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
/*
 * Per-vif part of the mac80211 config handler: pushes a changed tx power
 * level to the firmware.  P2P management vifs are skipped.
 */
3052 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3053 struct ieee80211_conf *conf, u32 changed)
3057 if (wlcore_is_p2p_mgmt(wlvif))
/* only update the chip when the requested level actually changed */
3060 if (conf->power_level != wlvif->power_level) {
3061 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3065 wlvif->power_level = conf->power_level;
/*
 * mac80211 .config callback: records the global power level and applies
 * the changed configuration to every vif under wl->mutex, with an ELP
 * wakeup/sleep bracket around the firmware accesses.
 */
3071 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3073 struct wl1271 *wl = hw->priv;
3074 struct wl12xx_vif *wlvif;
3075 struct ieee80211_conf *conf = &hw->conf;
3078 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3080 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3082 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3085 mutex_lock(&wl->mutex);
3087 if (changed & IEEE80211_CONF_CHANGE_POWER)
3088 wl->power_level = conf->power_level;
3090 if (unlikely(wl->state != WLCORE_STATE_ON))
3093 ret = wl1271_ps_elp_wakeup(wl);
3097 /* configure each interface */
3098 wl12xx_for_each_wlvif(wl, wlvif) {
3099 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3105 wl1271_ps_elp_sleep(wl);
3108 mutex_unlock(&wl->mutex);
/*
 * Multicast filter snapshot built in .prepare_multicast and consumed by
 * .configure_filter (handed through mac80211 as an opaque u64).
 */
3113 struct wl1271_filter_params {
3116 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
/*
 * mac80211 .prepare_multicast callback: snapshot the multicast address
 * list into a freshly allocated wl1271_filter_params and return it as a
 * u64 cookie for .configure_filter.  When the list exceeds
 * ACX_MC_ADDRESS_GROUP_MAX, address filtering is disabled instead.
 */
3119 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3120 struct netdev_hw_addr_list *mc_list)
3122 struct wl1271_filter_params *fp;
3123 struct netdev_hw_addr *ha;
/* GFP_ATOMIC: NOTE(review) — presumably this callback may not sleep */
3125 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3127 wl1271_error("Out of memory setting filters.");
3131 /* update multicast filtering parameters */
3132 fp->mc_list_length = 0;
3133 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3134 fp->enabled = false;
3137 netdev_hw_addr_list_for_each(ha, mc_list) {
3138 memcpy(fp->mc_list[fp->mc_list_length],
3139 ha->addr, ETH_ALEN);
3140 fp->mc_list_length++;
/* pointer smuggled to .configure_filter through the u64 return */
3144 return (u64)(unsigned long)fp;
/* RX filter flags from mac80211 that this driver acts upon. */
3147 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3149 FIF_BCN_PRBRESP_PROMISC | \
/*
 * mac80211 .configure_filter callback: apply the multicast address table
 * prepared by .prepare_multicast (@multicast is the pointer cookie it
 * returned) to every non-AP, non-p2p-mgmt vif.  Other filter bits are
 * not programmable on this firmware.
 */
3153 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3154 unsigned int changed,
3155 unsigned int *total, u64 multicast)
3157 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3158 struct wl1271 *wl = hw->priv;
3159 struct wl12xx_vif *wlvif;
3163 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3164 " total %x", changed, *total);
3166 mutex_lock(&wl->mutex);
/* mask off filter bits the hardware cannot honour */
3168 *total &= WL1271_SUPPORTED_FILTERS;
3169 changed &= WL1271_SUPPORTED_FILTERS;
3171 if (unlikely(wl->state != WLCORE_STATE_ON))
3174 ret = wl1271_ps_elp_wakeup(wl);
3178 wl12xx_for_each_wlvif(wl, wlvif) {
3179 if (wlcore_is_p2p_mgmt(wlvif))
3182 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3183 if (*total & FIF_ALLMULTI)
3184 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3188 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3191 fp->mc_list_length);
3198 * the fw doesn't provide an api to configure the filters. instead,
3199 * the filters configuration is based on the active roles / ROC
3204 wl1271_ps_elp_sleep(wl);
3207 mutex_unlock(&wl->mutex);
/*
 * Record an AP key set before the AP role has started; it will be pushed
 * to the firmware later by wl1271_ap_init_hwenc().  Stores a kzalloc'd
 * copy in the first free wlvif->ap.recorded_keys slot; rejects oversized
 * keys and duplicate key ids.
 */
3211 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3212 u8 id, u8 key_type, u8 key_size,
3213 const u8 *key, u8 hlid, u32 tx_seq_32,
3216 struct wl1271_ap_key *ap_key;
3219 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3221 if (key_size > MAX_KEY_SIZE)
3225 * Find next free entry in ap_keys. Also check we are not replacing
3228 for (i = 0; i < MAX_NUM_KEYS; i++) {
3229 if (wlvif->ap.recorded_keys[i] == NULL)
3232 if (wlvif->ap.recorded_keys[i]->id == id) {
3233 wl1271_warning("trying to record key replacement");
/* all slots occupied: cannot record another key */
3238 if (i == MAX_NUM_KEYS)
3241 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3246 ap_key->key_type = key_type;
3247 ap_key->key_size = key_size;
3248 memcpy(ap_key->key, key, key_size);
3249 ap_key->hlid = hlid;
3250 ap_key->tx_seq_32 = tx_seq_32;
3251 ap_key->tx_seq_16 = tx_seq_16;
3253 wlvif->ap.recorded_keys[i] = ap_key;
/* Free all recorded AP keys and clear the slots (kfree(NULL) is a no-op). */
3257 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3261 for (i = 0; i < MAX_NUM_KEYS; i++) {
3262 kfree(wlvif->ap.recorded_keys[i]);
3263 wlvif->ap.recorded_keys[i] = NULL;
/*
 * Push the AP keys recorded before role start into the firmware.  Keys
 * without a specific hlid go to the broadcast link; if any WEP key was
 * programmed, the current default WEP key index is also set.  The
 * recorded key list is freed on the way out.
 */
3267 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3270 struct wl1271_ap_key *key;
3271 bool wep_key_added = false;
3273 for (i = 0; i < MAX_NUM_KEYS; i++) {
3275 if (wlvif->ap.recorded_keys[i] == NULL)
3278 key = wlvif->ap.recorded_keys[i];
/* group keys were recorded with an invalid hlid: use broadcast link */
3280 if (hlid == WL12XX_INVALID_LINK_ID)
3281 hlid = wlvif->ap.bcast_hlid;
3283 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3284 key->id, key->key_type,
3285 key->key_size, key->key,
3286 hlid, key->tx_seq_32,
3291 if (key->key_type == KEY_WEP)
3292 wep_key_added = true;
3295 if (wep_key_added) {
3296 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3297 wlvif->ap.bcast_hlid);
3303 wl1271_free_ap_keys(wl, wlvif);
/*
 * Low-level key programming helper shared by AP and STA paths.
 * AP: before the AP has started, keys are only recorded (replayed later
 * by wl1271_ap_init_hwenc); afterwards they are set directly, using the
 * station's hlid or the broadcast hlid for group keys.
 * STA: keys are addressed by MAC; unicast KEY_REMOVE requests are
 * silently ignored (the firmware clears them on the next JOIN).
 */
3307 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3308 u16 action, u8 id, u8 key_type,
3309 u8 key_size, const u8 *key, u32 tx_seq_32,
3310 u16 tx_seq_16, struct ieee80211_sta *sta)
3313 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3316 struct wl1271_station *wl_sta;
3320 wl_sta = (struct wl1271_station *)sta->drv_priv;
3321 hlid = wl_sta->hlid;
3323 hlid = wlvif->ap.bcast_hlid;
3326 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3328 * We do not support removing keys after AP shutdown.
3329 * Pretend we do to make mac80211 happy.
3331 if (action != KEY_ADD_OR_REPLACE)
3334 ret = wl1271_record_ap_key(wl, wlvif, id,
3336 key, hlid, tx_seq_32,
3339 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3340 id, key_type, key_size,
3341 key, hlid, tx_seq_32,
3349 static const u8 bcast_addr[ETH_ALEN] = {
3350 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3353 addr = sta ? sta->addr : bcast_addr;
3355 if (is_zero_ether_addr(addr)) {
3356 /* We don't support TX only encryption */
3360 /* The wl1271 does not allow to remove unicast keys - they
3361 will be cleared automatically on next CMD_JOIN. Ignore the
3362 request silently, as we don't want the mac80211 to emit
3363 an error message. */
3364 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3367 /* don't remove key if hlid was already deleted */
3368 if (action == KEY_REMOVE &&
3369 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3372 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3373 id, key_type, key_size,
3374 key, addr, tx_seq_32,
/*
 * mac80211 .set_key callback.  GEM and TKIP change the firmware's spare
 * block accounting, so for those ciphers TX is stopped and flushed before
 * the key operation and the queues are woken afterwards.  The actual work
 * is delegated to the chip-specific wlcore_hw_set_key op.
 */
3384 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3385 struct ieee80211_vif *vif,
3386 struct ieee80211_sta *sta,
3387 struct ieee80211_key_conf *key_conf)
3389 struct wl1271 *wl = hw->priv;
3391 bool might_change_spare =
3392 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3393 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3395 if (might_change_spare) {
3397 * stop the queues and flush to ensure the next packets are
3398 * in sync with FW spare block accounting
3400 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3401 wl1271_tx_flush(wl);
3404 mutex_lock(&wl->mutex);
3406 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3408 goto out_wake_queues;
3411 ret = wl1271_ps_elp_wakeup(wl);
3413 goto out_wake_queues;
3415 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3417 wl1271_ps_elp_sleep(wl);
3420 if (might_change_spare)
3421 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3423 mutex_unlock(&wl->mutex);
/*
 * Generic set_key implementation used by chip-specific set_key ops.
 * Resolves the target hlid, derives the TX security sequence counter
 * from the link's freed-packets count, maps the 802.11 cipher suite to
 * a firmware key type and dispatches SET_KEY/DISABLE_KEY to
 * wl1271_set_key().  A unicast key-type change on a station vif also
 * rebuilds the ARP response template.
 */
3428 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3429 struct ieee80211_vif *vif,
3430 struct ieee80211_sta *sta,
3431 struct ieee80211_key_conf *key_conf)
3433 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3440 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3442 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3443 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3444 key_conf->cipher, key_conf->keyidx,
3445 key_conf->keylen, key_conf->flags);
3446 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3448 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3450 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3451 hlid = wl_sta->hlid;
3453 hlid = wlvif->ap.bcast_hlid;
3456 hlid = wlvif->sta.hlid;
/* seed the key's TX sequence counter from the link's freed-pkts count */
3458 if (hlid != WL12XX_INVALID_LINK_ID) {
3459 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3460 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3461 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
/* map the 802.11 cipher suite to the firmware key type */
3464 switch (key_conf->cipher) {
3465 case WLAN_CIPHER_SUITE_WEP40:
3466 case WLAN_CIPHER_SUITE_WEP104:
3469 key_conf->hw_key_idx = key_conf->keyidx;
3471 case WLAN_CIPHER_SUITE_TKIP:
3472 key_type = KEY_TKIP;
3473 key_conf->hw_key_idx = key_conf->keyidx;
3475 case WLAN_CIPHER_SUITE_CCMP:
3477 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3479 case WL1271_CIPHER_SUITE_GEM:
3483 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3490 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3491 key_conf->keyidx, key_type,
3492 key_conf->keylen, key_conf->key,
3493 tx_seq_32, tx_seq_16, sta);
3495 wl1271_error("Could not add or replace key");
3500 * reconfiguring arp response if the unicast (or common)
3501 * encryption key type was changed
3503 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3504 (sta || key_type == KEY_WEP) &&
3505 wlvif->encryption_type != key_type) {
3506 wlvif->encryption_type = key_type;
3507 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3509 wl1271_warning("build arp rsp failed: %d", ret);
3516 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3517 key_conf->keyidx, key_type,
3518 key_conf->keylen, key_conf->key,
3521 wl1271_error("Could not remove key");
3527 wl1271_error("Unsupported key cmd 0x%x", cmd);
3533 EXPORT_SYMBOL_GPL(wlcore_set_key);
/*
 * mac80211 .set_default_unicast_key callback: remember the default key
 * index and, when WEP is the active encryption type, program it into the
 * firmware as the default WEP key.
 */
3535 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3536 struct ieee80211_vif *vif,
3539 struct wl1271 *wl = hw->priv;
3540 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3543 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3546 /* we don't handle unsetting of default key */
3550 mutex_lock(&wl->mutex);
3552 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3557 ret = wl1271_ps_elp_wakeup(wl);
3561 wlvif->default_key = key_idx;
3563 /* the default WEP key needs to be configured at least once */
3564 if (wlvif->encryption_type == KEY_WEP) {
3565 ret = wl12xx_cmd_set_default_wep_key(wl,
3573 wl1271_ps_elp_sleep(wl);
3576 mutex_unlock(&wl->mutex);
/*
 * Push the current regulatory domain configuration to the firmware on
 * chips that require it (WLCORE_QUIRK_REGDOMAIN_CONF); queues a recovery
 * if the command fails.
 */
3579 void wlcore_regdomain_config(struct wl1271 *wl)
3583 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3586 mutex_lock(&wl->mutex);
3588 if (unlikely(wl->state != WLCORE_STATE_ON))
3591 ret = wl1271_ps_elp_wakeup(wl);
3595 ret = wlcore_cmd_regdomain_config_locked(wl);
3597 wl12xx_queue_recovery_work(wl);
3601 wl1271_ps_elp_sleep(wl);
3603 mutex_unlock(&wl->mutex);
/*
 * mac80211 .hw_scan callback: start a one-shot hardware scan with the
 * first requested SSID.  Refused while any role is on a remain-on-channel
 * (ROC).
 */
3606 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3607 struct ieee80211_vif *vif,
3608 struct ieee80211_scan_request *hw_req)
3610 struct cfg80211_scan_request *req = &hw_req->req;
3611 struct wl1271 *wl = hw->priv;
3616 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3619 ssid = req->ssids[0].ssid;
3620 len = req->ssids[0].ssid_len;
3623 mutex_lock(&wl->mutex);
3625 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3627 * We cannot return -EBUSY here because cfg80211 will expect
3628 * a call to ieee80211_scan_completed if we do - in this case
3629 * there won't be any call.
3635 ret = wl1271_ps_elp_wakeup(wl);
3639 /* fail if there is any role in ROC */
3640 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3641 /* don't allow scanning right now */
3646 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3648 wl1271_ps_elp_sleep(wl);
3650 mutex_unlock(&wl->mutex);
/*
 * mac80211 .cancel_hw_scan callback: stop an in-progress hardware scan,
 * reset the driver's scan state and report an aborted scan back to
 * mac80211 via ieee80211_scan_completed().
 */
3655 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3656 struct ieee80211_vif *vif)
3658 struct wl1271 *wl = hw->priv;
3659 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3660 struct cfg80211_scan_info info = {
3665 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3667 mutex_lock(&wl->mutex);
3669 if (unlikely(wl->state != WLCORE_STATE_ON))
/* no scan running, nothing to cancel */
3672 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3675 ret = wl1271_ps_elp_wakeup(wl);
3679 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3680 ret = wl->ops->scan_stop(wl, wlvif);
3686 * Rearm the tx watchdog just before idling scan. This
3687 * prevents just-finished scans from triggering the watchdog
3689 wl12xx_rearm_tx_watchdog_locked(wl);
3691 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3692 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3693 wl->scan_wlvif = NULL;
3694 wl->scan.req = NULL;
3695 ieee80211_scan_completed(wl->hw, &info);
3698 wl1271_ps_elp_sleep(wl);
3700 mutex_unlock(&wl->mutex);
/* must run outside wl->mutex; the work itself takes the mutex */
3702 cancel_delayed_work_sync(&wl->scan_complete_work);
/*
 * mac80211 .sched_scan_start callback: delegate to the chip-specific
 * sched_scan_start op and remember which vif owns the scheduled scan.
 */
3705 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3706 struct ieee80211_vif *vif,
3707 struct cfg80211_sched_scan_request *req,
3708 struct ieee80211_scan_ies *ies)
3710 struct wl1271 *wl = hw->priv;
3711 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3714 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3716 mutex_lock(&wl->mutex);
3718 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3723 ret = wl1271_ps_elp_wakeup(wl);
3727 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3731 wl->sched_vif = wlvif;
3734 wl1271_ps_elp_sleep(wl);
3736 mutex_unlock(&wl->mutex);
/*
 * mac80211 .sched_scan_stop callback: stop the scheduled scan via the
 * chip-specific op, under wl->mutex with an ELP wakeup bracket.
 */
3740 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3741 struct ieee80211_vif *vif)
3743 struct wl1271 *wl = hw->priv;
3744 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3747 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3749 mutex_lock(&wl->mutex);
3751 if (unlikely(wl->state != WLCORE_STATE_ON))
3754 ret = wl1271_ps_elp_wakeup(wl);
3758 wl->ops->sched_scan_stop(wl, wlvif);
3760 wl1271_ps_elp_sleep(wl);
3762 mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_frag_threshold callback: program the fragmentation
 * threshold into the firmware via ACX.
 */
3767 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3769 struct wl1271 *wl = hw->priv;
3772 mutex_lock(&wl->mutex);
3774 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3779 ret = wl1271_ps_elp_wakeup(wl);
3783 ret = wl1271_acx_frag_threshold(wl, value);
3785 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3787 wl1271_ps_elp_sleep(wl);
3790 mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_rts_threshold callback: the RTS threshold is per-role in
 * firmware, so apply it to every vif.
 */
3795 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3797 struct wl1271 *wl = hw->priv;
3798 struct wl12xx_vif *wlvif;
3801 mutex_lock(&wl->mutex);
3803 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3808 ret = wl1271_ps_elp_wakeup(wl);
3812 wl12xx_for_each_wlvif(wl, wlvif) {
3813 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3815 wl1271_warning("set rts threshold failed: %d", ret);
3817 wl1271_ps_elp_sleep(wl);
3820 mutex_unlock(&wl->mutex);
/*
 * Remove the first IE with id @eid from @skb (IEs start at @ieoffset) by
 * shifting the remaining data down and trimming the skb.
 */
3825 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3828 const u8 *next, *end = skb->data + skb->len;
3829 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3830 skb->len - ieoffset);
3835 memmove(ie, next, end - next);
3836 skb_trim(skb, skb->len - len);
/*
 * Remove the first vendor-specific IE matching @oui/@oui_type from @skb
 * (IEs start at @ieoffset), shifting the tail down and trimming the skb.
 */
3839 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3840 unsigned int oui, u8 oui_type,
3844 const u8 *next, *end = skb->data + skb->len;
3845 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3846 skb->data + ieoffset,
3847 skb->len - ieoffset);
3852 memmove(ie, next, end - next);
3853 skb_trim(skb, skb->len - len);
/*
 * Upload the probe-response template provided by mac80211
 * (ieee80211_proberesp_get) to the firmware for an AP vif and mark the
 * template as explicitly set (WLVIF_FLAG_AP_PROBE_RESP_SET).
 */
3856 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3857 struct ieee80211_vif *vif)
3859 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3860 struct sk_buff *skb;
3863 skb = ieee80211_proberesp_get(wl->hw, vif);
3867 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3868 CMD_TEMPL_AP_PROBE_RESPONSE,
3877 wl1271_debug(DEBUG_AP, "probe response updated");
3878 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
/*
 * Legacy probe-response template upload used when the template is derived
 * from beacon data: if the vif already has an SSID, the data is uploaded
 * as-is; otherwise the SSID IE inside @probe_rsp_data is replaced with
 * the SSID from bss_conf before uploading (needed for hidden-SSID setups
 * where the beacon carries an empty SSID).
 */
3884 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3885 struct ieee80211_vif *vif,
3887 size_t probe_rsp_len,
3890 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3891 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3892 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3893 int ssid_ie_offset, ie_offset, templ_len;
3896 /* no need to change probe response if the SSID is set correctly */
3897 if (wlvif->ssid_len > 0)
3898 return wl1271_cmd_template_set(wl, wlvif->role_id,
3899 CMD_TEMPL_AP_PROBE_RESPONSE,
3904 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3905 wl1271_error("probe_rsp template too big");
3909 /* start searching from IE offset */
3910 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3912 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3913 probe_rsp_len - ie_offset);
3915 wl1271_error("No SSID in beacon!");
3919 ssid_ie_offset = ptr - probe_rsp_data;
/* skip the original SSID IE (header + payload) */
3920 ptr += (ptr[1] + 2);
3922 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3924 /* insert SSID from bss_conf */
3925 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3926 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3927 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3928 bss_conf->ssid, bss_conf->ssid_len);
3929 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
/* append everything that followed the original SSID IE */
3931 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3932 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3933 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3935 return wl1271_cmd_template_set(wl, wlvif->role_id,
3936 CMD_TEMPL_AP_PROBE_RESPONSE,
/*
 * Apply ERP-related bss changes: slot time, preamble and CTS protection,
 * each programmed into the firmware via the corresponding ACX command.
 */
3942 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3943 struct ieee80211_vif *vif,
3944 struct ieee80211_bss_conf *bss_conf,
3947 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3950 if (changed & BSS_CHANGED_ERP_SLOT) {
3951 if (bss_conf->use_short_slot)
3952 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3954 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3956 wl1271_warning("Set slot time failed %d", ret);
3961 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3962 if (bss_conf->use_short_preamble)
3963 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3965 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3968 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3969 if (bss_conf->use_cts_prot)
3970 ret = wl1271_acx_cts_protect(wl, wlvif,
3973 ret = wl1271_acx_cts_protect(wl, wlvif,
3974 CTSPROTECT_DISABLE);
3976 wl1271_warning("Set ctsprotect failed %d", ret);
/*
 * Fetch the current beacon from mac80211, upload it as the beacon
 * template, and (unless usermode already set one) derive a probe-response
 * template from it by stripping the TIM and P2P IEs and rewriting the
 * frame control to PROBE_RESP.
 */
3985 static int wlcore_set_beacon_template(struct wl1271 *wl,
3986 struct ieee80211_vif *vif,
3989 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3990 struct ieee80211_hdr *hdr;
3993 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
3994 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4002 wl1271_debug(DEBUG_MASTER, "beacon updated");
4004 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4006 dev_kfree_skb(beacon);
4009 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4010 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4012 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4017 dev_kfree_skb(beacon);
/* beacon carrying a WMM vendor IE implies QoS is enabled */
4021 wlvif->wmm_enabled =
4022 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4023 WLAN_OUI_TYPE_MICROSOFT_WMM,
4024 beacon->data + ieoffset,
4025 beacon->len - ieoffset);
4028 * In case we already have a probe-resp beacon set explicitly
4029 * by usermode, don't use the beacon data.
4031 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4034 /* remove TIM ie from probe response */
4035 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4038 * remove p2p ie from probe response.
4039 * the fw responds to probe requests that don't include
4040 * the p2p ie. probe requests with p2p ie will be passed,
4041 * and will be responded by the supplicant (the spec
4042 * forbids including the p2p ie when responding to probe
4043 * requests that didn't include it).
4045 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4046 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
/* reuse the beacon frame as a probe response */
4048 hdr = (struct ieee80211_hdr *) beacon->data;
4049 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4050 IEEE80211_STYPE_PROBE_RESP);
4052 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4057 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4058 CMD_TEMPL_PROBE_RESPONSE,
4063 dev_kfree_skb(beacon);
/*
 * Handle beacon-related bss_info changes: beacon interval, an explicit
 * probe-response template from usermode (AP only), and beacon content
 * updates — the latter may also restart the DFS master state.
 */
4071 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4072 struct ieee80211_vif *vif,
4073 struct ieee80211_bss_conf *bss_conf,
4076 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4077 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4080 if (changed & BSS_CHANGED_BEACON_INT) {
4081 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4082 bss_conf->beacon_int);
4084 wlvif->beacon_int = bss_conf->beacon_int;
4087 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4088 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4090 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4093 if (changed & BSS_CHANGED_BEACON) {
4094 ret = wlcore_set_beacon_template(wl, vif, is_ap);
/* first beacon after it was disabled: restart DFS master */
4098 if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4100 ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4107 wl1271_error("beacon info change failed: %d", ret);
4111 /* AP mode changes */
/*
 * bss_info_changed handling for AP vifs: update basic rates (and the
 * templates that depend on them), forward beacon changes, start or stop
 * the AP role on BSS_CHANGED_BEACON_ENABLED, apply ERP settings and HT
 * operation mode changes.
 */
4112 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4113 struct ieee80211_vif *vif,
4114 struct ieee80211_bss_conf *bss_conf,
4117 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4120 if (changed & BSS_CHANGED_BASIC_RATES) {
4121 u32 rates = bss_conf->basic_rates;
4123 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4125 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4126 wlvif->basic_rate_set);
4128 ret = wl1271_init_ap_rates(wl, wlvif);
4130 wl1271_error("AP rate policy change failed %d", ret);
/* templates embed the rate set, so rebuild them */
4134 ret = wl1271_ap_init_templates(wl, vif);
4138 /* No need to set probe resp template for mesh */
4139 if (!ieee80211_vif_is_mesh(vif)) {
4140 ret = wl1271_ap_set_probe_resp_tmpl(wl,
4147 ret = wlcore_set_beacon_template(wl, vif, true);
4152 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4156 if (changed & BSS_CHANGED_BEACON_ENABLED) {
4157 if (bss_conf->enable_beacon) {
4158 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4159 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
/* push keys recorded before the role started */
4163 ret = wl1271_ap_init_hwenc(wl, wlvif);
4167 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4168 wl1271_debug(DEBUG_AP, "started AP");
4171 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4173 * AP might be in ROC in case we have just
4174 * sent auth reply. handle it.
4176 if (test_bit(wlvif->role_id, wl->roc_map))
4177 wl12xx_croc(wl, wlvif->role_id);
4179 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4183 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4184 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4186 wl1271_debug(DEBUG_AP, "stopped AP");
4191 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4195 /* Handle HT information change */
4196 if ((changed & BSS_CHANGED_HT) &&
4197 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4198 ret = wl1271_acx_set_ht_information(wl, wlvif,
4199 bss_conf->ht_operation_mode);
4201 wl1271_warning("Set ht information failed %d", ret);
/*
 * Apply a new BSSID on a station vif: refresh basic/rate sets from
 * @bss_conf and @sta_rate_set, stop any scheduled scan (unsupported while
 * connected), rebuild the null-data / qos-null templates, cache the SSID
 * and mark the vif in use.
 */
4210 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4211 struct ieee80211_bss_conf *bss_conf,
4217 wl1271_debug(DEBUG_MAC80211,
4218 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4219 bss_conf->bssid, bss_conf->aid,
4220 bss_conf->beacon_int,
4221 bss_conf->basic_rates, sta_rate_set);
4223 wlvif->beacon_int = bss_conf->beacon_int;
4224 rates = bss_conf->basic_rates;
4225 wlvif->basic_rate_set =
4226 wl1271_tx_enabled_rates_get(wl, rates,
4229 wl1271_tx_min_rate_get(wl,
4230 wlvif->basic_rate_set);
4234 wl1271_tx_enabled_rates_get(wl,
4238 /* we only support sched_scan while not connected */
4239 if (wl->sched_vif == wlvif)
4240 wl->ops->sched_scan_stop(wl, wlvif);
4242 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4246 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4250 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4254 wlcore_set_ssid(wl, wlvif);
4256 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
/*
 * Clear the BSSID on a station vif: fall back to the band's default
 * rates, re-sync rate policies, stop the station role if it was in use
 * and clear the in-use flag.
 */
4261 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4265 /* revert back to minimum rates for the current band */
4266 wl1271_set_band_rate(wl, wlvif);
4267 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4269 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4273 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4274 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4275 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4280 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4283 /* STA/IBSS mode changes */
/*
 * Handle mac80211 BSS_CHANGED_* notifications for STA and IBSS vifs.
 * Processes, in order: beacon info, IBSS join/leave, idle, CQM (RSSI
 * triggers), BSSID changes, rate sets, beacon filtering, ERP info,
 * (re)join, association state, powersave, HT capabilities/information
 * and ARP filtering.  Must be called with wl->mutex held and the chip
 * awake (caller is wl1271_op_bss_info_changed below).
 * NOTE(review): extract — error-handling and several assignments
 * between the visible statements are not shown.
 */
4284 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4285 struct ieee80211_vif *vif,
4286 struct ieee80211_bss_conf *bss_conf,
4289 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4290 bool do_join = false;
4291 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4292 bool ibss_joined = false;
4293 u32 sta_rate_set = 0;
4295 struct ieee80211_sta *sta;
4296 bool sta_exists = false;
4297 struct ieee80211_sta_ht_cap sta_ht_cap;
4300 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
/* IBSS join/leave: track the joined flag; on leave also unassoc + stop role */
4306 if (changed & BSS_CHANGED_IBSS) {
4307 if (bss_conf->ibss_joined) {
4308 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4311 wlcore_unset_assoc(wl, wlvif);
4312 wl12xx_cmd_role_stop_sta(wl, wlvif);
4316 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4319 /* Need to update the SSID (for filtering etc) */
4320 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4323 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4324 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4325 bss_conf->enable_beacon ? "enabled" : "disabled");
4330 if (changed & BSS_CHANGED_IDLE && !is_ibss)
4331 wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
/* CQM: arm/disarm the firmware RSSI trigger around the requested threshold */
4333 if (changed & BSS_CHANGED_CQM) {
4334 bool enable = false;
4335 if (bss_conf->cqm_rssi_thold)
4337 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4338 bss_conf->cqm_rssi_thold,
4339 bss_conf->cqm_rssi_hyst);
4342 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
/* snapshot the AP's supported rates and HT caps under RCU-found sta */
4345 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4346 BSS_CHANGED_ASSOC)) {
4348 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4350 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4352 /* save the supp_rates of the ap */
4353 sta_rate_set = sta->supp_rates[wlvif->band];
4354 if (sta->ht_cap.ht_supported)
4356 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4357 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4358 sta_ht_cap = sta->ht_cap;
4365 if (changed & BSS_CHANGED_BSSID) {
4366 if (!is_zero_ether_addr(bss_conf->bssid)) {
4367 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4372 /* Need to update the BSSID (for filtering etc) */
4375 ret = wlcore_clear_bssid(wl, wlvif);
4381 if (changed & BSS_CHANGED_IBSS) {
4382 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4383 bss_conf->ibss_joined);
4385 if (bss_conf->ibss_joined) {
4386 u32 rates = bss_conf->basic_rates;
4387 wlvif->basic_rate_set =
4388 wl1271_tx_enabled_rates_get(wl, rates,
4391 wl1271_tx_min_rate_get(wl,
4392 wlvif->basic_rate_set);
4394 /* by default, use 11b + OFDM rates */
4395 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4396 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4402 if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4403 /* enable beacon filtering */
4404 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4409 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4414 ret = wlcore_join(wl, wlvif);
4416 wl1271_warning("cmd join failed %d", ret);
4421 if (changed & BSS_CHANGED_ASSOC) {
4422 if (bss_conf->assoc) {
4423 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4428 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4429 wl12xx_set_authorized(wl, wlvif);
4431 wlcore_unset_assoc(wl, wlvif);
/* PS: enter forced or auto powersave only when associated and not already in PS */
4435 if (changed & BSS_CHANGED_PS) {
4436 if ((bss_conf->ps) &&
4437 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4438 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4442 if (wl->conf.conn.forced_ps) {
4443 ps_mode = STATION_POWER_SAVE_MODE;
4444 ps_mode_str = "forced";
4446 ps_mode = STATION_AUTO_PS_MODE;
4447 ps_mode_str = "auto";
4450 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4452 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4454 wl1271_warning("enter %s ps failed %d",
4456 } else if (!bss_conf->ps &&
4457 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4458 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4460 ret = wl1271_ps_set_mode(wl, wlvif,
4461 STATION_ACTIVE_MODE);
4463 wl1271_warning("exit auto ps failed %d", ret);
4467 /* Handle new association with HT. Do this after join. */
4470 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4472 ret = wlcore_hw_set_peer_cap(wl,
4478 wl1271_warning("Set ht cap failed %d", ret);
4484 ret = wl1271_acx_set_ht_information(wl, wlvif,
4485 bss_conf->ht_operation_mode);
4487 wl1271_warning("Set ht information failed %d",
4494 /* Handle arp filtering. Done after join. */
4495 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4496 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4497 __be32 addr = bss_conf->arp_addr_list[0];
4498 wlvif->sta.qos = bss_conf->qos;
4499 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4501 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4502 wlvif->ip_addr = addr;
4504 * The template should have been configured only upon
4505 * association. however, it seems that the correct ip
4506 * isn't being set (when sending), so we have to
4507 * reconfigure the template upon every ip change.
4509 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4511 wl1271_warning("build arp rsp failed: %d", ret);
4515 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4516 (ACX_ARP_FILTER_ARP_FILTERING |
4517 ACX_ARP_FILTER_AUTO_ARP),
/* no single usable IP -> disable ARP filtering entirely */
4521 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
/*
 * mac80211 .bss_info_changed callback: common preamble (cancel pending
 * connection-loss work on assoc changes, flush TX before disabling AP
 * beacons, take wl->mutex, wake the chip), apply TX power changes, then
 * dispatch to the AP- or STA-specific handler.
 * NOTE(review): extract — the out/unlock error paths are not shown.
 */
4532 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4533 struct ieee80211_vif *vif,
4534 struct ieee80211_bss_conf *bss_conf,
4537 struct wl1271 *wl = hw->priv;
4538 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4539 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4542 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4543 wlvif->role_id, (int)changed);
4546 * make sure to cancel pending disconnections if our association
4549 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4550 cancel_delayed_work_sync(&wlvif->connection_loss_work);
/* drain queued frames before the AP stops beaconing */
4552 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4553 !bss_conf->enable_beacon)
4554 wl1271_tx_flush(wl);
4556 mutex_lock(&wl->mutex);
4558 if (unlikely(wl->state != WLCORE_STATE_ON))
4561 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4564 ret = wl1271_ps_elp_wakeup(wl);
4568 if ((changed & BSS_CHANGED_TXPOWER) &&
4569 bss_conf->txpower != wlvif->power_level) {
4571 ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4575 wlvif->power_level = bss_conf->txpower;
4579 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4581 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4583 wl1271_ps_elp_sleep(wl);
4586 mutex_unlock(&wl->mutex);
/*
 * mac80211 .add_chanctx callback: nothing to program in HW here, the
 * driver only logs the new channel context.
 */
4589 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4590 struct ieee80211_chanctx_conf *ctx)
4592 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4593 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4594 cfg80211_get_chandef_type(&ctx->def));
/*
 * mac80211 .remove_chanctx callback: log-only counterpart of
 * wlcore_op_add_chanctx().
 */
4598 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4599 struct ieee80211_chanctx_conf *ctx)
4601 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4602 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4603 cfg80211_get_chandef_type(&ctx->def));
/*
 * mac80211 .change_chanctx callback: for every vif bound to this
 * context, start radar detection (CAC) when the RADAR flag toggles on
 * for an AP on a usable DFS channel.
 */
4606 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4607 struct ieee80211_chanctx_conf *ctx,
4610 struct wl1271 *wl = hw->priv;
4611 struct wl12xx_vif *wlvif;
4613 int channel = ieee80211_frequency_to_channel(
4614 ctx->def.chan->center_freq);
4616 wl1271_debug(DEBUG_MAC80211,
4617 "mac80211 change chanctx %d (type %d) changed 0x%x",
4618 channel, cfg80211_get_chandef_type(&ctx->def), changed);
4620 mutex_lock(&wl->mutex);
4622 ret = wl1271_ps_elp_wakeup(wl);
4626 wl12xx_for_each_wlvif(wl, wlvif) {
4627 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
/* skip vifs that are bound to some other channel context */
4630 if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4636 /* start radar if needed */
4637 if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4638 wlvif->bss_type == BSS_TYPE_AP_BSS &&
4639 ctx->radar_enabled && !wlvif->radar_enabled &&
4640 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4641 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4642 wlcore_hw_set_cac(wl, wlvif, true);
4643 wlvif->radar_enabled = true;
4647 wl1271_ps_elp_sleep(wl);
4649 mutex_unlock(&wl->mutex);
/*
 * mac80211 .assign_vif_chanctx callback: bind a vif to a channel
 * context — record band/channel/channel-type on the wlvif, re-derive
 * the default band rates, and start CAC if the context requires radar
 * detection on a usable DFS channel.
 */
4652 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4653 struct ieee80211_vif *vif,
4654 struct ieee80211_chanctx_conf *ctx)
4656 struct wl1271 *wl = hw->priv;
4657 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4658 int channel = ieee80211_frequency_to_channel(
4659 ctx->def.chan->center_freq);
4662 wl1271_debug(DEBUG_MAC80211,
4663 "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4664 wlvif->role_id, channel,
4665 cfg80211_get_chandef_type(&ctx->def),
4666 ctx->radar_enabled, ctx->def.chan->dfs_state);
4668 mutex_lock(&wl->mutex);
4670 if (unlikely(wl->state != WLCORE_STATE_ON))
4673 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4676 ret = wl1271_ps_elp_wakeup(wl);
4680 wlvif->band = ctx->def.chan->band;
4681 wlvif->channel = channel;
4682 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4684 /* update default rates according to the band */
4685 wl1271_set_band_rate(wl, wlvif);
4687 if (ctx->radar_enabled &&
4688 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4689 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4690 wlcore_hw_set_cac(wl, wlvif, true);
4691 wlvif->radar_enabled = true;
4694 wl1271_ps_elp_sleep(wl);
4696 mutex_unlock(&wl->mutex);
/*
 * mac80211 .unassign_vif_chanctx callback: flush TX, then stop radar
 * detection (CAC) if it was running for this vif.
 */
4701 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4702 struct ieee80211_vif *vif,
4703 struct ieee80211_chanctx_conf *ctx)
4705 struct wl1271 *wl = hw->priv;
4706 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4709 wl1271_debug(DEBUG_MAC80211,
4710 "mac80211 unassign chanctx (role %d) %d (type %d)",
4712 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4713 cfg80211_get_chandef_type(&ctx->def));
/* flush before taking the mutex: wl1271_tx_flush takes it internally */
4715 wl1271_tx_flush(wl);
4717 mutex_lock(&wl->mutex);
4719 if (unlikely(wl->state != WLCORE_STATE_ON))
4722 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4725 ret = wl1271_ps_elp_wakeup(wl);
4729 if (wlvif->radar_enabled) {
4730 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4731 wlcore_hw_set_cac(wl, wlvif, false);
4732 wlvif->radar_enabled = false;
4735 wl1271_ps_elp_sleep(wl);
4737 mutex_unlock(&wl->mutex);
/*
 * Move one AP vif to a new channel context: stop any running CAC,
 * record the new band/channel/type, then restart CAC if the new
 * context has radar detection enabled.  AP-only (WARNs otherwise);
 * beaconing is expected to be disabled during the switch.
 */
4740 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4741 struct wl12xx_vif *wlvif,
4742 struct ieee80211_chanctx_conf *new_ctx)
4744 int channel = ieee80211_frequency_to_channel(
4745 new_ctx->def.chan->center_freq);
4747 wl1271_debug(DEBUG_MAC80211,
4748 "switch vif (role %d) %d -> %d chan_type: %d",
4749 wlvif->role_id, wlvif->channel, channel,
4750 cfg80211_get_chandef_type(&new_ctx->def));
4752 if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4755 WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4757 if (wlvif->radar_enabled) {
4758 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4759 wlcore_hw_set_cac(wl, wlvif, false);
4760 wlvif->radar_enabled = false;
4763 wlvif->band = new_ctx->def.chan->band;
4764 wlvif->channel = channel;
4765 wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4767 /* start radar if needed */
4768 if (new_ctx->radar_enabled) {
4769 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4770 wlcore_hw_set_cac(wl, wlvif, true);
4771 wlvif->radar_enabled = true;
/*
 * mac80211 .switch_vif_chanctx callback: under wl->mutex with the chip
 * awake, apply __wlcore_switch_vif_chan() to each vif in the switch
 * request.
 */
4778 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4779 struct ieee80211_vif_chanctx_switch *vifs,
4781 enum ieee80211_chanctx_switch_mode mode)
4783 struct wl1271 *wl = hw->priv;
4786 wl1271_debug(DEBUG_MAC80211,
4787 "mac80211 switch chanctx n_vifs %d mode %d",
4790 mutex_lock(&wl->mutex);
4792 ret = wl1271_ps_elp_wakeup(wl);
4796 for (i = 0; i < n_vifs; i++) {
4797 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4799 ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4804 wl1271_ps_elp_sleep(wl);
4806 mutex_unlock(&wl->mutex);
/*
 * mac80211 .conf_tx callback: program EDCA (AC) parameters and the TID
 * configuration for one HW queue.  P2P management vifs are skipped.
 */
4811 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4812 struct ieee80211_vif *vif, u16 queue,
4813 const struct ieee80211_tx_queue_params *params)
4815 struct wl1271 *wl = hw->priv;
4816 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4820 if (wlcore_is_p2p_mgmt(wlvif))
4823 mutex_lock(&wl->mutex);
4825 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
/* UAPSD trigger scheme when requested by mac80211, legacy PS otherwise */
4828 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4830 ps_scheme = CONF_PS_SCHEME_LEGACY;
4832 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4835 ret = wl1271_ps_elp_wakeup(wl);
4840 * the txop is confed in units of 32us by the mac80211,
/* hence the << 5 conversion below */
4843 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4844 params->cw_min, params->cw_max,
4845 params->aifs, params->txop << 5);
4849 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4850 CONF_CHANNEL_TYPE_EDCF,
4851 wl1271_tx_get_queue(queue),
4852 ps_scheme, CONF_ACK_POLICY_LEGACY,
4856 wl1271_ps_elp_sleep(wl);
4859 mutex_unlock(&wl->mutex);
/*
 * mac80211 .get_tsf callback: read the firmware TSF for this vif.
 * Returns ULLONG_MAX if the chip is off or the ACX query fails before
 * mactime is written.
 */
4864 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4865 struct ieee80211_vif *vif)
4868 struct wl1271 *wl = hw->priv;
4869 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4870 u64 mactime = ULLONG_MAX;
4873 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4875 mutex_lock(&wl->mutex);
4877 if (unlikely(wl->state != WLCORE_STATE_ON))
4880 ret = wl1271_ps_elp_wakeup(wl);
4884 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4889 wl1271_ps_elp_sleep(wl);
4892 mutex_unlock(&wl->mutex);
/*
 * mac80211 .get_survey callback: report only the currently-configured
 * channel; no per-channel statistics are collected by this driver.
 */
4896 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4897 struct survey_info *survey)
4899 struct ieee80211_conf *conf = &hw->conf;
4904 survey->channel = conf->chandef.chan;
/*
 * Allocate a firmware link (HLID) for a newly added AP-mode station.
 * Fails when the AP is already at max_ap_stations or no link is free;
 * on success restores the station's saved TX-freed-packet counter (for
 * recovery/resume), marks the HLID in the vif's map, records the MAC
 * address and bumps the active station count.
 */
4909 static int wl1271_allocate_sta(struct wl1271 *wl,
4910 struct wl12xx_vif *wlvif,
4911 struct ieee80211_sta *sta)
4913 struct wl1271_station *wl_sta;
4917 if (wl->active_sta_count >= wl->max_ap_stations) {
4918 wl1271_warning("could not allocate HLID - too much stations");
4922 wl_sta = (struct wl1271_station *)sta->drv_priv;
4923 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4925 wl1271_warning("could not allocate HLID - too many links");
4929 /* use the previous security seq, if this is a recovery/resume */
4930 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4932 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4933 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4934 wl->active_sta_count++;
/*
 * Release the firmware link of an AP-mode station: clear its HLID from
 * the vif map and the PS maps, persist its last packet counters for a
 * later recovery/resume, free the link and drop the station count.
 * Rearms the TX watchdog when the last station leaves so the FW gets a
 * chance to flush STA-buffered packets.
 */
4938 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4940 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4943 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4944 __clear_bit(hlid, &wl->ap_ps_map);
4945 __clear_bit(hlid, &wl->ap_fw_ps_map);
4948 * save the last used PN in the private part of iee80211_sta,
4949 * in case of recovery/suspend
4951 wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
4953 wl12xx_free_link(wl, wlvif, &hlid);
4954 wl->active_sta_count--;
4957 * rearm the tx watchdog when the last STA is freed - give the FW a
4958 * chance to return STA-buffered packets before complaining.
4960 if (wl->active_sta_count == 0)
4961 wl12xx_rearm_tx_watchdog_locked(wl);
/*
 * Add an AP-mode peer: allocate an HLID via wl1271_allocate_sta(), then
 * issue the firmware ADD_PEER command; on command failure the HLID is
 * freed again.
 */
4964 static int wl12xx_sta_add(struct wl1271 *wl,
4965 struct wl12xx_vif *wlvif,
4966 struct ieee80211_sta *sta)
4968 struct wl1271_station *wl_sta;
4972 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4974 ret = wl1271_allocate_sta(wl, wlvif, sta);
4978 wl_sta = (struct wl1271_station *)sta->drv_priv;
4979 hlid = wl_sta->hlid;
4981 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4983 wl1271_free_sta(wl, wlvif, hlid);
/*
 * Remove an AP-mode peer: issue the firmware REMOVE_PEER command, then
 * free the station's HLID.  WARNs if the HLID was never allocated.
 * NOTE(review): extract — the declaration/assignment of 'id' used in
 * the test_bit() below is not visible here.
 */
4988 static int wl12xx_sta_remove(struct wl1271 *wl,
4989 struct wl12xx_vif *wlvif,
4990 struct ieee80211_sta *sta)
4992 struct wl1271_station *wl_sta;
4995 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4997 wl_sta = (struct wl1271_station *)sta->drv_priv;
4999 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5002 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5006 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
/*
 * Start a remain-on-channel for this vif's role, but only when no
 * other ROC is currently active (roc_map empty).
 */
5010 static void wlcore_roc_if_possible(struct wl1271 *wl,
5011 struct wl12xx_vif *wlvif)
/* bail out if any role already has an active ROC */
5013 if (find_first_bit(wl->roc_map,
5014 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5017 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5020 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5024 * when wl_sta is NULL, we treat this call as if coming from a
5025 * pending auth reply.
5026 * wl->mutex must be taken and the FW must be awake when the call
/*
 * Track stations that are mid-connection on an AP vif.  While any
 * station (or a pending auth reply) is connecting, keep a ROC active
 * on the role so frames can be exchanged; once the last one finishes,
 * cancel the ROC (croc).
 */
5029 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5030 struct wl1271_station *wl_sta, bool in_conn)
5033 if (WARN_ON(wl_sta && wl_sta->in_connection))
/* first connecting entity on this vif -> start ROC if none is active */
5036 if (!wlvif->ap_pending_auth_reply &&
5037 !wlvif->inconn_count)
5038 wlcore_roc_if_possible(wl, wlvif);
5041 wl_sta->in_connection = true;
5042 wlvif->inconn_count++;
5044 wlvif->ap_pending_auth_reply = true;
5047 if (wl_sta && !wl_sta->in_connection)
5050 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5053 if (WARN_ON(wl_sta && !wlvif->inconn_count))
5057 wl_sta->in_connection = false;
5058 wlvif->inconn_count--;
5060 wlvif->ap_pending_auth_reply = false;
/* last connecting entity gone -> cancel the ROC on this role */
5063 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5064 test_bit(wlvif->role_id, wl->roc_map))
5065 wl12xx_croc(wl, wlvif->role_id);
/*
 * Core of the mac80211 .sta_state callback: react to station state
 * transitions for both AP and STA vifs — add/remove/authorize AP
 * peers, set/clear the STA authorized flags, save/restore security
 * sequence counters across disassoc/assoc (suspend/resume), and manage
 * ROCs around the connection phase.  Called with wl->mutex held and
 * the chip awake.
 * NOTE(review): extract — several 'is_ap'/'is_sta' condition lines and
 * error checks between the visible branches are not shown.
 */
5069 static int wl12xx_update_sta_state(struct wl1271 *wl,
5070 struct wl12xx_vif *wlvif,
5071 struct ieee80211_sta *sta,
5072 enum ieee80211_sta_state old_state,
5073 enum ieee80211_sta_state new_state)
5075 struct wl1271_station *wl_sta;
5076 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5077 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5080 wl_sta = (struct wl1271_station *)sta->drv_priv;
5082 /* Add station (AP mode) */
5084 old_state == IEEE80211_STA_NOTEXIST &&
5085 new_state == IEEE80211_STA_NONE) {
5086 ret = wl12xx_sta_add(wl, wlvif, sta);
5090 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5093 /* Remove station (AP mode) */
5095 old_state == IEEE80211_STA_NONE &&
5096 new_state == IEEE80211_STA_NOTEXIST) {
5098 wl12xx_sta_remove(wl, wlvif, sta);
5100 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5103 /* Authorize station (AP mode) */
5105 new_state == IEEE80211_STA_AUTHORIZED) {
5106 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5110 /* reconfigure rates */
5111 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
5115 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5120 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5123 /* Authorize station */
5125 new_state == IEEE80211_STA_AUTHORIZED) {
5126 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5127 ret = wl12xx_set_authorized(wl, wlvif);
/* de-authorize on AUTHORIZED -> ASSOC transition */
5133 old_state == IEEE80211_STA_AUTHORIZED &&
5134 new_state == IEEE80211_STA_ASSOC) {
5135 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5136 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5139 /* save seq number on disassoc (suspend) */
5141 old_state == IEEE80211_STA_ASSOC &&
5142 new_state == IEEE80211_STA_AUTH) {
5143 wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5144 wlvif->total_freed_pkts = 0;
5147 /* restore seq number on assoc (resume) */
5149 old_state == IEEE80211_STA_AUTH &&
5150 new_state == IEEE80211_STA_ASSOC) {
5151 wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5154 /* clear ROCs on failure or authorization */
5156 (new_state == IEEE80211_STA_AUTHORIZED ||
5157 new_state == IEEE80211_STA_NOTEXIST)) {
5158 if (test_bit(wlvif->role_id, wl->roc_map))
5159 wl12xx_croc(wl, wlvif->role_id);
/* new STA appearing: (re)start a ROC if none is active anywhere */
5163 old_state == IEEE80211_STA_NOTEXIST &&
5164 new_state == IEEE80211_STA_NONE) {
5165 if (find_first_bit(wl->roc_map,
5166 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5167 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5168 wl12xx_roc(wl, wlvif, wlvif->role_id,
5169 wlvif->band, wlvif->channel);
/*
 * mac80211 .sta_state callback wrapper: lock/wakeup boilerplate around
 * wl12xx_update_sta_state().  On downgrade transitions mac80211 cannot
 * handle failure, hence the special return after unlock.
 */
5175 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5176 struct ieee80211_vif *vif,
5177 struct ieee80211_sta *sta,
5178 enum ieee80211_sta_state old_state,
5179 enum ieee80211_sta_state new_state)
5181 struct wl1271 *wl = hw->priv;
5182 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5185 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5186 sta->aid, old_state, new_state);
5188 mutex_lock(&wl->mutex);
5190 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5195 ret = wl1271_ps_elp_wakeup(wl);
5199 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5201 wl1271_ps_elp_sleep(wl);
5203 mutex_unlock(&wl->mutex);
/* state downgrades must not be failed towards mac80211 */
5204 if (new_state < old_state)
/*
 * mac80211 .ampdu_action callback: manage RX block-ack sessions in the
 * firmware (start/stop per TID, bounded by ba_rx_session_count_max);
 * TX BA sessions are handled autonomously by the firmware, so all TX
 * actions are accepted without any command.
 */
5209 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5210 struct ieee80211_vif *vif,
5211 struct ieee80211_ampdu_params *params)
5213 struct wl1271 *wl = hw->priv;
5214 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5216 u8 hlid, *ba_bitmap;
5217 struct ieee80211_sta *sta = params->sta;
5218 enum ieee80211_ampdu_mlme_action action = params->action;
5219 u16 tid = params->tid;
5220 u16 *ssn = &params->ssn;
5222 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5225 /* sanity check - the fields in FW are only 8bits wide */
5226 if (WARN_ON(tid > 0xFF))
5229 mutex_lock(&wl->mutex);
5231 if (unlikely(wl->state != WLCORE_STATE_ON)) {
/* resolve the FW link (HLID): own link for STA, per-peer link for AP */
5236 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5237 hlid = wlvif->sta.hlid;
5238 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5239 struct wl1271_station *wl_sta;
5241 wl_sta = (struct wl1271_station *)sta->drv_priv;
5242 hlid = wl_sta->hlid;
5248 ba_bitmap = &wl->links[hlid].ba_bitmap;
5250 ret = wl1271_ps_elp_wakeup(wl);
5254 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5258 case IEEE80211_AMPDU_RX_START:
5259 if (!wlvif->ba_support || !wlvif->ba_allowed) {
5264 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5266 wl1271_error("exceeded max RX BA sessions");
5270 if (*ba_bitmap & BIT(tid)) {
5272 wl1271_error("cannot enable RX BA session on active "
5277 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5282 *ba_bitmap |= BIT(tid);
5283 wl->ba_rx_session_count++;
5287 case IEEE80211_AMPDU_RX_STOP:
5288 if (!(*ba_bitmap & BIT(tid))) {
5290 * this happens on reconfig - so only output a debug
5291 * message for now, and don't fail the function.
5293 wl1271_debug(DEBUG_MAC80211,
5294 "no active RX BA session on tid: %d",
5300 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5303 *ba_bitmap &= ~BIT(tid);
5304 wl->ba_rx_session_count--;
5309 * BA initiator (TX) sessions are managed by the FW independently,
5310 * so deliberately fall through for all TX AMPDU actions.
5312 case IEEE80211_AMPDU_TX_START:
5313 case IEEE80211_AMPDU_TX_STOP_CONT:
5314 case IEEE80211_AMPDU_TX_STOP_FLUSH:
5315 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5316 case IEEE80211_AMPDU_TX_OPERATIONAL:
5321 wl1271_error("Incorrect ampdu action id=%x\n", action);
5325 wl1271_ps_elp_sleep(wl);
5328 mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_bitrate_mask callback: translate the legacy rate mask
 * per band into HW rate bits and, for an unassociated STA vif, apply
 * them to the firmware rate policies immediately.
 */
5333 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5334 struct ieee80211_vif *vif,
5335 const struct cfg80211_bitrate_mask *mask)
5337 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5338 struct wl1271 *wl = hw->priv;
5341 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5342 mask->control[NL80211_BAND_2GHZ].legacy,
5343 mask->control[NL80211_BAND_5GHZ].legacy);
5345 mutex_lock(&wl->mutex);
/* always cache the per-band masks, even when the chip is off */
5347 for (i = 0; i < WLCORE_NUM_BANDS; i++)
5348 wlvif->bitrate_masks[i] =
5349 wl1271_tx_enabled_rates_get(wl,
5350 mask->control[i].legacy,
5353 if (unlikely(wl->state != WLCORE_STATE_ON))
5356 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5357 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5359 ret = wl1271_ps_elp_wakeup(wl);
5363 wl1271_set_band_rate(wl, wlvif);
5365 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5366 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5368 wl1271_ps_elp_sleep(wl);
5371 mutex_unlock(&wl->mutex);
/*
 * mac80211 .channel_switch callback (STA side): hand the CSA request to
 * the lower driver and schedule a watchdog that declares failure if the
 * switch hasn't completed ~5s after the expected switch time.  If the
 * chip is already off, complete the switch as failed immediately.
 */
5376 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5377 struct ieee80211_vif *vif,
5378 struct ieee80211_channel_switch *ch_switch)
5380 struct wl1271 *wl = hw->priv;
5381 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5384 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5386 wl1271_tx_flush(wl);
5388 mutex_lock(&wl->mutex);
5390 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5391 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5392 ieee80211_chswitch_done(vif, false);
5394 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5398 ret = wl1271_ps_elp_wakeup(wl);
5402 /* TODO: change mac80211 to pass vif as param */
5404 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5405 unsigned long delay_usec;
5407 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5411 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5413 /* indicate failure 5 seconds after channel switch time */
5414 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5416 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5417 usecs_to_jiffies(delay_usec) +
5418 msecs_to_jiffies(5000));
5422 wl1271_ps_elp_sleep(wl);
5425 mutex_unlock(&wl->mutex);
/*
 * Fetch the current beacon from mac80211 and return a pointer to the
 * requested IE within its variable section, or NULL if not found.
 * NOTE(review): extract — the NULL-beacon check and skb free are not
 * visible here.
 */
5428 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5429 struct wl12xx_vif *wlvif,
5432 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5433 struct sk_buff *beacon =
5434 ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5439 return cfg80211_find_ie(eid,
5440 beacon->data + ieoffset,
5441 beacon->len - ieoffset);
/*
 * Extract the CSA countdown value from the Channel Switch Announcement
 * IE of the current beacon.  The +2 skips the IE id/length header.
 */
5444 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5448 const struct ieee80211_channel_sw_ie *ie_csa;
5450 ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5454 ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5455 *csa_count = ie_csa->count;
/*
 * mac80211 .channel_switch_beacon callback (AP side): build a channel
 * switch request seeded with the CSA count read from our own beacon,
 * then hand it to the lower driver and mark CS in progress.
 */
5460 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5461 struct ieee80211_vif *vif,
5462 struct cfg80211_chan_def *chandef)
5464 struct wl1271 *wl = hw->priv;
5465 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5466 struct ieee80211_channel_switch ch_switch = {
5468 .chandef = *chandef,
5472 wl1271_debug(DEBUG_MAC80211,
5473 "mac80211 channel switch beacon (role %d)",
5476 ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5478 wl1271_error("error getting beacon (for CSA counter)");
5482 mutex_lock(&wl->mutex);
5484 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5489 ret = wl1271_ps_elp_wakeup(wl);
5493 ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5497 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5500 wl1271_ps_elp_sleep(wl);
5502 mutex_unlock(&wl->mutex);
/*
 * mac80211 .flush callback: drain all pending TX; the queues/drop
 * arguments are ignored — everything is flushed.
 */
5505 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5506 u32 queues, bool drop)
5508 struct wl1271 *wl = hw->priv;
5510 wl1271_tx_flush(wl);
/*
 * mac80211 .remain_on_channel callback: only one ROC may be active at
 * a time — returns busy if any role already holds one.  Otherwise
 * starts the device on the requested channel and schedules
 * roc_complete_work after the requested duration.
 */
5513 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5514 struct ieee80211_vif *vif,
5515 struct ieee80211_channel *chan,
5517 enum ieee80211_roc_type type)
5519 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5520 struct wl1271 *wl = hw->priv;
5521 int channel, active_roc, ret = 0;
5523 channel = ieee80211_frequency_to_channel(chan->center_freq);
5525 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5526 channel, wlvif->role_id);
5528 mutex_lock(&wl->mutex);
5530 if (unlikely(wl->state != WLCORE_STATE_ON))
5533 /* return EBUSY if we can't ROC right now */
5534 active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
5535 if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5536 wl1271_warning("active roc on role %d", active_roc);
5541 ret = wl1271_ps_elp_wakeup(wl);
5545 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5550 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5551 msecs_to_jiffies(duration));
5553 wl1271_ps_elp_sleep(wl);
5555 mutex_unlock(&wl->mutex);
/*
 * Finish the active ROC: stop the device role that was started for it.
 * No-op if the ROC was already completed (roc_vif cleared).  Caller
 * holds wl->mutex with the chip awake.
 */
5559 static int __wlcore_roc_completed(struct wl1271 *wl)
5561 struct wl12xx_vif *wlvif;
5564 /* already completed */
5565 if (unlikely(!wl->roc_vif))
5568 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5570 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5573 ret = wl12xx_stop_dev(wl, wlvif);
/*
 * Locked wrapper around __wlcore_roc_completed(): takes wl->mutex,
 * wakes the chip, completes the ROC, and puts the chip back to sleep.
 */
5582 static int wlcore_roc_completed(struct wl1271 *wl)
5586 wl1271_debug(DEBUG_MAC80211, "roc complete");
5588 mutex_lock(&wl->mutex);
5590 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5595 ret = wl1271_ps_elp_wakeup(wl);
5599 ret = __wlcore_roc_completed(wl);
5601 wl1271_ps_elp_sleep(wl);
5603 mutex_unlock(&wl->mutex);
/*
 * Delayed-work handler fired when the ROC duration expires: complete
 * the ROC and notify mac80211 that it has ended.
 */
5608 static void wlcore_roc_complete_work(struct work_struct *work)
5610 struct delayed_work *dwork;
5614 dwork = to_delayed_work(work);
5615 wl = container_of(dwork, struct wl1271, roc_complete_work);
5617 ret = wlcore_roc_completed(wl);
5619 ieee80211_remain_on_channel_expired(wl->hw);
/*
 * mac80211 .cancel_remain_on_channel callback: flush TX, cancel the
 * pending timeout work (sync is safe here — we are not on that
 * workqueue), and complete the ROC immediately.
 */
5622 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5624 struct wl1271 *wl = hw->priv;
5626 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5629 wl1271_tx_flush(wl);
5632 * we can't just flush_work here, because it might deadlock
5633 * (as we might get called from the same workqueue)
5635 cancel_delayed_work_sync(&wl->roc_complete_work);
5636 wlcore_roc_completed(wl);
/*
 * mac80211 .sta_rc_update callback: only bandwidth changes matter.
 * This callback runs in atomic context, so stash the new bandwidth and
 * HT caps on the vif and defer the firmware update to rc_update_work.
 */
5641 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5642 struct ieee80211_vif *vif,
5643 struct ieee80211_sta *sta,
5646 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5648 wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5650 if (!(changed & IEEE80211_RC_BW_CHANGED))
5653 /* this callback is atomic, so schedule a new work */
5654 wlvif->rc_update_bw = sta->bandwidth;
5655 memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap));
5656 ieee80211_queue_work(hw, &wlvif->rc_update_work);
/*
 * mac80211 .sta_statistics callback: query the firmware for the
 * average RSSI and report it as the signal level; other fields are
 * left untouched on failure.
 */
5659 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5660 struct ieee80211_vif *vif,
5661 struct ieee80211_sta *sta,
5662 struct station_info *sinfo)
5664 struct wl1271 *wl = hw->priv;
5665 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5669 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5671 mutex_lock(&wl->mutex);
5673 if (unlikely(wl->state != WLCORE_STATE_ON))
5676 ret = wl1271_ps_elp_wakeup(wl);
5680 ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5684 sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
5685 sinfo->signal = rssi_dbm;
5688 wl1271_ps_elp_sleep(wl);
5691 mutex_unlock(&wl->mutex);
/*
 * mac80211 .get_expected_throughput callback: convert the FW-reported
 * per-link rate from Mbps to Kbps.
 */
5694 static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5695 struct ieee80211_sta *sta)
5697 struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5698 struct wl1271 *wl = hw->priv;
5699 u8 hlid = wl_sta->hlid;
5701 /* return in units of Kbps */
5702 return (wl->links[hlid].fw_rate_mbps * 1000);
/*
 * mac80211 .tx_frames_pending callback: true if frames are still
 * queued in the driver or in flight in the firmware.
 */
5705 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5707 struct wl1271 *wl = hw->priv;
5710 mutex_lock(&wl->mutex);
5712 if (unlikely(wl->state != WLCORE_STATE_ON))
5715 /* packets are considered pending if in the TX queue or the FW */
5716 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5718 mutex_unlock(&wl->mutex);
5723 /* can't be const, mac80211 writes to this */
/*
 * 2.4 GHz legacy rate table: 11b rates (with short-preamble flags
 * above 1 Mbps) followed by OFDM rates, mapped to the firmware's
 * CONF_HW_BIT_RATE_* bits.
 * NOTE(review): extract — the .bitrate fields of each entry are not
 * visible here.
 */
5724 static struct ieee80211_rate wl1271_rates[] = {
5726 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5727 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5729 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5730 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5731 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5733 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5734 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5735 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5737 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5738 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5739 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5741 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5742 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5744 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5745 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5747 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5748 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5750 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5751 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5753 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5754 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5756 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5757 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5759 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5760 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5762 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5763 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5766 /* can't be const, mac80211 writes to this */
/* 2.4 GHz channel list (channels 1-14), all at the chip's max TX power */
5767 static struct ieee80211_channel wl1271_channels[] = {
5768 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5769 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5770 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5771 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5772 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5773 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5774 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5775 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5776 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5777 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5778 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5779 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5780 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5781 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
/*
 * Template 2.4 GHz band descriptor; copied into wl->bands[] per device
 * in wl1271_init_ieee80211() before the HT capabilities are merged in.
 */
5784 /* can't be const, mac80211 writes to this */
5785 static struct ieee80211_supported_band wl1271_band_2ghz = {
5786 .channels = wl1271_channels,
5787 .n_channels = ARRAY_SIZE(wl1271_channels),
5788 .bitrates = wl1271_rates,
5789 .n_bitrates = ARRAY_SIZE(wl1271_rates),
/*
 * 5 GHz bitrate table: OFDM rates only (6-54 Mbps); no CCK rates and
 * therefore no short-preamble flags on this band.
 */
5792 /* 5 GHz data rates for WL1273 */
5793 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5795 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5796 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5798 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5799 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5801 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5802 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5804 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5805 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5807 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5808 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5810 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5811 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5813 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5814 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5816 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5817 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
/*
 * 5 GHz channel list.  Includes the Japanese 4.9/5.04 GHz channels
 * (8/12/16/34), the UNII-1/2/2e channels and UNII-3 (149-165).  As with
 * the 2.4 GHz table, everything starts at WLCORE_MAX_TXPWR and is
 * clamped later by regulatory processing.
 */
5820 /* 5 GHz band channels for WL1273 */
5821 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5822 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5823 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5824 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5825 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5826 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5827 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5828 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5829 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5830 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5831 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5832 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5833 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5834 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5835 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5836 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5837 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5838 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5839 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5840 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5841 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5842 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5843 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5844 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5845 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5846 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5847 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5848 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5849 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5850 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5851 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5852 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
/* Template 5 GHz band descriptor; copied per-device like the 2.4 GHz one. */
5855 static struct ieee80211_supported_band wl1271_band_5ghz = {
5856 .channels = wl1271_channels_5ghz,
5857 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5858 .bitrates = wl1271_rates_5ghz,
5859 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
/*
 * mac80211 callback table for wlcore.  Handlers are implemented earlier
 * in this file; the chanctx ops make wlcore a channel-context driver,
 * and CFG80211_TESTMODE_CMD wires in the testmode interface when
 * CONFIG_NL80211_TESTMODE is enabled.
 */
5862 static const struct ieee80211_ops wl1271_ops = {
5863 .start = wl1271_op_start,
5864 .stop = wlcore_op_stop,
5865 .add_interface = wl1271_op_add_interface,
5866 .remove_interface = wl1271_op_remove_interface,
5867 .change_interface = wl12xx_op_change_interface,
5869 .suspend = wl1271_op_suspend,
5870 .resume = wl1271_op_resume,
5872 .config = wl1271_op_config,
5873 .prepare_multicast = wl1271_op_prepare_multicast,
5874 .configure_filter = wl1271_op_configure_filter,
5876 .set_key = wlcore_op_set_key,
5877 .hw_scan = wl1271_op_hw_scan,
5878 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
5879 .sched_scan_start = wl1271_op_sched_scan_start,
5880 .sched_scan_stop = wl1271_op_sched_scan_stop,
5881 .bss_info_changed = wl1271_op_bss_info_changed,
5882 .set_frag_threshold = wl1271_op_set_frag_threshold,
5883 .set_rts_threshold = wl1271_op_set_rts_threshold,
5884 .conf_tx = wl1271_op_conf_tx,
5885 .get_tsf = wl1271_op_get_tsf,
5886 .get_survey = wl1271_op_get_survey,
5887 .sta_state = wl12xx_op_sta_state,
5888 .ampdu_action = wl1271_op_ampdu_action,
5889 .tx_frames_pending = wl1271_tx_frames_pending,
5890 .set_bitrate_mask = wl12xx_set_bitrate_mask,
5891 .set_default_unicast_key = wl1271_op_set_default_key_idx,
5892 .channel_switch = wl12xx_op_channel_switch,
5893 .channel_switch_beacon = wlcore_op_channel_switch_beacon,
5894 .flush = wlcore_op_flush,
5895 .remain_on_channel = wlcore_op_remain_on_channel,
5896 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5897 .add_chanctx = wlcore_op_add_chanctx,
5898 .remove_chanctx = wlcore_op_remove_chanctx,
5899 .change_chanctx = wlcore_op_change_chanctx,
5900 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5901 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5902 .switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
5903 .sta_rc_update = wlcore_op_sta_rc_update,
5904 .sta_statistics = wlcore_op_sta_statistics,
5905 .get_expected_throughput = wlcore_op_get_expected_throughput,
5906 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
/*
 * Translate a hardware rate code (as reported in RX descriptors) into a
 * mac80211 rate-table index for the given band, via the per-band lookup
 * table wl->band_rate_to_idx.  Out-of-range or unsupported codes are
 * logged as errors.  NOTE(review): the return statements for the error
 * and success paths are missing from this extraction -- presumably a
 * fallback index is returned on error; confirm against the original.
 */
5910 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
5916 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5917 wl1271_error("Illegal RX rate from HW: %d", rate);
5921 idx = wl->band_rate_to_idx[band][rate];
5922 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5923 wl1271_error("Unsupported RX rate from HW: %d", rate);
/*
 * Derive the wl->num_mac_addr MAC addresses exposed to mac80211 from a
 * 24-bit OUI and 24-bit NIC base.  If the chip provides one address
 * fewer than WLCORE_NUM_MAC_ADDRESSES, the last slot is cloned from the
 * first with the locally-administered bit (BIT(1) of octet 0) set.
 * NOTE(review): the per-iteration NIC increment is not visible in this
 * extraction -- presumably nic advances for each derived address;
 * confirm against the original file.
 */
5930 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5934 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5937 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5938 wl1271_warning("NIC part of the MAC address wraps around!");
5940 for (i = 0; i < wl->num_mac_addr; i++) {
/* split the 24-bit OUI / NIC values into the six address octets */
5941 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5942 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5943 wl->addresses[i].addr[2] = (u8) oui;
5944 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5945 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5946 wl->addresses[i].addr[5] = (u8) nic;
5950 /* we may be one address short at the most */
5951 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5954 * turn on the LAA bit in the first address and use it as
5957 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5958 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5959 memcpy(&wl->addresses[idx], &wl->addresses[0],
5960 sizeof(wl->addresses[0]));
/* BIT(1) in octet 0 marks a locally administered address */
5962 wl->addresses[idx].addr[0] |= BIT(1);
/* publish the full address list to cfg80211 */
5965 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5966 wl->hw->wiphy->addresses = wl->addresses;
/*
 * Read basic hardware identification while the chip is powered: chip ID
 * register, PG (process generation) version, and -- when the chip
 * family provides a get_mac op -- the fused MAC address.  The fuse
 * OUI/NIC fields are zeroed first so that a missing get_mac op leaves
 * them at 0 (triggering the fuse-fallback logic in wl1271_register_hw).
 */
5969 static int wl12xx_get_hw_info(struct wl1271 *wl)
5973 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5977 wl->fuse_oui_addr = 0;
5978 wl->fuse_nic_addr = 0;
5980 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5984 if (wl->ops->get_mac)
5985 ret = wl->ops->get_mac(wl);
/*
 * Register the device with mac80211 (idempotent: bails out early if
 * already registered).  The MAC address base is taken from raw NVS
 * bytes when an NVS image of at least 12 bytes is present; a zeroed
 * NVS address falls back to the fused address read earlier, offset by
 * one because the fuse holds the BD_ADDR and the WLAN addresses follow
 * it.  On success, debugfs entries are created.
 */
5991 static int wl1271_register_hw(struct wl1271 *wl)
5994 u32 oui_addr = 0, nic_addr = 0;
5996 if (wl->mac80211_registered)
5999 if (wl->nvs_len >= 12) {
6000 /* NOTE: The wl->nvs->nvs element must be first, in
6001 * order to simplify the casting, we assume it is at
6002 * the beginning of the wl->nvs structure.
6004 u8 *nvs_ptr = (u8 *)wl->nvs;
/* OUI / NIC bytes live at fixed offsets in the NVS image */
6007 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6009 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6012 /* if the MAC address is zeroed in the NVS derive from fuse */
6013 if (oui_addr == 0 && nic_addr == 0) {
6014 oui_addr = wl->fuse_oui_addr;
6015 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
6016 nic_addr = wl->fuse_nic_addr + 1;
6019 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6021 ret = ieee80211_register_hw(wl->hw);
6023 wl1271_error("unable to register mac80211 hw: %d", ret);
6027 wl->mac80211_registered = true;
6029 wl1271_debugfs_init(wl);
6031 wl1271_notice("loaded");
/*
 * Undo wl1271_register_hw(): stop any PLT (production-line test) mode
 * activity, then unregister from mac80211 and clear the flag.
 */
6037 static void wl1271_unregister_hw(struct wl1271 *wl)
6040 wl1271_plt_stop(wl);
6042 ieee80211_unregister_hw(wl->hw);
6043 wl->mac80211_registered = false;
/*
 * Fill in all static ieee80211_hw / wiphy configuration before
 * registration: cipher suites, hw flags, interface modes, scan limits,
 * band/channel tables (device-local copies), queue layout, probe
 * response offload, interface combinations and vendor commands.
 * Called once from wlcore_nvs_cb() during probe.
 */
6047 static int wl1271_init_ieee80211(struct wl1271 *wl)
6050 static const u32 cipher_suites[] = {
6051 WLAN_CIPHER_SUITE_WEP40,
6052 WLAN_CIPHER_SUITE_WEP104,
6053 WLAN_CIPHER_SUITE_TKIP,
6054 WLAN_CIPHER_SUITE_CCMP,
/* TI proprietary GEM cipher */
6055 WL1271_CIPHER_SUITE_GEM,
6058 /* The tx descriptor buffer */
6059 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6061 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6062 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6065 /* FIXME: find a proper value */
6066 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6068 ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6069 ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6070 ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6071 ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6072 ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6073 ieee80211_hw_set(wl->hw, AP_LINK_PS);
6074 ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6075 ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6076 ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6077 ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6078 ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6079 ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6080 ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6082 wl->hw->wiphy->cipher_suites = cipher_suites;
6083 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6085 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6086 BIT(NL80211_IFTYPE_AP) |
6087 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6088 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6089 #ifdef CONFIG_MAC80211_MESH
6090 BIT(NL80211_IFTYPE_MESH_POINT) |
6092 BIT(NL80211_IFTYPE_P2P_GO);
6094 wl->hw->wiphy->max_scan_ssids = 1;
6095 wl->hw->wiphy->max_sched_scan_ssids = 16;
6096 wl->hw->wiphy->max_match_sets = 16;
6098 * Maximum length of elements in scanning probe request templates
6099 * should be the maximum length possible for a template, without
6100 * the IEEE80211 header of the template
6102 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6103 sizeof(struct ieee80211_header);
6105 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6106 sizeof(struct ieee80211_header);
6108 wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6110 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6111 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6112 WIPHY_FLAG_SUPPORTS_SCHED_SCAN |
6113 WIPHY_FLAG_HAS_CHANNEL_SWITCH;
6115 /* make sure all our channels fit in the scanned_ch bitmask */
6116 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6117 ARRAY_SIZE(wl1271_channels_5ghz) >
6118 WL1271_MAX_CHANNELS);
6120 * clear channel flags from the previous usage
6121 * and restore max_power & max_antenna_gain values.
6123 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6124 wl1271_band_2ghz.channels[i].flags = 0;
6125 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6126 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6129 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6130 wl1271_band_5ghz.channels[i].flags = 0;
6131 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6132 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6136 * We keep local copies of the band structs because we need to
6137 * modify them on a per-device basis.
6139 memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
6140 sizeof(wl1271_band_2ghz));
6141 memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
6142 &wl->ht_cap[NL80211_BAND_2GHZ],
6143 sizeof(*wl->ht_cap));
6144 memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
6145 sizeof(wl1271_band_5ghz));
6146 memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
6147 &wl->ht_cap[NL80211_BAND_5GHZ],
6148 sizeof(*wl->ht_cap));
6150 wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
6151 &wl->bands[NL80211_BAND_2GHZ];
6152 wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
6153 &wl->bands[NL80211_BAND_5GHZ];
6156 * allow 4 queues per mac address we support +
6157 * 1 cab queue per mac + one global offchannel Tx queue
6159 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
6161 /* the last queue is the offchannel queue */
6162 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6163 wl->hw->max_rates = 1;
6165 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6167 /* the FW answers probe-requests in AP-mode */
6168 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6169 wl->hw->wiphy->probe_resp_offload =
6170 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6171 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6172 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6174 /* allowed interface combinations */
6175 wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6176 wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6178 /* register vendor commands */
6179 wlcore_set_vendor_commands(wl->hw->wiphy);
6181 SET_IEEE80211_DEV(wl->hw, wl->dev);
6183 wl->hw->sta_data_size = sizeof(struct wl1271_station);
6184 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6186 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
/*
 * Allocate and initialize the ieee80211_hw / wl1271 instance used by
 * the chip-family drivers (wl12xx/wl18xx):
 *  - alloc hw with the wl1271_ops callback table, plus the family's
 *    private area (priv_size)
 *  - init per-link TX queues, deferred queues, work items, a freezable
 *    workqueue, locks and default state
 *  - allocate the aggregation buffer (page order of aggr_buf_size),
 *    dummy packet, one-page FW log buffer, mbox and a DMA-able 32-bit
 *    scratch buffer
 * On failure the visible allocations are unwound in reverse order and
 * ERR_PTR(ret) is returned.  NOTE(review): the error labels and some
 * intermediate lines are missing from this extraction.
 */
6191 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6194 struct ieee80211_hw *hw;
6199 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6201 wl1271_error("could not alloc ieee80211_hw");
6207 memset(wl, 0, sizeof(*wl));
6209 wl->priv = kzalloc(priv_size, GFP_KERNEL);
6211 wl1271_error("could not alloc wl priv");
6213 goto err_priv_alloc;
6216 INIT_LIST_HEAD(&wl->wlvif_list);
6221 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6222 * we don't allocate any additional resource here, so that's fine.
6224 for (i = 0; i < NUM_TX_QUEUES; i++)
6225 for (j = 0; j < WLCORE_MAX_LINKS; j++)
6226 skb_queue_head_init(&wl->links[j].tx_queue[i]);
6228 skb_queue_head_init(&wl->deferred_rx_queue);
6229 skb_queue_head_init(&wl->deferred_tx_queue);
6231 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
6232 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6233 INIT_WORK(&wl->tx_work, wl1271_tx_work);
6234 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6235 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6236 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6237 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
6239 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6240 if (!wl->freezable_wq) {
/* default runtime state before firmware boot */
6247 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6248 wl->band = NL80211_BAND_2GHZ;
6249 wl->channel_type = NL80211_CHAN_NO_HT;
6251 wl->sg_enabled = true;
6252 wl->sleep_auth = WL1271_PSM_ILLEGAL;
6253 wl->recovery_count = 0;
6256 wl->ap_fw_ps_map = 0;
6258 wl->system_hlid = WL12XX_SYSTEM_HLID;
6259 wl->active_sta_count = 0;
6260 wl->active_link_count = 0;
6263 /* The system link is always allocated */
6264 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6266 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6267 for (i = 0; i < wl->num_tx_desc; i++)
6268 wl->tx_frames[i] = NULL;
6270 spin_lock_init(&wl->wl_lock);
6272 wl->state = WLCORE_STATE_OFF;
6273 wl->fw_type = WL12XX_FW_TYPE_NONE;
6274 mutex_init(&wl->mutex);
6275 mutex_init(&wl->flush_mutex);
6276 init_completion(&wl->nvs_loading_complete);
6278 order = get_order(aggr_buf_size);
6279 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6280 if (!wl->aggr_buf) {
6284 wl->aggr_buf_size = aggr_buf_size;
6286 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6287 if (!wl->dummy_packet) {
6292 /* Allocate one page for the FW log */
6293 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6296 goto err_dummy_packet;
6299 wl->mbox_size = mbox_size;
/* GFP_DMA: the mbox is read into by the low-level bus transfer */
6300 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6306 wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6307 if (!wl->buffer_32) {
/* error unwind: free resources in reverse allocation order */
6318 free_page((unsigned long)wl->fwlog);
6321 dev_kfree_skb(wl->dummy_packet);
6324 free_pages((unsigned long)wl->aggr_buf, order);
6327 destroy_workqueue(wl->freezable_wq);
6330 wl1271_debugfs_exit(wl);
6334 ieee80211_free_hw(hw);
6338 return ERR_PTR(ret);
6340 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
/*
 * Counterpart of wlcore_alloc_hw(): release every buffer allocated
 * there plus runtime state (fw status, tx_res_if).  Setting
 * wl->fwlog_size to -1 under the mutex unblocks any readers waiting on
 * the FW log before it is freed.
 */
6342 int wlcore_free_hw(struct wl1271 *wl)
6344 /* Unblock any fwlog readers */
6345 mutex_lock(&wl->mutex);
6346 wl->fwlog_size = -1;
6347 mutex_unlock(&wl->mutex);
6349 wlcore_sysfs_free(wl);
6351 kfree(wl->buffer_32);
6353 free_page((unsigned long)wl->fwlog);
6354 dev_kfree_skb(wl->dummy_packet);
6355 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6357 wl1271_debugfs_exit(wl);
6361 wl->fw_type = WL12XX_FW_TYPE_NONE;
6365 kfree(wl->raw_fw_status);
6366 kfree(wl->fw_status);
6367 kfree(wl->tx_res_if);
6368 destroy_workqueue(wl->freezable_wq);
6371 ieee80211_free_hw(wl->hw);
6375 EXPORT_SYMBOL_GPL(wlcore_free_hw);
/*
 * Wake-on-WLAN capabilities advertised to cfg80211 when the platform
 * keeps power in suspend (see wlcore_nvs_cb): wake on any trigger,
 * with pattern-match filters bounded by the RX filter limits.
 */
6378 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6379 .flags = WIPHY_WOWLAN_ANY,
6380 .n_patterns = WL1271_MAX_RX_FILTERS,
6381 .pattern_min_len = 1,
6382 .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
/*
 * Minimal hard-IRQ handler used for edge-triggered interrupts: defer
 * all work to the threaded handler (wlcore_irq).
 */
6386 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6388 return IRQ_WAKE_THREAD;
/*
 * Completion callback for the asynchronous NVS firmware request; this
 * is where the bulk of probing actually happens.  Copies the NVS image
 * (if any), runs the chip-family setup op, maps the IRQ resource,
 * powers the chip to read HW info, installs the (threaded) interrupt
 * handler, optionally enables IRQ wake + WoWLAN, identifies the chip,
 * and finally initializes and registers the mac80211 hw plus sysfs.
 * Always releases the firmware and completes nvs_loading_complete so
 * wlcore_remove() cannot block forever.
 */
6391 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6393 struct wl1271 *wl = context;
6394 struct platform_device *pdev = wl->pdev;
6395 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6396 struct resource *res;
6399 irq_handler_t hardirq_fn = NULL;
6402 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6404 wl1271_error("Could not allocate nvs data");
6407 wl->nvs_len = fw->size;
6408 } else if (pdev_data->family->nvs_name) {
6409 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6410 pdev_data->family->nvs_name);
6418 ret = wl->ops->setup(wl);
6422 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6424 /* adjust some runtime configuration parameters */
6425 wlcore_adjust_conf(wl);
6427 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6429 wl1271_error("Could not get IRQ resource");
6433 wl->irq = res->start;
6434 wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6435 wl->if_ops = pdev_data->if_ops;
/* edge-triggered IRQs need a hardirq trampoline to wake the thread */
6437 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6438 hardirq_fn = wlcore_hardirq;
6440 wl->irq_flags |= IRQF_ONESHOT;
6442 ret = wl12xx_set_power_on(wl);
6446 ret = wl12xx_get_hw_info(wl);
6448 wl1271_error("couldn't get hw info");
6449 wl1271_power_off(wl);
6453 ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6454 wl->irq_flags, pdev->name, wl);
6456 wl1271_error("interrupt configuration failed");
6457 wl1271_power_off(wl);
6462 ret = enable_irq_wake(wl->irq);
6464 wl->irq_wake_enabled = true;
6465 device_init_wakeup(wl->dev, 1);
6466 if (pdev_data->pwr_in_suspend)
6467 wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
/* chip stays off until mac80211 calls op_start */
6470 disable_irq(wl->irq);
6471 wl1271_power_off(wl);
6473 ret = wl->ops->identify_chip(wl);
6477 ret = wl1271_init_ieee80211(wl);
6481 ret = wl1271_register_hw(wl);
6485 ret = wlcore_sysfs_init(wl);
6489 wl->initialized = true;
/* error unwind */
6493 wl1271_unregister_hw(wl);
6496 free_irq(wl->irq, wl);
6502 release_firmware(fw);
6503 complete_all(&wl->nvs_loading_complete);
/*
 * Common probe entry called by the chip-family platform drivers.
 * Validates that the family filled in ops/ptable and platform data,
 * then either requests the NVS file asynchronously (continuing in
 * wlcore_nvs_cb) or, when the family has no NVS file, calls the
 * callback directly with a NULL firmware.
 */
6506 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6508 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6509 const char *nvs_name;
6512 if (!wl->ops || !wl->ptable || !pdev_data)
6515 wl->dev = &pdev->dev;
6517 platform_set_drvdata(pdev, wl);
6519 if (pdev_data->family && pdev_data->family->nvs_name) {
6520 nvs_name = pdev_data->family->nvs_name;
6521 ret = reject_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6522 nvs_name, &pdev->dev, GFP_KERNEL,
6525 wl1271_error("request_firmware_nowait failed for %s: %d",
/* complete so wlcore_remove() does not wait forever */
6527 complete_all(&wl->nvs_loading_complete);
6530 wlcore_nvs_cb(NULL, wl);
/*
 * Platform-device removal: wait for the asynchronous NVS load (and thus
 * wlcore_nvs_cb) to finish first, bail out if probing never completed,
 * then tear down IRQ wake, mac80211 registration and the interrupt.
 */
6537 int wlcore_remove(struct platform_device *pdev)
6539 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6540 struct wl1271 *wl = platform_get_drvdata(pdev);
6542 if (pdev_data->family && pdev_data->family->nvs_name)
6543 wait_for_completion(&wl->nvs_loading_complete);
6544 if (!wl->initialized)
6547 if (wl->irq_wake_enabled) {
6548 device_init_wakeup(wl->dev, 0);
6549 disable_irq_wake(wl->irq);
6551 wl1271_unregister_hw(wl);
6552 free_irq(wl->irq, wl);
/*
 * Module parameters and metadata.  wl12xx_debug_level is exported so
 * the chip-family modules share the same debug mask.  bug_on_recovery,
 * no_recovery and fwlog_mem_blocks default to -1 ("not set", see the
 * declarations near the top of this file) so the driver can tell an
 * explicit 0 from an unset parameter.
 */
6559 u32 wl12xx_debug_level = DEBUG_NONE;
6560 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6561 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6562 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6564 module_param_named(fwlog, fwlog_param, charp, 0);
6565 MODULE_PARM_DESC(fwlog,
6566 "FW logger options: continuous, dbgpins or disable");
6568 module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
6569 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6571 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6572 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6574 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6575 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.")
6577 MODULE_LICENSE("GPL");
6578 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6579 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");