/*
 * This file is part of wlcore
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 * Copyright (C) 2011-2013 Texas Instruments Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 */
23 #include <linux/module.h>
24 #include <linux/firmware.h>
25 #include <linux/etherdevice.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/irq.h>
32 #include "wl12xx_80211.h"
39 #include "vendor_cmd.h"
44 #define WL1271_BOOT_RETRIES 3
46 static char *fwlog_param;
47 static int fwlog_mem_blocks = -1;
48 static int bug_on_recovery = -1;
49 static int no_recovery = -1;
51 static void __wl1271_op_remove_interface(struct wl1271 *wl,
52 struct ieee80211_vif *vif,
53 bool reset_tx_queues);
54 static void wlcore_op_stop_locked(struct wl1271 *wl);
55 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
/*
 * Move an associated STA peer to CONNECTED state in the firmware.
 * Only valid on STA-type interfaces; the STA_STATE_SENT flag latches so
 * the peer-state command is issued at most once per association.
 * NOTE(review): interior lines (braces, declarations, returns) appear to be
 * missing from this extract.
 */
57 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
61 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
64 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
67 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
70 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
74 wl1271_info("Association completed.");
/*
 * cfg80211 regulatory notifier: cache the request's DFS region on the
 * device and push the updated regulatory domain to the firmware.
 */
78 static void wl1271_reg_notify(struct wiphy *wiphy,
79 struct regulatory_request *request)
81 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
82 struct wl1271 *wl = hw->priv;
84 /* copy the current dfs region */
86 wl->dfs_region = request->dfs_region;
88 wlcore_regdomain_config(wl);
/*
 * Enable or disable RX streaming via the ACX command and mirror the new
 * state in the vif's RX_STREAMING_STARTED flag. Caller must hold wl->mutex.
 */
91 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
96 /* we should hold wl->mutex */
97 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
102 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
104 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
110 * this function is being called when the rx_streaming interval
111 * has been changed or rx_streaming should be disabled
113 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
116 int period = wl->conf.rx_streaming.interval;
118 /* don't reconfigure if rx_streaming is disabled */
119 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
122 /* reconfigure/disable according to new streaming_period */
124 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
125 (wl->conf.rx_streaming.always ||
126 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
127 ret = wl1271_set_rx_streaming(wl, wlvif, true);
129 ret = wl1271_set_rx_streaming(wl, wlvif, false);
130 /* don't cancel_work_sync since we might deadlock */
131 del_timer_sync(&wlvif->rx_streaming_timer)
/*
 * Deferred work: turn RX streaming on for an associated STA vif, then arm
 * rx_streaming_timer to turn it back off after the configured duration of
 * inactivity. Bails out if streaming is already on, the vif is not
 * associated, or config/Soft-Gemini conditions don't call for it.
 */
137 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
140 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
141 rx_streaming_enable_work);
142 struct wl1271 *wl = wlvif->wl;
144 mutex_lock(&wl->mutex);
146 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
147 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
148 (!wl->conf.rx_streaming.always &&
149 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
152 if (!wl->conf.rx_streaming.interval)
155 ret = wl1271_ps_elp_wakeup(wl);
159 ret = wl1271_set_rx_streaming(wl, wlvif, true);
163 /* stop it after some time of inactivity */
164 mod_timer(&wlvif->rx_streaming_timer,
165 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
168 wl1271_ps_elp_sleep(wl);
170 mutex_unlock(&wl->mutex);
/*
 * Deferred work: disable RX streaming (queued from the rx_streaming_timer
 * callback). Wakes the chip from ELP, sends the disable, sleeps it again.
 */
173 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
176 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
177 rx_streaming_disable_work);
178 struct wl1271 *wl = wlvif->wl;
180 mutex_lock(&wl->mutex);
182 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
185 ret = wl1271_ps_elp_wakeup(wl);
189 ret = wl1271_set_rx_streaming(wl, wlvif, false);
194 wl1271_ps_elp_sleep(wl);
196 mutex_unlock(&wl->mutex);
/*
 * Timer callback (runs in softirq context): cannot touch the hardware
 * here, so just queue the disable work onto the mac80211 workqueue.
 */
199 static void wl1271_rx_streaming_timer(unsigned long data)
201 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
202 struct wl1271 *wl = wlvif->wl;
203 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
206 /* wl->mutex must be taken */
/*
 * Re-arm the TX watchdog: cancel any pending instance and requeue it with
 * the configured timeout. No-op when no TX blocks are allocated (watchdog
 * not armed).
 */
207 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
209 /* if the watchdog is not armed, don't do anything */
210 if (wl->tx_allocated_blocks == 0)
213 cancel_delayed_work(&wl->tx_watchdog_work);
214 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
215 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
/*
 * Deferred rate-control update. For mesh vifs, push the cached HT
 * capabilities to the firmware; otherwise defer to the chip-specific
 * sta_rc_update hook. Skipped entirely unless the core is in STATE_ON.
 */
218 static void wlcore_rc_update_work(struct work_struct *work)
221 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
223 struct wl1271 *wl = wlvif->wl;
224 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
226 mutex_lock(&wl->mutex);
228 if (unlikely(wl->state != WLCORE_STATE_ON))
231 ret = wl1271_ps_elp_wakeup(wl);
235 if (ieee80211_vif_is_mesh(vif)) {
236 ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
237 true, wlvif->sta.hlid);
241 wlcore_hw_sta_rc_update(wl, wlvif);
245 wl1271_ps_elp_sleep(wl);
247 mutex_unlock(&wl->mutex);
/*
 * TX watchdog: fires when no TX completion has been seen for the
 * configured timeout while blocks are still allocated in the FW.
 * Benign causes (ROC in progress, active scan, AP with sleeping stations
 * possibly caching frames) just re-arm the watchdog; otherwise TX is
 * considered stuck and full FW recovery is queued.
 */
250 static void wl12xx_tx_watchdog_work(struct work_struct *work)
252 struct delayed_work *dwork;
255 dwork = to_delayed_work(work);
256 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
258 mutex_lock(&wl->mutex);
260 if (unlikely(wl->state != WLCORE_STATE_ON))
263 /* Tx went out in the meantime - everything is ok */
264 if (unlikely(wl->tx_allocated_blocks == 0))
268 * if a ROC is in progress, we might not have any Tx for a long
269 * time (e.g. pending Tx on the non-ROC channels)
271 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
272 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
273 wl->conf.tx.tx_watchdog_timeout);
274 wl12xx_rearm_tx_watchdog_locked(wl);
279 * if a scan is in progress, we might not have any Tx for a long
282 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
283 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
284 wl->conf.tx.tx_watchdog_timeout);
285 wl12xx_rearm_tx_watchdog_locked(wl);
290 * AP might cache a frame for a long time for a sleeping station,
291 * so rearm the timer if there's an AP interface with stations. If
292 * Tx is genuinely stuck we will most hopefully discover it when all
293 * stations are removed due to inactivity.
295 if (wl->active_sta_count) {
296 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
298 wl->conf.tx.tx_watchdog_timeout,
299 wl->active_sta_count);
300 wl12xx_rearm_tx_watchdog_locked(wl);
304 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
305 wl->conf.tx.tx_watchdog_timeout);
306 wl12xx_queue_recovery_work(wl);
309 mutex_unlock(&wl->mutex);
/*
 * Apply module-parameter overrides (fwlog_param, bug_on_recovery,
 * no_recovery) on top of the default wl->conf. Unknown fwlog_param
 * values are reported but otherwise ignored; the -1 sentinel means
 * "parameter not set, keep the config default".
 */
312 static void wlcore_adjust_conf(struct wl1271 *wl)
316 if (!strcmp(fwlog_param, "continuous")) {
317 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
318 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
319 } else if (!strcmp(fwlog_param, "dbgpins")) {
320 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
321 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
322 } else if (!strcmp(fwlog_param, "disable")) {
323 wl->conf.fwlog.mem_blocks = 0;
324 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
326 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
330 if (bug_on_recovery != -1)
331 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
333 if (no_recovery != -1)
334 wl->conf.recovery.no_recovery = (u8) no_recovery;
/*
 * Per-link power-save regulation for AP mode: end host-side PS when the
 * station is awake or has few pending packets in FW; start it when the
 * station sleeps with enough queued packets AND other links are active
 * (FW memory congestion matters only then).
 */
337 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
338 struct wl12xx_vif *wlvif,
343 fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
346 * Wake up from high level PS if the STA is asleep with too little
347 * packets in FW or if the STA is awake.
349 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
350 wl12xx_ps_link_end(wl, wlvif, hlid);
353 * Start high-level PS if the STA is asleep with enough blocks in FW.
354 * Make an exception if this is the only connected link. In this
355 * case FW-memory congestion is less of a problem.
356 * Note that a single connected STA means 2*ap_count + 1 active links,
357 * since we must account for the global and broadcast AP links
358 * for each AP. The "fw_ps" check assures us the other link is a STA
359 * connected to the AP. Otherwise the FW would not set the PSM bit.
361 else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
362 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
363 wl12xx_ps_link_start(wl, wlvif, hlid, true);
/*
 * Refresh the cached FW link power-save bitmap from the status block and
 * run PS regulation for every station link on this AP vif.
 */
366 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
367 struct wl12xx_vif *wlvif,
368 struct wl_fw_status *status)
370 unsigned long cur_fw_ps_map;
373 cur_fw_ps_map = status->link_ps_bitmap;
374 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
375 wl1271_debug(DEBUG_PSM,
376 "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
377 wl->ap_fw_ps_map, cur_fw_ps_map,
378 wl->ap_fw_ps_map ^ cur_fw_ps_map);
380 wl->ap_fw_ps_map = cur_fw_ps_map;
383 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
384 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
385 wl->links[hlid].allocated_pkts);
/*
 * Read and decode the firmware status block. Updates per-queue and
 * per-link freed-packet accounting (8-bit counters, hence the & 0xff
 * wrap-around handling), total freed/available TX block counts, the TX
 * watchdog, the FW_TX_BUSY flag, AP link PS state, the host/chipset time
 * offset and the fast-link bitmap.
 */
388 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
390 struct wl12xx_vif *wlvif;
392 u32 old_tx_blk_count = wl->tx_blocks_available;
393 int avail, freed_blocks;
396 struct wl1271_link *lnk;
398 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
400 wl->fw_status_len, false);
404 wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
406 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
407 "drv_rx_counter = %d, tx_results_counter = %d)",
409 status->fw_rx_counter,
410 status->drv_rx_counter,
411 status->tx_results_counter);
413 for (i = 0; i < NUM_TX_QUEUES; i++) {
414 /* prevent wrap-around in freed-packets counter */
415 wl->tx_allocated_pkts[i] -=
416 (status->counters.tx_released_pkts[i] -
417 wl->tx_pkts_freed[i]) & 0xff;
419 wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
423 for_each_set_bit(i, wl->links_map, wl->num_links) {
427 /* prevent wrap-around in freed-packets counter */
428 diff = (status->counters.tx_lnk_free_pkts[i] -
429 lnk->prev_freed_pkts) & 0xff;
434 lnk->allocated_pkts -= diff;
435 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
437 /* accumulate the prev_freed_pkts counter */
438 lnk->total_freed_pkts += diff;
441 /* prevent wrap-around in total blocks counter */
442 if (likely(wl->tx_blocks_freed <= status->total_released_blks))
443 freed_blocks = status->total_released_blks -
446 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
447 status->total_released_blks;
449 wl->tx_blocks_freed = status->total_released_blks;
451 wl->tx_allocated_blocks -= freed_blocks;
454 * If the FW freed some blocks:
455 * If we still have allocated blocks - re-arm the timer, Tx is
456 * not stuck. Otherwise, cancel the timer (no Tx currently).
459 if (wl->tx_allocated_blocks)
460 wl12xx_rearm_tx_watchdog_locked(wl);
462 cancel_delayed_work(&wl->tx_watchdog_work);
465 avail = status->tx_total - wl->tx_allocated_blocks;
468 * The FW might change the total number of TX memblocks before
469 * we get a notification about blocks being released. Thus, the
470 * available blocks calculation might yield a temporary result
471 * which is lower than the actual available blocks. Keeping in
472 * mind that only blocks that were allocated can be moved from
473 * TX to RX, tx_blocks_available should never decrease here.
475 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
478 /* if more blocks are available now, tx work can be scheduled */
479 if (wl->tx_blocks_available > old_tx_blk_count)
480 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
482 /* for AP update num of allocated TX blocks per link and ps status */
483 wl12xx_for_each_wlvif_ap(wl, wlvif) {
484 wl12xx_irq_update_links_status(wl, wlvif, status);
487 /* update the host-chipset time offset */
489 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
490 (s64)(status->fw_localtime);
492 wl->fw_fast_lnk_map = status->link_fast_bitmap;
/*
 * Drain the deferred skb queues: hand received frames and completed TX
 * statuses to mac80211 (the *_ni variants are for non-IRQ context).
 */
497 static void wl1271_flush_deferred_work(struct wl1271 *wl)
501 /* Pass all received frames to the network stack */
502 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
503 ieee80211_rx_ni(wl->hw, skb);
505 /* Return sent skbs to the network stack */
506 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
507 ieee80211_tx_status_ni(wl->hw, skb);
/*
 * Work item that repeatedly flushes the deferred queues until the RX
 * queue stays empty (new frames may arrive while flushing).
 */
510 static void wl1271_netstack_work(struct work_struct *work)
513 container_of(work, struct wl1271, netstack_work);
516 wl1271_flush_deferred_work(wl);
517 } while (skb_queue_len(&wl->deferred_rx_queue));
520 #define WL1271_IRQ_MAX_LOOPS 256
/*
 * Main interrupt service loop, called with wl->mutex held. Iterates up to
 * WL1271_IRQ_MAX_LOOPS (only once for edge-triggered IRQs, to avoid racing
 * the hardirq), reading the FW status and dispatching on the interrupt
 * bits: watchdogs trigger recovery, DATA drives RX/TX processing and the
 * deferred queues, EVENT_A/B go to the event handler, the remaining bits
 * are debug-logged only. Returns 0 or a negative error that the caller
 * turns into a recovery.
 */
522 static int wlcore_irq_locked(struct wl1271 *wl)
526 int loopcount = WL1271_IRQ_MAX_LOOPS;
528 unsigned int defer_count;
532 * In case edge triggered interrupt must be used, we cannot iterate
533 * more than once without introducing race conditions with the hardirq.
535 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
538 wl1271_debug(DEBUG_IRQ, "IRQ work");
540 if (unlikely(wl->state != WLCORE_STATE_ON))
543 ret = wl1271_ps_elp_wakeup(wl);
547 while (!done && loopcount--) {
549 * In order to avoid a race with the hardirq, clear the flag
550 * before acknowledging the chip. Since the mutex is held,
551 * wl1271_ps_elp_wakeup cannot be called concurrently.
553 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
554 smp_mb__after_atomic();
556 ret = wlcore_fw_status(wl, wl->fw_status);
560 wlcore_hw_tx_immediate_compl(wl);
562 intr = wl->fw_status->intr;
563 intr &= WLCORE_ALL_INTR_MASK;
569 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
570 wl1271_error("HW watchdog interrupt received! starting recovery.");
571 wl->watchdog_recovery = true;
574 /* restarting the chip. ignore any other interrupt. */
578 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
579 wl1271_error("SW watchdog interrupt received! "
580 "starting recovery.");
581 wl->watchdog_recovery = true;
584 /* restarting the chip. ignore any other interrupt. */
588 if (likely(intr & WL1271_ACX_INTR_DATA)) {
589 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
591 ret = wlcore_rx(wl, wl->fw_status);
595 /* Check if any tx blocks were freed */
596 spin_lock_irqsave(&wl->wl_lock, flags);
597 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
598 wl1271_tx_total_queue_count(wl) > 0) {
599 spin_unlock_irqrestore(&wl->wl_lock, flags);
601 * In order to avoid starvation of the TX path,
602 * call the work function directly.
604 ret = wlcore_tx_work_locked(wl);
608 spin_unlock_irqrestore(&wl->wl_lock, flags);
611 /* check for tx results */
612 ret = wlcore_hw_tx_delayed_compl(wl);
616 /* Make sure the deferred queues don't get too long */
617 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
618 skb_queue_len(&wl->deferred_rx_queue);
619 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
620 wl1271_flush_deferred_work(wl);
623 if (intr & WL1271_ACX_INTR_EVENT_A) {
624 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
625 ret = wl1271_event_handle(wl, 0);
630 if (intr & WL1271_ACX_INTR_EVENT_B) {
631 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
632 ret = wl1271_event_handle(wl, 1);
637 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
638 wl1271_debug(DEBUG_IRQ,
639 "WL1271_ACX_INTR_INIT_COMPLETE");
641 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
642 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
645 wl1271_ps_elp_sleep(wl);
/*
 * Threaded IRQ handler. Completes any pending ELP wakeup, defers the work
 * while suspended (marks it pending, disables the IRQ and raises a PM
 * wakeup event), otherwise takes wl->mutex and runs wlcore_irq_locked(),
 * queueing recovery on failure. TX_PENDING suppresses a redundant tx_work
 * run while the IRQ path may handle TX itself; any remaining queued TX is
 * re-queued to tx_work at the end.
 */
651 static irqreturn_t wlcore_irq(int irq, void *cookie)
655 struct wl1271 *wl = cookie;
657 /* complete the ELP completion */
658 spin_lock_irqsave(&wl->wl_lock, flags);
659 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
661 complete(wl->elp_compl);
662 wl->elp_compl = NULL;
665 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
666 /* don't enqueue a work right now. mark it as pending */
667 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
668 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
669 disable_irq_nosync(wl->irq);
670 pm_wakeup_event(wl->dev, 0);
671 spin_unlock_irqrestore(&wl->wl_lock, flags);
674 spin_unlock_irqrestore(&wl->wl_lock, flags);
676 /* TX might be handled here, avoid redundant work */
677 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
678 cancel_work_sync(&wl->tx_work);
680 mutex_lock(&wl->mutex);
682 ret = wlcore_irq_locked(wl);
684 wl12xx_queue_recovery_work(wl);
686 spin_lock_irqsave(&wl->wl_lock, flags);
687 /* In case TX was not handled here, queue TX work */
688 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
689 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
690 wl1271_tx_total_queue_count(wl) > 0)
691 ieee80211_queue_work(wl->hw, &wl->tx_work);
692 spin_unlock_irqrestore(&wl->wl_lock, flags);
694 mutex_unlock(&wl->mutex);
/*
 * Accumulator for wl12xx_vif_count_iter: counts active interfaces and
 * records whether cur_vif is among the running ones.
 */
699 struct vif_counter_data {
702 struct ieee80211_vif *cur_vif;
703 bool cur_vif_running;
/*
 * ieee80211_iterate_active_interfaces() callback: tallies each active vif
 * and flags when the vif of interest is found running.
 */
706 static void wl12xx_vif_count_iter(void *data, u8 *mac,
707 struct ieee80211_vif *vif)
709 struct vif_counter_data *counter = data;
712 if (counter->cur_vif == vif)
713 counter->cur_vif_running = true;
716 /* caller must not hold wl->mutex, as it might deadlock */
/*
 * Count the active mac80211 interfaces into *data, noting whether cur_vif
 * is one of them.
 */
717 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
718 struct ieee80211_vif *cur_vif,
719 struct vif_counter_data *data)
721 memset(data, 0, sizeof(*data));
722 data->cur_vif = cur_vif;
724 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
725 wl12xx_vif_count_iter, data);
/*
 * Select and load the firmware image: PLT image in PLT mode, otherwise
 * multi-role vs single-role based on the cached last_vif_count (mutex is
 * held, so wl12xx_get_vif_count() cannot be called here). Skips loading
 * when the right type is already resident, copies the image into a
 * vmalloc'ed buffer and records its type/length.
 * NOTE(review): reject_firmware() in place of request_firmware() suggests
 * this is a deblobbed (linux-libre style) tree — confirm against upstream.
 */
728 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
730 const struct firmware *fw;
732 enum wl12xx_fw_type fw_type;
736 fw_type = WL12XX_FW_TYPE_PLT;
737 fw_name = wl->plt_fw_name;
740 * we can't call wl12xx_get_vif_count() here because
741 * wl->mutex is taken, so use the cached last_vif_count value
743 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
744 fw_type = WL12XX_FW_TYPE_MULTI;
745 fw_name = wl->mr_fw_name;
747 fw_type = WL12XX_FW_TYPE_NORMAL;
748 fw_name = wl->sr_fw_name;
752 if (wl->fw_type == fw_type)
755 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
757 ret = reject_firmware(&fw, fw_name, wl->dev);
760 wl1271_error("could not get firmware %s: %d", fw_name, ret);
765 wl1271_error("firmware size is not multiple of 32 bits: %zu",
772 wl->fw_type = WL12XX_FW_TYPE_NONE;
773 wl->fw_len = fw->size;
774 wl->fw = vmalloc(wl->fw_len);
777 wl1271_error("could not allocate memory for the firmware");
782 memcpy(wl->fw, fw->data, wl->fw_len);
784 wl->fw_type = fw_type;
786 release_firmware(fw);
/*
 * Kick off firmware recovery: only from STATE_ON (so a recovery cannot
 * recursively trigger another), transitions to RESTARTING, wakes the chip,
 * masks further interrupts and queues recovery_work.
 */
791 void wl12xx_queue_recovery_work(struct wl1271 *wl)
793 /* Avoid a recursive recovery */
794 if (wl->state == WLCORE_STATE_ON) {
795 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
798 wl->state = WLCORE_STATE_RESTARTING;
799 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
800 wl1271_ps_elp_wakeup(wl);
801 wlcore_disable_interrupts_nosync(wl);
802 ieee80211_queue_work(wl->hw, &wl->recovery_work);
/*
 * Append up to maxlen bytes of FW log data to the PAGE_SIZE-bounded
 * wl->fwlog buffer (exposed via sysfs). Returns the number of bytes
 * actually copied (presumably — return line not visible in this extract).
 */
806 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
810 /* Make sure we have enough room */
811 len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
813 /* Fill the FW log file, consumed by the sysfs fwlog entry */
814 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
815 wl->fwlog_size += len;
/*
 * On FW panic, pull the remaining firmware log out of the chip. Stops the
 * logger first (unless the FW is hung from a watchdog or dbg-pins output
 * is in use), then drains log events until wlcore_event_fw_logger()
 * reports the end of the log.
 */
820 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
824 if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
827 wl1271_info("Reading FW panic log");
830 * Make sure the chip is awake and the logger isn't active.
831 * Do not send a stop fwlog command if the fw is hanged or if
832 * dbgpins are used (due to some fw bug).
834 if (wl1271_ps_elp_wakeup(wl))
836 if (!wl->watchdog_recovery &&
837 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
838 wl12xx_cmd_stop_fwlog(wl);
840 /* Traverse the memory blocks linked list */
842 end_of_log = wlcore_event_fw_logger(wl);
843 if (end_of_log == 0) {
845 end_of_log = wlcore_event_fw_logger(wl);
847 } while (end_of_log != 0);
/*
 * Snapshot the link's total_freed_pkts into the station's private data so
 * the TX sequence number survives interface removal/recovery. During
 * recovery, pad the count (more for GEM ciphers) to cover packets sent
 * but not yet reflected in the FW status.
 */
850 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
851 u8 hlid, struct ieee80211_sta *sta)
853 struct wl1271_station *wl_sta;
854 u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
856 wl_sta = (void *)sta->drv_priv;
857 wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
860 * increment the initial seq number on recovery to account for
861 * transmitted packets that we haven't yet got in the FW status
863 if (wlvif->encryption_type == KEY_GEM)
864 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
866 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
867 wl_sta->total_freed_pkts += sqn_recovery_padding;
/*
 * Address-based wrapper around wlcore_save_freed_pkts(): look up the
 * ieee80211_sta by MAC on this vif (must be a valid link id and a
 * non-zero address) and save its freed-packet count.
 */
870 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
871 struct wl12xx_vif *wlvif,
872 u8 hlid, const u8 *addr)
874 struct ieee80211_sta *sta;
875 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
877 if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
878 is_zero_ether_addr(addr)))
882 sta = ieee80211_find_sta(vif, addr);
884 wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
/*
 * Log diagnostic state for a recovery: FW version, the FW program counter
 * and the raw interrupt status, plus a running recovery count. Switches to
 * the BOOT partition to read the registers and restores WORK afterwards.
 */
888 static void wlcore_print_recovery(struct wl1271 *wl)
894 wl1271_info("Hardware recovery in progress. FW ver: %s",
895 wl->chip.fw_ver_str);
897 /* change partitions momentarily so we can read the FW pc */
898 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
902 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
906 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
910 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
911 pc, hint_sts, ++wl->recovery_count);
913 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
/*
 * Full firmware recovery. For unintended recoveries, dump the FW panic
 * log and diagnostics first; honor the bug_on_recovery/no_recovery config
 * knobs. Then stop the TX queues, save per-STA sequence counters, tear
 * down every vif, stop the core and ask mac80211 to restart the hardware.
 */
917 static void wl1271_recovery_work(struct work_struct *work)
920 container_of(work, struct wl1271, recovery_work);
921 struct wl12xx_vif *wlvif;
922 struct ieee80211_vif *vif;
924 mutex_lock(&wl->mutex);
926 if (wl->state == WLCORE_STATE_OFF || wl->plt)
929 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
930 if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
931 wl12xx_read_fwlog_panic(wl);
932 wlcore_print_recovery(wl);
935 BUG_ON(wl->conf.recovery.bug_on_recovery &&
936 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
938 if (wl->conf.recovery.no_recovery) {
939 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
943 /* Prevent spurious TX during FW restart */
944 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
946 /* reboot the chipset */
947 while (!list_empty(&wl->wlvif_list)) {
948 wlvif = list_first_entry(&wl->wlvif_list,
949 struct wl12xx_vif, list);
950 vif = wl12xx_wlvif_to_vif(wlvif);
952 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
953 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
954 wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
955 vif->bss_conf.bssid);
958 __wl1271_op_remove_interface(wl, vif, false);
961 wlcore_op_stop_locked(wl);
963 ieee80211_restart_hw(wl->hw);
966 * Its safe to enable TX now - the queues are stopped after a request
969 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
972 wl->watchdog_recovery = false;
973 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
974 mutex_unlock(&wl->mutex);
/* Wake the firmware from ELP by writing WAKE_UP to the ELP control reg. */
977 static int wlcore_fw_wakeup(struct wl1271 *wl)
979 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
/*
 * Allocate the per-device status and TX-result buffers (raw_fw_status,
 * fw_status, tx_res_if); on failure, free whatever was allocated.
 */
982 static int wl1271_setup(struct wl1271 *wl)
984 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
985 if (!wl->raw_fw_status)
988 wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
992 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
998 kfree(wl->fw_status);
999 kfree(wl->raw_fw_status);
/*
 * Power the chip on: settle delay, power on, post-power-on delay, I/O
 * reset, switch to the BOOT partition and wake the ELP module. Powers
 * back off on the error path.
 */
1003 static int wl12xx_set_power_on(struct wl1271 *wl)
1007 msleep(WL1271_PRE_POWER_ON_SLEEP);
1008 ret = wl1271_power_on(wl);
1011 msleep(WL1271_POWER_ON_SLEEP);
1012 wl1271_io_reset(wl);
1015 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1019 /* ELP module wake up */
1020 ret = wlcore_fw_wakeup(wl);
1028 wl1271_power_off(wl);
/*
 * Power on and prepare the chip for booting: set the SDIO block size
 * (clearing the blocksize-align quirk when unsupported), allocate status
 * buffers via wl1271_setup() and fetch the appropriate firmware image.
 * Frees the setup buffers if the firmware fetch fails.
 */
1032 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1036 ret = wl12xx_set_power_on(wl);
1041 * For wl127x based devices we could use the default block
1042 * size (512 bytes), but due to a bug in the sdio driver, we
1043 * need to set it explicitly after the chip is powered on. To
1044 * simplify the code and since the performance impact is
1045 * negligible, we use the same block size for all different
1048 * Check if the bus supports blocksize alignment and, if it
1049 * doesn't, make sure we don't have the quirk.
1051 if (!wl1271_set_block_size(wl))
1052 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1054 /* TODO: make sure the lower driver has set things up correctly */
1056 ret = wl1271_setup(wl);
1060 ret = wl12xx_fetch_firmware(wl, plt);
1062 kfree(wl->fw_status);
1063 kfree(wl->raw_fw_status);
1064 kfree(wl->tx_res_if);
/*
 * Enter PLT (production line test) mode. Requires STATE_OFF; retries the
 * boot up to WL1271_BOOT_RETRIES times, runs the chip-specific plt_init
 * unless only PLT_CHIP_AWAKE was requested, and publishes hw/fw version
 * info to the wiphy. On total failure, powers off and resets plt_mode.
 */
1071 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1073 int retries = WL1271_BOOT_RETRIES;
1074 struct wiphy *wiphy = wl->hw->wiphy;
1076 static const char* const PLT_MODE[] = {
1085 mutex_lock(&wl->mutex);
1087 wl1271_notice("power up");
1089 if (wl->state != WLCORE_STATE_OFF) {
1090 wl1271_error("cannot go into PLT state because not "
1091 "in off state: %d", wl->state);
1096 /* Indicate to lower levels that we are now in PLT mode */
1098 wl->plt_mode = plt_mode;
1102 ret = wl12xx_chip_wakeup(wl, true);
1106 if (plt_mode != PLT_CHIP_AWAKE) {
1107 ret = wl->ops->plt_init(wl);
1112 wl->state = WLCORE_STATE_ON;
1113 wl1271_notice("firmware booted in PLT mode %s (%s)",
1115 wl->chip.fw_ver_str);
1117 /* update hw/fw version info in wiphy struct */
1118 wiphy->hw_version = wl->chip.id;
1119 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1120 sizeof(wiphy->fw_version));
1125 wl1271_power_off(wl);
1129 wl->plt_mode = PLT_OFF;
1131 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1132 WL1271_BOOT_RETRIES);
1134 mutex_unlock(&wl->mutex);
/*
 * Leave PLT mode and power the chip down. Interrupts are disabled before
 * the state change so the handler cannot run with stale state; the later
 * enable only balances that disable. All deferred/async work is flushed
 * before the final power-off, then state returns to OFF / PLT_OFF.
 */
1139 int wl1271_plt_stop(struct wl1271 *wl)
1143 wl1271_notice("power down");
1146 * Interrupts must be disabled before setting the state to OFF.
1147 * Otherwise, the interrupt handler might be called and exit without
1148 * reading the interrupt status.
1150 wlcore_disable_interrupts(wl);
1151 mutex_lock(&wl->mutex);
1153 mutex_unlock(&wl->mutex);
1156 * This will not necessarily enable interrupts as interrupts
1157 * may have been disabled when op_stop was called. It will,
1158 * however, balance the above call to disable_interrupts().
1160 wlcore_enable_interrupts(wl);
1162 wl1271_error("cannot power down because not in PLT "
1163 "state: %d", wl->state);
1168 mutex_unlock(&wl->mutex);
1170 wl1271_flush_deferred_work(wl);
1171 cancel_work_sync(&wl->netstack_work);
1172 cancel_work_sync(&wl->recovery_work);
1173 cancel_delayed_work_sync(&wl->elp_work);
1174 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1176 mutex_lock(&wl->mutex);
1177 wl1271_power_off(wl);
1179 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1180 wl->state = WLCORE_STATE_OFF;
1182 wl->plt_mode = PLT_OFF;
1184 mutex_unlock(&wl->mutex);
/*
 * mac80211 .tx callback. Maps the skb to a queue and a host link id,
 * drops frames with no vif, an invalid link or a hard-stopped queue
 * (watermark stops are "soft" and still accepted), enqueues to the
 * per-link queue, applies the high-watermark flow control, and schedules
 * tx_work unless the IRQ path (TX_PENDING) or FW_TX_BUSY makes that
 * redundant.
 */
1190 static void wl1271_op_tx(struct ieee80211_hw *hw,
1191 struct ieee80211_tx_control *control,
1192 struct sk_buff *skb)
1194 struct wl1271 *wl = hw->priv;
1195 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1196 struct ieee80211_vif *vif = info->control.vif;
1197 struct wl12xx_vif *wlvif = NULL;
1198 unsigned long flags;
1203 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1204 ieee80211_free_txskb(hw, skb);
1208 wlvif = wl12xx_vif_to_data(vif);
1209 mapping = skb_get_queue_mapping(skb);
1210 q = wl1271_tx_get_queue(mapping);
1212 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1214 spin_lock_irqsave(&wl->wl_lock, flags);
1217 * drop the packet if the link is invalid or the queue is stopped
1218 * for any reason but watermark. Watermark is a "soft"-stop so we
1219 * allow these packets through.
1221 if (hlid == WL12XX_INVALID_LINK_ID ||
1222 (!test_bit(hlid, wlvif->links_map)) ||
1223 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1224 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1225 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1226 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1227 ieee80211_free_txskb(hw, skb);
1231 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1233 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1235 wl->tx_queue_count[q]++;
1236 wlvif->tx_queue_count[q]++;
1239 * The workqueue is slow to process the tx_queue and we need stop
1240 * the queue here, otherwise the queue will get too long.
1242 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1243 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1244 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1245 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1246 wlcore_stop_queue_locked(wl, wlvif, q,
1247 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1251 * The chip specific setup must run before the first TX packet -
1252 * before that, the tx_work will not be initialized!
1255 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1256 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1257 ieee80211_queue_work(wl->hw, &wl->tx_work);
1260 spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
 * Queue the pre-allocated dummy packet the FW requests when it is low on
 * RX memory blocks. De-duplicated via DUMMY_PACKET_PENDING; sent
 * immediately through the locked TX path when the FW is not busy,
 * otherwise left for the threaded IRQ handler to pick up.
 */
1263 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1265 unsigned long flags;
1268 /* no need to queue a new dummy packet if one is already pending */
1269 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1272 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1274 spin_lock_irqsave(&wl->wl_lock, flags);
1275 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1276 wl->tx_queue_count[q]++;
1277 spin_unlock_irqrestore(&wl->wl_lock, flags);
1279 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1280 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1281 return wlcore_tx_work_locked(wl);
1284 * If the FW TX is busy, TX work will be scheduled by the threaded
1285 * interrupt handler function
1291 * The size of the dummy packet should be at least 1400 bytes. However, in
1292 * order to minimize the number of bus transactions, aligning it to 512 bytes
1293 * boundaries could be beneficial, performance wise
1295 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
/*
 * Build the zero-filled NULLFUNC dummy packet (TOTAL_TX_DUMMY_PACKET_SIZE
 * including the TX HW descriptor headroom) used to return RX memory
 * blocks to the FW. TID is forced to management and the CB is cleared so
 * no stale mac80211 state is interpreted.
 */
1297 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1299 struct sk_buff *skb;
1300 struct ieee80211_hdr_3addr *hdr;
1301 unsigned int dummy_packet_size;
1303 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1304 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1306 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1308 wl1271_warning("Failed to allocate a dummy packet skb");
1312 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1314 hdr = skb_put_zero(skb, sizeof(*hdr));
1315 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1316 IEEE80211_STYPE_NULLFUNC |
1317 IEEE80211_FCTL_TODS);
1319 skb_put_zero(skb, dummy_packet_size);
1321 /* Dummy packets require the TID to be management */
1322 skb->priority = WL1271_TID_MGMT;
1324 /* Initialize all fields that might be used */
1325 skb_set_queue_mapping(skb, 0);
1326 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
/*
 * Validate a WoWLAN packet pattern against FW RX-filter limits: walk the
 * mask bit-by-bit to count contiguous segments ("fields"), splitting any
 * segment that crosses the Ethernet/IP header boundary, then reject
 * patterns exceeding the FW's max field count or total fields-buffer size.
 */
1334 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1336 int num_fields = 0, in_field = 0, fields_size = 0;
1337 int i, pattern_len = 0;
1340 wl1271_warning("No mask in WoWLAN pattern");
1345 * The pattern is broken up into segments of bytes at different offsets
1346 * that need to be checked by the FW filter. Each segment is called
1347 * a field in the FW API. We verify that the total number of fields
1348 * required for this pattern won't exceed FW limits (8)
1349 * as well as the total fields buffer won't exceed the FW limit.
1350 * Note that if there's a pattern which crosses Ethernet/IP header
1351 * boundary a new field is required.
1353 for (i = 0; i < p->pattern_len; i++) {
1354 if (test_bit(i, (unsigned long *)p->mask)) {
1359 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1361 fields_size += pattern_len +
1362 RX_FILTER_FIELD_OVERHEAD;
1370 fields_size += pattern_len +
1371 RX_FILTER_FIELD_OVERHEAD;
1378 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1382 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1383 wl1271_warning("RX Filter too complex. Too many segments");
1387 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1388 wl1271_warning("RX filter pattern is too big");
/* Allocate a zeroed RX filter; caller frees with wl1271_rx_filter_free(). */
1395 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1397 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
/*
 * Free an RX filter: release each field's pattern buffer, then
 * (in elided lines) the filter itself. NULL handling not visible here.
 */
1400 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1407 for (i = 0; i < filter->num_fields; i++)
1408 kfree(filter->fields[i].pattern);
/*
 * Append one field (offset/flags/pattern run) to an RX filter.
 * Fails when the filter already holds WL1271_RX_FILTER_MAX_FIELDS
 * fields or the pattern copy cannot be allocated; on success the
 * filter owns the copied pattern buffer.
 * NOTE(review): elided — field->len assignment and returns are missing.
 */
1413 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1414 u16 offset, u8 flags,
1415 const u8 *pattern, u8 len)
1417 struct wl12xx_rx_filter_field *field;
1419 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1420 wl1271_warning("Max fields per RX filter. can't alloc another");
1424 field = &filter->fields[filter->num_fields];
1426 field->pattern = kzalloc(len, GFP_KERNEL);
1427 if (!field->pattern) {
1428 wl1271_warning("Failed to allocate RX filter pattern");
/* commit the slot only after the pattern buffer exists */
1432 filter->num_fields++;
/* offset goes to the FW in little-endian */
1434 field->offset = cpu_to_le16(offset);
1435 field->flags = flags;
1437 memcpy(field->pattern, pattern, len);
/*
 * Size of the flattened (wire-format) fields buffer: per field, the
 * struct minus the pattern pointer plus the actual pattern length —
 * must mirror the layout used by wl1271_rx_filter_flatten_fields().
 * NOTE(review): elided — the subtracted term and return are missing.
 */
1442 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1444 int i, fields_size = 0;
1446 for (i = 0; i < filter->num_fields; i++)
1447 fields_size += filter->fields[i].len +
1448 sizeof(struct wl12xx_rx_filter_field) -
/*
 * Serialize the filter's fields into a contiguous buffer in the FW's
 * wire format: fixed header (offset/flags/len) followed inline by the
 * pattern bytes, replacing the in-memory pattern pointer.
 */
1454 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1458 struct wl12xx_rx_filter_field *field;
1460 for (i = 0; i < filter->num_fields; i++) {
1461 field = (struct wl12xx_rx_filter_field *)buf;
1463 field->offset = filter->fields[i].offset;
1464 field->flags = filter->fields[i].flags;
1465 field->len = filter->fields[i].len;
/* pattern bytes are written where the pointer member sits */
1467 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1468 buf += sizeof(struct wl12xx_rx_filter_field) -
1469 sizeof(u8 *) + field->len;
/*
 * Translate a (pre-validated) WoWLAN pattern into a FW RX filter:
 * scan the mask for contiguous runs, split runs at the Ethernet/IP
 * header boundary, and emit one filter field per run with the flag
 * (ETHERNET_HEADER vs IP_HEADER) and offset relative to that header.
 * On success *f owns the filter (action FILTER_SIGNAL); on field
 * allocation failure the filter is freed.
 */
1474 * Allocates an RX filter returned through f
1475 * which needs to be freed using rx_filter_free()
1478 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1479 struct wl12xx_rx_filter **f)
1482 struct wl12xx_rx_filter *filter;
1486 filter = wl1271_rx_filter_alloc();
1488 wl1271_warning("Failed to alloc rx filter");
1494 while (i < p->pattern_len) {
/* skip unmasked bytes — they are not part of any field */
1495 if (!test_bit(i, (unsigned long *)p->mask)) {
/* find the end of the current masked run */
1500 for (j = i; j < p->pattern_len; j++) {
1501 if (!test_bit(j, (unsigned long *)p->mask))
/* a run straddling the Ethernet header must become two fields */
1504 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1505 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1509 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1511 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1513 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1514 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1519 ret = wl1271_rx_filter_alloc_field(filter,
1522 &p->pattern[i], len);
1529 filter->action = FILTER_SIGNAL;
/* error path: release the partially-built filter */
1535 wl1271_rx_filter_free(filter);
/*
 * Program the FW with WoWLAN wake-up filters. With no patterns (or
 * wow->any), filtering is disabled and all FW filters cleared.
 * Otherwise: validate every pattern first (so FW state is only touched
 * on fully-valid input), clear existing filters, install one RX filter
 * per pattern, then set the default action to FILTER_DROP so only
 * matching frames wake the host.
 */
1541 static int wl1271_configure_wowlan(struct wl1271 *wl,
1542 struct cfg80211_wowlan *wow)
1546 if (!wow || wow->any || !wow->n_patterns) {
1547 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1552 ret = wl1271_rx_filter_clear_all(wl);
1559 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1562 /* Validate all incoming patterns before clearing current FW state */
1563 for (i = 0; i < wow->n_patterns; i++) {
1564 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1566 wl1271_warning("Bad wowlan pattern %d", i);
1571 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1575 ret = wl1271_rx_filter_clear_all(wl);
1579 /* Translate WoWLAN patterns into filters */
1580 for (i = 0; i < wow->n_patterns; i++) {
1581 struct cfg80211_pkt_pattern *p;
1582 struct wl12xx_rx_filter *filter = NULL;
1584 p = &wow->patterns[i];
1586 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1588 wl1271_warning("Failed to create an RX filter from "
1589 "wowlan pattern %d", i);
1593 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
/* the FW keeps its own copy, so the local filter is always freed */
1595 wl1271_rx_filter_free(filter);
/* default action: drop everything that no filter matched */
1600 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
/*
 * Suspend-time configuration for a STA vif: program WoWLAN filters,
 * then switch to the suspend wake-up conditions / listen interval —
 * skipped (elided goto, presumably) when they equal the normal ones.
 * No-op for an unassociated STA.
 */
1606 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1607 struct wl12xx_vif *wlvif,
1608 struct cfg80211_wowlan *wow)
1612 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1615 ret = wl1271_configure_wowlan(wl, wow);
1619 if ((wl->conf.conn.suspend_wake_up_event ==
1620 wl->conf.conn.wake_up_event) &&
1621 (wl->conf.conn.suspend_listen_interval ==
1622 wl->conf.conn.listen_interval))
1625 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1626 wl->conf.conn.suspend_wake_up_event,
1627 wl->conf.conn.suspend_listen_interval);
1630 wl1271_error("suspend: set wake up conditions failed: %d", ret);
/*
 * Suspend-time configuration for an AP vif: enable beacon filtering
 * and program WoWLAN filters. No-op when the AP is not started.
 */
1636 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1637 struct wl12xx_vif *wlvif,
1638 struct cfg80211_wowlan *wow)
1642 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1645 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1649 ret = wl1271_configure_wowlan(wl, wow);
/*
 * Dispatch suspend configuration by bss_type (STA vs AP); other vif
 * types fall through (elided tail, presumably returning 0).
 */
1658 static int wl1271_configure_suspend(struct wl1271 *wl,
1659 struct wl12xx_vif *wlvif,
1660 struct cfg80211_wowlan *wow)
1662 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1663 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1664 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1665 return wl1271_configure_suspend_ap(wl, wlvif, wow);
/*
 * Undo suspend-time configuration on resume: tear down WoWLAN filters,
 * for STA restore the normal wake-up conditions (unless they matched
 * the suspend ones), and for AP disable beacon filtering. Vifs that
 * are neither STA nor AP, or not associated/started, are skipped.
 */
1669 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1672 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1673 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1675 if ((!is_ap) && (!is_sta))
1678 if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1679 (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1682 wl1271_configure_wowlan(wl, NULL);
1685 if ((wl->conf.conn.suspend_wake_up_event ==
1686 wl->conf.conn.wake_up_event) &&
1687 (wl->conf.conn.suspend_listen_interval ==
1688 wl->conf.conn.listen_interval))
1691 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1692 wl->conf.conn.wake_up_event,
1693 wl->conf.conn.listen_interval);
1696 wl1271_error("resume: wake up conditions failed: %d",
/* AP path (elided branch): stop filtering beacons again */
1700 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
/*
 * mac80211 suspend callback. Refuses to suspend while a recovery is
 * pending (so recovery runs first), flushes TX, wakes the chip, runs
 * per-vif suspend configuration (skipping p2p-mgmt vifs), disables
 * fast-link notifications, optionally tells the FW to drop RX BA
 * frames, then quiesces the driver: disable IRQs, mark SUSPENDED so
 * the threaded handler won't requeue work, flush tx/elp work, and
 * cancel the TX watchdog.
 * NOTE(review): elided listing — error-return paths and several
 * closing braces between the steps are missing lines.
 */
1704 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1705 struct cfg80211_wowlan *wow)
1707 struct wl1271 *wl = hw->priv;
1708 struct wl12xx_vif *wlvif;
1711 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1714 /* we want to perform the recovery before suspending */
1715 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1716 wl1271_warning("postponing suspend to perform recovery");
1720 wl1271_tx_flush(wl);
1722 mutex_lock(&wl->mutex);
1724 ret = wl1271_ps_elp_wakeup(wl);
1726 mutex_unlock(&wl->mutex);
1730 wl->wow_enabled = true;
1731 wl12xx_for_each_wlvif(wl, wlvif) {
/* p2p management vifs have nothing to configure for suspend */
1732 if (wlcore_is_p2p_mgmt(wlvif))
1735 ret = wl1271_configure_suspend(wl, wlvif, wow);
1737 mutex_unlock(&wl->mutex);
1738 wl1271_warning("couldn't prepare device to suspend");
1743 /* disable fast link flow control notifications from FW */
1744 ret = wlcore_hw_interrupt_notify(wl, false);
1748 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1749 ret = wlcore_hw_rx_ba_filter(wl,
1750 !!wl->conf.conn.suspend_rx_ba_activity);
1755 wl1271_ps_elp_sleep(wl);
1756 mutex_unlock(&wl->mutex);
1759 wl1271_warning("couldn't prepare device to suspend");
1763 /* flush any remaining work */
1764 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1767 * disable and re-enable interrupts in order to flush
1770 wlcore_disable_interrupts(wl);
1773 * set suspended flag to avoid triggering a new threaded_irq
1774 * work. no need for spinlock as interrupts are disabled.
1776 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1778 wlcore_enable_interrupts(wl);
1779 flush_work(&wl->tx_work);
1780 flush_delayed_work(&wl->elp_work);
1783 * Cancel the watchdog even if above tx_flush failed. We will detect
1784 * it on resume anyway.
1786 cancel_delayed_work(&wl->tx_watchdog_work);
/*
 * mac80211 resume callback — mirror of wl1271_op_suspend. Clears the
 * SUSPENDED flag (re-arming the threaded IRQ path) and, if work was
 * pending while suspended, runs the IRQ handler directly — but only
 * when no recovery is pending, in which case the forgotten recovery
 * work is queued instead. Then wakes the chip, restores per-vif
 * configuration, re-enables fast-link notifications, lifts the RX BA
 * drop filter, and arms re-initialization of the TX watchdog.
 * NOTE(review): elided listing — error paths and braces are missing.
 */
1791 static int wl1271_op_resume(struct ieee80211_hw *hw)
1793 struct wl1271 *wl = hw->priv;
1794 struct wl12xx_vif *wlvif;
1795 unsigned long flags;
1796 bool run_irq_work = false, pending_recovery;
1799 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1801 WARN_ON(!wl->wow_enabled);
1804 * re-enable irq_work enqueuing, and call irq_work directly if
1805 * there is a pending work.
1807 spin_lock_irqsave(&wl->wl_lock, flags);
1808 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1809 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1810 run_irq_work = true;
1811 spin_unlock_irqrestore(&wl->wl_lock, flags);
1813 mutex_lock(&wl->mutex);
1815 /* test the recovery flag before calling any SDIO functions */
1816 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1820 wl1271_debug(DEBUG_MAC80211,
1821 "run postponed irq_work directly");
1823 /* don't talk to the HW if recovery is pending */
1824 if (!pending_recovery) {
1825 ret = wlcore_irq_locked(wl);
1827 wl12xx_queue_recovery_work(wl);
1830 wlcore_enable_interrupts(wl);
1833 if (pending_recovery) {
1834 wl1271_warning("queuing forgotten recovery on resume");
1835 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1839 ret = wl1271_ps_elp_wakeup(wl);
1843 wl12xx_for_each_wlvif(wl, wlvif) {
1844 if (wlcore_is_p2p_mgmt(wlvif))
1847 wl1271_configure_resume(wl, wlvif);
1850 ret = wlcore_hw_interrupt_notify(wl, true);
1854 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1855 ret = wlcore_hw_rx_ba_filter(wl, false);
1860 wl1271_ps_elp_sleep(wl);
1863 wl->wow_enabled = false;
1866 * Set a flag to re-init the watchdog on the first Tx after resume.
1867 * That way we avoid possible conditions where Tx-complete interrupts
1868 * fail to arrive and we perform a spurious recovery.
1870 set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1871 mutex_unlock(&wl->mutex);
/*
 * mac80211 start callback — intentionally does nothing but log:
 * the hardware is booted lazily from add_interface, once the MAC
 * address is known (it cannot change after FW boot).
 */
1877 static int wl1271_op_start(struct ieee80211_hw *hw)
1879 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1882 * We have to delay the booting of the hardware because
1883 * we need to know the local MAC address before downloading and
1884 * initializing the firmware. The MAC address cannot be changed
1885 * after boot, and without the proper MAC address, the firmware
1886 * will not function properly.
1888 * The MAC address is first known when the corresponding interface
1889 * is added. That is where we will initialize the hardware.
/*
 * Stop the core with wl->mutex held. Ordering is delicate:
 *  1. set state OFF first so work functions become no-ops;
 *  2. disable IRQs with the nosync variant (mutex is held), then drop
 *     the mutex to synchronize IRQs and cancel/flush all works;
 *  3. retake the mutex, reset TX, power off, and re-balance the
 *     interrupt-disable depth left by a scheduled recovery;
 *  4. reset all driver bookkeeping (counters, maps, link table) and
 *     free FW status/result buffers;
 *  5. stash the Reg-Domain channel config for re-calibration and
 *     clear the live copy.
 * If already OFF, only clears a pending-recovery interrupt disable.
 */
1895 static void wlcore_op_stop_locked(struct wl1271 *wl)
1899 if (wl->state == WLCORE_STATE_OFF) {
1900 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1902 wlcore_enable_interrupts(wl);
1908 * this must be before the cancel_work calls below, so that the work
1909 * functions don't perform further work.
1911 wl->state = WLCORE_STATE_OFF;
1914 * Use the nosync variant to disable interrupts, so the mutex could be
1915 * held while doing so without deadlocking.
1917 wlcore_disable_interrupts_nosync(wl);
1919 mutex_unlock(&wl->mutex);
1921 wlcore_synchronize_interrupts(wl);
/* recovery work calls into stop itself; don't self-deadlock on it */
1922 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1923 cancel_work_sync(&wl->recovery_work);
1924 wl1271_flush_deferred_work(wl);
1925 cancel_delayed_work_sync(&wl->scan_complete_work);
1926 cancel_work_sync(&wl->netstack_work);
1927 cancel_work_sync(&wl->tx_work);
1928 cancel_delayed_work_sync(&wl->elp_work);
1929 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1931 /* let's notify MAC80211 about the remaining pending TX frames */
1932 mutex_lock(&wl->mutex);
1933 wl12xx_tx_reset(wl);
1935 wl1271_power_off(wl);
1937 * In case a recovery was scheduled, interrupts were disabled to avoid
1938 * an interrupt storm. Now that the power is down, it is safe to
1939 * re-enable interrupts to balance the disable depth
1941 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1942 wlcore_enable_interrupts(wl);
1944 wl->band = NL80211_BAND_2GHZ;
1947 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1948 wl->channel_type = NL80211_CHAN_NO_HT;
1949 wl->tx_blocks_available = 0;
1950 wl->tx_allocated_blocks = 0;
1951 wl->tx_results_count = 0;
1952 wl->tx_packets_count = 0;
1953 wl->time_offset = 0;
1954 wl->ap_fw_ps_map = 0;
1956 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1957 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1958 memset(wl->links_map, 0, sizeof(wl->links_map));
1959 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1960 memset(wl->session_ids, 0, sizeof(wl->session_ids));
1961 memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1962 wl->active_sta_count = 0;
1963 wl->active_link_count = 0;
1965 /* The system link is always allocated */
1966 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1967 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1968 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1971 * this is performed after the cancel_work calls and the associated
1972 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1973 * get executed before all these vars have been reset.
1977 wl->tx_blocks_freed = 0;
1979 for (i = 0; i < NUM_TX_QUEUES; i++) {
1980 wl->tx_pkts_freed[i] = 0;
1981 wl->tx_allocated_pkts[i] = 0;
1984 wl1271_debugfs_reset(wl);
1986 kfree(wl->raw_fw_status);
1987 wl->raw_fw_status = NULL;
1988 kfree(wl->fw_status);
1989 wl->fw_status = NULL;
1990 kfree(wl->tx_res_if);
1991 wl->tx_res_if = NULL;
1992 kfree(wl->target_mem_map);
1993 wl->target_mem_map = NULL;
1996 * FW channels must be re-calibrated after recovery,
1997 * save current Reg-Domain channel configuration and clear it.
1999 memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2000 sizeof(wl->reg_ch_conf_pending));
2001 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
/* mac80211 stop callback: take wl->mutex and run the locked stop path. */
2004 static void wlcore_op_stop(struct ieee80211_hw *hw)
2006 struct wl1271 *wl = hw->priv;
2008 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2010 mutex_lock(&wl->mutex);
2012 wlcore_op_stop_locked(wl);
2014 mutex_unlock(&wl->mutex);
/*
 * Delayed work fired when a channel switch did not complete in time:
 * if the CS_PROGRESS flag is still set, report failure to mac80211
 * via ieee80211_chswitch_done(vif, false) and ask the FW to abort the
 * switch. Bails out early if the core is no longer ON.
 */
2017 static void wlcore_channel_switch_work(struct work_struct *work)
2019 struct delayed_work *dwork;
2021 struct ieee80211_vif *vif;
2022 struct wl12xx_vif *wlvif;
2025 dwork = to_delayed_work(work);
2026 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2029 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2031 mutex_lock(&wl->mutex);
2033 if (unlikely(wl->state != WLCORE_STATE_ON))
2036 /* check the channel switch is still ongoing */
2037 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2040 vif = wl12xx_wlvif_to_vif(wlvif);
2041 ieee80211_chswitch_done(vif, false);
2043 ret = wl1271_ps_elp_wakeup(wl);
2047 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2049 wl1271_ps_elp_sleep(wl);
2051 mutex_unlock(&wl->mutex);
/*
 * Delayed work that reports a lost connection to mac80211
 * (ieee80211_connection_loss) — only if the core is still ON and the
 * STA is still flagged associated when the work finally runs.
 */
2054 static void wlcore_connection_loss_work(struct work_struct *work)
2056 struct delayed_work *dwork;
2058 struct ieee80211_vif *vif;
2059 struct wl12xx_vif *wlvif;
2061 dwork = to_delayed_work(work);
2062 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2065 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2067 mutex_lock(&wl->mutex);
2069 if (unlikely(wl->state != WLCORE_STATE_ON))
2072 /* Call mac80211 connection loss */
2073 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2076 vif = wl12xx_wlvif_to_vif(wlvif);
2077 ieee80211_connection_loss(vif);
2079 mutex_unlock(&wl->mutex);
/*
 * Delayed work that cancels the pending-auth ROC (remain-on-channel)
 * once the auth-reply grace period has elapsed. Re-checks the elapsed
 * time under the mutex (a fresh auth reply may have re-armed it while
 * we waited), using a ~50 ms scheduler-slack margin.
 */
2082 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2084 struct delayed_work *dwork;
2086 struct wl12xx_vif *wlvif;
2087 unsigned long time_spare;
2090 dwork = to_delayed_work(work);
2091 wlvif = container_of(dwork, struct wl12xx_vif,
2092 pending_auth_complete_work);
2095 mutex_lock(&wl->mutex);
2097 if (unlikely(wl->state != WLCORE_STATE_ON))
2101 * Make sure a second really passed since the last auth reply. Maybe
2102 * a second auth reply arrived while we were stuck on the mutex.
2103 * Check for a little less than the timeout to protect from scheduler
2106 time_spare = jiffies +
2107 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2108 if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2111 ret = wl1271_ps_elp_wakeup(wl);
2115 /* cancel the ROC if active */
2116 wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2118 wl1271_ps_elp_sleep(wl);
2120 mutex_unlock(&wl->mutex);
/*
 * Claim the first free slot in the rate-policies bitmap; fails (elided
 * return) when all WL12XX_MAX_RATE_POLICIES are taken. The slot index
 * is presumably stored through *idx in an elided line — TODO confirm.
 */
2123 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2125 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2126 WL12XX_MAX_RATE_POLICIES);
2127 if (policy >= WL12XX_MAX_RATE_POLICIES)
2130 __set_bit(policy, wl->rate_policies_map);
/*
 * Release a rate-policy slot and mark *idx invalid (the MAX value is
 * the "unallocated" sentinel); WARNs on an out-of-range index.
 */
2135 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2137 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2140 __clear_bit(*idx, wl->rate_policies_map);
2141 *idx = WL12XX_MAX_RATE_POLICIES;
/*
 * Claim the first free KLV (keep-alive) template slot; fails (elided
 * return) when all WLCORE_MAX_KLV_TEMPLATES are taken. Mirrors
 * wl12xx_allocate_rate_policy().
 */
2144 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2146 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2147 WLCORE_MAX_KLV_TEMPLATES);
2148 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2151 __set_bit(policy, wl->klv_templates_map);
/*
 * Release a KLV template slot and mark *idx invalid (MAX = sentinel);
 * WARNs on an out-of-range index.
 */
2156 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2158 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2161 __clear_bit(*idx, wl->klv_templates_map);
2162 *idx = WLCORE_MAX_KLV_TEMPLATES;
/*
 * Map a vif's bss_type (plus p2p/mesh attributes, checked in elided
 * conditions) to a FW role: GO/mesh/AP for AP_BSS, P2P-client/STA for
 * STA_BSS, IBSS otherwise; unknown types yield WL12XX_INVALID_ROLE_TYPE.
 */
2165 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2167 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2169 switch (wlvif->bss_type) {
2170 case BSS_TYPE_AP_BSS:
2172 return WL1271_ROLE_P2P_GO;
2173 else if (ieee80211_vif_is_mesh(vif))
2174 return WL1271_ROLE_MESH_POINT;
2176 return WL1271_ROLE_AP;
2178 case BSS_TYPE_STA_BSS:
2180 return WL1271_ROLE_P2P_CL;
2182 return WL1271_ROLE_STA;
2185 return WL1271_ROLE_IBSS;
2188 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2190 return WL12XX_INVALID_ROLE_TYPE;
/*
 * Initialize per-vif driver state when an interface is added:
 *  - wipe everything up to the persistent section of wl12xx_vif;
 *  - derive bss_type from the mac80211 interface type;
 *  - mark role/link ids invalid, then allocate rate policies (and for
 *    STA/IBSS a keep-alive template) appropriate to the mode;
 *  - seed basic/enabled rate sets, bitrate masks and beacon interval;
 *  - copy globally-configured values (band/channel/power/chan type)
 *    from wl, since mac80211 sets them globally but wlcore tracks
 *    them per interface;
 *  - wire up the per-vif work items and the rx-streaming timer.
 */
2193 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2195 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2198 /* clear everything but the persistent data */
2199 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2201 switch (ieee80211_vif_type_p2p(vif)) {
2202 case NL80211_IFTYPE_P2P_CLIENT:
2205 case NL80211_IFTYPE_STATION:
2206 case NL80211_IFTYPE_P2P_DEVICE:
2207 wlvif->bss_type = BSS_TYPE_STA_BSS;
2209 case NL80211_IFTYPE_ADHOC:
2210 wlvif->bss_type = BSS_TYPE_IBSS;
2212 case NL80211_IFTYPE_P2P_GO:
2215 case NL80211_IFTYPE_AP:
2216 case NL80211_IFTYPE_MESH_POINT:
2217 wlvif->bss_type = BSS_TYPE_AP_BSS;
/* unknown interface type — MAX_BSS_TYPE signals the error upward */
2220 wlvif->bss_type = MAX_BSS_TYPE;
2224 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2225 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2226 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2228 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2229 wlvif->bss_type == BSS_TYPE_IBSS) {
2230 /* init sta/ibss data */
2231 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2232 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2233 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2234 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2235 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2236 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2237 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2238 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
/* AP/mesh path (the else branch is an elided line) */
2241 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2242 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2243 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2244 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2245 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2246 wl12xx_allocate_rate_policy(wl,
2247 &wlvif->ap.ucast_rate_idx[i]);
2248 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2250 * TODO: check if basic_rate shouldn't be
2251 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2252 * instead (the same thing for STA above).
2254 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2255 /* TODO: this seems to be used only for STA, check it */
2256 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2259 wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2260 wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2261 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2264 * mac80211 configures some values globally, while we treat them
2265 * per-interface. thus, on init, we have to copy them from wl
2267 wlvif->band = wl->band;
2268 wlvif->channel = wl->channel;
2269 wlvif->power_level = wl->power_level;
2270 wlvif->channel_type = wl->channel_type;
2272 INIT_WORK(&wlvif->rx_streaming_enable_work,
2273 wl1271_rx_streaming_enable_work);
2274 INIT_WORK(&wlvif->rx_streaming_disable_work,
2275 wl1271_rx_streaming_disable_work);
2276 INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2277 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2278 wlcore_channel_switch_work);
2279 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2280 wlcore_connection_loss_work);
2281 INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2282 wlcore_pending_auth_complete_work);
2283 INIT_LIST_HEAD(&wlvif->list);
2285 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2286 (unsigned long) wlvif);
/*
 * Boot the firmware with up to WL1271_BOOT_RETRIES attempts:
 * wake the chip, run the chip-specific boot op and HW init; on
 * failure, quiesce (temporarily dropping the mutex — see the inline
 * justification), power off, and retry. On success, publish the FW
 * version to wiphy, disable 5 GHz channels when 11a is unsupported
 * (per NVS), and move the core to WLCORE_STATE_ON.
 * NOTE(review): elided listing — the retry loop header, error gotos
 * and the success/return lines are missing.
 */
2290 static int wl12xx_init_fw(struct wl1271 *wl)
2292 int retries = WL1271_BOOT_RETRIES;
2293 bool booted = false;
2294 struct wiphy *wiphy = wl->hw->wiphy;
2299 ret = wl12xx_chip_wakeup(wl, false);
2303 ret = wl->ops->boot(wl);
2307 ret = wl1271_hw_init(wl);
2315 mutex_unlock(&wl->mutex);
2316 /* Unlocking the mutex in the middle of handling is
2317 inherently unsafe. In this case we deem it safe to do,
2318 because we need to let any possibly pending IRQ out of
2319 the system (and while we are WLCORE_STATE_OFF the IRQ
2320 work function will not do anything.) Also, any other
2321 possible concurrent operations will fail due to the
2322 current state, hence the wl1271 struct should be safe. */
2323 wlcore_disable_interrupts(wl);
2324 wl1271_flush_deferred_work(wl);
2325 cancel_work_sync(&wl->netstack_work);
2326 mutex_lock(&wl->mutex);
2328 wl1271_power_off(wl);
2332 wl1271_error("firmware boot failed despite %d retries",
2333 WL1271_BOOT_RETRIES);
2337 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2339 /* update hw/fw version info in wiphy struct */
2340 wiphy->hw_version = wl->chip.id;
2341 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2342 sizeof(wiphy->fw_version));
2345 * Now we know if 11a is supported (info from the NVS), so disable
2346 * 11a channels if not supported
2348 if (!wl->enable_11a)
2349 wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2351 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2352 wl->enable_11a ? "" : "not ");
2354 wl->state = WLCORE_STATE_ON;
/* True when the vif's device role holds a valid link id (role started). */
2359 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2361 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2365 * Check whether a fw switch (i.e. moving from one loaded
2366 * fw to another) is needed. This function is also responsible
2367 * for updating wl->last_vif_count, so it must be called before
2368 * loading a non-plt fw (so the correct fw (single-role/multi-role)
/* ... is chosen). Returns false while a vif change is in progress,
 * when the device is OFF, or when only a single fw exists; otherwise
 * true iff the vif count crosses the single-/multi-role boundary.
 */
2371 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2372 struct vif_counter_data vif_counter_data,
2375 enum wl12xx_fw_type current_fw = wl->fw_type;
2376 u8 vif_count = vif_counter_data.counter;
2378 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2381 /* increase the vif count if this is a new vif */
2382 if (add && !vif_counter_data.cur_vif_running)
2385 wl->last_vif_count = vif_count;
2387 /* no need for fw change if the device is OFF */
2388 if (wl->state == WLCORE_STATE_OFF)
2391 /* no need for fw change if a single fw is used */
2392 if (!wl->mr_fw_name)
2395 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2397 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2404 * Enter "forced psm". Make sure the sta is in psm against the ap,
2405 * to make the fw switch a bit more disconnection-persistent.
/* Applied to every STA vif before a firmware swap / recovery. */
2407 static void wl12xx_force_active_psm(struct wl1271 *wl)
2409 struct wl12xx_vif *wlvif;
2411 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2412 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
/*
 * Accumulator for wlcore_hw_queue_iter(): a bitmap of hw-queue bases
 * already claimed by active interfaces, the vif being added, and a
 * flag recording whether that vif was seen among the active ones.
 */
2416 struct wlcore_hw_queue_iter_data {
2417 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2419 struct ieee80211_vif *vif;
2420 /* is the current vif among those iterated */
/*
 * Active-interface iterator callback: mark each interface's hw-queue
 * base as taken (hw_queue[0] / NUM_TX_QUEUES). P2P-device vifs have no
 * hw queues and are skipped; the vif being added sets cur_running
 * instead of claiming a slot.
 */
2424 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2425 struct ieee80211_vif *vif)
2427 struct wlcore_hw_queue_iter_data *iter_data = data;
2429 if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2430 WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2433 if (iter_data->cur_running || vif == iter_data->vif) {
2434 iter_data->cur_running = true;
2438 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
/*
 * Assign a block of NUM_TX_QUEUES mac80211 hw queues to a vif:
 *  - P2P-device vifs get no queues (cab_queue invalidated, done);
 *  - if the vif is already running (resume/recovery), reuse the base
 *    mac80211 already knows and only re-derive the cab queue;
 *  - otherwise pick the first free base from the bitmap built by
 *    wlcore_hw_queue_iter(), clear the queue-stop reasons for that
 *    range and register the queues in vif->hw_queue[];
 *  - AP vifs additionally get a per-interface cab (multicast) queue
 *    from the reserved range at the top; others get INVAL.
 */
2441 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2442 struct wl12xx_vif *wlvif)
2444 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2445 struct wlcore_hw_queue_iter_data iter_data = {};
2448 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2449 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2453 iter_data.vif = vif;
2455 /* mark all bits taken by active interfaces */
2456 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2457 IEEE80211_IFACE_ITER_RESUME_ALL,
2458 wlcore_hw_queue_iter, &iter_data);
2460 /* the current vif is already running in mac80211 (resume/recovery) */
2461 if (iter_data.cur_running) {
2462 wlvif->hw_queue_base = vif->hw_queue[0];
2463 wl1271_debug(DEBUG_MAC80211,
2464 "using pre-allocated hw queue base %d",
2465 wlvif->hw_queue_base);
2467 /* interface type might have changed type */
2468 goto adjust_cab_queue;
2471 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2472 WLCORE_NUM_MAC_ADDRESSES);
2473 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2476 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2477 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2478 wlvif->hw_queue_base);
2480 for (i = 0; i < NUM_TX_QUEUES; i++) {
2481 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2482 /* register hw queues in mac80211 */
2483 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2487 /* the last places are reserved for cab queues per interface */
2488 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2489 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2490 wlvif->hw_queue_base / NUM_TX_QUEUES;
2492 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
/*
 * mac80211 add_interface callback. Rejects new interfaces in PLT mode,
 * advertises beacon-filter/UAPSD/CQM-RSSI support on the vif, then
 * under the mutex: bails if a recovery is in progress or the vif is
 * somehow still initialized (racing remove), initializes the per-vif
 * data, resolves the FW role type, allocates hw queues, and — if the
 * vif count change requires the other (single-/multi-role) firmware —
 * forces PSM and runs an intended recovery to swap FW. If the core is
 * still OFF, the MAC address is copied into wl->addresses[0] (needed
 * for the NVS upload) and the FW is booted here rather than in start()
 * (see the TODO). Finally enables the FW role (plus a DEVICE role and
 * STA HW init for p2p-mgmt vifs), links the vif into wl->wlvif_list
 * and marks it INITIALIZED.
 * NOTE(review): elided listing — error gotos, several closing braces
 * and the ap_count/sta_count bump near the end are missing lines.
 */
2497 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2498 struct ieee80211_vif *vif)
2500 struct wl1271 *wl = hw->priv;
2501 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2502 struct vif_counter_data vif_count;
2507 wl1271_error("Adding Interface not allowed while in PLT mode");
2511 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2512 IEEE80211_VIF_SUPPORTS_UAPSD |
2513 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2515 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2516 ieee80211_vif_type_p2p(vif), vif->addr);
2518 wl12xx_get_vif_count(hw, vif, &vif_count);
2520 mutex_lock(&wl->mutex);
2521 ret = wl1271_ps_elp_wakeup(wl);
2526 * in some very corner case HW recovery scenarios its possible to
2527 * get here before __wl1271_op_remove_interface is complete, so
2528 * opt out if that is the case.
2530 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2531 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2537 ret = wl12xx_init_vif_data(wl, vif);
2542 role_type = wl12xx_get_role_type(wl, wlvif);
2543 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2548 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2552 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2553 wl12xx_force_active_psm(wl);
2554 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
/* run recovery synchronously, outside the mutex it will take */
2555 mutex_unlock(&wl->mutex);
2556 wl1271_recovery_work(&wl->recovery_work);
2561 * TODO: after the nvs issue will be solved, move this block
2562 * to start(), and make sure here the driver is ON.
2564 if (wl->state == WLCORE_STATE_OFF) {
2566 * we still need this in order to configure the fw
2567 * while uploading the nvs
2569 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2571 ret = wl12xx_init_fw(wl);
2576 if (!wlcore_is_p2p_mgmt(wlvif)) {
2577 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2578 role_type, &wlvif->role_id);
2582 ret = wl1271_init_vif_specific(wl, vif);
/* p2p-mgmt path: a DEVICE role instead of a full role */
2587 ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2588 &wlvif->dev_role_id);
2592 /* needed mainly for configuring rate policies */
2593 ret = wl1271_sta_hw_init(wl, wlvif);
2598 list_add(&wlvif->list, &wl->wlvif_list);
2599 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2601 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2606 wl1271_ps_elp_sleep(wl);
2608 mutex_unlock(&wl->mutex);
/*
 * Tear down one vif (called with wl->mutex held). Steps:
 *  - bail if the vif was never initialized or the core is already OFF
 *    (this path can legitimately run twice around a recovery);
 *  - abort any scan owned by this vif (rearming the TX watchdog so a
 *    just-finished scan doesn't trip it) and complete it to mac80211;
 *  - drop sched-scan/ROC references held by this vif;
 *  - unless a recovery is in progress, wake the chip and disable the
 *    FW roles (stopping the device role first for STA/IBSS);
 *  - reset the vif's TX state, invalidate all its hlids, free its rate
 *    policies/KLV template (per mode) and AP keys;
 *  - unlink from wl->wlvif_list, free the cached probereq;
 *  - after an AP goes away with STAs remaining, reconfigure sleep auth
 *    (debugfs override or ELP) and mask AP events — skipped on an
 *    unintended recovery;
 *  - finally drop the mutex to cancel the vif's timers/works, then
 *    retake it for the caller.
 * NOTE(review): elided listing — error gotos, count decrements and
 * many closing braces are missing lines.
 */
2613 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2614 struct ieee80211_vif *vif,
2615 bool reset_tx_queues)
2617 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2619 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2621 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2623 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2626 /* because of hardware recovery, we may get here twice */
2627 if (wl->state == WLCORE_STATE_OFF)
2630 wl1271_info("down");
2632 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2633 wl->scan_wlvif == wlvif) {
2634 struct cfg80211_scan_info info = {
2639 * Rearm the tx watchdog just before idling scan. This
2640 * prevents just-finished scans from triggering the watchdog
2642 wl12xx_rearm_tx_watchdog_locked(wl);
2644 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2645 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2646 wl->scan_wlvif = NULL;
2647 wl->scan.req = NULL;
2648 ieee80211_scan_completed(wl->hw, &info);
2651 if (wl->sched_vif == wlvif)
2652 wl->sched_vif = NULL;
2654 if (wl->roc_vif == vif) {
2656 ieee80211_remain_on_channel_expired(wl->hw);
2659 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2660 /* disable active roles */
2661 ret = wl1271_ps_elp_wakeup(wl);
2665 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2666 wlvif->bss_type == BSS_TYPE_IBSS) {
2667 if (wl12xx_dev_role_started(wlvif))
2668 wl12xx_stop_dev(wl, wlvif);
2671 if (!wlcore_is_p2p_mgmt(wlvif)) {
2672 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2676 ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2681 wl1271_ps_elp_sleep(wl);
2684 wl12xx_tx_reset_wlvif(wl, wlvif);
2686 /* clear all hlids (except system_hlid) */
2687 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2689 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2690 wlvif->bss_type == BSS_TYPE_IBSS) {
2691 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2692 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2693 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2694 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2695 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2697 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2698 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2699 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2700 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2701 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2702 wl12xx_free_rate_policy(wl,
2703 &wlvif->ap.ucast_rate_idx[i]);
2704 wl1271_free_ap_keys(wl, wlvif);
2707 dev_kfree_skb(wlvif->probereq);
2708 wlvif->probereq = NULL;
2709 if (wl->last_wlvif == wlvif)
2710 wl->last_wlvif = NULL;
2711 list_del(&wlvif->list);
2712 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2713 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2714 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2722 * Last AP, have more stations. Configure sleep auth according to STA.
2723 * Don't do thin on unintended recovery.
2725 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2726 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2729 if (wl->ap_count == 0 && is_ap) {
2730 /* mask ap events */
2731 wl->event_mask &= ~wl->ap_event_mask;
2732 wl1271_event_unmask(wl);
2735 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2736 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2737 /* Configure for power according to debugfs */
2738 if (sta_auth != WL1271_PSM_ILLEGAL)
2739 wl1271_acx_sleep_auth(wl, sta_auth);
2740 /* Configure for ELP power saving */
2742 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
/* works/timers must be cancelled without the mutex held */
2746 mutex_unlock(&wl->mutex);
2748 del_timer_sync(&wlvif->rx_streaming_timer);
2749 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2750 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2751 cancel_work_sync(&wlvif->rc_update_work);
2752 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2753 cancel_delayed_work_sync(&wlvif->channel_switch_work);
2754 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2756 mutex_lock(&wl->mutex);
2759 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2760 struct ieee80211_vif *vif)
2762 struct wl1271 *wl = hw->priv;
2763 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2764 struct wl12xx_vif *iter;
2765 struct vif_counter_data vif_count;
2767 wl12xx_get_vif_count(hw, vif, &vif_count);
2768 mutex_lock(&wl->mutex);
2770 if (wl->state == WLCORE_STATE_OFF ||
2771 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2775 * wl->vif can be null here if someone shuts down the interface
2776 * just when hardware recovery has been started.
2778 wl12xx_for_each_wlvif(wl, iter) {
2782 __wl1271_op_remove_interface(wl, vif, true);
2785 WARN_ON(iter != wlvif);
2786 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2787 wl12xx_force_active_psm(wl);
2788 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2789 wl12xx_queue_recovery_work(wl);
2792 mutex_unlock(&wl->mutex);
2795 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2796 struct ieee80211_vif *vif,
2797 enum nl80211_iftype new_type, bool p2p)
2799 struct wl1271 *wl = hw->priv;
2802 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2803 wl1271_op_remove_interface(hw, vif);
2805 vif->type = new_type;
2807 ret = wl1271_op_add_interface(hw, vif);
2809 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2813 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2816 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2819 * One of the side effects of the JOIN command is that is clears
2820 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2821 * to a WPA/WPA2 access point will therefore kill the data-path.
2822 * Currently the only valid scenario for JOIN during association
2823 * is on roaming, in which case we will also be given new keys.
2824 * Keep the below message for now, unless it starts bothering
2825 * users who really like to roam a lot :)
2827 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2828 wl1271_info("JOIN while associated.");
2830 /* clear encryption type */
2831 wlvif->encryption_type = KEY_NONE;
2834 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2836 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2841 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2845 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2849 wl1271_error("No SSID in IEs!");
2854 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2855 wl1271_error("SSID is too long!");
2859 wlvif->ssid_len = ssid_len;
2860 memcpy(wlvif->ssid, ptr+2, ssid_len);
2864 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2866 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2867 struct sk_buff *skb;
2870 /* we currently only support setting the ssid from the ap probe req */
2871 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2874 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2878 ieoffset = offsetof(struct ieee80211_mgmt,
2879 u.probe_req.variable);
2880 wl1271_ssid_set(wlvif, skb, ieoffset);
2886 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2887 struct ieee80211_bss_conf *bss_conf,
2893 wlvif->aid = bss_conf->aid;
2894 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2895 wlvif->beacon_int = bss_conf->beacon_int;
2896 wlvif->wmm_enabled = bss_conf->qos;
2898 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2901 * with wl1271, we don't need to update the
2902 * beacon_int and dtim_period, because the firmware
2903 * updates it by itself when the first beacon is
2904 * received after a join.
2906 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2911 * Get a template for hardware connection maintenance
2913 dev_kfree_skb(wlvif->probereq);
2914 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2917 ieoffset = offsetof(struct ieee80211_mgmt,
2918 u.probe_req.variable);
2919 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2921 /* enable the connection monitoring feature */
2922 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2927 * The join command disable the keep-alive mode, shut down its process,
2928 * and also clear the template config, so we need to reset it all after
2929 * the join. The acx_aid starts the keep-alive process, and the order
2930 * of the commands below is relevant.
2932 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2936 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2940 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2944 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2945 wlvif->sta.klv_template_id,
2946 ACX_KEEP_ALIVE_TPL_VALID);
2951 * The default fw psm configuration is AUTO, while mac80211 default
2952 * setting is off (ACTIVE), so sync the fw with the correct value.
2954 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2960 wl1271_tx_enabled_rates_get(wl,
2963 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2971 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2974 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2976 /* make sure we are connected (sta) joined */
2978 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2981 /* make sure we are joined (ibss) */
2983 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
2987 /* use defaults when not associated */
2990 /* free probe-request template */
2991 dev_kfree_skb(wlvif->probereq);
2992 wlvif->probereq = NULL;
2994 /* disable connection monitor features */
2995 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
2999 /* Disable the keep-alive feature */
3000 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3004 /* disable beacon filtering */
3005 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3010 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3011 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3013 wl12xx_cmd_stop_channel_switch(wl, wlvif);
3014 ieee80211_chswitch_done(vif, false);
3015 cancel_delayed_work(&wlvif->channel_switch_work);
3018 /* invalidate keep-alive template */
3019 wl1271_acx_keep_alive_config(wl, wlvif,
3020 wlvif->sta.klv_template_id,
3021 ACX_KEEP_ALIVE_TPL_INVALID);
3026 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3028 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3029 wlvif->rate_set = wlvif->basic_rate_set;
3032 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3035 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3037 if (idle == cur_idle)
3041 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3043 /* The current firmware only supports sched_scan in idle */
3044 if (wl->sched_vif == wlvif)
3045 wl->ops->sched_scan_stop(wl, wlvif);
3047 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3051 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3052 struct ieee80211_conf *conf, u32 changed)
3056 if (wlcore_is_p2p_mgmt(wlvif))
3059 if (conf->power_level != wlvif->power_level) {
3060 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3064 wlvif->power_level = conf->power_level;
3070 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3072 struct wl1271 *wl = hw->priv;
3073 struct wl12xx_vif *wlvif;
3074 struct ieee80211_conf *conf = &hw->conf;
3077 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3079 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3081 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3084 mutex_lock(&wl->mutex);
3086 if (changed & IEEE80211_CONF_CHANGE_POWER)
3087 wl->power_level = conf->power_level;
3089 if (unlikely(wl->state != WLCORE_STATE_ON))
3092 ret = wl1271_ps_elp_wakeup(wl);
3096 /* configure each interface */
3097 wl12xx_for_each_wlvif(wl, wlvif) {
3098 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3104 wl1271_ps_elp_sleep(wl);
3107 mutex_unlock(&wl->mutex);
3112 struct wl1271_filter_params {
3115 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3118 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3119 struct netdev_hw_addr_list *mc_list)
3121 struct wl1271_filter_params *fp;
3122 struct netdev_hw_addr *ha;
3124 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3126 wl1271_error("Out of memory setting filters.");
3130 /* update multicast filtering parameters */
3131 fp->mc_list_length = 0;
3132 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3133 fp->enabled = false;
3136 netdev_hw_addr_list_for_each(ha, mc_list) {
3137 memcpy(fp->mc_list[fp->mc_list_length],
3138 ha->addr, ETH_ALEN);
3139 fp->mc_list_length++;
3143 return (u64)(unsigned long)fp;
3146 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3148 FIF_BCN_PRBRESP_PROMISC | \
3152 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3153 unsigned int changed,
3154 unsigned int *total, u64 multicast)
3156 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3157 struct wl1271 *wl = hw->priv;
3158 struct wl12xx_vif *wlvif;
3162 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3163 " total %x", changed, *total);
3165 mutex_lock(&wl->mutex);
3167 *total &= WL1271_SUPPORTED_FILTERS;
3168 changed &= WL1271_SUPPORTED_FILTERS;
3170 if (unlikely(wl->state != WLCORE_STATE_ON))
3173 ret = wl1271_ps_elp_wakeup(wl);
3177 wl12xx_for_each_wlvif(wl, wlvif) {
3178 if (wlcore_is_p2p_mgmt(wlvif))
3181 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3182 if (*total & FIF_ALLMULTI)
3183 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3187 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3190 fp->mc_list_length);
3196 * If interface in AP mode and created with allmulticast then disable
3197 * the firmware filters so that all multicast packets are passed
3198 * This is mandatory for MDNS based discovery protocols
3200 if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
3201 if (*total & FIF_ALLMULTI) {
3202 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3212 * the fw doesn't provide an api to configure the filters. instead,
3213 * the filters configuration is based on the active roles / ROC
3218 wl1271_ps_elp_sleep(wl);
3221 mutex_unlock(&wl->mutex);
3225 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3226 u8 id, u8 key_type, u8 key_size,
3227 const u8 *key, u8 hlid, u32 tx_seq_32,
3230 struct wl1271_ap_key *ap_key;
3233 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3235 if (key_size > MAX_KEY_SIZE)
3239 * Find next free entry in ap_keys. Also check we are not replacing
3242 for (i = 0; i < MAX_NUM_KEYS; i++) {
3243 if (wlvif->ap.recorded_keys[i] == NULL)
3246 if (wlvif->ap.recorded_keys[i]->id == id) {
3247 wl1271_warning("trying to record key replacement");
3252 if (i == MAX_NUM_KEYS)
3255 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3260 ap_key->key_type = key_type;
3261 ap_key->key_size = key_size;
3262 memcpy(ap_key->key, key, key_size);
3263 ap_key->hlid = hlid;
3264 ap_key->tx_seq_32 = tx_seq_32;
3265 ap_key->tx_seq_16 = tx_seq_16;
3267 wlvif->ap.recorded_keys[i] = ap_key;
3271 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3275 for (i = 0; i < MAX_NUM_KEYS; i++) {
3276 kfree(wlvif->ap.recorded_keys[i]);
3277 wlvif->ap.recorded_keys[i] = NULL;
3281 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3284 struct wl1271_ap_key *key;
3285 bool wep_key_added = false;
3287 for (i = 0; i < MAX_NUM_KEYS; i++) {
3289 if (wlvif->ap.recorded_keys[i] == NULL)
3292 key = wlvif->ap.recorded_keys[i];
3294 if (hlid == WL12XX_INVALID_LINK_ID)
3295 hlid = wlvif->ap.bcast_hlid;
3297 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3298 key->id, key->key_type,
3299 key->key_size, key->key,
3300 hlid, key->tx_seq_32,
3305 if (key->key_type == KEY_WEP)
3306 wep_key_added = true;
3309 if (wep_key_added) {
3310 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3311 wlvif->ap.bcast_hlid);
3317 wl1271_free_ap_keys(wl, wlvif);
3321 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3322 u16 action, u8 id, u8 key_type,
3323 u8 key_size, const u8 *key, u32 tx_seq_32,
3324 u16 tx_seq_16, struct ieee80211_sta *sta)
3327 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3330 struct wl1271_station *wl_sta;
3334 wl_sta = (struct wl1271_station *)sta->drv_priv;
3335 hlid = wl_sta->hlid;
3337 hlid = wlvif->ap.bcast_hlid;
3340 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3342 * We do not support removing keys after AP shutdown.
3343 * Pretend we do to make mac80211 happy.
3345 if (action != KEY_ADD_OR_REPLACE)
3348 ret = wl1271_record_ap_key(wl, wlvif, id,
3350 key, hlid, tx_seq_32,
3353 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3354 id, key_type, key_size,
3355 key, hlid, tx_seq_32,
3363 static const u8 bcast_addr[ETH_ALEN] = {
3364 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3367 addr = sta ? sta->addr : bcast_addr;
3369 if (is_zero_ether_addr(addr)) {
3370 /* We dont support TX only encryption */
3374 /* The wl1271 does not allow to remove unicast keys - they
3375 will be cleared automatically on next CMD_JOIN. Ignore the
3376 request silently, as we dont want the mac80211 to emit
3377 an error message. */
3378 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3381 /* don't remove key if hlid was already deleted */
3382 if (action == KEY_REMOVE &&
3383 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3386 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3387 id, key_type, key_size,
3388 key, addr, tx_seq_32,
3398 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3399 struct ieee80211_vif *vif,
3400 struct ieee80211_sta *sta,
3401 struct ieee80211_key_conf *key_conf)
3403 struct wl1271 *wl = hw->priv;
3405 bool might_change_spare =
3406 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3407 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3409 if (might_change_spare) {
3411 * stop the queues and flush to ensure the next packets are
3412 * in sync with FW spare block accounting
3414 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3415 wl1271_tx_flush(wl);
3418 mutex_lock(&wl->mutex);
3420 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3422 goto out_wake_queues;
3425 ret = wl1271_ps_elp_wakeup(wl);
3427 goto out_wake_queues;
3429 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3431 wl1271_ps_elp_sleep(wl);
3434 if (might_change_spare)
3435 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3437 mutex_unlock(&wl->mutex);
3442 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3443 struct ieee80211_vif *vif,
3444 struct ieee80211_sta *sta,
3445 struct ieee80211_key_conf *key_conf)
3447 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3454 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3456 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3457 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3458 key_conf->cipher, key_conf->keyidx,
3459 key_conf->keylen, key_conf->flags);
3460 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3462 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3464 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3465 hlid = wl_sta->hlid;
3467 hlid = wlvif->ap.bcast_hlid;
3470 hlid = wlvif->sta.hlid;
3472 if (hlid != WL12XX_INVALID_LINK_ID) {
3473 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3474 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3475 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3478 switch (key_conf->cipher) {
3479 case WLAN_CIPHER_SUITE_WEP40:
3480 case WLAN_CIPHER_SUITE_WEP104:
3483 key_conf->hw_key_idx = key_conf->keyidx;
3485 case WLAN_CIPHER_SUITE_TKIP:
3486 key_type = KEY_TKIP;
3487 key_conf->hw_key_idx = key_conf->keyidx;
3489 case WLAN_CIPHER_SUITE_CCMP:
3491 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3493 case WL1271_CIPHER_SUITE_GEM:
3497 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3504 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3505 key_conf->keyidx, key_type,
3506 key_conf->keylen, key_conf->key,
3507 tx_seq_32, tx_seq_16, sta);
3509 wl1271_error("Could not add or replace key");
3514 * reconfiguring arp response if the unicast (or common)
3515 * encryption key type was changed
3517 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3518 (sta || key_type == KEY_WEP) &&
3519 wlvif->encryption_type != key_type) {
3520 wlvif->encryption_type = key_type;
3521 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3523 wl1271_warning("build arp rsp failed: %d", ret);
3530 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3531 key_conf->keyidx, key_type,
3532 key_conf->keylen, key_conf->key,
3535 wl1271_error("Could not remove key");
3541 wl1271_error("Unsupported key cmd 0x%x", cmd);
3547 EXPORT_SYMBOL_GPL(wlcore_set_key);
3549 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3550 struct ieee80211_vif *vif,
3553 struct wl1271 *wl = hw->priv;
3554 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3557 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3560 /* we don't handle unsetting of default key */
3564 mutex_lock(&wl->mutex);
3566 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3571 ret = wl1271_ps_elp_wakeup(wl);
3575 wlvif->default_key = key_idx;
3577 /* the default WEP key needs to be configured at least once */
3578 if (wlvif->encryption_type == KEY_WEP) {
3579 ret = wl12xx_cmd_set_default_wep_key(wl,
3587 wl1271_ps_elp_sleep(wl);
3590 mutex_unlock(&wl->mutex);
3593 void wlcore_regdomain_config(struct wl1271 *wl)
3597 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3600 mutex_lock(&wl->mutex);
3602 if (unlikely(wl->state != WLCORE_STATE_ON))
3605 ret = wl1271_ps_elp_wakeup(wl);
3609 ret = wlcore_cmd_regdomain_config_locked(wl);
3611 wl12xx_queue_recovery_work(wl);
3615 wl1271_ps_elp_sleep(wl);
3617 mutex_unlock(&wl->mutex);
3620 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3621 struct ieee80211_vif *vif,
3622 struct ieee80211_scan_request *hw_req)
3624 struct cfg80211_scan_request *req = &hw_req->req;
3625 struct wl1271 *wl = hw->priv;
3630 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3633 ssid = req->ssids[0].ssid;
3634 len = req->ssids[0].ssid_len;
3637 mutex_lock(&wl->mutex);
3639 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3641 * We cannot return -EBUSY here because cfg80211 will expect
3642 * a call to ieee80211_scan_completed if we do - in this case
3643 * there won't be any call.
3649 ret = wl1271_ps_elp_wakeup(wl);
3653 /* fail if there is any role in ROC */
3654 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3655 /* don't allow scanning right now */
3660 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3662 wl1271_ps_elp_sleep(wl);
3664 mutex_unlock(&wl->mutex);
3669 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3670 struct ieee80211_vif *vif)
3672 struct wl1271 *wl = hw->priv;
3673 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3674 struct cfg80211_scan_info info = {
3679 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3681 mutex_lock(&wl->mutex);
3683 if (unlikely(wl->state != WLCORE_STATE_ON))
3686 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3689 ret = wl1271_ps_elp_wakeup(wl);
3693 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3694 ret = wl->ops->scan_stop(wl, wlvif);
3700 * Rearm the tx watchdog just before idling scan. This
3701 * prevents just-finished scans from triggering the watchdog
3703 wl12xx_rearm_tx_watchdog_locked(wl);
3705 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3706 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3707 wl->scan_wlvif = NULL;
3708 wl->scan.req = NULL;
3709 ieee80211_scan_completed(wl->hw, &info);
3712 wl1271_ps_elp_sleep(wl);
3714 mutex_unlock(&wl->mutex);
3716 cancel_delayed_work_sync(&wl->scan_complete_work);
3719 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3720 struct ieee80211_vif *vif,
3721 struct cfg80211_sched_scan_request *req,
3722 struct ieee80211_scan_ies *ies)
3724 struct wl1271 *wl = hw->priv;
3725 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3728 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3730 mutex_lock(&wl->mutex);
3732 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3737 ret = wl1271_ps_elp_wakeup(wl);
3741 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3745 wl->sched_vif = wlvif;
3748 wl1271_ps_elp_sleep(wl);
3750 mutex_unlock(&wl->mutex);
3754 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3755 struct ieee80211_vif *vif)
3757 struct wl1271 *wl = hw->priv;
3758 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3761 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3763 mutex_lock(&wl->mutex);
3765 if (unlikely(wl->state != WLCORE_STATE_ON))
3768 ret = wl1271_ps_elp_wakeup(wl);
3772 wl->ops->sched_scan_stop(wl, wlvif);
3774 wl1271_ps_elp_sleep(wl);
3776 mutex_unlock(&wl->mutex);
3781 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3783 struct wl1271 *wl = hw->priv;
3786 mutex_lock(&wl->mutex);
3788 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3793 ret = wl1271_ps_elp_wakeup(wl);
3797 ret = wl1271_acx_frag_threshold(wl, value);
3799 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3801 wl1271_ps_elp_sleep(wl);
3804 mutex_unlock(&wl->mutex);
3809 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3811 struct wl1271 *wl = hw->priv;
3812 struct wl12xx_vif *wlvif;
3815 mutex_lock(&wl->mutex);
3817 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3822 ret = wl1271_ps_elp_wakeup(wl);
3826 wl12xx_for_each_wlvif(wl, wlvif) {
3827 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3829 wl1271_warning("set rts threshold failed: %d", ret);
3831 wl1271_ps_elp_sleep(wl);
3834 mutex_unlock(&wl->mutex);
3839 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3842 const u8 *next, *end = skb->data + skb->len;
3843 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3844 skb->len - ieoffset);
3849 memmove(ie, next, end - next);
3850 skb_trim(skb, skb->len - len);
3853 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3854 unsigned int oui, u8 oui_type,
3858 const u8 *next, *end = skb->data + skb->len;
3859 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3860 skb->data + ieoffset,
3861 skb->len - ieoffset);
3866 memmove(ie, next, end - next);
3867 skb_trim(skb, skb->len - len);
3870 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3871 struct ieee80211_vif *vif)
3873 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3874 struct sk_buff *skb;
3877 skb = ieee80211_proberesp_get(wl->hw, vif);
3881 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3882 CMD_TEMPL_AP_PROBE_RESPONSE,
3891 wl1271_debug(DEBUG_AP, "probe response updated");
3892 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3898 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3899 struct ieee80211_vif *vif,
3901 size_t probe_rsp_len,
3904 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3905 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3906 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3907 int ssid_ie_offset, ie_offset, templ_len;
3910 /* no need to change probe response if the SSID is set correctly */
3911 if (wlvif->ssid_len > 0)
3912 return wl1271_cmd_template_set(wl, wlvif->role_id,
3913 CMD_TEMPL_AP_PROBE_RESPONSE,
3918 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3919 wl1271_error("probe_rsp template too big");
3923 /* start searching from IE offset */
3924 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3926 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3927 probe_rsp_len - ie_offset);
3929 wl1271_error("No SSID in beacon!");
3933 ssid_ie_offset = ptr - probe_rsp_data;
3934 ptr += (ptr[1] + 2);
3936 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3938 /* insert SSID from bss_conf */
3939 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3940 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3941 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3942 bss_conf->ssid, bss_conf->ssid_len);
3943 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3945 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3946 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3947 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3949 return wl1271_cmd_template_set(wl, wlvif->role_id,
3950 CMD_TEMPL_AP_PROBE_RESPONSE,
3956 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3957 struct ieee80211_vif *vif,
3958 struct ieee80211_bss_conf *bss_conf,
3961 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3964 if (changed & BSS_CHANGED_ERP_SLOT) {
3965 if (bss_conf->use_short_slot)
3966 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3968 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3970 wl1271_warning("Set slot time failed %d", ret);
3975 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3976 if (bss_conf->use_short_preamble)
3977 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3979 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3982 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3983 if (bss_conf->use_cts_prot)
3984 ret = wl1271_acx_cts_protect(wl, wlvif,
3987 ret = wl1271_acx_cts_protect(wl, wlvif,
3988 CTSPROTECT_DISABLE);
3990 wl1271_warning("Set ctsprotect failed %d", ret);
3999 static int wlcore_set_beacon_template(struct wl1271 *wl,
4000 struct ieee80211_vif *vif,
4003 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4004 struct ieee80211_hdr *hdr;
4007 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4008 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4016 wl1271_debug(DEBUG_MASTER, "beacon updated");
4018 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4020 dev_kfree_skb(beacon);
4023 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4024 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4026 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4031 dev_kfree_skb(beacon);
4035 wlvif->wmm_enabled =
4036 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4037 WLAN_OUI_TYPE_MICROSOFT_WMM,
4038 beacon->data + ieoffset,
4039 beacon->len - ieoffset);
4042 * In case we already have a probe-resp beacon set explicitly
4043 * by usermode, don't use the beacon data.
4045 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4048 /* remove TIM ie from probe response */
4049 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4052 * remove p2p ie from probe response.
4053 * the fw reponds to probe requests that don't include
4054 * the p2p ie. probe requests with p2p ie will be passed,
4055 * and will be responded by the supplicant (the spec
4056 * forbids including the p2p ie when responding to probe
4057 * requests that didn't include it).
4059 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4060 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4062 hdr = (struct ieee80211_hdr *) beacon->data;
4063 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4064 IEEE80211_STYPE_PROBE_RESP);
4066 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4071 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4072 CMD_TEMPL_PROBE_RESPONSE,
4077 dev_kfree_skb(beacon);
4085 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4086 struct ieee80211_vif *vif,
4087 struct ieee80211_bss_conf *bss_conf,
4090 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4091 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4094 if (changed & BSS_CHANGED_BEACON_INT) {
4095 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4096 bss_conf->beacon_int);
4098 wlvif->beacon_int = bss_conf->beacon_int;
4101 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4102 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4104 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4107 if (changed & BSS_CHANGED_BEACON) {
4108 ret = wlcore_set_beacon_template(wl, vif, is_ap);
4112 if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4114 ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4121 wl1271_error("beacon info change failed: %d", ret);
4125 /* AP mode changes */
4126 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4127 struct ieee80211_vif *vif,
4128 struct ieee80211_bss_conf *bss_conf,
4131 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4134 if (changed & BSS_CHANGED_BASIC_RATES) {
4135 u32 rates = bss_conf->basic_rates;
4137 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4139 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4140 wlvif->basic_rate_set);
4142 ret = wl1271_init_ap_rates(wl, wlvif);
4144 wl1271_error("AP rate policy change failed %d", ret);
4148 ret = wl1271_ap_init_templates(wl, vif);
4152 /* No need to set probe resp template for mesh */
4153 if (!ieee80211_vif_is_mesh(vif)) {
4154 ret = wl1271_ap_set_probe_resp_tmpl(wl,
4161 ret = wlcore_set_beacon_template(wl, vif, true);
4166 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4170 if (changed & BSS_CHANGED_BEACON_ENABLED) {
4171 if (bss_conf->enable_beacon) {
4172 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4173 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4177 ret = wl1271_ap_init_hwenc(wl, wlvif);
4181 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4182 wl1271_debug(DEBUG_AP, "started AP");
4185 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4187 * AP might be in ROC in case we have just
4188 * sent auth reply. handle it.
4190 if (test_bit(wlvif->role_id, wl->roc_map))
4191 wl12xx_croc(wl, wlvif->role_id);
4193 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4197 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4198 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4200 wl1271_debug(DEBUG_AP, "stopped AP");
4205 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4209 /* Handle HT information change */
4210 if ((changed & BSS_CHANGED_HT) &&
4211 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4212 ret = wl1271_acx_set_ht_information(wl, wlvif,
4213 bss_conf->ht_operation_mode);
4215 wl1271_warning("Set ht information failed %d", ret);
4224 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4225 struct ieee80211_bss_conf *bss_conf,
4231 wl1271_debug(DEBUG_MAC80211,
4232 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4233 bss_conf->bssid, bss_conf->aid,
4234 bss_conf->beacon_int,
4235 bss_conf->basic_rates, sta_rate_set);
4237 wlvif->beacon_int = bss_conf->beacon_int;
4238 rates = bss_conf->basic_rates;
4239 wlvif->basic_rate_set =
4240 wl1271_tx_enabled_rates_get(wl, rates,
4243 wl1271_tx_min_rate_get(wl,
4244 wlvif->basic_rate_set);
4248 wl1271_tx_enabled_rates_get(wl,
4252 /* we only support sched_scan while not connected */
4253 if (wl->sched_vif == wlvif)
4254 wl->ops->sched_scan_stop(wl, wlvif);
4256 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4260 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4264 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4268 wlcore_set_ssid(wl, wlvif);
4270 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4275 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4279 /* revert back to minimum rates for the current band */
4280 wl1271_set_band_rate(wl, wlvif);
4281 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4283 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4287 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4288 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4289 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4294 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4297 /* STA/IBSS mode changes */
/*
 * Handle mac80211 BSS-info changes for STA and IBSS vifs: beacon/IBSS
 * state, idle, CQM thresholds, BSSID set/clear, beacon filtering, ERP,
 * join, association, power-save, HT caps/info and ARP filtering.
 * Called with wl->mutex held and the chip awake (see the
 * bss_info_changed entry point below).
 * NOTE(review): many intermediate lines (error checks, braces) are
 * elided in this excerpt; comments describe only the visible code.
 */
4298 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4299 struct ieee80211_vif *vif,
4300 struct ieee80211_bss_conf *bss_conf,
4303 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4304 bool do_join = false;
4305 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4306 bool ibss_joined = false;
4307 u32 sta_rate_set = 0;
4309 struct ieee80211_sta *sta;
4310 bool sta_exists = false;
4311 struct ieee80211_sta_ht_cap sta_ht_cap;
4314 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
/* track IBSS join/leave; on leave, drop assoc state and stop the role */
4320 if (changed & BSS_CHANGED_IBSS) {
4321 if (bss_conf->ibss_joined) {
4322 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4325 wlcore_unset_assoc(wl, wlvif);
4326 wl12xx_cmd_role_stop_sta(wl, wlvif);
4330 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4333 /* Need to update the SSID (for filtering etc) */
4334 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4337 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4338 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4339 bss_conf->enable_beacon ? "enabled" : "disabled");
4344 if (changed & BSS_CHANGED_IDLE && !is_ibss)
4345 wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
/* connection-quality monitoring: program the RSSI/SNR trigger in FW */
4347 if (changed & BSS_CHANGED_CQM) {
4348 bool enable = false;
4349 if (bss_conf->cqm_rssi_thold)
4351 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4352 bss_conf->cqm_rssi_thold,
4353 bss_conf->cqm_rssi_hyst);
4356 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
/*
 * snapshot the AP's supported rates and HT caps; presumably done under
 * rcu_read_lock() around ieee80211_find_sta() (elided here) — verify.
 */
4359 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4360 BSS_CHANGED_ASSOC)) {
4362 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4364 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4366 /* save the supp_rates of the ap */
4367 sta_rate_set = sta->supp_rates[wlvif->band];
4368 if (sta->ht_cap.ht_supported)
4370 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4371 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4372 sta_ht_cap = sta->ht_cap;
4379 if (changed & BSS_CHANGED_BSSID) {
4380 if (!is_zero_ether_addr(bss_conf->bssid)) {
4381 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4386 /* Need to update the BSSID (for filtering etc) */
4389 ret = wlcore_clear_bssid(wl, wlvif);
4395 if (changed & BSS_CHANGED_IBSS) {
4396 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4397 bss_conf->ibss_joined);
4399 if (bss_conf->ibss_joined) {
4400 u32 rates = bss_conf->basic_rates;
4401 wlvif->basic_rate_set =
4402 wl1271_tx_enabled_rates_get(wl, rates,
4405 wl1271_tx_min_rate_get(wl,
4406 wlvif->basic_rate_set);
4408 /* by default, use 11b + OFDM rates */
4409 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4410 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
/* DTIM period known -> beacon filtering can be enabled in FW */
4416 if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4417 /* enable beacon filtering */
4418 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4423 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4428 ret = wlcore_join(wl, wlvif);
4430 wl1271_warning("cmd join failed %d", ret);
4435 if (changed & BSS_CHANGED_ASSOC) {
4436 if (bss_conf->assoc) {
4437 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
/* re-send the authorized state if mac80211 already authorized us */
4442 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4443 wl12xx_set_authorized(wl, wlvif);
4445 wlcore_unset_assoc(wl, wlvif);
/* enter/exit power save; forced vs. auto mode comes from conf */
4449 if (changed & BSS_CHANGED_PS) {
4450 if ((bss_conf->ps) &&
4451 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4452 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4456 if (wl->conf.conn.forced_ps) {
4457 ps_mode = STATION_POWER_SAVE_MODE;
4458 ps_mode_str = "forced";
4460 ps_mode = STATION_AUTO_PS_MODE;
4461 ps_mode_str = "auto";
4464 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4466 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4468 wl1271_warning("enter %s ps failed %d",
4470 } else if (!bss_conf->ps &&
4471 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4472 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4474 ret = wl1271_ps_set_mode(wl, wlvif,
4475 STATION_ACTIVE_MODE);
4477 wl1271_warning("exit auto ps failed %d", ret);
4481 /* Handle new association with HT. Do this after join. */
4484 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4486 ret = wlcore_hw_set_peer_cap(wl,
4492 wl1271_warning("Set ht cap failed %d", ret);
4498 ret = wl1271_acx_set_ht_information(wl, wlvif,
4499 bss_conf->ht_operation_mode);
4501 wl1271_warning("Set ht information failed %d",
4508 /* Handle arp filtering. Done after join. */
4509 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4510 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4511 __be32 addr = bss_conf->arp_addr_list[0];
4512 wlvif->sta.qos = bss_conf->qos;
4513 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4515 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4516 wlvif->ip_addr = addr;
4518 * The template should have been configured only upon
4519 * association. however, it seems that the correct ip
4520 * isn't being set (when sending), so we have to
4521 * reconfigure the template upon every ip change.
4523 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4525 wl1271_warning("build arp rsp failed: %d", ret);
4529 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4530 (ACX_ARP_FILTER_ARP_FILTERING |
4531 ACX_ARP_FILTER_AUTO_ARP),
/* no (single) IP / not associated: disable ARP filtering */
4535 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
/*
 * mac80211 bss_info_changed entry point: take wl->mutex, wake the chip,
 * apply TX-power changes, then dispatch to the AP or STA/IBSS handler.
 */
4546 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4547 struct ieee80211_vif *vif,
4548 struct ieee80211_bss_conf *bss_conf,
4551 struct wl1271 *wl = hw->priv;
4552 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4553 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4556 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4557 wlvif->role_id, (int)changed);
4560 * make sure to cancel pending disconnections if our association
4563 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4564 cancel_delayed_work_sync(&wlvif->connection_loss_work);
/* flush pending frames before the AP stops beaconing */
4566 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4567 !bss_conf->enable_beacon)
4568 wl1271_tx_flush(wl);
4570 mutex_lock(&wl->mutex);
4572 if (unlikely(wl->state != WLCORE_STATE_ON))
4575 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4578 ret = wl1271_ps_elp_wakeup(wl);
4582 if ((changed & BSS_CHANGED_TXPOWER) &&
4583 bss_conf->txpower != wlvif->power_level) {
4585 ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4589 wlvif->power_level = bss_conf->txpower;
/* per-mode dispatch; both handlers run under wl->mutex, chip awake */
4593 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4595 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4597 wl1271_ps_elp_sleep(wl);
4600 mutex_unlock(&wl->mutex);
/*
 * mac80211 add_chanctx callback — nothing to program in FW at this
 * stage; only log the new channel context.
 */
4603 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4604 struct ieee80211_chanctx_conf *ctx)
4606 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4607 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4608 cfg80211_get_chandef_type(&ctx->def));
/*
 * mac80211 remove_chanctx callback — logging only; the real teardown
 * happens in unassign_vif_chanctx.
 */
4612 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4613 struct ieee80211_chanctx_conf *ctx)
4615 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4616 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4617 cfg80211_get_chandef_type(&ctx->def));
/*
 * mac80211 change_chanctx callback: walk all vifs bound to this
 * context and start CAC (radar detection) on AP vifs when the context
 * gained radar on a usable DFS channel.
 */
4620 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4621 struct ieee80211_chanctx_conf *ctx,
4624 struct wl1271 *wl = hw->priv;
4625 struct wl12xx_vif *wlvif;
4627 int channel = ieee80211_frequency_to_channel(
4628 ctx->def.chan->center_freq);
4630 wl1271_debug(DEBUG_MAC80211,
4631 "mac80211 change chanctx %d (type %d) changed 0x%x",
4632 channel, cfg80211_get_chandef_type(&ctx->def), changed);
4634 mutex_lock(&wl->mutex);
4636 ret = wl1271_ps_elp_wakeup(wl);
4640 wl12xx_for_each_wlvif(wl, wlvif) {
4641 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
/* skip vifs bound to a different channel context */
4644 if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4650 /* start radar if needed */
4651 if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4652 wlvif->bss_type == BSS_TYPE_AP_BSS &&
4653 ctx->radar_enabled && !wlvif->radar_enabled &&
4654 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4655 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4656 wlcore_hw_set_cac(wl, wlvif, true);
4657 wlvif->radar_enabled = true;
4661 wl1271_ps_elp_sleep(wl);
4663 mutex_unlock(&wl->mutex);
/*
 * mac80211 assign_vif_chanctx callback: record the context's band,
 * channel and channel type on the vif, reset the default rates for the
 * new band, and start CAC if the channel has radar enabled and is a
 * usable DFS channel.
 */
4666 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4667 struct ieee80211_vif *vif,
4668 struct ieee80211_chanctx_conf *ctx)
4670 struct wl1271 *wl = hw->priv;
4671 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4672 int channel = ieee80211_frequency_to_channel(
4673 ctx->def.chan->center_freq);
4676 wl1271_debug(DEBUG_MAC80211,
4677 "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4678 wlvif->role_id, channel,
4679 cfg80211_get_chandef_type(&ctx->def),
4680 ctx->radar_enabled, ctx->def.chan->dfs_state);
4682 mutex_lock(&wl->mutex);
4684 if (unlikely(wl->state != WLCORE_STATE_ON))
4687 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4690 ret = wl1271_ps_elp_wakeup(wl);
/* cache channel parameters used by later join/roc commands */
4694 wlvif->band = ctx->def.chan->band;
4695 wlvif->channel = channel;
4696 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4698 /* update default rates according to the band */
4699 wl1271_set_band_rate(wl, wlvif);
4701 if (ctx->radar_enabled &&
4702 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4703 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4704 wlcore_hw_set_cac(wl, wlvif, true);
4705 wlvif->radar_enabled = true;
4708 wl1271_ps_elp_sleep(wl);
4710 mutex_unlock(&wl->mutex);
/*
 * mac80211 unassign_vif_chanctx callback: flush TX, then stop CAC
 * (radar detection) if it was running on this vif.
 */
4715 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4716 struct ieee80211_vif *vif,
4717 struct ieee80211_chanctx_conf *ctx)
4719 struct wl1271 *wl = hw->priv;
4720 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4723 wl1271_debug(DEBUG_MAC80211,
4724 "mac80211 unassign chanctx (role %d) %d (type %d)",
4726 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4727 cfg80211_get_chandef_type(&ctx->def));
/* flush outside the mutex — wl1271_tx_flush takes its own locking */
4729 wl1271_tx_flush(wl);
4731 mutex_lock(&wl->mutex);
4733 if (unlikely(wl->state != WLCORE_STATE_ON))
4736 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4739 ret = wl1271_ps_elp_wakeup(wl);
4743 if (wlvif->radar_enabled) {
4744 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4745 wlcore_hw_set_cac(wl, wlvif, false);
4746 wlvif->radar_enabled = false;
4749 wl1271_ps_elp_sleep(wl);
4751 mutex_unlock(&wl->mutex);
/*
 * Move one AP vif to a new channel context: stop CAC on the old
 * channel, update the vif's band/channel/type, and restart CAC if the
 * new context has radar enabled. Only AP vifs are supported
 * (WARN_ON_ONCE otherwise), and beacons must already be disabled.
 */
4754 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4755 struct wl12xx_vif *wlvif,
4756 struct ieee80211_chanctx_conf *new_ctx)
4758 int channel = ieee80211_frequency_to_channel(
4759 new_ctx->def.chan->center_freq);
4761 wl1271_debug(DEBUG_MAC80211,
4762 "switch vif (role %d) %d -> %d chan_type: %d",
4763 wlvif->role_id, wlvif->channel, channel,
4764 cfg80211_get_chandef_type(&new_ctx->def));
4766 if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4769 WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4771 if (wlvif->radar_enabled) {
4772 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4773 wlcore_hw_set_cac(wl, wlvif, false);
4774 wlvif->radar_enabled = false;
/* adopt the new context's channel parameters */
4777 wlvif->band = new_ctx->def.chan->band;
4778 wlvif->channel = channel;
4779 wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4781 /* start radar if needed */
4782 if (new_ctx->radar_enabled) {
4783 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4784 wlcore_hw_set_cac(wl, wlvif, true);
4785 wlvif->radar_enabled = true;
/*
 * mac80211 switch_vif_chanctx callback: under wl->mutex with the chip
 * awake, apply __wlcore_switch_vif_chan() to every vif in the switch
 * request.
 */
4792 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4793 struct ieee80211_vif_chanctx_switch *vifs,
4795 enum ieee80211_chanctx_switch_mode mode)
4797 struct wl1271 *wl = hw->priv;
4800 wl1271_debug(DEBUG_MAC80211,
4801 "mac80211 switch chanctx n_vifs %d mode %d",
4804 mutex_lock(&wl->mutex);
4806 ret = wl1271_ps_elp_wakeup(wl);
4810 for (i = 0; i < n_vifs; i++) {
4811 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4813 ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4818 wl1271_ps_elp_sleep(wl);
4820 mutex_unlock(&wl->mutex);
/*
 * mac80211 conf_tx callback: program per-AC EDCA parameters (cw_min,
 * cw_max, aifs, txop) and the matching TID configuration into FW.
 * P2P management vifs are skipped.
 */
4825 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4826 struct ieee80211_vif *vif, u16 queue,
4827 const struct ieee80211_tx_queue_params *params)
4829 struct wl1271 *wl = hw->priv;
4830 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4834 if (wlcore_is_p2p_mgmt(wlvif))
4837 mutex_lock(&wl->mutex);
4839 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
/* choose the PS scheme; UAPSD trigger vs. legacy power save */
4842 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4844 ps_scheme = CONF_PS_SCHEME_LEGACY;
4846 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4849 ret = wl1271_ps_elp_wakeup(wl);
4854 * the txop is configured by mac80211 in units of 32us,
4857 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4858 params->cw_min, params->cw_max,
4859 params->aifs, params->txop << 5);
4863 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4864 CONF_CHANNEL_TYPE_EDCF,
4865 wl1271_tx_get_queue(queue),
4866 ps_scheme, CONF_ACK_POLICY_LEGACY,
4870 wl1271_ps_elp_sleep(wl);
4873 mutex_unlock(&wl->mutex);
/*
 * mac80211 get_tsf callback: read the current TSF from FW via an ACX
 * command; returns ULLONG_MAX if the chip is off or the read fails.
 */
4878 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4879 struct ieee80211_vif *vif)
4882 struct wl1271 *wl = hw->priv;
4883 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4884 u64 mactime = ULLONG_MAX;
4887 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4889 mutex_lock(&wl->mutex);
4891 if (unlikely(wl->state != WLCORE_STATE_ON))
4894 ret = wl1271_ps_elp_wakeup(wl);
4898 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4903 wl1271_ps_elp_sleep(wl);
4906 mutex_unlock(&wl->mutex);
/*
 * mac80211 get_survey callback: report only the current operating
 * channel (no per-channel statistics from FW).
 */
4910 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4911 struct survey_info *survey)
4913 struct ieee80211_conf *conf = &hw->conf;
4918 survey->channel = conf->chandef.chan;
/*
 * Allocate an HLID (FW link id) for a new AP-mode station, restore its
 * saved security sequence counter, and account it in the active-STA
 * bookkeeping. Fails when the per-chip station limit is reached.
 */
4923 static int wl1271_allocate_sta(struct wl1271 *wl,
4924 struct wl12xx_vif *wlvif,
4925 struct ieee80211_sta *sta)
4927 struct wl1271_station *wl_sta;
4931 if (wl->active_sta_count >= wl->max_ap_stations) {
4932 wl1271_warning("could not allocate HLID - too much stations");
4936 wl_sta = (struct wl1271_station *)sta->drv_priv;
4937 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4939 wl1271_warning("could not allocate HLID - too many links");
4943 /* use the previous security seq, if this is a recovery/resume */
4944 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4946 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4947 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4948 wl->active_sta_count++;
/*
 * Release an AP-mode station's HLID: clear it from the per-vif map and
 * the PS maps, save its last packet number for recovery, free the FW
 * link, and rearm the TX watchdog once no stations remain.
 */
4952 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4954 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4957 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4958 __clear_bit(hlid, &wl->ap_ps_map);
4959 __clear_bit(hlid, &wl->ap_fw_ps_map);
4962 * save the last used PN in the private part of ieee80211_sta,
4963 * in case of recovery/suspend
4965 wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
4967 wl12xx_free_link(wl, wlvif, &hlid);
4968 wl->active_sta_count--;
4971 * rearm the tx watchdog when the last STA is freed - give the FW a
4972 * chance to return STA-buffered packets before complaining.
4974 if (wl->active_sta_count == 0)
4975 wl12xx_rearm_tx_watchdog_locked(wl);
/*
 * Add an AP-mode peer: allocate an HLID, then register the peer with
 * FW; on failure the HLID is freed again.
 */
4978 static int wl12xx_sta_add(struct wl1271 *wl,
4979 struct wl12xx_vif *wlvif,
4980 struct ieee80211_sta *sta)
4982 struct wl1271_station *wl_sta;
4986 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4988 ret = wl1271_allocate_sta(wl, wlvif, sta);
4992 wl_sta = (struct wl1271_station *)sta->drv_priv;
4993 hlid = wl_sta->hlid;
4995 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
/* undo the HLID allocation if the FW command failed */
4997 wl1271_free_sta(wl, wlvif, hlid);
/*
 * Remove an AP-mode peer from FW and free its HLID.
 * NOTE(review): 'id' in the test_bit() check presumably holds
 * wl_sta->hlid (its declaration is elided in this view) — verify.
 */
5002 static int wl12xx_sta_remove(struct wl1271 *wl,
5003 struct wl12xx_vif *wlvif,
5004 struct ieee80211_sta *sta)
5006 struct wl1271_station *wl_sta;
5009 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5011 wl_sta = (struct wl1271_station *)sta->drv_priv;
5013 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5016 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5020 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
/*
 * Start a remain-on-channel on this vif's role, but only if no other
 * role is currently ROCed (only one ROC is supported at a time).
 */
5024 static void wlcore_roc_if_possible(struct wl1271 *wl,
5025 struct wl12xx_vif *wlvif)
5027 if (find_first_bit(wl->roc_map,
5028 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5031 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5034 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5038 * when wl_sta is NULL, we treat this call as if coming from a
5039 * pending auth reply.
5040 * wl->mutex must be taken and the FW must be awake when the call
/*
 * Track stations that are mid-connection (between add and authorize)
 * on an AP vif, and keep a ROC active while any such station (or a
 * pending auth reply, when wl_sta is NULL) exists. The ROC is released
 * once the counters drop back to zero.
 */
5043 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5044 struct wl1271_station *wl_sta, bool in_conn)
5047 if (WARN_ON(wl_sta && wl_sta->in_connection))
/* first in-connection entity on this vif -> start the ROC */
5050 if (!wlvif->ap_pending_auth_reply &&
5051 !wlvif->inconn_count)
5052 wlcore_roc_if_possible(wl, wlvif);
5055 wl_sta->in_connection = true;
5056 wlvif->inconn_count++;
5058 wlvif->ap_pending_auth_reply = true;
5061 if (wl_sta && !wl_sta->in_connection)
5064 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5067 if (WARN_ON(wl_sta && !wlvif->inconn_count))
5071 wl_sta->in_connection = false;
5072 wlvif->inconn_count--;
5074 wlvif->ap_pending_auth_reply = false;
/* last in-connection entity gone -> cancel the ROC */
5077 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5078 test_bit(wlvif->role_id, wl->roc_map))
5079 wl12xx_croc(wl, wlvif->role_id);
/*
 * Core of the sta_state machine: translate mac80211 station state
 * transitions into FW operations for AP mode (add/remove/authorize
 * peer) and STA mode (authorize, save/restore security sequence
 * numbers across suspend/resume, manage ROCs during connection setup).
 * NOTE(review): the is_ap/is_sta guards on several branches are elided
 * in this excerpt.
 */
5083 static int wl12xx_update_sta_state(struct wl1271 *wl,
5084 struct wl12xx_vif *wlvif,
5085 struct ieee80211_sta *sta,
5086 enum ieee80211_sta_state old_state,
5087 enum ieee80211_sta_state new_state)
5089 struct wl1271_station *wl_sta;
5090 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5091 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5094 wl_sta = (struct wl1271_station *)sta->drv_priv;
5096 /* Add station (AP mode) */
5098 old_state == IEEE80211_STA_NOTEXIST &&
5099 new_state == IEEE80211_STA_NONE) {
5100 ret = wl12xx_sta_add(wl, wlvif, sta);
5104 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5107 /* Remove station (AP mode) */
5109 old_state == IEEE80211_STA_NONE &&
5110 new_state == IEEE80211_STA_NOTEXIST) {
5112 wl12xx_sta_remove(wl, wlvif, sta);
5114 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5117 /* Authorize station (AP mode) */
5119 new_state == IEEE80211_STA_AUTHORIZED) {
5120 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5124 /* reconfigure rates */
5125 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
5129 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5134 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5137 /* Authorize station (STA mode) */
5139 new_state == IEEE80211_STA_AUTHORIZED) {
5140 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5141 ret = wl12xx_set_authorized(wl, wlvif);
/* de-authorize: drop authorized/state-sent flags */
5147 old_state == IEEE80211_STA_AUTHORIZED &&
5148 new_state == IEEE80211_STA_ASSOC) {
5149 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5150 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5153 /* save seq number on disassoc (suspend) */
5155 old_state == IEEE80211_STA_ASSOC &&
5156 new_state == IEEE80211_STA_AUTH) {
5157 wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5158 wlvif->total_freed_pkts = 0;
5161 /* restore seq number on assoc (resume) */
5163 old_state == IEEE80211_STA_AUTH &&
5164 new_state == IEEE80211_STA_ASSOC) {
5165 wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5168 /* clear ROCs on failure or authorization */
5170 (new_state == IEEE80211_STA_AUTHORIZED ||
5171 new_state == IEEE80211_STA_NOTEXIST)) {
5172 if (test_bit(wlvif->role_id, wl->roc_map))
5173 wl12xx_croc(wl, wlvif->role_id);
/* new STA-mode connection starting: ROC if no other ROC is active */
5177 old_state == IEEE80211_STA_NOTEXIST &&
5178 new_state == IEEE80211_STA_NONE) {
5179 if (find_first_bit(wl->roc_map,
5180 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5181 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5182 wl12xx_roc(wl, wlvif, wlvif->role_id,
5183 wlvif->band, wlvif->channel);
/*
 * mac80211 sta_state callback: wrap wl12xx_update_sta_state() with the
 * usual mutex/wakeup bracketing. On a downward transition errors are
 * not propagated (mac80211 requires teardown to succeed).
 */
5189 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5190 struct ieee80211_vif *vif,
5191 struct ieee80211_sta *sta,
5192 enum ieee80211_sta_state old_state,
5193 enum ieee80211_sta_state new_state)
5195 struct wl1271 *wl = hw->priv;
5196 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5199 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5200 sta->aid, old_state, new_state);
5202 mutex_lock(&wl->mutex);
5204 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5209 ret = wl1271_ps_elp_wakeup(wl);
5213 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5215 wl1271_ps_elp_sleep(wl);
5217 mutex_unlock(&wl->mutex);
5218 if (new_state < old_state)
/*
 * mac80211 ampdu_action callback: manage RX block-ack sessions in FW
 * (start/stop, per-link bitmap and global session count). TX BA
 * sessions are handled autonomously by the firmware, so all TX actions
 * are accepted without driver work.
 */
5223 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5224 struct ieee80211_vif *vif,
5225 struct ieee80211_ampdu_params *params)
5227 struct wl1271 *wl = hw->priv;
5228 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5230 u8 hlid, *ba_bitmap;
5231 struct ieee80211_sta *sta = params->sta;
5232 enum ieee80211_ampdu_mlme_action action = params->action;
5233 u16 tid = params->tid;
5234 u16 *ssn = &params->ssn;
5236 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5239 /* sanity check - the fields in FW are only 8bits wide */
5240 if (WARN_ON(tid > 0xFF))
5243 mutex_lock(&wl->mutex);
5245 if (unlikely(wl->state != WLCORE_STATE_ON)) {
/* resolve the FW link (HLID) the BA session belongs to */
5250 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5251 hlid = wlvif->sta.hlid;
5252 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5253 struct wl1271_station *wl_sta;
5255 wl_sta = (struct wl1271_station *)sta->drv_priv;
5256 hlid = wl_sta->hlid;
5262 ba_bitmap = &wl->links[hlid].ba_bitmap;
5264 ret = wl1271_ps_elp_wakeup(wl);
5268 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5272 case IEEE80211_AMPDU_RX_START:
5273 if (!wlvif->ba_support || !wlvif->ba_allowed) {
5278 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5280 wl1271_error("exceeded max RX BA sessions");
5284 if (*ba_bitmap & BIT(tid)) {
5286 wl1271_error("cannot enable RX BA session on active "
5291 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5296 *ba_bitmap |= BIT(tid);
5297 wl->ba_rx_session_count++;
5301 case IEEE80211_AMPDU_RX_STOP:
5302 if (!(*ba_bitmap & BIT(tid))) {
5304 * this happens on reconfig - so only output a debug
5305 * message for now, and don't fail the function.
5307 wl1271_debug(DEBUG_MAC80211,
5308 "no active RX BA session on tid: %d",
5314 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5317 *ba_bitmap &= ~BIT(tid);
5318 wl->ba_rx_session_count--;
5323 * The BA initiator session is managed by the FW independently.
5324 * Falling through here on purpose for all TX AMPDU commands.
5326 case IEEE80211_AMPDU_TX_START:
5327 case IEEE80211_AMPDU_TX_STOP_CONT:
5328 case IEEE80211_AMPDU_TX_STOP_FLUSH:
5329 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5330 case IEEE80211_AMPDU_TX_OPERATIONAL:
5335 wl1271_error("Incorrect ampdu action id=%x\n", action);
5339 wl1271_ps_elp_sleep(wl);
5342 mutex_unlock(&wl->mutex);
/*
 * mac80211 set_bitrate_mask callback: cache the legacy-rate mask per
 * band, and if this is an unassociated STA vif, re-apply the band
 * rates and rate policies immediately (takes effect on next connect
 * otherwise).
 */
5347 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5348 struct ieee80211_vif *vif,
5349 const struct cfg80211_bitrate_mask *mask)
5351 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5352 struct wl1271 *wl = hw->priv;
5355 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5356 mask->control[NL80211_BAND_2GHZ].legacy,
5357 mask->control[NL80211_BAND_5GHZ].legacy);
5359 mutex_lock(&wl->mutex);
/* translate mac80211 legacy masks into HW rate bitmaps per band */
5361 for (i = 0; i < WLCORE_NUM_BANDS; i++)
5362 wlvif->bitrate_masks[i] =
5363 wl1271_tx_enabled_rates_get(wl,
5364 mask->control[i].legacy,
5367 if (unlikely(wl->state != WLCORE_STATE_ON))
5370 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5371 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5373 ret = wl1271_ps_elp_wakeup(wl);
5377 wl1271_set_band_rate(wl, wlvif);
5379 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5380 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5382 wl1271_ps_elp_sleep(wl);
5385 mutex_unlock(&wl->mutex);
/*
 * mac80211 channel_switch callback (STA side): flush TX, hand the
 * switch request to the chip-specific op, and arm a delayed work that
 * reports failure if the switch has not completed ~5s after the
 * expected switch time. If the chip is already off, complete the
 * switch as failed immediately.
 */
5390 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5391 struct ieee80211_vif *vif,
5392 struct ieee80211_channel_switch *ch_switch)
5394 struct wl1271 *wl = hw->priv;
5395 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5398 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5400 wl1271_tx_flush(wl);
5402 mutex_lock(&wl->mutex);
5404 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5405 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5406 ieee80211_chswitch_done(vif, false);
5408 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5412 ret = wl1271_ps_elp_wakeup(wl);
5416 /* TODO: change mac80211 to pass vif as param */
5418 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5419 unsigned long delay_usec;
5421 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5425 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5427 /* indicate failure 5 seconds after channel switch time */
5428 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5430 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5431 usecs_to_jiffies(delay_usec) +
5432 msecs_to_jiffies(5000));
5436 wl1271_ps_elp_sleep(wl);
5439 mutex_unlock(&wl->mutex);
/*
 * Fetch the current beacon for this vif from mac80211 and return a
 * pointer to the requested IE inside it (or NULL).
 * NOTE(review): the returned pointer aliases the beacon skb's data and
 * the skb does not appear to be freed here — verify ownership/lifetime
 * against the callers and any elided lines.
 */
5442 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5443 struct wl12xx_vif *wlvif,
5446 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5447 struct sk_buff *beacon =
5448 ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5453 return cfg80211_find_ie(eid,
5454 beacon->data + ieoffset,
5455 beacon->len - ieoffset);
/*
 * Read the CSA (channel switch announcement) countdown value from the
 * Channel Switch IE of our own beacon. ie[2] skips the 2-byte IE
 * header (id + length).
 */
5458 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5462 const struct ieee80211_channel_sw_ie *ie_csa;
5464 ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5468 ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5469 *csa_count = ie_csa->count;
/*
 * mac80211 channel_switch_beacon callback (AP side): derive the CSA
 * count from our beacon, then ask the chip-specific op to perform the
 * switch and mark the switch as in progress.
 */
5474 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5475 struct ieee80211_vif *vif,
5476 struct cfg80211_chan_def *chandef)
5478 struct wl1271 *wl = hw->priv;
5479 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5480 struct ieee80211_channel_switch ch_switch = {
5482 .chandef = *chandef,
5486 wl1271_debug(DEBUG_MAC80211,
5487 "mac80211 channel switch beacon (role %d)",
5490 ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5492 wl1271_error("error getting beacon (for CSA counter)");
5496 mutex_lock(&wl->mutex);
5498 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5503 ret = wl1271_ps_elp_wakeup(wl);
5507 ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5511 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5514 wl1271_ps_elp_sleep(wl);
5516 mutex_unlock(&wl->mutex);
/*
 * mac80211 flush callback: drain all pending TX frames; the queues and
 * drop arguments are ignored (full flush only).
 */
5519 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5520 u32 queues, bool drop)
5522 struct wl1271 *wl = hw->priv;
5524 wl1271_tx_flush(wl);
/*
 * mac80211 remain_on_channel callback: only one ROC at a time is
 * supported — return -EBUSY if one is active. Otherwise start the
 * device role on the requested channel and schedule the completion
 * work after 'duration' ms.
 */
5527 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5528 struct ieee80211_vif *vif,
5529 struct ieee80211_channel *chan,
5531 enum ieee80211_roc_type type)
5533 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5534 struct wl1271 *wl = hw->priv;
5535 int channel, active_roc, ret = 0;
5537 channel = ieee80211_frequency_to_channel(chan->center_freq);
5539 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5540 channel, wlvif->role_id);
5542 mutex_lock(&wl->mutex);
5544 if (unlikely(wl->state != WLCORE_STATE_ON))
5547 /* return EBUSY if we can't ROC right now */
5548 active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
5549 if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5550 wl1271_warning("active roc on role %d", active_roc);
5555 ret = wl1271_ps_elp_wakeup(wl);
5559 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
/* end the ROC automatically after the requested duration */
5564 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5565 msecs_to_jiffies(duration));
5567 wl1271_ps_elp_sleep(wl);
5569 mutex_unlock(&wl->mutex);
/*
 * Tear down the current ROC: stop the device role of the ROC vif.
 * Returns early if no ROC is active. Caller holds wl->mutex with the
 * chip awake.
 */
5573 static int __wlcore_roc_completed(struct wl1271 *wl)
5575 struct wl12xx_vif *wlvif;
5578 /* already completed */
5579 if (unlikely(!wl->roc_vif))
5582 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5584 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5587 ret = wl12xx_stop_dev(wl, wlvif);
/*
 * Locked wrapper for __wlcore_roc_completed(): take wl->mutex, wake
 * the chip, finish the ROC, then sleep and unlock.
 */
5596 static int wlcore_roc_completed(struct wl1271 *wl)
5600 wl1271_debug(DEBUG_MAC80211, "roc complete");
5602 mutex_lock(&wl->mutex);
5604 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5609 ret = wl1271_ps_elp_wakeup(wl);
5613 ret = __wlcore_roc_completed(wl);
5615 wl1271_ps_elp_sleep(wl);
5617 mutex_unlock(&wl->mutex);
/*
 * Delayed-work handler fired when the ROC duration elapses: complete
 * the ROC and notify mac80211 that it expired.
 */
5622 static void wlcore_roc_complete_work(struct work_struct *work)
5624 struct delayed_work *dwork;
5628 dwork = to_delayed_work(work);
5629 wl = container_of(dwork, struct wl1271, roc_complete_work);
5631 ret = wlcore_roc_completed(wl);
5633 ieee80211_remain_on_channel_expired(wl->hw);
/*
 * mac80211 cancel_remain_on_channel callback: flush TX, cancel the
 * pending completion work and complete the ROC synchronously.
 */
5636 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5638 struct wl1271 *wl = hw->priv;
5640 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5643 wl1271_tx_flush(wl);
5646 * we can't just flush_work here, because it might deadlock
5647 * (as we might get called from the same workqueue)
5649 cancel_delayed_work_sync(&wl->roc_complete_work);
5650 wlcore_roc_completed(wl);
/*
 * mac80211 sta_rc_update callback. Runs in atomic context, so the
 * bandwidth/HT-cap change is recorded on the vif and the actual FW
 * update is deferred to rc_update_work.
 */
5655 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5656 struct ieee80211_vif *vif,
5657 struct ieee80211_sta *sta,
5660 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5662 wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
/* only bandwidth changes are of interest */
5664 if (!(changed & IEEE80211_RC_BW_CHANGED))
5667 /* this callback is atomic, so schedule a new work */
5668 wlvif->rc_update_bw = sta->bandwidth;
5669 memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap));
5670 ieee80211_queue_work(hw, &wlvif->rc_update_work);
/*
 * mac80211 sta_statistics callback: query the average RSSI from FW and
 * report it as the signal level in sinfo.
 */
5673 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5674 struct ieee80211_vif *vif,
5675 struct ieee80211_sta *sta,
5676 struct station_info *sinfo)
5678 struct wl1271 *wl = hw->priv;
5679 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5683 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5685 mutex_lock(&wl->mutex);
5687 if (unlikely(wl->state != WLCORE_STATE_ON))
5690 ret = wl1271_ps_elp_wakeup(wl);
5694 ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5698 sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
5699 sinfo->signal = rssi_dbm;
5702 wl1271_ps_elp_sleep(wl);
5705 mutex_unlock(&wl->mutex);
/*
 * mac80211 get_expected_throughput callback: convert the FW-reported
 * per-link rate (Mbps) to the Kbps units mac80211 expects.
 */
5708 static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5709 struct ieee80211_sta *sta)
5711 struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5712 struct wl1271 *wl = hw->priv;
5713 u8 hlid = wl_sta->hlid;
5715 /* return in units of Kbps */
5716 return (wl->links[hlid].fw_rate_mbps * 1000);
/*
 * mac80211 tx_frames_pending callback: true while frames sit in either
 * the driver TX queues or the firmware.
 */
5719 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5721 struct wl1271 *wl = hw->priv;
5724 mutex_lock(&wl->mutex);
5726 if (unlikely(wl->state != WLCORE_STATE_ON))
5729 /* packets are considered pending if in the TX queue or the FW */
5730 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5732 mutex_unlock(&wl->mutex);
5737 /* can't be const, mac80211 writes to this */
/*
 * 2.4 GHz legacy rate table (CCK 1-11 Mbps with short-preamble
 * variants, plus OFDM 6-54 Mbps), mapped to the chip's CONF_HW_BIT_RATE
 * values.
 */
5738 static struct ieee80211_rate wl1271_rates[] = {
5740 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5741 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5743 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5744 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5745 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5747 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5748 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5749 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5751 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5752 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5753 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5755 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5756 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5758 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5759 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5761 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5762 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5764 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5765 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5767 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5768 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5770 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5771 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5773 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5774 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5776 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5777 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5780 /* can't be const, mac80211 writes to this */
/* 2.4 GHz channel list: channels 1-14 (2412-2484 MHz). */
5781 static struct ieee80211_channel wl1271_channels[] = {
5782 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5783 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5784 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5785 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5786 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5787 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5788 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5789 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5790 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5791 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5792 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5793 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5794 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5795 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
/*
 * Aggregate 2.4 GHz band descriptor; a per-device copy is made into
 * wl->bands[] in wl1271_init_ieee80211() before registration.
 */
5798 /* can't be const, mac80211 writes to this */
5799 static struct ieee80211_supported_band wl1271_band_2ghz = {
5800 .channels = wl1271_channels,
5801 .n_channels = ARRAY_SIZE(wl1271_channels),
5802 .bitrates = wl1271_rates,
5803 .n_bitrates = ARRAY_SIZE(wl1271_rates),
/*
 * 5 GHz bitrate table: OFDM-only (6-54 Mbps), no CCK rates on this
 * band. hw_value/hw_value_short are identical because short preamble
 * is a CCK-only concept.
 * NOTE(review): the extraction dropped the opening "{ .bitrate = ..."
 * line of each entry.
 */
5806 /* 5 GHz data rates for WL1273 */
5807 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5809 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5810 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5812 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5813 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5815 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5816 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5818 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5819 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5821 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5822 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5824 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5825 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5827 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5828 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5830 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5831 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
/*
 * 5 GHz channel list (UNII-1/2/2e/3 plus the Japanese 5040-5080 MHz
 * channels). Flags and power limits are reset per-registration in
 * wl1271_init_ieee80211() and then trimmed by regulatory rules.
 */
5834 /* 5 GHz band channels for WL1273 */
5835 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5836 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5837 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5838 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5839 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5840 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5841 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5842 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5843 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5844 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5845 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5846 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5847 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5848 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5849 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5850 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5851 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5852 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5853 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5854 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5855 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5856 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5857 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5858 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5859 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5860 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5861 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5862 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5863 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5864 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5865 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5866 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
/*
 * Aggregate 5 GHz band descriptor; copied into wl->bands[] per device
 * in wl1271_init_ieee80211() (mac80211 writes to it, so not const).
 */
5869 static struct ieee80211_supported_band wl1271_band_5ghz = {
5870 .channels = wl1271_channels_5ghz,
5871 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5872 .bitrates = wl1271_rates_5ghz,
5873 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
/*
 * mac80211 callback table for all wlcore-based chips. The .tx entry is
 * not visible in this extraction; suspend/resume are presumably guarded
 * by CONFIG_PM in the original (the surrounding #ifdef lines were
 * dropped) -- TODO confirm against upstream.
 */
5876 static const struct ieee80211_ops wl1271_ops = {
5877 .start = wl1271_op_start,
5878 .stop = wlcore_op_stop,
5879 .add_interface = wl1271_op_add_interface,
5880 .remove_interface = wl1271_op_remove_interface,
5881 .change_interface = wl12xx_op_change_interface,
5883 .suspend = wl1271_op_suspend,
5884 .resume = wl1271_op_resume,
5886 .config = wl1271_op_config,
5887 .prepare_multicast = wl1271_op_prepare_multicast,
5888 .configure_filter = wl1271_op_configure_filter,
5890 .set_key = wlcore_op_set_key,
5891 .hw_scan = wl1271_op_hw_scan,
5892 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
5893 .sched_scan_start = wl1271_op_sched_scan_start,
5894 .sched_scan_stop = wl1271_op_sched_scan_stop,
5895 .bss_info_changed = wl1271_op_bss_info_changed,
5896 .set_frag_threshold = wl1271_op_set_frag_threshold,
5897 .set_rts_threshold = wl1271_op_set_rts_threshold,
5898 .conf_tx = wl1271_op_conf_tx,
5899 .get_tsf = wl1271_op_get_tsf,
5900 .get_survey = wl1271_op_get_survey,
5901 .sta_state = wl12xx_op_sta_state,
5902 .ampdu_action = wl1271_op_ampdu_action,
5903 .tx_frames_pending = wl1271_tx_frames_pending,
5904 .set_bitrate_mask = wl12xx_set_bitrate_mask,
5905 .set_default_unicast_key = wl1271_op_set_default_key_idx,
5906 .channel_switch = wl12xx_op_channel_switch,
5907 .channel_switch_beacon = wlcore_op_channel_switch_beacon,
5908 .flush = wlcore_op_flush,
5909 .remain_on_channel = wlcore_op_remain_on_channel,
5910 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5911 .add_chanctx = wlcore_op_add_chanctx,
5912 .remove_chanctx = wlcore_op_remove_chanctx,
5913 .change_chanctx = wlcore_op_change_chanctx,
5914 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5915 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5916 .switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
5917 .sta_rc_update = wlcore_op_sta_rc_update,
5918 .sta_statistics = wlcore_op_sta_statistics,
5919 .get_expected_throughput = wlcore_op_get_expected_throughput,
/* hooks wl1271_tm_cmd as the nl80211 testmode handler when enabled */
5920 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
/*
 * Translate a firmware RX rate code into the mac80211 rate index for
 * the given band, via the per-chip band_rate_to_idx lookup table.
 * Out-of-range or unsupported codes are logged as errors.
 * NOTE(review): the extraction dropped the error-path returns and the
 * final "return idx;" -- presumably the fallback is rate index 0;
 * verify against upstream.
 */
5924 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
/* reject codes beyond the chip's rate table before indexing */
5930 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5931 wl1271_error("Illegal RX rate from HW: %d", rate);
5935 idx = wl->band_rate_to_idx[band][rate];
5936 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5937 wl1271_error("Unsupported RX rate from HW: %d", rate);
/*
 * Derive the set of WLAN MAC addresses from a base OUI + NIC pair:
 * each address i is oui:nic (visible assignments), with nic presumably
 * incremented per address -- the "nic++" style line was dropped by the
 * extraction; TODO confirm.  If the chip exposes fewer addresses than
 * WLCORE_NUM_MAC_ADDRESSES, the last slot is synthesized from the first
 * with the locally-administered bit set.
 */
5944 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5948 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
/* warn if incrementing the 24-bit NIC part would overflow into the OUI */
5951 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5952 wl1271_warning("NIC part of the MAC address wraps around!");
/* big-endian byte split: oui -> addr[0..2], nic -> addr[3..5] */
5954 for (i = 0; i < wl->num_mac_addr; i++) {
5955 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5956 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5957 wl->addresses[i].addr[2] = (u8) oui;
5958 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5959 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5960 wl->addresses[i].addr[5] = (u8) nic;
5964 /* we may be one address short at the most */
5965 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5968 * turn on the LAA bit in the first address and use it as
5971 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5972 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5973 memcpy(&wl->addresses[idx], &wl->addresses[0],
5974 sizeof(wl->addresses[0]));
/* BIT(1) in octet 0 is the IEEE locally-administered-address flag */
5976 wl->addresses[idx].addr[0] |= BIT(1);
/* publish the full address list to cfg80211 */
5979 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5980 wl->hw->wiphy->addresses = wl->addresses;
/*
 * Read chip identity while powered on: chip ID register, PG (process
 * grade) version, and -- when the chip backend provides get_mac -- the
 * fuse-programmed MAC address. fuse_{oui,nic}_addr are cleared first so
 * a missing get_mac op yields a zero fuse address.
 * NOTE(review): error-path "goto/return" lines were dropped by the
 * extraction.
 */
5983 static int wl12xx_get_hw_info(struct wl1271 *wl)
5987 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5991 wl->fuse_oui_addr = 0;
5992 wl->fuse_nic_addr = 0;
5994 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
/* get_mac is optional per chip family */
5998 if (wl->ops->get_mac)
5999 ret = wl->ops->get_mac(wl);
/*
 * Determine the device MAC address and register the hw with mac80211.
 * Address priority: NVS-stored address; if zeroed, the fuse address;
 * if the NVS holds the de:ad:be:ef:00:00 placeholder, the fuse address
 * (or TI OUI + random NIC when the fuse is also blank). Idempotent:
 * returns early if already registered.
 */
6005 static int wl1271_register_hw(struct wl1271 *wl)
6008 u32 oui_addr = 0, nic_addr = 0;
6009 struct platform_device *pdev = wl->pdev;
6010 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6012 if (wl->mac80211_registered)
/* NVS must hold at least the 12 bytes covering the MAC fields */
6015 if (wl->nvs_len >= 12) {
6016 /* NOTE: The wl->nvs->nvs element must be first, in
6017 * order to simplify the casting, we assume it is at
6018 * the beginning of the wl->nvs structure.
6020 u8 *nvs_ptr = (u8 *)wl->nvs;
/* OUI from NVS bytes 11,10,6; NIC from bytes 5,4,3 (NVS layout) */
6023 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6025 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6028 /* if the MAC address is zeroed in the NVS derive from fuse */
6029 if (oui_addr == 0 && nic_addr == 0) {
6030 oui_addr = wl->fuse_oui_addr;
6031 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
6032 nic_addr = wl->fuse_nic_addr + 1;
/* de:ad:be:ef:00:00 is the placeholder shipped in default NVS files */
6035 if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
6036 wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.\n");
6037 if (!strcmp(pdev_data->family->name, "wl18xx")) {
6038 wl1271_warning("This default nvs file can be removed from the file system\n");
6040 wl1271_warning("Your device performance is not optimized.\n");
6041 wl1271_warning("Please use the calibrator tool to configure your device.\n");
/* no usable fuse address either: fall back to TI OUI + random NIC */
6044 if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
6045 wl1271_warning("Fuse mac address is zero. using random mac\n");
6046 /* Use TI oui and a random nic */
6047 oui_addr = WLCORE_TI_OUI_ADDRESS;
6048 nic_addr = get_random_int();
6050 oui_addr = wl->fuse_oui_addr;
6051 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
6052 nic_addr = wl->fuse_nic_addr + 1;
6056 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6058 ret = ieee80211_register_hw(wl->hw);
6060 wl1271_error("unable to register mac80211 hw: %d", ret);
6064 wl->mac80211_registered = true;
6066 wl1271_debugfs_init(wl);
6068 wl1271_notice("loaded");
/*
 * Tear down the mac80211 registration; stops PLT mode first (the
 * "if (wl->plt)" guard for wl1271_plt_stop was presumably dropped by
 * the extraction -- TODO confirm).
 */
6074 static void wl1271_unregister_hw(struct wl1271 *wl)
6077 wl1271_plt_stop(wl);
6079 ieee80211_unregister_hw(wl->hw);
6080 wl->mac80211_registered = false;
/*
 * One-time configuration of the ieee80211_hw/wiphy before registration:
 * cipher suites, hw flags, interface modes, scan limits, per-device
 * band copies, queue layout, probe-response offload and vendor commands.
 * Returns 0 (error paths, if any, are not visible in this extraction).
 */
6084 static int wl1271_init_ieee80211(struct wl1271 *wl)
6087 static const u32 cipher_suites[] = {
6088 WLAN_CIPHER_SUITE_WEP40,
6089 WLAN_CIPHER_SUITE_WEP104,
6090 WLAN_CIPHER_SUITE_TKIP,
6091 WLAN_CIPHER_SUITE_CCMP,
/* TI-proprietary GEM cipher, advertised alongside the standard suites */
6092 WL1271_CIPHER_SUITE_GEM,
6095 /* The tx descriptor buffer */
6096 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
/* some chips need extra room to relocate the TKIP header */
6098 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6099 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6102 /* FIXME: find a proper value */
6103 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6105 ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6106 ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6107 ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6108 ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6109 ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6110 ieee80211_hw_set(wl->hw, AP_LINK_PS);
6111 ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6112 ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6113 ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6114 ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6115 ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6116 ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6117 ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6118 ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);
6120 wl->hw->wiphy->cipher_suites = cipher_suites;
6121 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6123 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6124 BIT(NL80211_IFTYPE_AP) |
6125 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6126 BIT(NL80211_IFTYPE_P2P_CLIENT) |
/* mesh mode only when mac80211 mesh support is compiled in */
6127 #ifdef CONFIG_MAC80211_MESH
6128 BIT(NL80211_IFTYPE_MESH_POINT) |
6130 BIT(NL80211_IFTYPE_P2P_GO);
6132 wl->hw->wiphy->max_scan_ssids = 1;
6133 wl->hw->wiphy->max_sched_scan_ssids = 16;
6134 wl->hw->wiphy->max_match_sets = 16;
6136 * Maximum length of elements in scanning probe request templates
6137 * should be the maximum length possible for a template, without
6138 * the IEEE80211 header of the template
6140 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6141 sizeof(struct ieee80211_header);
6143 wl->hw->wiphy->max_sched_scan_reqs = 1;
6144 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6145 sizeof(struct ieee80211_header);
/* ROC duration limit in ms, enforced by the firmware */
6147 wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6149 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6150 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6151 WIPHY_FLAG_HAS_CHANNEL_SWITCH;
6153 wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
6155 /* make sure all our channels fit in the scanned_ch bitmask */
6156 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6157 ARRAY_SIZE(wl1271_channels_5ghz) >
6158 WL1271_MAX_CHANNELS);
6160 * clear channel flags from the previous usage
6161 * and restore max_power & max_antenna_gain values.
6163 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6164 wl1271_band_2ghz.channels[i].flags = 0;
6165 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6166 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6169 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6170 wl1271_band_5ghz.channels[i].flags = 0;
6171 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6172 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6176 * We keep local copies of the band structs because we need to
6177 * modify them on a per-device basis.
6179 memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
6180 sizeof(wl1271_band_2ghz));
/* splice in the chip-specific HT capabilities per band */
6181 memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
6182 &wl->ht_cap[NL80211_BAND_2GHZ],
6183 sizeof(*wl->ht_cap));
6184 memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
6185 sizeof(wl1271_band_5ghz));
6186 memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
6187 &wl->ht_cap[NL80211_BAND_5GHZ],
6188 sizeof(*wl->ht_cap));
6190 wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
6191 &wl->bands[NL80211_BAND_2GHZ];
6192 wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
6193 &wl->bands[NL80211_BAND_5GHZ];
6196 * allow 4 queues per mac address we support +
6197 * 1 cab queue per mac + one global offchannel Tx queue
6199 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
6201 /* the last queue is the offchannel queue */
6202 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6203 wl->hw->max_rates = 1;
6205 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6207 /* the FW answers probe-requests in AP-mode */
6208 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6209 wl->hw->wiphy->probe_resp_offload =
6210 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6211 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6212 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6214 /* allowed interface combinations */
6215 wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6216 wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6218 /* register vendor commands */
6219 wlcore_set_vendor_commands(wl->hw->wiphy);
6221 SET_IEEE80211_DEV(wl->hw, wl->dev);
/* per-STA and per-vif private data sizes mac80211 allocates for us */
6223 wl->hw->sta_data_size = sizeof(struct wl1271_station);
6224 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6226 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
/*
 * Allocate and initialize the ieee80211_hw + wl1271 core state shared
 * by all wlcore chip drivers: per-link TX queues, deferred RX/TX
 * queues, work items, the freezable workqueue, locks, and the large
 * DMA/aggregation/fwlog/mailbox buffers. On any failure it unwinds in
 * reverse order (the goto labels themselves were dropped by the
 * extraction) and returns ERR_PTR(ret).
 *
 * priv_size: size of the chip-specific private area (wl->priv).
 * aggr_buf_size: size of the TX/RX aggregation buffer (page-rounded).
 * mbox_size: firmware event mailbox size (allocated with GFP_DMA).
 */
6231 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6234 struct ieee80211_hw *hw;
6239 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6241 wl1271_error("could not alloc ieee80211_hw");
6247 memset(wl, 0, sizeof(*wl));
/* chip-specific private data lives in a separate allocation */
6249 wl->priv = kzalloc(priv_size, GFP_KERNEL);
6251 wl1271_error("could not alloc wl priv");
6253 goto err_priv_alloc;
6256 INIT_LIST_HEAD(&wl->wlvif_list);
6261 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6262 * we don't allocate any additional resource here, so that's fine.
6264 for (i = 0; i < NUM_TX_QUEUES; i++)
6265 for (j = 0; j < WLCORE_MAX_LINKS; j++)
6266 skb_queue_head_init(&wl->links[j].tx_queue[i]);
6268 skb_queue_head_init(&wl->deferred_rx_queue);
6269 skb_queue_head_init(&wl->deferred_tx_queue);
6271 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
6272 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6273 INIT_WORK(&wl->tx_work, wl1271_tx_work);
6274 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6275 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6276 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6277 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
/* freezable so in-flight work is quiesced across system suspend */
6279 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6280 if (!wl->freezable_wq) {
6287 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6288 wl->band = NL80211_BAND_2GHZ;
6289 wl->channel_type = NL80211_CHAN_NO_HT;
6291 wl->sg_enabled = true;
6292 wl->sleep_auth = WL1271_PSM_ILLEGAL;
6293 wl->recovery_count = 0;
6296 wl->ap_fw_ps_map = 0;
6298 wl->system_hlid = WL12XX_SYSTEM_HLID;
6299 wl->active_sta_count = 0;
6300 wl->active_link_count = 0;
6303 /* The system link is always allocated */
6304 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6306 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6307 for (i = 0; i < wl->num_tx_desc; i++)
6308 wl->tx_frames[i] = NULL;
6310 spin_lock_init(&wl->wl_lock);
6312 wl->state = WLCORE_STATE_OFF;
6313 wl->fw_type = WL12XX_FW_TYPE_NONE;
6314 mutex_init(&wl->mutex);
6315 mutex_init(&wl->flush_mutex);
6316 init_completion(&wl->nvs_loading_complete);
/* aggregation buffer must be physically contiguous pages */
6318 order = get_order(aggr_buf_size);
6319 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6320 if (!wl->aggr_buf) {
6324 wl->aggr_buf_size = aggr_buf_size;
/* placeholder skb used to keep the FW TX path busy when queues drain */
6326 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6327 if (!wl->dummy_packet) {
6332 /* Allocate one page for the FW log */
6333 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6336 goto err_dummy_packet;
/* event mailbox is DMA'd by the interface layer, hence GFP_DMA */
6339 wl->mbox_size = mbox_size;
6340 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6346 wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6347 if (!wl->buffer_32) {
/* error unwind: free in reverse order of allocation */
6358 free_page((unsigned long)wl->fwlog);
6361 dev_kfree_skb(wl->dummy_packet);
6364 free_pages((unsigned long)wl->aggr_buf, order);
6367 destroy_workqueue(wl->freezable_wq);
6370 wl1271_debugfs_exit(wl);
6374 ieee80211_free_hw(hw);
6378 return ERR_PTR(ret);
6380 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
/*
 * Release everything wlcore_alloc_hw() (and later FW loading) created.
 * Sets fwlog_size = -1 under the mutex first so any blocked fwlog
 * readers are unblocked before the buffers go away. Always returns 0.
 */
6382 int wlcore_free_hw(struct wl1271 *wl)
6384 /* Unblock any fwlog readers */
6385 mutex_lock(&wl->mutex);
6386 wl->fwlog_size = -1;
6387 mutex_unlock(&wl->mutex);
6389 wlcore_sysfs_free(wl);
6391 kfree(wl->buffer_32);
6393 free_page((unsigned long)wl->fwlog);
6394 dev_kfree_skb(wl->dummy_packet);
6395 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6397 wl1271_debugfs_exit(wl);
/* forget the FW so a re-probe reloads it from scratch */
6401 wl->fw_type = WL12XX_FW_TYPE_NONE;
6405 kfree(wl->raw_fw_status);
6406 kfree(wl->fw_status);
6407 kfree(wl->tx_res_if);
6408 destroy_workqueue(wl->freezable_wq);
6411 ieee80211_free_hw(wl->hw);
6415 EXPORT_SYMBOL_GPL(wlcore_free_hw);
/*
 * Wake-on-WLAN capabilities advertised to cfg80211; installed on the
 * wiphy in wlcore_nvs_cb() only when the platform keeps power in
 * suspend (pdev_data->pwr_in_suspend).
 */
6418 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6419 .flags = WIPHY_WOWLAN_ANY,
6420 .n_patterns = WL1271_MAX_RX_FILTERS,
6421 .pattern_min_len = 1,
6422 .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
/*
 * Minimal hard-IRQ handler used for edge-triggered interrupts: defer
 * all real work to the threaded handler (wlcore_irq).
 */
6426 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6428 return IRQ_WAKE_THREAD;
/*
 * Firmware-loader completion callback: finishes device probe once the
 * NVS (calibration) file request resolves. Copies the NVS (if any),
 * runs chip setup, wires up the IRQ (threaded; a hard-IRQ trampoline is
 * used for edge-triggered lines), powers the chip briefly to read HW
 * info, then registers with mac80211 and sysfs. Error unwinding labels
 * were dropped by the extraction. Always releases the firmware and
 * completes nvs_loading_complete so wlcore_remove() cannot hang.
 */
6431 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6433 struct wl1271 *wl = context;
6434 struct platform_device *pdev = wl->pdev;
6435 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6436 struct resource *res;
6439 irq_handler_t hardirq_fn = NULL;
/* fw may be NULL (direct call from wlcore_probe when no NVS is used) */
6442 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6444 wl1271_error("Could not allocate nvs data");
6447 wl->nvs_len = fw->size;
6448 } else if (pdev_data->family->nvs_name) {
6449 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6450 pdev_data->family->nvs_name);
/* chip-family-specific setup (register tables, quirks, num_tx_desc) */
6458 ret = wl->ops->setup(wl);
6462 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6464 /* adjust some runtime configuration parameters */
6465 wlcore_adjust_conf(wl);
6467 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6469 wl1271_error("Could not get IRQ resource");
6473 wl->irq = res->start;
6474 wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6475 wl->if_ops = pdev_data->if_ops;
/* edge-triggered IRQs need a hardirq trampoline to avoid losing edges */
6477 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6478 hardirq_fn = wlcore_hardirq;
6480 wl->irq_flags |= IRQF_ONESHOT;
/* power on only long enough to identify the chip */
6482 ret = wl12xx_set_power_on(wl);
6486 ret = wl12xx_get_hw_info(wl);
6488 wl1271_error("couldn't get hw info");
6489 wl1271_power_off(wl);
6493 ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6494 wl->irq_flags, pdev->name, wl);
6496 wl1271_error("interrupt configuration failed");
6497 wl1271_power_off(wl);
/* advertise WoWLAN only if the IRQ can wake and power stays on */
6502 ret = enable_irq_wake(wl->irq);
6504 wl->irq_wake_enabled = true;
6505 device_init_wakeup(wl->dev, 1);
6506 if (pdev_data->pwr_in_suspend)
6507 wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6510 disable_irq(wl->irq);
6511 wl1271_power_off(wl);
6513 ret = wl->ops->identify_chip(wl);
6517 ret = wl1271_init_ieee80211(wl);
6521 ret = wl1271_register_hw(wl);
6525 ret = wlcore_sysfs_init(wl);
/* gate for wlcore_remove(): only tear down a fully-probed device */
6529 wl->initialized = true;
6533 wl1271_unregister_hw(wl);
6536 free_irq(wl->irq, wl);
6542 release_firmware(fw);
6543 complete_all(&wl->nvs_loading_complete);
/*
 * Common probe entry for wlcore platform devices. Validates that the
 * chip driver filled in ops/ptable and platform data, then either
 * requests the family's NVS file asynchronously (wlcore_nvs_cb runs on
 * completion) or calls wlcore_nvs_cb(NULL, wl) directly when no NVS
 * file is configured.
 * NOTE(review): this tree calls reject_firmware_nowait() (a
 * linux-libre style substitution?) while the error message says
 * "request_firmware_nowait failed" -- confirm which loader API is
 * intended here.
 */
6546 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6548 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6549 const char *nvs_name;
/* chip driver must have provided its ops and partition table */
6552 if (!wl->ops || !wl->ptable || !pdev_data)
6555 wl->dev = &pdev->dev;
6557 platform_set_drvdata(pdev, wl);
6559 if (pdev_data->family && pdev_data->family->nvs_name) {
6560 nvs_name = pdev_data->family->nvs_name;
6561 ret = reject_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6562 nvs_name, &pdev->dev, GFP_KERNEL,
6565 wl1271_error("request_firmware_nowait failed for %s: %d",
/* unblock wlcore_remove() even though probe setup failed */
6567 complete_all(&wl->nvs_loading_complete);
/* no NVS file for this family: finish probe synchronously */
6570 wlcore_nvs_cb(NULL, wl);
6575 EXPORT_SYMBOL_GPL(wlcore_probe);
/*
 * Platform-device removal: wait for any in-flight async NVS load to
 * finish first (wlcore_nvs_cb completes nvs_loading_complete on every
 * path), bail out if probe never completed (wl->initialized unset),
 * then undo wakeup/IRQ setup and unregister from mac80211.
 */
6577 int wlcore_remove(struct platform_device *pdev)
6579 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6580 struct wl1271 *wl = platform_get_drvdata(pdev);
6582 if (pdev_data->family && pdev_data->family->nvs_name)
6583 wait_for_completion(&wl->nvs_loading_complete);
6584 if (!wl->initialized)
6587 if (wl->irq_wake_enabled) {
6588 device_init_wakeup(wl->dev, 0);
6589 disable_irq_wake(wl->irq);
6591 wl1271_unregister_hw(wl);
6592 free_irq(wl->irq, wl);
6597 EXPORT_SYMBOL_GPL(wlcore_remove);
/*
 * Module parameters and metadata. wl12xx_debug_level is exported so the
 * chip-family modules (wl12xx/wl18xx) share one debug mask. The
 * fwlog/bug_on_recovery/no_recovery parameters back the file-scope
 * variables declared near the top of this file and are consumed in
 * wlcore_adjust_conf() (not visible in this chunk -- per upstream).
 */
6599 u32 wl12xx_debug_level = DEBUG_NONE;
6600 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6601 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6602 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6604 module_param_named(fwlog, fwlog_param, charp, 0);
6605 MODULE_PARM_DESC(fwlog,
6606 "FW logger options: continuous, dbgpins or disable");
6608 module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
6609 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6611 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6612 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6614 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6615 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6617 MODULE_LICENSE("GPL");
6618 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6619 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");