// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (c) 2020 The Linux Foundation. All rights reserved. */

#include <linux/delay.h>

#include <net/mac80211.h>
18 static const struct wiphy_wowlan_support ath11k_wowlan_support = {
19 .flags = WIPHY_WOWLAN_DISCONNECT |
20 WIPHY_WOWLAN_MAGIC_PKT |
21 WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
22 WIPHY_WOWLAN_GTK_REKEY_FAILURE,
23 .pattern_min_len = WOW_MIN_PATTERN_SIZE,
24 .pattern_max_len = WOW_MAX_PATTERN_SIZE,
25 .max_pkt_offset = WOW_MAX_PKT_OFFSET,
28 int ath11k_wow_enable(struct ath11k_base *ab)
30 struct ath11k *ar = ath11k_ab_to_ar(ab, 0);
33 clear_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);
35 for (i = 0; i < ATH11K_WOW_RETRY_NUM; i++) {
36 reinit_completion(&ab->htc_suspend);
38 ret = ath11k_wmi_wow_enable(ar);
40 ath11k_warn(ab, "failed to issue wow enable: %d\n", ret);
44 ret = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ);
47 "timed out while waiting for htc suspend completion\n");
51 if (test_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags))
52 /* success, suspend complete received */
55 ath11k_warn(ab, "htc suspend not complete, retrying (try %d)\n",
57 msleep(ATH11K_WOW_RETRY_WAIT_MS);
60 ath11k_warn(ab, "htc suspend not complete, failing after %d tries\n", i);
65 int ath11k_wow_wakeup(struct ath11k_base *ab)
67 struct ath11k *ar = ath11k_ab_to_ar(ab, 0);
70 reinit_completion(&ab->wow.wakeup_completed);
72 ret = ath11k_wmi_wow_host_wakeup_ind(ar);
74 ath11k_warn(ab, "failed to send wow wakeup indication: %d\n",
79 ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
81 ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n");
88 static int ath11k_wow_vif_cleanup(struct ath11k_vif *arvif)
90 struct ath11k *ar = arvif->ar;
93 for (i = 0; i < WOW_EVENT_MAX; i++) {
94 ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
96 ath11k_warn(ar->ab, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
97 wow_wakeup_event(i), arvif->vdev_id, ret);
102 for (i = 0; i < ar->wow.max_num_patterns; i++) {
103 ret = ath11k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
105 ath11k_warn(ar->ab, "failed to delete wow pattern %d for vdev %i: %d\n",
106 i, arvif->vdev_id, ret);
114 static int ath11k_wow_cleanup(struct ath11k *ar)
116 struct ath11k_vif *arvif;
119 lockdep_assert_held(&ar->conf_mutex);
121 list_for_each_entry(arvif, &ar->arvifs, list) {
122 ret = ath11k_wow_vif_cleanup(arvif);
124 ath11k_warn(ar->ab, "failed to clean wow wakeups on vdev %i: %d\n",
125 arvif->vdev_id, ret);
133 /* Convert a 802.3 format to a 802.11 format.
134 * +------------+-----------+--------+----------------+
135 * 802.3: |dest mac(6B)|src mac(6B)|type(2B)| body... |
136 * +------------+-----------+--------+----------------+
137 * |__ |_______ |____________ |________
139 * +--+------------+----+-----------+---------------+-----------+
140 * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)| 8B |type(2B)| body... |
141 * +--+------------+----+-----------+---------------+-----------+
143 static void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
144 const struct cfg80211_pkt_pattern *old)
146 u8 hdr_8023_pattern[ETH_HLEN] = {};
147 u8 hdr_8023_bit_mask[ETH_HLEN] = {};
148 u8 hdr_80211_pattern[WOW_HDR_LEN] = {};
149 u8 hdr_80211_bit_mask[WOW_HDR_LEN] = {};
151 int total_len = old->pkt_offset + old->pattern_len;
152 int hdr_80211_end_offset;
154 struct ieee80211_hdr_3addr *new_hdr_pattern =
155 (struct ieee80211_hdr_3addr *)hdr_80211_pattern;
156 struct ieee80211_hdr_3addr *new_hdr_mask =
157 (struct ieee80211_hdr_3addr *)hdr_80211_bit_mask;
158 struct ethhdr *old_hdr_pattern = (struct ethhdr *)hdr_8023_pattern;
159 struct ethhdr *old_hdr_mask = (struct ethhdr *)hdr_8023_bit_mask;
160 int hdr_len = sizeof(*new_hdr_pattern);
162 struct rfc1042_hdr *new_rfc_pattern =
163 (struct rfc1042_hdr *)(hdr_80211_pattern + hdr_len);
164 struct rfc1042_hdr *new_rfc_mask =
165 (struct rfc1042_hdr *)(hdr_80211_bit_mask + hdr_len);
166 int rfc_len = sizeof(*new_rfc_pattern);
168 memcpy(hdr_8023_pattern + old->pkt_offset,
169 old->pattern, ETH_HLEN - old->pkt_offset);
170 memcpy(hdr_8023_bit_mask + old->pkt_offset,
171 old->mask, ETH_HLEN - old->pkt_offset);
173 /* Copy destination address */
174 memcpy(new_hdr_pattern->addr1, old_hdr_pattern->h_dest, ETH_ALEN);
175 memcpy(new_hdr_mask->addr1, old_hdr_mask->h_dest, ETH_ALEN);
177 /* Copy source address */
178 memcpy(new_hdr_pattern->addr3, old_hdr_pattern->h_source, ETH_ALEN);
179 memcpy(new_hdr_mask->addr3, old_hdr_mask->h_source, ETH_ALEN);
181 /* Copy logic link type */
182 memcpy(&new_rfc_pattern->snap_type,
183 &old_hdr_pattern->h_proto,
184 sizeof(old_hdr_pattern->h_proto));
185 memcpy(&new_rfc_mask->snap_type,
186 &old_hdr_mask->h_proto,
187 sizeof(old_hdr_mask->h_proto));
189 /* Compute new pkt_offset */
190 if (old->pkt_offset < ETH_ALEN)
191 new->pkt_offset = old->pkt_offset +
192 offsetof(struct ieee80211_hdr_3addr, addr1);
193 else if (old->pkt_offset < offsetof(struct ethhdr, h_proto))
194 new->pkt_offset = old->pkt_offset +
195 offsetof(struct ieee80211_hdr_3addr, addr3) -
196 offsetof(struct ethhdr, h_source);
198 new->pkt_offset = old->pkt_offset + hdr_len + rfc_len - ETH_HLEN;
200 /* Compute new hdr end offset */
201 if (total_len > ETH_HLEN)
202 hdr_80211_end_offset = hdr_len + rfc_len;
203 else if (total_len > offsetof(struct ethhdr, h_proto))
204 hdr_80211_end_offset = hdr_len + rfc_len + total_len - ETH_HLEN;
205 else if (total_len > ETH_ALEN)
206 hdr_80211_end_offset = total_len - ETH_ALEN +
207 offsetof(struct ieee80211_hdr_3addr, addr3);
209 hdr_80211_end_offset = total_len +
210 offsetof(struct ieee80211_hdr_3addr, addr1);
212 new->pattern_len = hdr_80211_end_offset - new->pkt_offset;
214 memcpy((u8 *)new->pattern,
215 hdr_80211_pattern + new->pkt_offset,
217 memcpy((u8 *)new->mask,
218 hdr_80211_bit_mask + new->pkt_offset,
221 if (total_len > ETH_HLEN) {
222 /* Copy frame body */
223 memcpy((u8 *)new->pattern + new->pattern_len,
224 (void *)old->pattern + ETH_HLEN - old->pkt_offset,
225 total_len - ETH_HLEN);
226 memcpy((u8 *)new->mask + new->pattern_len,
227 (void *)old->mask + ETH_HLEN - old->pkt_offset,
228 total_len - ETH_HLEN);
230 new->pattern_len += total_len - ETH_HLEN;
234 static int ath11k_wmi_pno_check_and_convert(struct ath11k *ar, u32 vdev_id,
235 struct cfg80211_sched_scan_request *nd_config,
236 struct wmi_pno_scan_req *pno)
242 pno->vdev_id = vdev_id;
243 pno->uc_networks_count = nd_config->n_match_sets;
245 if (!pno->uc_networks_count ||
246 pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
249 if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
252 /* Filling per profile params */
253 for (i = 0; i < pno->uc_networks_count; i++) {
254 ssid_len = nd_config->match_sets[i].ssid.ssid_len;
256 if (ssid_len == 0 || ssid_len > 32)
259 pno->a_networks[i].ssid.ssid_len = ssid_len;
261 memcpy(pno->a_networks[i].ssid.ssid,
262 nd_config->match_sets[i].ssid.ssid,
263 nd_config->match_sets[i].ssid.ssid_len);
264 pno->a_networks[i].authentication = 0;
265 pno->a_networks[i].encryption = 0;
266 pno->a_networks[i].bcast_nw_type = 0;
268 /* Copying list of valid channel into request */
269 pno->a_networks[i].channel_count = nd_config->n_channels;
270 pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;
272 for (j = 0; j < nd_config->n_channels; j++) {
273 pno->a_networks[i].channels[j] =
274 nd_config->channels[j]->center_freq;
278 /* set scan to passive if no SSIDs are specified in the request */
279 if (nd_config->n_ssids == 0)
280 pno->do_passive_scan = true;
282 pno->do_passive_scan = false;
284 for (i = 0; i < nd_config->n_ssids; i++) {
286 while (j < pno->uc_networks_count) {
287 if (pno->a_networks[j].ssid.ssid_len ==
288 nd_config->ssids[i].ssid_len &&
289 (memcmp(pno->a_networks[j].ssid.ssid,
290 nd_config->ssids[i].ssid,
291 pno->a_networks[j].ssid.ssid_len) == 0)) {
292 pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
299 if (nd_config->n_scan_plans == 2) {
300 pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
301 pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
302 pno->slow_scan_period =
303 nd_config->scan_plans[1].interval * MSEC_PER_SEC;
304 } else if (nd_config->n_scan_plans == 1) {
305 pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
306 pno->fast_scan_max_cycles = 1;
307 pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
309 ath11k_warn(ar->ab, "Invalid number of scan plans %d !!",
310 nd_config->n_scan_plans);
313 if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
314 /* enable mac randomization */
315 pno->enable_pno_scan_randomization = 1;
316 memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
317 memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
320 pno->delay_start_time = nd_config->delay;
322 /* Current FW does not support min-max range for dwell time */
323 pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
324 pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;
329 static int ath11k_vif_wow_set_wakeups(struct ath11k_vif *arvif,
330 struct cfg80211_wowlan *wowlan)
333 unsigned long wow_mask = 0;
334 struct ath11k *ar = arvif->ar;
335 const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
338 /* Setup requested WOW features */
339 switch (arvif->vdev_type) {
340 case WMI_VDEV_TYPE_IBSS:
341 __set_bit(WOW_BEACON_EVENT, &wow_mask);
343 case WMI_VDEV_TYPE_AP:
344 __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
345 __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
346 __set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
347 __set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
348 __set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
349 __set_bit(WOW_HTT_EVENT, &wow_mask);
350 __set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
352 case WMI_VDEV_TYPE_STA:
353 if (wowlan->disconnect) {
354 __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
355 __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
356 __set_bit(WOW_BMISS_EVENT, &wow_mask);
357 __set_bit(WOW_CSA_IE_EVENT, &wow_mask);
360 if (wowlan->magic_pkt)
361 __set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
363 if (wowlan->nd_config) {
364 struct wmi_pno_scan_req *pno;
367 pno = kzalloc(sizeof(*pno), GFP_KERNEL);
371 ar->nlo_enabled = true;
373 ret = ath11k_wmi_pno_check_and_convert(ar, arvif->vdev_id,
374 wowlan->nd_config, pno);
376 ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
377 __set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
387 for (i = 0; i < wowlan->n_patterns; i++) {
388 u8 bitmask[WOW_MAX_PATTERN_SIZE] = {};
389 u8 ath_pattern[WOW_MAX_PATTERN_SIZE] = {};
390 u8 ath_bitmask[WOW_MAX_PATTERN_SIZE] = {};
391 struct cfg80211_pkt_pattern new_pattern = {};
392 struct cfg80211_pkt_pattern old_pattern = patterns[i];
395 new_pattern.pattern = ath_pattern;
396 new_pattern.mask = ath_bitmask;
397 if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
399 /* convert bytemask to bitmask */
400 for (j = 0; j < patterns[i].pattern_len; j++)
401 if (patterns[i].mask[j / 8] & BIT(j % 8))
403 old_pattern.mask = bitmask;
405 if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
406 ATH11K_HW_TXRX_NATIVE_WIFI) {
407 if (patterns[i].pkt_offset < ETH_HLEN) {
408 u8 pattern_ext[WOW_MAX_PATTERN_SIZE] = {};
410 memcpy(pattern_ext, old_pattern.pattern,
411 old_pattern.pattern_len);
412 old_pattern.pattern = pattern_ext;
413 ath11k_wow_convert_8023_to_80211(&new_pattern,
416 new_pattern = old_pattern;
417 new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN;
421 if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
424 ret = ath11k_wmi_wow_add_pattern(ar, arvif->vdev_id,
428 new_pattern.pattern_len,
429 new_pattern.pkt_offset);
431 ath11k_warn(ar->ab, "failed to add pattern %i to vdev %i: %d\n",
433 arvif->vdev_id, ret);
438 __set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
441 for (i = 0; i < WOW_EVENT_MAX; i++) {
442 if (!test_bit(i, &wow_mask))
444 ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
446 ath11k_warn(ar->ab, "failed to enable wakeup event %s on vdev %i: %d\n",
447 wow_wakeup_event(i), arvif->vdev_id, ret);
455 static int ath11k_wow_set_wakeups(struct ath11k *ar,
456 struct cfg80211_wowlan *wowlan)
458 struct ath11k_vif *arvif;
461 lockdep_assert_held(&ar->conf_mutex);
463 list_for_each_entry(arvif, &ar->arvifs, list) {
464 ret = ath11k_vif_wow_set_wakeups(arvif, wowlan);
466 ath11k_warn(ar->ab, "failed to set wow wakeups on vdev %i: %d\n",
467 arvif->vdev_id, ret);
475 static int ath11k_vif_wow_clean_nlo(struct ath11k_vif *arvif)
478 struct ath11k *ar = arvif->ar;
480 switch (arvif->vdev_type) {
481 case WMI_VDEV_TYPE_STA:
482 if (ar->nlo_enabled) {
483 struct wmi_pno_scan_req *pno;
485 pno = kzalloc(sizeof(*pno), GFP_KERNEL);
490 ar->nlo_enabled = false;
491 ret = ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
501 static int ath11k_wow_nlo_cleanup(struct ath11k *ar)
503 struct ath11k_vif *arvif;
506 lockdep_assert_held(&ar->conf_mutex);
508 list_for_each_entry(arvif, &ar->arvifs, list) {
509 ret = ath11k_vif_wow_clean_nlo(arvif);
511 ath11k_warn(ar->ab, "failed to clean nlo settings on vdev %i: %d\n",
512 arvif->vdev_id, ret);
520 static int ath11k_wow_set_hw_filter(struct ath11k *ar)
522 struct ath11k_vif *arvif;
526 lockdep_assert_held(&ar->conf_mutex);
528 list_for_each_entry(arvif, &ar->arvifs, list) {
529 bitmap = WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC |
530 WMI_HW_DATA_FILTER_DROP_NON_ARP_BC;
531 ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id,
535 ath11k_warn(ar->ab, "failed to set hw data filter on vdev %i: %d\n",
536 arvif->vdev_id, ret);
544 static int ath11k_wow_clear_hw_filter(struct ath11k *ar)
546 struct ath11k_vif *arvif;
549 lockdep_assert_held(&ar->conf_mutex);
551 list_for_each_entry(arvif, &ar->arvifs, list) {
552 ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id, 0, false);
555 ath11k_warn(ar->ab, "failed to clear hw data filter on vdev %i: %d\n",
556 arvif->vdev_id, ret);
564 static int ath11k_wow_arp_ns_offload(struct ath11k *ar, bool enable)
566 struct ath11k_vif *arvif;
569 lockdep_assert_held(&ar->conf_mutex);
571 list_for_each_entry(arvif, &ar->arvifs, list) {
572 if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
575 ret = ath11k_wmi_arp_ns_offload(ar, arvif, enable);
578 ath11k_warn(ar->ab, "failed to set arp ns offload vdev %i: enable %d, ret %d\n",
579 arvif->vdev_id, enable, ret);
587 static int ath11k_gtk_rekey_offload(struct ath11k *ar, bool enable)
589 struct ath11k_vif *arvif;
592 lockdep_assert_held(&ar->conf_mutex);
594 list_for_each_entry(arvif, &ar->arvifs, list) {
595 if (arvif->vdev_type != WMI_VDEV_TYPE_STA ||
597 !arvif->rekey_data.enable_offload)
600 /* get rekey info before disable rekey offload */
602 ret = ath11k_wmi_gtk_rekey_getinfo(ar, arvif);
604 ath11k_warn(ar->ab, "failed to request rekey info vdev %i, ret %d\n",
605 arvif->vdev_id, ret);
610 ret = ath11k_wmi_gtk_rekey_offload(ar, arvif, enable);
613 ath11k_warn(ar->ab, "failed to offload gtk reky vdev %i: enable %d, ret %d\n",
614 arvif->vdev_id, enable, ret);
622 static int ath11k_wow_protocol_offload(struct ath11k *ar, bool enable)
626 ret = ath11k_wow_arp_ns_offload(ar, enable);
628 ath11k_warn(ar->ab, "failed to offload ARP and NS %d %d\n",
633 ret = ath11k_gtk_rekey_offload(ar, enable);
635 ath11k_warn(ar->ab, "failed to offload gtk rekey %d %d\n",
643 static int ath11k_wow_set_keepalive(struct ath11k *ar,
644 enum wmi_sta_keepalive_method method,
647 struct ath11k_vif *arvif;
650 lockdep_assert_held(&ar->conf_mutex);
652 list_for_each_entry(arvif, &ar->arvifs, list) {
653 ret = ath11k_mac_vif_set_keepalive(arvif, method, interval);
661 int ath11k_wow_op_suspend(struct ieee80211_hw *hw,
662 struct cfg80211_wowlan *wowlan)
664 struct ath11k *ar = hw->priv;
667 mutex_lock(&ar->conf_mutex);
669 ret = ath11k_dp_rx_pktlog_stop(ar->ab, true);
672 "failed to stop dp rx (and timer) pktlog during wow suspend: %d\n",
677 ret = ath11k_wow_cleanup(ar);
679 ath11k_warn(ar->ab, "failed to clear wow wakeup events: %d\n",
684 ret = ath11k_wow_set_wakeups(ar, wowlan);
686 ath11k_warn(ar->ab, "failed to set wow wakeup events: %d\n",
691 ret = ath11k_wow_protocol_offload(ar, true);
693 ath11k_warn(ar->ab, "failed to set wow protocol offload events: %d\n",
698 ath11k_mac_drain_tx(ar);
699 ret = ath11k_mac_wait_tx_complete(ar);
701 ath11k_warn(ar->ab, "failed to wait tx complete: %d\n", ret);
705 ret = ath11k_wow_set_hw_filter(ar);
707 ath11k_warn(ar->ab, "failed to set hw filter: %d\n",
712 ret = ath11k_wow_set_keepalive(ar,
713 WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
714 WMI_STA_KEEPALIVE_INTERVAL_DEFAULT);
716 ath11k_warn(ar->ab, "failed to enable wow keepalive: %d\n", ret);
720 ret = ath11k_wow_enable(ar->ab);
722 ath11k_warn(ar->ab, "failed to start wow: %d\n", ret);
726 ret = ath11k_dp_rx_pktlog_stop(ar->ab, false);
729 "failed to stop dp rx pktlog during wow suspend: %d\n",
734 ath11k_ce_stop_shadow_timers(ar->ab);
735 ath11k_dp_stop_shadow_timers(ar->ab);
737 ath11k_hif_irq_disable(ar->ab);
738 ath11k_hif_ce_irq_disable(ar->ab);
740 ret = ath11k_hif_suspend(ar->ab);
742 ath11k_warn(ar->ab, "failed to suspend hif: %d\n", ret);
749 ath11k_wow_wakeup(ar->ab);
752 ath11k_wow_cleanup(ar);
755 mutex_unlock(&ar->conf_mutex);
759 void ath11k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
761 struct ath11k *ar = hw->priv;
763 mutex_lock(&ar->conf_mutex);
764 device_set_wakeup_enable(ar->ab->dev, enabled);
765 mutex_unlock(&ar->conf_mutex);
768 int ath11k_wow_op_resume(struct ieee80211_hw *hw)
770 struct ath11k *ar = hw->priv;
773 mutex_lock(&ar->conf_mutex);
775 ret = ath11k_hif_resume(ar->ab);
777 ath11k_warn(ar->ab, "failed to resume hif: %d\n", ret);
781 ath11k_hif_ce_irq_enable(ar->ab);
782 ath11k_hif_irq_enable(ar->ab);
784 ret = ath11k_dp_rx_pktlog_start(ar->ab);
786 ath11k_warn(ar->ab, "failed to start rx pktlog from wow: %d\n", ret);
790 ret = ath11k_wow_wakeup(ar->ab);
792 ath11k_warn(ar->ab, "failed to wakeup from wow: %d\n", ret);
796 ret = ath11k_wow_nlo_cleanup(ar);
798 ath11k_warn(ar->ab, "failed to cleanup nlo: %d\n", ret);
802 ret = ath11k_wow_clear_hw_filter(ar);
804 ath11k_warn(ar->ab, "failed to clear hw filter: %d\n", ret);
808 ret = ath11k_wow_protocol_offload(ar, false);
810 ath11k_warn(ar->ab, "failed to clear wow protocol offload events: %d\n",
815 ret = ath11k_wow_set_keepalive(ar,
816 WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
817 WMI_STA_KEEPALIVE_INTERVAL_DISABLE);
819 ath11k_warn(ar->ab, "failed to disable wow keepalive: %d\n", ret);
826 case ATH11K_STATE_ON:
827 ar->state = ATH11K_STATE_RESTARTING;
830 case ATH11K_STATE_OFF:
831 case ATH11K_STATE_RESTARTING:
832 case ATH11K_STATE_RESTARTED:
833 case ATH11K_STATE_WEDGED:
834 ath11k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n",
841 mutex_unlock(&ar->conf_mutex);
845 int ath11k_wow_init(struct ath11k *ar)
847 if (!test_bit(WMI_TLV_SERVICE_WOW, ar->wmi->wmi_ab->svc_map))
850 ar->wow.wowlan_support = ath11k_wowlan_support;
852 if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
853 ATH11K_HW_TXRX_NATIVE_WIFI) {
854 ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE;
855 ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
858 if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
859 ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
860 ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
863 ar->wow.max_num_patterns = ATH11K_WOW_PATTERNS;
864 ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
865 ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;
867 device_set_wakeup_capable(ar->ab->dev, true);