/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
25 void (*rx)(struct ath10k *ar, struct sk_buff *skb);
26 void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
28 int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
29 struct wmi_scan_ev_arg *arg);
30 int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
31 struct wmi_mgmt_rx_ev_arg *arg);
32 int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
33 struct wmi_ch_info_ev_arg *arg);
34 int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
35 struct wmi_vdev_start_ev_arg *arg);
36 int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
37 struct wmi_peer_kick_ev_arg *arg);
38 int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
39 struct wmi_swba_ev_arg *arg);
40 int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
41 struct wmi_phyerr_hdr_arg *arg);
42 int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
43 int left_len, struct wmi_phyerr_ev_arg *arg);
44 int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
45 struct wmi_svc_rdy_ev_arg *arg);
46 int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
47 struct wmi_rdy_ev_arg *arg);
48 int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
49 struct ath10k_fw_stats *stats);
50 int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
51 struct wmi_roam_ev_arg *arg);
52 int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
53 struct wmi_wow_ev_arg *arg);
54 int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
55 struct wmi_echo_ev_arg *arg);
56 enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);
58 struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
59 struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
60 struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
61 u16 rd5g, u16 ctl2g, u16 ctl5g,
62 enum wmi_dfs_region dfs_reg);
63 struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
65 struct sk_buff *(*gen_init)(struct ath10k *ar);
66 struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
67 const struct wmi_start_scan_arg *arg);
68 struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
69 const struct wmi_stop_scan_arg *arg);
70 struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
71 enum wmi_vdev_type type,
72 enum wmi_vdev_subtype subtype,
73 const u8 macaddr[ETH_ALEN]);
74 struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
75 struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
76 const struct wmi_vdev_start_request_arg *arg,
78 struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
79 struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
81 struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
82 struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
83 u32 param_id, u32 param_value);
84 struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
85 const struct wmi_vdev_install_key_arg *arg);
86 struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
87 const struct wmi_vdev_spectral_conf_arg *arg);
88 struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
89 u32 trigger, u32 enable);
90 struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
91 const struct wmi_wmm_params_all_arg *arg);
92 struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
93 const u8 peer_addr[ETH_ALEN],
94 enum wmi_peer_type peer_type);
95 struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
96 const u8 peer_addr[ETH_ALEN]);
97 struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
98 const u8 peer_addr[ETH_ALEN],
100 struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
102 enum wmi_peer_param param_id,
104 struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
105 const struct wmi_peer_assoc_complete_arg *arg);
106 struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
107 enum wmi_sta_ps_mode psmode);
108 struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
109 enum wmi_sta_powersave_param param_id,
111 struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
113 enum wmi_ap_ps_peer_param param_id,
115 struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
116 const struct wmi_scan_chan_list_arg *arg);
117 struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
118 const void *bcn, size_t bcn_len,
119 u32 bcn_paddr, bool dtim_zero,
121 struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
122 const struct wmi_wmm_params_all_arg *arg);
123 struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
124 struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
125 enum wmi_force_fw_hang_type type,
127 struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
128 struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
130 struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
131 struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
132 struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
133 u32 period, u32 duration,
136 struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
137 struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
139 struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
140 const u8 *mac, u32 tid, u32 buf_size);
141 struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
142 const u8 *mac, u32 tid,
144 struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
145 const u8 *mac, u32 tid, u32 initiator,
147 struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
148 u32 tim_ie_offset, struct sk_buff *bcn,
149 u32 prb_caps, u32 prb_erp,
150 void *prb_ies, size_t prb_ies_len);
151 struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
152 struct sk_buff *bcn);
153 struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
155 struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
156 const u8 peer_addr[ETH_ALEN],
157 const struct wmi_sta_uapsd_auto_trig_arg *args,
159 struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
160 const struct wmi_sta_keepalive_arg *arg);
161 struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
162 struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
163 enum wmi_wow_wakeup_event event,
165 struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
166 struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
172 struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
174 struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
176 enum wmi_tdls_state state);
177 struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
178 const struct wmi_tdls_peer_update_cmd_arg *arg,
179 const struct wmi_tdls_peer_capab_arg *cap,
180 const struct wmi_channel_arg *chan);
181 struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
182 struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
184 void (*fw_stats_fill)(struct ath10k *ar,
185 struct ath10k_fw_stats *fw_stats,
187 struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
191 struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
192 enum wmi_host_platform_type type,
193 u32 fw_feature_bitmap);
194 int (*get_vdev_subtype)(struct ath10k *ar,
195 enum wmi_vdev_subtype subtype);
196 struct sk_buff *(*gen_pdev_bss_chan_info_req)
198 enum wmi_bss_survey_req_type type);
199 struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
202 int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
205 ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
207 if (WARN_ON_ONCE(!ar->wmi.ops->rx))
210 ar->wmi.ops->rx(ar, skb);
215 ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
218 if (!ar->wmi.ops->map_svc)
221 ar->wmi.ops->map_svc(in, out, len);
226 ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
227 struct wmi_scan_ev_arg *arg)
229 if (!ar->wmi.ops->pull_scan)
232 return ar->wmi.ops->pull_scan(ar, skb, arg);
236 ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
237 struct wmi_mgmt_rx_ev_arg *arg)
239 if (!ar->wmi.ops->pull_mgmt_rx)
242 return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
246 ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
247 struct wmi_ch_info_ev_arg *arg)
249 if (!ar->wmi.ops->pull_ch_info)
252 return ar->wmi.ops->pull_ch_info(ar, skb, arg);
256 ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
257 struct wmi_vdev_start_ev_arg *arg)
259 if (!ar->wmi.ops->pull_vdev_start)
262 return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
266 ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
267 struct wmi_peer_kick_ev_arg *arg)
269 if (!ar->wmi.ops->pull_peer_kick)
272 return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
276 ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
277 struct wmi_swba_ev_arg *arg)
279 if (!ar->wmi.ops->pull_swba)
282 return ar->wmi.ops->pull_swba(ar, skb, arg);
286 ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
287 struct wmi_phyerr_hdr_arg *arg)
289 if (!ar->wmi.ops->pull_phyerr_hdr)
292 return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
296 ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
297 int left_len, struct wmi_phyerr_ev_arg *arg)
299 if (!ar->wmi.ops->pull_phyerr)
302 return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
306 ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
307 struct wmi_svc_rdy_ev_arg *arg)
309 if (!ar->wmi.ops->pull_svc_rdy)
312 return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
316 ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
317 struct wmi_rdy_ev_arg *arg)
319 if (!ar->wmi.ops->pull_rdy)
322 return ar->wmi.ops->pull_rdy(ar, skb, arg);
326 ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
327 struct ath10k_fw_stats *stats)
329 if (!ar->wmi.ops->pull_fw_stats)
332 return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
336 ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
337 struct wmi_roam_ev_arg *arg)
339 if (!ar->wmi.ops->pull_roam_ev)
342 return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
346 ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
347 struct wmi_wow_ev_arg *arg)
349 if (!ar->wmi.ops->pull_wow_event)
352 return ar->wmi.ops->pull_wow_event(ar, skb, arg);
356 ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
357 struct wmi_echo_ev_arg *arg)
359 if (!ar->wmi.ops->pull_echo_ev)
362 return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
365 static inline enum wmi_txbf_conf
366 ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
368 if (!ar->wmi.ops->get_txbf_conf_scheme)
369 return WMI_TXBF_CONF_UNSUPPORTED;
371 return ar->wmi.ops->get_txbf_conf_scheme(ar);
375 ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
377 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
381 if (!ar->wmi.ops->gen_mgmt_tx)
384 skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
388 ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
392 /* FIXME There's no ACK event for Management Tx. This probably
393 * shouldn't be called here either.
395 info->flags |= IEEE80211_TX_STAT_ACK;
396 ieee80211_tx_status_irqsafe(ar->hw, msdu);
402 ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
403 u16 ctl2g, u16 ctl5g,
404 enum wmi_dfs_region dfs_reg)
408 if (!ar->wmi.ops->gen_pdev_set_rd)
411 skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
416 return ath10k_wmi_cmd_send(ar, skb,
417 ar->wmi.cmd->pdev_set_regdomain_cmdid);
421 ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
425 if (!ar->wmi.ops->gen_pdev_suspend)
428 skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
432 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
436 ath10k_wmi_pdev_resume_target(struct ath10k *ar)
440 if (!ar->wmi.ops->gen_pdev_resume)
443 skb = ar->wmi.ops->gen_pdev_resume(ar);
447 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
451 ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
455 if (!ar->wmi.ops->gen_pdev_set_param)
458 skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
462 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
466 ath10k_wmi_cmd_init(struct ath10k *ar)
470 if (!ar->wmi.ops->gen_init)
473 skb = ar->wmi.ops->gen_init(ar);
477 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
481 ath10k_wmi_start_scan(struct ath10k *ar,
482 const struct wmi_start_scan_arg *arg)
486 if (!ar->wmi.ops->gen_start_scan)
489 skb = ar->wmi.ops->gen_start_scan(ar, arg);
493 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
497 ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
501 if (!ar->wmi.ops->gen_stop_scan)
504 skb = ar->wmi.ops->gen_stop_scan(ar, arg);
508 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
512 ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
513 enum wmi_vdev_type type,
514 enum wmi_vdev_subtype subtype,
515 const u8 macaddr[ETH_ALEN])
519 if (!ar->wmi.ops->gen_vdev_create)
522 skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
526 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
530 ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
534 if (!ar->wmi.ops->gen_vdev_delete)
537 skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
541 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
545 ath10k_wmi_vdev_start(struct ath10k *ar,
546 const struct wmi_vdev_start_request_arg *arg)
550 if (!ar->wmi.ops->gen_vdev_start)
553 skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
557 return ath10k_wmi_cmd_send(ar, skb,
558 ar->wmi.cmd->vdev_start_request_cmdid);
562 ath10k_wmi_vdev_restart(struct ath10k *ar,
563 const struct wmi_vdev_start_request_arg *arg)
567 if (!ar->wmi.ops->gen_vdev_start)
570 skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
574 return ath10k_wmi_cmd_send(ar, skb,
575 ar->wmi.cmd->vdev_restart_request_cmdid);
579 ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
583 if (!ar->wmi.ops->gen_vdev_stop)
586 skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
590 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
594 ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
598 if (!ar->wmi.ops->gen_vdev_up)
601 skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
605 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
609 ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
613 if (!ar->wmi.ops->gen_vdev_down)
616 skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
620 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
624 ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
629 if (!ar->wmi.ops->gen_vdev_set_param)
632 skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
637 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
641 ath10k_wmi_vdev_install_key(struct ath10k *ar,
642 const struct wmi_vdev_install_key_arg *arg)
646 if (!ar->wmi.ops->gen_vdev_install_key)
649 skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
653 return ath10k_wmi_cmd_send(ar, skb,
654 ar->wmi.cmd->vdev_install_key_cmdid);
658 ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
659 const struct wmi_vdev_spectral_conf_arg *arg)
664 if (!ar->wmi.ops->gen_vdev_spectral_conf)
667 skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
671 cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
672 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
676 ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
682 if (!ar->wmi.ops->gen_vdev_spectral_enable)
685 skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
690 cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
691 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
695 ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
696 const u8 peer_addr[ETH_ALEN],
697 const struct wmi_sta_uapsd_auto_trig_arg *args,
703 if (!ar->wmi.ops->gen_vdev_sta_uapsd)
706 skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
711 cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
712 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
716 ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
717 const struct wmi_wmm_params_all_arg *arg)
722 skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
726 cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
727 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
731 ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
732 const u8 peer_addr[ETH_ALEN],
733 enum wmi_peer_type peer_type)
737 if (!ar->wmi.ops->gen_peer_create)
740 skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
744 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
748 ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
749 const u8 peer_addr[ETH_ALEN])
753 if (!ar->wmi.ops->gen_peer_delete)
756 skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
760 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
764 ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
765 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
769 if (!ar->wmi.ops->gen_peer_flush)
772 skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
776 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
780 ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
781 enum wmi_peer_param param_id, u32 param_value)
785 if (!ar->wmi.ops->gen_peer_set_param)
788 skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
793 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
797 ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
798 enum wmi_sta_ps_mode psmode)
802 if (!ar->wmi.ops->gen_set_psmode)
805 skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
809 return ath10k_wmi_cmd_send(ar, skb,
810 ar->wmi.cmd->sta_powersave_mode_cmdid);
814 ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
815 enum wmi_sta_powersave_param param_id, u32 value)
819 if (!ar->wmi.ops->gen_set_sta_ps)
822 skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
826 return ath10k_wmi_cmd_send(ar, skb,
827 ar->wmi.cmd->sta_powersave_param_cmdid);
831 ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
832 enum wmi_ap_ps_peer_param param_id, u32 value)
836 if (!ar->wmi.ops->gen_set_ap_ps)
839 skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
843 return ath10k_wmi_cmd_send(ar, skb,
844 ar->wmi.cmd->ap_ps_peer_param_cmdid);
848 ath10k_wmi_scan_chan_list(struct ath10k *ar,
849 const struct wmi_scan_chan_list_arg *arg)
853 if (!ar->wmi.ops->gen_scan_chan_list)
856 skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
860 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
864 ath10k_wmi_peer_assoc(struct ath10k *ar,
865 const struct wmi_peer_assoc_complete_arg *arg)
869 if (!ar->wmi.ops->gen_peer_assoc)
872 skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
876 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
880 ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
881 const void *bcn, size_t bcn_len,
882 u32 bcn_paddr, bool dtim_zero,
888 if (!ar->wmi.ops->gen_beacon_dma)
891 skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
892 dtim_zero, deliver_cab);
896 ret = ath10k_wmi_cmd_send_nowait(ar, skb,
897 ar->wmi.cmd->pdev_send_bcn_cmdid);
907 ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
908 const struct wmi_wmm_params_all_arg *arg)
912 if (!ar->wmi.ops->gen_pdev_set_wmm)
915 skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
919 return ath10k_wmi_cmd_send(ar, skb,
920 ar->wmi.cmd->pdev_set_wmm_params_cmdid);
924 ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
928 if (!ar->wmi.ops->gen_request_stats)
931 skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
935 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
939 ath10k_wmi_force_fw_hang(struct ath10k *ar,
940 enum wmi_force_fw_hang_type type, u32 delay_ms)
944 if (!ar->wmi.ops->gen_force_fw_hang)
947 skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
951 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
955 ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
959 if (!ar->wmi.ops->gen_dbglog_cfg)
962 skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
966 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
970 ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
974 if (!ar->wmi.ops->gen_pktlog_enable)
977 skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
981 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
985 ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
989 if (!ar->wmi.ops->gen_pktlog_disable)
992 skb = ar->wmi.ops->gen_pktlog_disable(ar);
996 return ath10k_wmi_cmd_send(ar, skb,
997 ar->wmi.cmd->pdev_pktlog_disable_cmdid);
1001 ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
1002 u32 next_offset, u32 enabled)
1004 struct sk_buff *skb;
1006 if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
1009 skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
1010 next_offset, enabled);
1012 return PTR_ERR(skb);
1014 return ath10k_wmi_cmd_send(ar, skb,
1015 ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
1019 ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
1021 struct sk_buff *skb;
1023 if (!ar->wmi.ops->gen_pdev_get_temperature)
1026 skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
1028 return PTR_ERR(skb);
1030 return ath10k_wmi_cmd_send(ar, skb,
1031 ar->wmi.cmd->pdev_get_temperature_cmdid);
1035 ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
1037 struct sk_buff *skb;
1039 if (!ar->wmi.ops->gen_addba_clear_resp)
1042 skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
1044 return PTR_ERR(skb);
1046 return ath10k_wmi_cmd_send(ar, skb,
1047 ar->wmi.cmd->addba_clear_resp_cmdid);
1051 ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1052 u32 tid, u32 buf_size)
1054 struct sk_buff *skb;
1056 if (!ar->wmi.ops->gen_addba_send)
1059 skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
1061 return PTR_ERR(skb);
1063 return ath10k_wmi_cmd_send(ar, skb,
1064 ar->wmi.cmd->addba_send_cmdid);
1068 ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1069 u32 tid, u32 status)
1071 struct sk_buff *skb;
1073 if (!ar->wmi.ops->gen_addba_set_resp)
1076 skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
1078 return PTR_ERR(skb);
1080 return ath10k_wmi_cmd_send(ar, skb,
1081 ar->wmi.cmd->addba_set_resp_cmdid);
1085 ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1086 u32 tid, u32 initiator, u32 reason)
1088 struct sk_buff *skb;
1090 if (!ar->wmi.ops->gen_delba_send)
1093 skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
1096 return PTR_ERR(skb);
1098 return ath10k_wmi_cmd_send(ar, skb,
1099 ar->wmi.cmd->delba_send_cmdid);
1103 ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
1104 struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
1105 void *prb_ies, size_t prb_ies_len)
1107 struct sk_buff *skb;
1109 if (!ar->wmi.ops->gen_bcn_tmpl)
1112 skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
1113 prb_caps, prb_erp, prb_ies,
1116 return PTR_ERR(skb);
1118 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
1122 ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
1124 struct sk_buff *skb;
1126 if (!ar->wmi.ops->gen_prb_tmpl)
1129 skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
1131 return PTR_ERR(skb);
1133 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
1137 ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
1139 struct sk_buff *skb;
1141 if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
1144 skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
1146 return PTR_ERR(skb);
1148 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
1152 ath10k_wmi_sta_keepalive(struct ath10k *ar,
1153 const struct wmi_sta_keepalive_arg *arg)
1155 struct sk_buff *skb;
1158 if (!ar->wmi.ops->gen_sta_keepalive)
1161 skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
1163 return PTR_ERR(skb);
1165 cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
1166 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1170 ath10k_wmi_wow_enable(struct ath10k *ar)
1172 struct sk_buff *skb;
1175 if (!ar->wmi.ops->gen_wow_enable)
1178 skb = ar->wmi.ops->gen_wow_enable(ar);
1180 return PTR_ERR(skb);
1182 cmd_id = ar->wmi.cmd->wow_enable_cmdid;
1183 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1187 ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
1188 enum wmi_wow_wakeup_event event,
1191 struct sk_buff *skb;
1194 if (!ar->wmi.ops->gen_wow_add_wakeup_event)
1197 skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
1199 return PTR_ERR(skb);
1201 cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
1202 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1206 ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
1208 struct sk_buff *skb;
1211 if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
1214 skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
1216 return PTR_ERR(skb);
1218 cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
1219 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1223 ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
1224 const u8 *pattern, const u8 *mask,
1225 int pattern_len, int pattern_offset)
1227 struct sk_buff *skb;
1230 if (!ar->wmi.ops->gen_wow_add_pattern)
1233 skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
1234 pattern, mask, pattern_len,
1237 return PTR_ERR(skb);
1239 cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
1240 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1244 ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
1246 struct sk_buff *skb;
1249 if (!ar->wmi.ops->gen_wow_del_pattern)
1252 skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
1254 return PTR_ERR(skb);
1256 cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
1257 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1261 ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
1262 enum wmi_tdls_state state)
1264 struct sk_buff *skb;
1266 if (!ar->wmi.ops->gen_update_fw_tdls_state)
1269 skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
1271 return PTR_ERR(skb);
1273 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
1277 ath10k_wmi_tdls_peer_update(struct ath10k *ar,
1278 const struct wmi_tdls_peer_update_cmd_arg *arg,
1279 const struct wmi_tdls_peer_capab_arg *cap,
1280 const struct wmi_channel_arg *chan)
1282 struct sk_buff *skb;
1284 if (!ar->wmi.ops->gen_tdls_peer_update)
1287 skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
1289 return PTR_ERR(skb);
1291 return ath10k_wmi_cmd_send(ar, skb,
1292 ar->wmi.cmd->tdls_peer_update_cmdid);
1296 ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
1298 struct sk_buff *skb;
1300 if (!ar->wmi.ops->gen_adaptive_qcs)
1303 skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
1305 return PTR_ERR(skb);
1307 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
1311 ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
1313 struct sk_buff *skb;
1315 if (!ar->wmi.ops->gen_pdev_get_tpc_config)
1318 skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
1321 return PTR_ERR(skb);
1323 return ath10k_wmi_cmd_send(ar, skb,
1324 ar->wmi.cmd->pdev_get_tpc_config_cmdid);
1328 ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
1331 if (!ar->wmi.ops->fw_stats_fill)
1334 ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
1339 ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
1340 u32 detect_level, u32 detect_margin)
1342 struct sk_buff *skb;
1344 if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
1347 skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
1352 return PTR_ERR(skb);
1354 return ath10k_wmi_cmd_send(ar, skb,
1355 ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
1359 ath10k_wmi_ext_resource_config(struct ath10k *ar,
1360 enum wmi_host_platform_type type,
1361 u32 fw_feature_bitmap)
1363 struct sk_buff *skb;
1365 if (!ar->wmi.ops->ext_resource_config)
1368 skb = ar->wmi.ops->ext_resource_config(ar, type,
1372 return PTR_ERR(skb);
1374 return ath10k_wmi_cmd_send(ar, skb,
1375 ar->wmi.cmd->ext_resource_cfg_cmdid);
1379 ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
1381 if (!ar->wmi.ops->get_vdev_subtype)
1384 return ar->wmi.ops->get_vdev_subtype(ar, subtype);
1388 ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
1389 enum wmi_bss_survey_req_type type)
1391 struct ath10k_wmi *wmi = &ar->wmi;
1392 struct sk_buff *skb;
1394 if (!wmi->ops->gen_pdev_bss_chan_info_req)
1397 skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
1399 return PTR_ERR(skb);
1401 return ath10k_wmi_cmd_send(ar, skb,
1402 wmi->cmd->pdev_bss_chan_info_request_cmdid);
1406 ath10k_wmi_echo(struct ath10k *ar, u32 value)
1408 struct ath10k_wmi *wmi = &ar->wmi;
1409 struct sk_buff *skb;
1411 if (!wmi->ops->gen_echo)
1414 skb = wmi->ops->gen_echo(ar, value);
1416 return PTR_ERR(skb);
1418 return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);