// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <linux/ctype.h>

#include "core.h"
#include "htc.h"
#include "debug.h"
#include "wmi.h"
#include "wmi-tlv.h"
#include "mac.h"
#include "testmode.h"
#include "wmi-ops.h"
#include "p2p.h"
#include "hw.h"
#include "hif.h"
#include "txrx.h"

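/*
 * ATH10K_WMI_BARRIER_ECHO_ID is the cookie carried in the WMI echo command
 * used to implement a WMI barrier: the caller waits up to
 * ATH10K_WMI_BARRIER_TIMEOUT_HZ for the matching echo event, which
 * indicates that previously queued WMI commands have been processed by the
 * firmware.  ATH10K_WMI_DFS_CONF_TIMEOUT_HZ bounds the wait for the
 * firmware's radar confirmation after the host reports a detected radar.
 */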
#define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9
#define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ)
#define ATH10K_WMI_DFS_CONF_TIMEOUT_HZ (HZ / 6)

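/*
 * Command maps translate the driver's generic WMI command indices into the
 * numeric command IDs understood by a particular firmware branch (main,
 * 10.x, 10.2.4, 10.4).  Commands that a firmware branch does not implement
 * are marked WMI_CMD_UNSUPPORTED.
 */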
/* MAIN WMI cmd track */
static struct wmi_cmd_map wmi_cmd_map = {
	.init_cmdid = WMI_INIT_CMDID,
	.start_scan_cmdid = WMI_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID,
	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID,
	.bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID,
	.addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_ROAM_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
	.ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
	.peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
				WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
				WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
				WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
				WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
				WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
				WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID,
	.network_list_offload_config_cmdid =
				WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
	.gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID,
	.csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID,
	.csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
	.chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID,
	.peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID,
	.peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID,
	.sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID,
	.sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID,
	.sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD,
	.echo_cmdid = WMI_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID,
	.vdev_set_keepalive_cmdid = WMI_VDEV_SET_KEEPALIVE_CMDID,
	.vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID,
	.force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
	.gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
	.nan_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
	.radar_found_cmdid = WMI_CMD_UNSUPPORTED,
};

/* 10.X WMI cmd track */
static struct wmi_cmd_map wmi_10x_cmd_map = {
	.init_cmdid = WMI_10X_INIT_CMDID,
	.start_scan_cmdid = WMI_10X_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10X_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10X_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10X_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10X_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10X_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10X_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10X_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10X_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10X_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10X_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10X_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10X_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10X_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10X_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10X_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10X_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10X_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.bcn_filter_rx_cmdid = WMI_10X_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10X_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10X_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.addba_clear_resp_cmdid = WMI_10X_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10X_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10X_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10X_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10X_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10X_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10X_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10X_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10X_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10X_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10X_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10X_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10X_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
				WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10X_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10X_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10X_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
	.ap_ps_peer_param_cmdid = WMI_10X_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
				WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
				WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
				WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
				WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10X_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10X_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10X_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10X_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
				WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10X_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
				WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10X_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10X_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
				WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
				WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10X_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10X_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10X_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10X_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10X_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
	.gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
	.nan_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
	.radar_found_cmdid = WMI_CMD_UNSUPPORTED,
};

/* 10.2.4 WMI cmd track */
static struct wmi_cmd_map wmi_10_2_4_cmd_map = {
	.init_cmdid = WMI_10_2_INIT_CMDID,
	.start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
	.addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
				WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
	.ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
				WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
				WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
				WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
				WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
				WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
				WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
				WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
				WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10_2_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
	.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
	.pdev_enable_adaptive_cca_cmdid = WMI_10_2_SET_CCA_PARAMS,
	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
	.nan_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_bss_chan_info_request_cmdid =
		WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
	.pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
	.radar_found_cmdid = WMI_CMD_UNSUPPORTED,
	.set_bb_timing_cmdid = WMI_10_2_PDEV_SET_BB_TIMING_CONFIG_CMDID,
};

/* 10.4 WMI cmd track */
static struct wmi_cmd_map wmi_10_4_cmd_map = {
	.init_cmdid = WMI_10_4_INIT_CMDID,
	.start_scan_cmdid = WMI_10_4_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_10_4_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_10_4_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID,
	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_regdomain_cmdid = WMI_10_4_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_10_4_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_10_4_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_10_4_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_10_4_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_10_4_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_10_4_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_10_4_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_10_4_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_10_4_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_10_4_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_10_4_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_10_4_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_10_4_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_10_4_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_10_4_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_10_4_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_10_4_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_10_4_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_10_4_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_10_4_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_10_4_BCN_PRB_TMPL_CMDID,
	.bcn_filter_rx_cmdid = WMI_10_4_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_10_4_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_10_4_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_10_4_PRB_TMPL_CMDID,
	.addba_clear_resp_cmdid = WMI_10_4_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_10_4_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_10_4_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_10_4_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_10_4_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_10_4_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_10_4_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_10_4_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_10_4_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_10_4_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_10_4_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_10_4_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_10_4_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
				WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_10_4_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_10_4_OFL_SCAN_ADD_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_10_4_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_10_4_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_10_4_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_10_4_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_10_4_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID,
	.ap_ps_peer_param_cmdid = WMI_10_4_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID,
	.peer_rate_retry_sched_cmdid = WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
				WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
				WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
				WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
				WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_10_4_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_10_4_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_10_4_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_10_4_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
				WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_10_4_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
				WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_10_4_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_10_4_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
				WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid =
				WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_10_4_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
	.gtk_offload_cmdid = WMI_10_4_GTK_OFFLOAD_CMDID,
	.csa_offload_enable_cmdid = WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID,
	.csa_offload_chanswitch_cmdid = WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID,
	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
	.echo_cmdid = WMI_10_4_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_10_4_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_10_4_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_10_4_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_set_keepalive_cmdid = WMI_10_4_VDEV_SET_KEEPALIVE_CMDID,
	.vdev_get_keepalive_cmdid = WMI_10_4_VDEV_GET_KEEPALIVE_CMDID,
	.force_fw_hang_cmdid = WMI_10_4_FORCE_FW_HANG_CMDID,
	.gpio_config_cmdid = WMI_10_4_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_10_4_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_10_4_PDEV_GET_TEMPERATURE_CMDID,
	.vdev_set_wmm_params_cmdid = WMI_CMD_UNSUPPORTED,
	.adaptive_qcs_cmdid = WMI_CMD_UNSUPPORTED,
	.scan_update_request_cmdid = WMI_10_4_SCAN_UPDATE_REQUEST_CMDID,
	.vdev_standby_response_cmdid = WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID,
	.vdev_resume_response_cmdid = WMI_10_4_VDEV_RESUME_RESPONSE_CMDID,
	.wlan_peer_caching_add_peer_cmdid =
			WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID,
	.wlan_peer_caching_evict_peer_cmdid =
			WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID,
	.wlan_peer_caching_restore_peer_cmdid =
			WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID,
	.wlan_peer_caching_print_all_peers_info_cmdid =
			WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID,
	.peer_update_wds_entry_cmdid = WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID,
	.peer_add_proxy_sta_entry_cmdid =
			WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID,
	.rtt_keepalive_cmdid = WMI_10_4_RTT_KEEPALIVE_CMDID,
	.oem_req_cmdid = WMI_10_4_OEM_REQ_CMDID,
	.nan_cmdid = WMI_10_4_NAN_CMDID,
	.vdev_ratemask_cmdid = WMI_10_4_VDEV_RATEMASK_CMDID,
	.qboost_cfg_cmdid = WMI_10_4_QBOOST_CFG_CMDID,
	.pdev_smart_ant_enable_cmdid = WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID,
	.pdev_smart_ant_set_rx_antenna_cmdid =
			WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
	.peer_smart_ant_set_tx_antenna_cmdid =
			WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
	.peer_smart_ant_set_train_info_cmdid =
			WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
	.peer_smart_ant_set_node_config_ops_cmdid =
			WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
	.pdev_set_antenna_switch_table_cmdid =
			WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
	.pdev_set_ctl_table_cmdid = WMI_10_4_PDEV_SET_CTL_TABLE_CMDID,
	.pdev_set_mimogain_table_cmdid = WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID,
	.pdev_ratepwr_table_cmdid = WMI_10_4_PDEV_RATEPWR_TABLE_CMDID,
	.pdev_ratepwr_chainmsk_table_cmdid =
			WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
	.pdev_fips_cmdid = WMI_10_4_PDEV_FIPS_CMDID,
	.tt_set_conf_cmdid = WMI_10_4_TT_SET_CONF_CMDID,
	.fwtest_cmdid = WMI_10_4_FWTEST_CMDID,
	.vdev_atf_request_cmdid = WMI_10_4_VDEV_ATF_REQUEST_CMDID,
	.peer_atf_request_cmdid = WMI_10_4_PEER_ATF_REQUEST_CMDID,
	.pdev_get_ani_cck_config_cmdid = WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID,
	.pdev_get_ani_ofdm_config_cmdid =
			WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
	.pdev_reserve_ast_entry_cmdid = WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID,
	.pdev_get_nfcal_power_cmdid = WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID,
	.pdev_get_tpc_cmdid = WMI_10_4_PDEV_GET_TPC_CMDID,
	.pdev_get_ast_info_cmdid = WMI_10_4_PDEV_GET_AST_INFO_CMDID,
	.vdev_set_dscp_tid_map_cmdid = WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_get_info_cmdid = WMI_10_4_PDEV_GET_INFO_CMDID,
	.vdev_get_info_cmdid = WMI_10_4_VDEV_GET_INFO_CMDID,
	.vdev_filter_neighbor_rx_packets_cmdid =
			WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
	.mu_cal_start_cmdid = WMI_10_4_MU_CAL_START_CMDID,
	.set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID,
	.pdev_bss_chan_info_request_cmdid =
			WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
	.ext_resource_cfg_cmdid = WMI_10_4_EXT_RESOURCE_CFG_CMDID,
	.vdev_set_ie_cmdid = WMI_10_4_VDEV_SET_IE_CMDID,
	.set_lteu_config_cmdid = WMI_10_4_SET_LTEU_CONFIG_CMDID,
	.atf_ssid_grouping_request_cmdid =
			WMI_10_4_ATF_SSID_GROUPING_REQUEST_CMDID,
	.peer_atf_ext_request_cmdid = WMI_10_4_PEER_ATF_EXT_REQUEST_CMDID,
	.set_periodic_channel_stats_cfg_cmdid =
			WMI_10_4_SET_PERIODIC_CHANNEL_STATS_CONFIG,
	.peer_bwf_request_cmdid = WMI_10_4_PEER_BWF_REQUEST_CMDID,
	.btcoex_cfg_cmdid = WMI_10_4_BTCOEX_CFG_CMDID,
	.peer_tx_mu_txmit_count_cmdid = WMI_10_4_PEER_TX_MU_TXMIT_COUNT_CMDID,
	.peer_tx_mu_txmit_rstcnt_cmdid = WMI_10_4_PEER_TX_MU_TXMIT_RSTCNT_CMDID,
	.peer_gid_userpos_list_cmdid = WMI_10_4_PEER_GID_USERPOS_LIST_CMDID,
	.pdev_check_cal_version_cmdid = WMI_10_4_PDEV_CHECK_CAL_VERSION_CMDID,
	.coex_version_cfg_cmid = WMI_10_4_COEX_VERSION_CFG_CMID,
	.pdev_get_rx_filter_cmdid = WMI_10_4_PDEV_GET_RX_FILTER_CMDID,
	.pdev_extended_nss_cfg_cmdid = WMI_10_4_PDEV_EXTENDED_NSS_CFG_CMDID,
	.vdev_set_scan_nac_rssi_cmdid = WMI_10_4_VDEV_SET_SCAN_NAC_RSSI_CMDID,
	.prog_gpio_band_select_cmdid = WMI_10_4_PROG_GPIO_BAND_SELECT_CMDID,
	.config_smart_logging_cmdid = WMI_10_4_CONFIG_SMART_LOGGING_CMDID,
	.debug_fatal_condition_cmdid = WMI_10_4_DEBUG_FATAL_CONDITION_CMDID,
	.get_tsf_timer_cmdid = WMI_10_4_GET_TSF_TIMER_CMDID,
	.pdev_get_tpc_table_cmdid = WMI_10_4_PDEV_GET_TPC_TABLE_CMDID,
	.vdev_sifs_trigger_time_cmdid = WMI_10_4_VDEV_SIFS_TRIGGER_TIME_CMDID,
	.pdev_wds_entry_list_cmdid = WMI_10_4_PDEV_WDS_ENTRY_LIST_CMDID,
	.tdls_set_state_cmdid = WMI_10_4_TDLS_SET_STATE_CMDID,
	.tdls_peer_update_cmdid = WMI_10_4_TDLS_PEER_UPDATE_CMDID,
	.tdls_set_offchan_mode_cmdid = WMI_10_4_TDLS_SET_OFFCHAN_MODE_CMDID,
	.radar_found_cmdid = WMI_10_4_RADAR_FOUND_CMDID,
	.per_peer_per_tid_config_cmdid = WMI_10_4_PER_PEER_PER_TID_CONFIG_CMDID,
};
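/*
 * Map of generic peer parameter indices to the WMI_PEER_* values passed
 * in the peer set-param command.
 */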
static struct wmi_peer_param_map wmi_peer_param_map = {
	.smps_state = WMI_PEER_SMPS_STATE,
	.ampdu = WMI_PEER_AMPDU,
	.authorize = WMI_PEER_AUTHORIZE,
	.chan_width = WMI_PEER_CHAN_WIDTH,
	.nss = WMI_PEER_NSS,
	.use_4addr = WMI_PEER_USE_4ADDR,
	.use_fixed_power = WMI_PEER_USE_FIXED_PWR,
	.debug = WMI_PEER_DEBUG,
	.phymode = WMI_PEER_PHYMODE,
	.dummy_var = WMI_PEER_DUMMY_VAR,
};

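/*
 * Vdev parameter maps translate generic vdev parameter indices into the
 * firmware-specific WMI_VDEV_PARAM_* values; parameters not available in
 * a given firmware branch are marked WMI_VDEV_PARAM_UNSUPPORTED.
 */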
/* MAIN WMI VDEV param map */
static struct wmi_vdev_param_map wmi_vdev_param_map = {
	.rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
					WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_VDEV_PARAM_WDS,
	.atim_window = WMI_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
	.feature_wmm = WMI_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_VDEV_PARAM_SGI,
	.ldpc = WMI_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
			WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
			WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
			WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.enable_rtscts = WMI_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_TXBF,
	.packet_powersave = WMI_VDEV_PARAM_PACKET_POWERSAVE,
	.drop_unencry = WMI_VDEV_PARAM_DROP_UNENCRY,
	.tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
					WMI_VDEV_PARAM_UNSUPPORTED,
	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED,
	.rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED,
};

/* 10.X WMI VDEV param map */
static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
	.rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
				WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_10X_VDEV_PARAM_WDS,
	.atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_10X_VDEV_PARAM_SGI,
	.ldpc = WMI_10X_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_10X_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
	.enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_UNSUPPORTED,
	.packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
	.drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
		WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED,
	.rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED,
};
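/* 10.2.4 WMI VDEV param map */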
static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
	.rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
				WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_10X_VDEV_PARAM_WDS,
	.atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
	.feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_10X_VDEV_PARAM_SGI,
	.ldpc = WMI_10X_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_10X_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
	.enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_VDEV_PARAM_UNSUPPORTED,
	.packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
	.drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
		WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED,
	.rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED,
};
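/* 10.4 WMI VDEV param map */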
static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
	.rts_threshold = WMI_10_4_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_10_4_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_10_4_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_10_4_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_10_4_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_10_4_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_10_4_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_10_4_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_10_4_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_10_4_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
	       WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_10_4_VDEV_PARAM_WDS,
	.atim_window = WMI_10_4_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT,
	.bmiss_final_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT,
	.feature_wmm = WMI_10_4_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_10_4_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_10_4_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_10_4_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_10_4_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_10_4_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_10_4_VDEV_PARAM_SGI,
	.ldpc = WMI_10_4_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_10_4_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_10_4_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_10_4_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_10_4_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_10_4_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_10_4_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
	       WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
	       WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
	       WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET,
	.enable_rtscts = WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_10_4_VDEV_PARAM_TXBF,
	.packet_powersave = WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE,
	.drop_unencry = WMI_10_4_VDEV_PARAM_DROP_UNENCRY,
	.tx_encap_type = WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
	       WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
	.rc_num_retries = WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES,
	.cabq_maxdur = WMI_10_4_VDEV_PARAM_CABQ_MAXDUR,
	.mfptest_set = WMI_10_4_VDEV_PARAM_MFPTEST_SET,
	.rts_fixed_rate = WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE,
	.vht_sgimask = WMI_10_4_VDEV_PARAM_VHT_SGIMASK,
	.vht80_ratemask = WMI_10_4_VDEV_PARAM_VHT80_RATEMASK,
	.early_rx_adjust_enable = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
	.early_rx_tgt_bmiss_num = WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
	.early_rx_bmiss_sample_cycle =
	       WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
	.early_rx_slop_step = WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP,
	.early_rx_init_slop = WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP,
	.early_rx_adjust_pause = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
	.proxy_sta = WMI_10_4_VDEV_PARAM_PROXY_STA,
	.meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC,
	.rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
	.bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
	.inc_tsf = WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
	.dec_tsf = WMI_10_4_VDEV_PARAM_TSF_DECREMENT,
	.disable_4addr_src_lrn = WMI_10_4_VDEV_PARAM_DISABLE_4_ADDR_SRC_LRN,
	.rtt_responder_role = WMI_10_4_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE,
};
1068 
1069 static struct wmi_pdev_param_map wmi_pdev_param_map = {
1070 	.tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK,
1071 	.rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK,
1072 	.txpower_limit2g = WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
1073 	.txpower_limit5g = WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
1074 	.txpower_scale = WMI_PDEV_PARAM_TXPOWER_SCALE,
1075 	.beacon_gen_mode = WMI_PDEV_PARAM_BEACON_GEN_MODE,
1076 	.beacon_tx_mode = WMI_PDEV_PARAM_BEACON_TX_MODE,
1077 	.resmgr_offchan_mode = WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
1078 	.protection_mode = WMI_PDEV_PARAM_PROTECTION_MODE,
1079 	.dynamic_bw = WMI_PDEV_PARAM_DYNAMIC_BW,
1080 	.non_agg_sw_retry_th = WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
1081 	.agg_sw_retry_th = WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
1082 	.sta_kickout_th = WMI_PDEV_PARAM_STA_KICKOUT_TH,
1083 	.ac_aggrsize_scaling = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
1084 	.ltr_enable = WMI_PDEV_PARAM_LTR_ENABLE,
1085 	.ltr_ac_latency_be = WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
1086 	.ltr_ac_latency_bk = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
1087 	.ltr_ac_latency_vi = WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
1088 	.ltr_ac_latency_vo = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
1089 	.ltr_ac_latency_timeout = WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
1090 	.ltr_sleep_override = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
1091 	.ltr_rx_override = WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
1092 	.ltr_tx_activity_timeout = WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
1093 	.l1ss_enable = WMI_PDEV_PARAM_L1SS_ENABLE,
1094 	.dsleep_enable = WMI_PDEV_PARAM_DSLEEP_ENABLE,
1095 	.pcielp_txbuf_flush = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
1096 	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
1097 	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
1098 	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
1099 	.pdev_stats_update_period = WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
1100 	.vdev_stats_update_period = WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
1101 	.peer_stats_update_period = WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
1102 	.bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
1103 	.pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
1104 	.arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
1105 	.dcs = WMI_PDEV_PARAM_DCS,
1106 	.ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
1107 	.ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
1108 	.ani_listen_period = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
1109 	.ani_ofdm_level = WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
1110 	.ani_cck_level = WMI_PDEV_PARAM_ANI_CCK_LEVEL,
1111 	.dyntxchain = WMI_PDEV_PARAM_DYNTXCHAIN,
1112 	.proxy_sta = WMI_PDEV_PARAM_PROXY_STA,
1113 	.idle_ps_config = WMI_PDEV_PARAM_IDLE_PS_CONFIG,
1114 	.power_gating_sleep = WMI_PDEV_PARAM_POWER_GATING_SLEEP,
1115 	.fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
1116 	.burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
1117 	.burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1118 	.cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
1119 	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
1120 	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1121 	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
1122 	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
1123 	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1124 	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
1125 	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
1126 	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1127 	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1128 	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1129 	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1130 	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1131 	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1132 	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
1133 	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
1134 	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1135 	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1136 	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1137 	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1138 	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1139 	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1140 	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
1141 	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
1142 	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
1143 	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1144 	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1145 	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
1146 	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
1147 	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
1148 	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
1149 	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
1150 	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
1151 	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
1152 	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
1153 	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
1154 	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
1155 	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1156 	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
1157 	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
1158 	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
1159 	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1160 	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1161 	.enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
1162 };
1163 
1164 static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
1165 	.tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
1166 	.rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
1167 	.txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
1168 	.txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
1169 	.txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
1170 	.beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
1171 	.beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
1172 	.resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
1173 	.protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
1174 	.dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
1175 	.non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
1176 	.agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
1177 	.sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
1178 	.ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
1179 	.ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
1180 	.ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
1181 	.ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
1182 	.ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
1183 	.ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
1184 	.ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
1185 	.ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
1186 	.ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
1187 	.ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
1188 	.l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
1189 	.dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
1190 	.pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
1191 	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
1192 	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
1193 	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
1194 	.pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
1195 	.vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
1196 	.peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
1197 	.bcnflt_stats_update_period =
1198 				WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
1199 	.pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
1200 	.arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
1201 	.dcs = WMI_10X_PDEV_PARAM_DCS,
1202 	.ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
1203 	.ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
1204 	.ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
1205 	.ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
1206 	.ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
1207 	.dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
1208 	.proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
1209 	.idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
1210 	.power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
1211 	.fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
1212 	.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
1213 	.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
1214 	.cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
1215 	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
1216 	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1217 	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
1218 	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
1219 	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1220 	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
1221 	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
1222 	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1223 	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1224 	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1225 	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1226 	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1227 	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1228 	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
1229 	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
1230 	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1231 	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1232 	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1233 	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1234 	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1235 	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1236 	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
1237 	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
1238 	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
1239 	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1240 	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1241 	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
1242 	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
1243 	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
1244 	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
1245 	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
1246 	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
1247 	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
1248 	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
1249 	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
1250 	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
1251 	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1252 	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
1253 	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
1254 	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
1255 	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1256 	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1257 	.enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
1258 };
1259 
1260 static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
1261 	.tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
1262 	.rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
1263 	.txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
1264 	.txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
1265 	.txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
1266 	.beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
1267 	.beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
1268 	.resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
1269 	.protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
1270 	.dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
1271 	.non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
1272 	.agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
1273 	.sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
1274 	.ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
1275 	.ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
1276 	.ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
1277 	.ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
1278 	.ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
1279 	.ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
1280 	.ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
1281 	.ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
1282 	.ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
1283 	.ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
1284 	.l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
1285 	.dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
1286 	.pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
1287 	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
1288 	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
1289 	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
1290 	.pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
1291 	.vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
1292 	.peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
1293 	.bcnflt_stats_update_period =
1294 				WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
1295 	.pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
1296 	.arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
1297 	.dcs = WMI_10X_PDEV_PARAM_DCS,
1298 	.ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
1299 	.ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
1300 	.ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
1301 	.ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
1302 	.ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
1303 	.dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
1304 	.proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
1305 	.idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
1306 	.power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
1307 	.fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
1308 	.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
1309 	.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
1310 	.cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
1311 	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
1312 	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1313 	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
1314 	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
1315 	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1316 	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
1317 	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
1318 	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
1319 	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1320 	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
1321 	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1322 	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
1323 	.peer_sta_ps_statechg_enable =
1324 				WMI_10X_PDEV_PARAM_PEER_STA_PS_STATECHG_ENABLE,
1325 	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
1326 	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
1327 	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1328 	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1329 	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1330 	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1331 	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1332 	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
1333 	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
1334 	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
1335 	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
1336 	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1337 	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
1338 	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
1339 	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
1340 	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
1341 	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
1342 	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
1343 	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
1344 	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
1345 	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
1346 	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
1347 	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
1348 	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
1349 	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
1350 	.pdev_reset = WMI_10X_PDEV_PARAM_PDEV_RESET,
1351 	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
1352 	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1353 	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
1354 	.enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
1355 };
1356 
1357 /* firmware 10.2 specific mappings */
1358 static struct wmi_cmd_map wmi_10_2_cmd_map = {
1359 	.init_cmdid = WMI_10_2_INIT_CMDID,
1360 	.start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
1361 	.stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
1362 	.scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
1363 	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
1364 	.scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
1365 	.pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
1366 	.pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
1367 	.pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
1368 	.pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
1369 	.pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
1370 	.pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
1371 	.pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
1372 	.pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
1373 	.pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
1374 	.pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
1375 	.pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
1376 	.pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
1377 	.vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
1378 	.vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
1379 	.vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
1380 	.vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
1381 	.vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
1382 	.vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
1383 	.vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
1384 	.vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
1385 	.vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
1386 	.peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
1387 	.peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
1388 	.peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
1389 	.peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
1390 	.peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
1391 	.peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
1392 	.peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
1393 	.peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
1394 	.bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
1395 	.pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
1396 	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
1397 	.bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
1398 	.prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
1399 	.mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
1400 	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
1401 	.addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
1402 	.addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
1403 	.addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
1404 	.delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
1405 	.addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
1406 	.send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
1407 	.sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
1408 	.sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
1409 	.sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
1410 	.pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
1411 	.pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
1412 	.roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
1413 	.roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
1414 	.roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
1415 	.roam_scan_rssi_change_threshold =
1416 				WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
1417 	.roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
1418 	.ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
1419 	.ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
1420 	.ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
1421 	.p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
1422 	.p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
1423 	.p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
1424 	.p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
1425 	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
1426 	.ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
1427 	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
1428 	.peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
1429 	.wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
1430 	.wlan_profile_set_hist_intvl_cmdid =
1431 				WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
1432 	.wlan_profile_get_profile_data_cmdid =
1433 				WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
1434 	.wlan_profile_enable_profile_id_cmdid =
1435 				WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
1436 	.wlan_profile_list_profile_id_cmdid =
1437 				WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
1438 	.pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
1439 	.pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
1440 	.add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
1441 	.rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
1442 	.wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
1443 	.wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
1444 	.wow_enable_disable_wake_event_cmdid =
1445 				WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
1446 	.wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
1447 	.wow_hostwakeup_from_sleep_cmdid =
1448 				WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
1449 	.rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
1450 	.rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
1451 	.vdev_spectral_scan_configure_cmdid =
1452 				WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
1453 	.vdev_spectral_scan_enable_cmdid =
1454 				WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
1455 	.request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
1456 	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
1457 	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
1458 	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
1459 	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
1460 	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
1461 	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
1462 	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
1463 	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
1464 	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
1465 	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
1466 	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
1467 	.echo_cmdid = WMI_10_2_ECHO_CMDID,
1468 	.pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
1469 	.dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
1470 	.pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
1471 	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
1472 	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
1473 	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
1474 	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
1475 	.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
1476 	.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
1477 	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
1478 	.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
1479 	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
1480 	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
1481 	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
1482 	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
1483 	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
1484 	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
1485 	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
1486 	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
1487 	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
1488 	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
1489 	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
1490 	.nan_cmdid = WMI_CMD_UNSUPPORTED,
1491 	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
1492 	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
1493 	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
1494 	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
1495 	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
1496 	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
1497 	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
1498 	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
1499 	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
1500 	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
1501 	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
1502 	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
1503 	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
1504 	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
1505 	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
1506 	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
1507 	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
1508 	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
1509 	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
1510 	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
1511 	.pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
1512 	.radar_found_cmdid = WMI_CMD_UNSUPPORTED,
1513 };
1514 
1515 static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = {
1516 	.tx_chain_mask = WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK,
1517 	.rx_chain_mask = WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK,
1518 	.txpower_limit2g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G,
1519 	.txpower_limit5g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G,
1520 	.txpower_scale = WMI_10_4_PDEV_PARAM_TXPOWER_SCALE,
1521 	.beacon_gen_mode = WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE,
1522 	.beacon_tx_mode = WMI_10_4_PDEV_PARAM_BEACON_TX_MODE,
1523 	.resmgr_offchan_mode = WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
1524 	.protection_mode = WMI_10_4_PDEV_PARAM_PROTECTION_MODE,
1525 	.dynamic_bw = WMI_10_4_PDEV_PARAM_DYNAMIC_BW,
1526 	.non_agg_sw_retry_th = WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
1527 	.agg_sw_retry_th = WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH,
1528 	.sta_kickout_th = WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH,
1529 	.ac_aggrsize_scaling = WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING,
1530 	.ltr_enable = WMI_10_4_PDEV_PARAM_LTR_ENABLE,
1531 	.ltr_ac_latency_be = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE,
1532 	.ltr_ac_latency_bk = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK,
1533 	.ltr_ac_latency_vi = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI,
1534 	.ltr_ac_latency_vo = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO,
1535 	.ltr_ac_latency_timeout = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
1536 	.ltr_sleep_override = WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
1537 	.ltr_rx_override = WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE,
1538 	.ltr_tx_activity_timeout = WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
1539 	.l1ss_enable = WMI_10_4_PDEV_PARAM_L1SS_ENABLE,
1540 	.dsleep_enable = WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE,
1541 	.pcielp_txbuf_flush = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
1542 	.pcielp_txbuf_watermark = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
1543 	.pcielp_txbuf_tmo_en = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
1544 	.pcielp_txbuf_tmo_value = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
1545 	.pdev_stats_update_period =
1546 			WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
1547 	.vdev_stats_update_period =
1548 			WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
1549 	.peer_stats_update_period =
1550 			WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
1551 	.bcnflt_stats_update_period =
1552 			WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
1553 	.pmf_qos = WMI_10_4_PDEV_PARAM_PMF_QOS,
1554 	.arp_ac_override = WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE,
1555 	.dcs = WMI_10_4_PDEV_PARAM_DCS,
1556 	.ani_enable = WMI_10_4_PDEV_PARAM_ANI_ENABLE,
1557 	.ani_poll_period = WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD,
1558 	.ani_listen_period = WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD,
1559 	.ani_ofdm_level = WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL,
1560 	.ani_cck_level = WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL,
1561 	.dyntxchain = WMI_10_4_PDEV_PARAM_DYNTXCHAIN,
1562 	.proxy_sta = WMI_10_4_PDEV_PARAM_PROXY_STA,
1563 	.idle_ps_config = WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG,
1564 	.power_gating_sleep = WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP,
1565 	.fast_channel_reset = WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET,
1566 	.burst_dur = WMI_10_4_PDEV_PARAM_BURST_DUR,
1567 	.burst_enable = WMI_10_4_PDEV_PARAM_BURST_ENABLE,
1568 	.cal_period = WMI_10_4_PDEV_PARAM_CAL_PERIOD,
1569 	.aggr_burst = WMI_10_4_PDEV_PARAM_AGGR_BURST,
1570 	.rx_decap_mode = WMI_10_4_PDEV_PARAM_RX_DECAP_MODE,
1571 	.smart_antenna_default_antenna =
1572 			WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
1573 	.igmpmld_override = WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE,
1574 	.igmpmld_tid = WMI_10_4_PDEV_PARAM_IGMPMLD_TID,
1575 	.antenna_gain = WMI_10_4_PDEV_PARAM_ANTENNA_GAIN,
1576 	.rx_filter = WMI_10_4_PDEV_PARAM_RX_FILTER,
1577 	.set_mcast_to_ucast_tid = WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID,
1578 	.proxy_sta_mode = WMI_10_4_PDEV_PARAM_PROXY_STA_MODE,
1579 	.set_mcast2ucast_mode = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE,
1580 	.set_mcast2ucast_buffer = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
1581 	.remove_mcast2ucast_buffer =
1582 			WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
1583 	.peer_sta_ps_statechg_enable =
1584 			WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE,
1585 	.igmpmld_ac_override = WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
1586 	.block_interbss = WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS,
1587 	.set_disable_reset_cmdid = WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
1588 	.set_msdu_ttl_cmdid = WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID,
1589 	.set_ppdu_duration_cmdid = WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
1590 	.txbf_sound_period_cmdid = WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
1591 	.set_promisc_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
1592 	.set_burst_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID,
1593 	.en_stats = WMI_10_4_PDEV_PARAM_EN_STATS,
1594 	.mu_group_policy = WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY,
1595 	.noise_detection = WMI_10_4_PDEV_PARAM_NOISE_DETECTION,
1596 	.noise_threshold = WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD,
1597 	.dpd_enable = WMI_10_4_PDEV_PARAM_DPD_ENABLE,
1598 	.set_mcast_bcast_echo = WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
1599 	.atf_strict_sch = WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH,
1600 	.atf_sched_duration = WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION,
1601 	.ant_plzn = WMI_10_4_PDEV_PARAM_ANT_PLZN,
1602 	.mgmt_retry_limit = WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT,
1603 	.sensitivity_level = WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL,
1604 	.signed_txpower_2g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G,
1605 	.signed_txpower_5g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G,
1606 	.enable_per_tid_amsdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
1607 	.enable_per_tid_ampdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
1608 	.cca_threshold = WMI_10_4_PDEV_PARAM_CCA_THRESHOLD,
1609 	.rts_fixed_rate = WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE,
1610 	.pdev_reset = WMI_10_4_PDEV_PARAM_PDEV_RESET,
1611 	.wapi_mbssid_offset = WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
1612 	.arp_srcaddr = WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
1613 	.arp_dstaddr = WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
1614 	.enable_btcoex = WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX,
1615 };
1616 
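/* Map the driver's WMI_CIPHER_* values to the cipher IDs expected by the
 * firmware: the non-TLV table below is an identity mapping, while the TLV
 * table translates to the WMI_TLV_CIPHER_* encoding.
 */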
1617 static const u8 wmi_key_cipher_suites[] = {
1618 	[WMI_CIPHER_NONE] = WMI_CIPHER_NONE,
1619 	[WMI_CIPHER_WEP] = WMI_CIPHER_WEP,
1620 	[WMI_CIPHER_TKIP] = WMI_CIPHER_TKIP,
1621 	[WMI_CIPHER_AES_OCB] = WMI_CIPHER_AES_OCB,
1622 	[WMI_CIPHER_AES_CCM] = WMI_CIPHER_AES_CCM,
1623 	[WMI_CIPHER_WAPI] = WMI_CIPHER_WAPI,
1624 	[WMI_CIPHER_CKIP] = WMI_CIPHER_CKIP,
1625 	[WMI_CIPHER_AES_CMAC] = WMI_CIPHER_AES_CMAC,
1626 	[WMI_CIPHER_AES_GCM] = WMI_CIPHER_AES_GCM,
1627 };
1628 
1629 static const u8 wmi_tlv_key_cipher_suites[] = {
1630 	[WMI_CIPHER_NONE] = WMI_TLV_CIPHER_NONE,
1631 	[WMI_CIPHER_WEP] = WMI_TLV_CIPHER_WEP,
1632 	[WMI_CIPHER_TKIP] = WMI_TLV_CIPHER_TKIP,
1633 	[WMI_CIPHER_AES_OCB] = WMI_TLV_CIPHER_AES_OCB,
1634 	[WMI_CIPHER_AES_CCM] = WMI_TLV_CIPHER_AES_CCM,
1635 	[WMI_CIPHER_WAPI] = WMI_TLV_CIPHER_WAPI,
1636 	[WMI_CIPHER_CKIP] = WMI_TLV_CIPHER_CKIP,
1637 	[WMI_CIPHER_AES_CMAC] = WMI_TLV_CIPHER_AES_CMAC,
1638 	[WMI_CIPHER_AES_GCM] = WMI_TLV_CIPHER_AES_GCM,
1639 };
1640 
1641 static const struct wmi_peer_flags_map wmi_peer_flags_map = {
1642 	.auth = WMI_PEER_AUTH,
1643 	.qos = WMI_PEER_QOS,
1644 	.need_ptk_4_way = WMI_PEER_NEED_PTK_4_WAY,
1645 	.need_gtk_2_way = WMI_PEER_NEED_GTK_2_WAY,
1646 	.apsd = WMI_PEER_APSD,
1647 	.ht = WMI_PEER_HT,
1648 	.bw40 = WMI_PEER_40MHZ,
1649 	.stbc = WMI_PEER_STBC,
1650 	.ldbc = WMI_PEER_LDPC,
1651 	.dyn_mimops = WMI_PEER_DYN_MIMOPS,
1652 	.static_mimops = WMI_PEER_STATIC_MIMOPS,
1653 	.spatial_mux = WMI_PEER_SPATIAL_MUX,
1654 	.vht = WMI_PEER_VHT,
1655 	.bw80 = WMI_PEER_80MHZ,
1656 	.vht_2g = WMI_PEER_VHT_2G,
1657 	.pmf = WMI_PEER_PMF,
1658 	.bw160 = WMI_PEER_160MHZ,
1659 };
1660 
1661 static const struct wmi_peer_flags_map wmi_10x_peer_flags_map = {
1662 	.auth = WMI_10X_PEER_AUTH,
1663 	.qos = WMI_10X_PEER_QOS,
1664 	.need_ptk_4_way = WMI_10X_PEER_NEED_PTK_4_WAY,
1665 	.need_gtk_2_way = WMI_10X_PEER_NEED_GTK_2_WAY,
1666 	.apsd = WMI_10X_PEER_APSD,
1667 	.ht = WMI_10X_PEER_HT,
1668 	.bw40 = WMI_10X_PEER_40MHZ,
1669 	.stbc = WMI_10X_PEER_STBC,
1670 	.ldbc = WMI_10X_PEER_LDPC,
1671 	.dyn_mimops = WMI_10X_PEER_DYN_MIMOPS,
1672 	.static_mimops = WMI_10X_PEER_STATIC_MIMOPS,
1673 	.spatial_mux = WMI_10X_PEER_SPATIAL_MUX,
1674 	.vht = WMI_10X_PEER_VHT,
1675 	.bw80 = WMI_10X_PEER_80MHZ,
1676 	.bw160 = WMI_10X_PEER_160MHZ,
1677 };
1678 
1679 static const struct wmi_peer_flags_map wmi_10_2_peer_flags_map = {
1680 	.auth = WMI_10_2_PEER_AUTH,
1681 	.qos = WMI_10_2_PEER_QOS,
1682 	.need_ptk_4_way = WMI_10_2_PEER_NEED_PTK_4_WAY,
1683 	.need_gtk_2_way = WMI_10_2_PEER_NEED_GTK_2_WAY,
1684 	.apsd = WMI_10_2_PEER_APSD,
1685 	.ht = WMI_10_2_PEER_HT,
1686 	.bw40 = WMI_10_2_PEER_40MHZ,
1687 	.stbc = WMI_10_2_PEER_STBC,
1688 	.ldbc = WMI_10_2_PEER_LDPC,
1689 	.dyn_mimops = WMI_10_2_PEER_DYN_MIMOPS,
1690 	.static_mimops = WMI_10_2_PEER_STATIC_MIMOPS,
1691 	.spatial_mux = WMI_10_2_PEER_SPATIAL_MUX,
1692 	.vht = WMI_10_2_PEER_VHT,
1693 	.bw80 = WMI_10_2_PEER_80MHZ,
1694 	.vht_2g = WMI_10_2_PEER_VHT_2G,
1695 	.pmf = WMI_10_2_PEER_PMF,
1696 	.bw160 = WMI_10_2_PEER_160MHZ,
1697 };
1698 
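/* Convert a host wmi_channel_arg into the firmware's wmi_channel layout:
 * gather the channel flags, fill in the segment center frequencies
 * (adjusted for VHT80+80 and VHT160) and set WMI_CHAN_FLAG_DFS_CFREQ2 when
 * the second segment falls on a radar channel.
 */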
1699 void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch,
1700 				const struct wmi_channel_arg *arg)
1701 {
1702 	u32 flags = 0;
1703 	struct ieee80211_channel *chan = NULL;
1704 
1705 	memset(ch, 0, sizeof(*ch));
1706 
1707 	if (arg->passive)
1708 		flags |= WMI_CHAN_FLAG_PASSIVE;
1709 	if (arg->allow_ibss)
1710 		flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
1711 	if (arg->allow_ht)
1712 		flags |= WMI_CHAN_FLAG_ALLOW_HT;
1713 	if (arg->allow_vht)
1714 		flags |= WMI_CHAN_FLAG_ALLOW_VHT;
1715 	if (arg->ht40plus)
1716 		flags |= WMI_CHAN_FLAG_HT40_PLUS;
1717 	if (arg->chan_radar)
1718 		flags |= WMI_CHAN_FLAG_DFS;
1719 
1720 	ch->band_center_freq2 = 0;
1721 	ch->mhz = __cpu_to_le32(arg->freq);
1722 	ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1);
1723 	if (arg->mode == MODE_11AC_VHT80_80) {
1724 		ch->band_center_freq2 = __cpu_to_le32(arg->band_center_freq2);
1725 		chan = ieee80211_get_channel(ar->hw->wiphy,
1726 					     arg->band_center_freq2 - 10);
1727 	}
1728 
1729 	if (arg->mode == MODE_11AC_VHT160) {
1730 		u32 band_center_freq1;
1731 		u32 band_center_freq2;
1732 
1733 		if (arg->freq > arg->band_center_freq1) {
1734 			band_center_freq1 = arg->band_center_freq1 + 40;
1735 			band_center_freq2 = arg->band_center_freq1 - 40;
1736 		} else {
1737 			band_center_freq1 = arg->band_center_freq1 - 40;
1738 			band_center_freq2 = arg->band_center_freq1 + 40;
1739 		}
1740 
1741 		ch->band_center_freq1 =
1742 					__cpu_to_le32(band_center_freq1);
1743 		/* Minus 10 to get a defined 5G channel frequency */
1744 		chan = ieee80211_get_channel(ar->hw->wiphy,
1745 					     band_center_freq2 - 10);
1746 		/* The center frequency of the entire VHT160 */
1747 		ch->band_center_freq2 = __cpu_to_le32(arg->band_center_freq1);
1748 	}
1749 
1750 	if (chan && chan->flags & IEEE80211_CHAN_RADAR)
1751 		flags |= WMI_CHAN_FLAG_DFS_CFREQ2;
1752 
1753 	ch->min_power = arg->min_power;
1754 	ch->max_power = arg->max_power;
1755 	ch->reg_power = arg->max_reg_power;
1756 	ch->antenna_max = arg->max_antenna_gain;
1757 	ch->max_tx_power = arg->max_power;
1758 
1759 	/* mode & flags share storage */
1760 	ch->mode = arg->mode;
1761 	ch->flags |= __cpu_to_le32(flags);
1762 }
1763 
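/* Wait for the firmware's service ready event. If it does not arrive in
 * time, poll the copy engine rings once (see the workaround note below) and
 * wait again before giving up with -ETIMEDOUT.
 */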
1764 int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
1765 {
1766 	unsigned long time_left, i;
1767 
1768 	time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
1769 						WMI_SERVICE_READY_TIMEOUT_HZ);
1770 	if (!time_left) {
1771 		/* Sometimes the PCI HIF doesn't receive an interrupt
1772 		 * for the service ready message even if the buffer
1773 		 * was completed. A PCIe sniffer shows that it's
1774 		 * because the corresponding CE ring doesn't fire
1775 		 * it. Work around this here by polling the CE rings once.
1776 		 */
1777 		ath10k_warn(ar, "failed to receive service ready completion, polling..\n");
1778 
1779 		for (i = 0; i < CE_COUNT; i++)
1780 			ath10k_hif_send_complete_check(ar, i, 1);
1781 
1782 		time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
1783 							WMI_SERVICE_READY_TIMEOUT_HZ);
1784 		if (!time_left) {
1785 			ath10k_warn(ar, "polling timed out\n");
1786 			return -ETIMEDOUT;
1787 		}
1788 
1789 		ath10k_warn(ar, "service ready completion received, continuing normally\n");
1790 	}
1791 
1792 	return 0;
1793 }
1794 
1795 int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
1796 {
1797 	unsigned long time_left;
1798 
1799 	time_left = wait_for_completion_timeout(&ar->wmi.unified_ready,
1800 						WMI_UNIFIED_READY_TIMEOUT_HZ);
1801 	if (!time_left)
1802 		return -ETIMEDOUT;
1803 	return 0;
1804 }
1805 
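/* Allocate an skb for a WMI command: round the requested length up to a
 * 4-byte multiple, reserve WMI_SKB_HEADROOM for headers pushed later and
 * zero the payload.
 */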
1806 struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len)
1807 {
1808 	struct sk_buff *skb;
1809 	u32 round_len = roundup(len, 4);
1810 
1811 	skb = ath10k_htc_alloc_skb(ar, WMI_SKB_HEADROOM + round_len);
1812 	if (!skb)
1813 		return NULL;
1814 
1815 	skb_reserve(skb, WMI_SKB_HEADROOM);
1816 	if (!IS_ALIGNED((unsigned long)skb->data, 4))
1817 		ath10k_warn(ar, "Unaligned WMI skb\n");
1818 
1819 	skb_put(skb, round_len);
1820 	memset(skb->data, 0, round_len);
1821 
1822 	return skb;
1823 }
1824 
1825 static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
1826 {
1827 	dev_kfree_skb(skb);
1828 }
1829 
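/* Push the WMI command header in front of the payload and queue the skb on
 * the WMI HTC endpoint without waiting for TX credits.
 */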
1830 int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
1831 			       u32 cmd_id)
1832 {
1833 	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
1834 	struct wmi_cmd_hdr *cmd_hdr;
1835 	int ret;
1836 	u32 cmd = 0;
1837 
1838 	if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
1839 		return -ENOMEM;
1840 
1841 	cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID);
1842 
1843 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
1844 	cmd_hdr->cmd_id = __cpu_to_le32(cmd);
1845 
1846 	memset(skb_cb, 0, sizeof(*skb_cb));
1847 	trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len);
1848 	ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
1849 
1850 	if (ret)
1851 		goto err_pull;
1852 
1853 	return 0;
1854 
1855 err_pull:
1856 	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
1857 	return ret;
1858 }
1859 
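/* Send a scheduled beacon for one vif by reference (DMA address) without
 * blocking. The beacon state is tracked under ar->data_lock; on failure the
 * beacon remains scheduled so it can be retried on a later TX credit
 * report.
 */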
1860 static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
1861 {
1862 	struct ath10k *ar = arvif->ar;
1863 	struct ath10k_skb_cb *cb;
1864 	struct sk_buff *bcn;
1865 	bool dtim_zero;
1866 	bool deliver_cab;
1867 	int ret;
1868 
1869 	spin_lock_bh(&ar->data_lock);
1870 
1871 	bcn = arvif->beacon;
1872 
1873 	if (!bcn)
1874 		goto unlock;
1875 
1876 	cb = ATH10K_SKB_CB(bcn);
1877 
1878 	switch (arvif->beacon_state) {
1879 	case ATH10K_BEACON_SENDING:
1880 	case ATH10K_BEACON_SENT:
1881 		break;
1882 	case ATH10K_BEACON_SCHEDULED:
1883 		arvif->beacon_state = ATH10K_BEACON_SENDING;
1884 		spin_unlock_bh(&ar->data_lock);
1885 
1886 		dtim_zero = !!(cb->flags & ATH10K_SKB_F_DTIM_ZERO);
1887 		deliver_cab = !!(cb->flags & ATH10K_SKB_F_DELIVER_CAB);
1888 		ret = ath10k_wmi_beacon_send_ref_nowait(arvif->ar,
1889 							arvif->vdev_id,
1890 							bcn->data, bcn->len,
1891 							cb->paddr,
1892 							dtim_zero,
1893 							deliver_cab);
1894 
1895 		spin_lock_bh(&ar->data_lock);
1896 
1897 		if (ret == 0)
1898 			arvif->beacon_state = ATH10K_BEACON_SENT;
1899 		else
1900 			arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
1901 	}
1902 
1903 unlock:
1904 	spin_unlock_bh(&ar->data_lock);
1905 }
1906 
1907 static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
1908 				       struct ieee80211_vif *vif)
1909 {
1910 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
1911 
1912 	ath10k_wmi_tx_beacon_nowait(arvif);
1913 }
1914 
1915 static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
1916 {
1917 	ieee80211_iterate_active_interfaces_atomic(ar->hw,
1918 						   ATH10K_ITER_NORMAL_FLAGS,
1919 						   ath10k_wmi_tx_beacons_iter,
1920 						   NULL);
1921 }
1922 
1923 static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
1924 {
1925 	/* try to send pending beacons first. they take priority */
1926 	ath10k_wmi_tx_beacons_nowait(ar);
1927 
1928 	wake_up(&ar->wmi.tx_credits_wq);
1929 }
1930 
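/* Blocking WMI command send: retry ath10k_wmi_cmd_send_nowait() until TX
 * credits become available or the 3 second timeout expires, flushing
 * pending beacons before each attempt. On timeout (-EAGAIN) the skb is
 * freed and hardware recovery is started.
 */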
1931 int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
1932 {
1933 	int ret = -EOPNOTSUPP;
1934 
1935 	might_sleep();
1936 
1937 	if (cmd_id == WMI_CMD_UNSUPPORTED) {
1938 		ath10k_warn(ar, "wmi command %d is not supported by firmware\n",
1939 			    cmd_id);
1940 		return ret;
1941 	}
1942 
1943 	wait_event_timeout(ar->wmi.tx_credits_wq, ({
1944 		/* try to send pending beacons first. they take priority */
1945 		ath10k_wmi_tx_beacons_nowait(ar);
1946 
1947 		ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
1948 
1949 		if (ret && test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
1950 			ret = -ESHUTDOWN;
1951 
1952 		(ret != -EAGAIN);
1953 	}), 3 * HZ);
1954 
1955 	if (ret)
1956 		dev_kfree_skb_any(skb);
1957 
1958 	if (ret == -EAGAIN) {
1959 		ath10k_warn(ar, "wmi command %d timeout, restarting hardware\n",
1960 			    cmd_id);
1961 		ath10k_core_start_recovery(ar);
1962 	}
1963 
1964 	return ret;
1965 }
1966 
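/* Build a WMI_MGMT_TX_CMDID command carrying the management frame. For
 * protected robust management frames the buffer length is enlarged to leave
 * room for the MMIE (multicast) or the CCMP/GCMP MIC (unicast).
 */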
1967 static struct sk_buff *
1968 ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
1969 {
1970 	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
1971 	struct ath10k_vif *arvif;
1972 	struct wmi_mgmt_tx_cmd *cmd;
1973 	struct ieee80211_hdr *hdr;
1974 	struct sk_buff *skb;
1975 	int len;
1976 	u32 vdev_id;
1977 	u32 buf_len = msdu->len;
1978 	u16 fc;
1979 	const u8 *peer_addr;
1980 
1981 	hdr = (struct ieee80211_hdr *)msdu->data;
1982 	fc = le16_to_cpu(hdr->frame_control);
1983 
1984 	if (cb->vif) {
1985 		arvif = (void *)cb->vif->drv_priv;
1986 		vdev_id = arvif->vdev_id;
1987 	} else {
1988 		vdev_id = 0;
1989 	}
1990 
1991 	if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
1992 		return ERR_PTR(-EINVAL);
1993 
1994 	len = sizeof(cmd->hdr) + msdu->len;
1995 
1996 	if ((ieee80211_is_action(hdr->frame_control) ||
1997 	     ieee80211_is_deauth(hdr->frame_control) ||
1998 	     ieee80211_is_disassoc(hdr->frame_control)) &&
1999 	     ieee80211_has_protected(hdr->frame_control)) {
2000 		peer_addr = hdr->addr1;
2001 		if (is_multicast_ether_addr(peer_addr)) {
2002 			len += sizeof(struct ieee80211_mmie_16);
2003 			buf_len += sizeof(struct ieee80211_mmie_16);
2004 		} else {
2005 			if (cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP ||
2006 			    cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP_256) {
2007 				len += IEEE80211_GCMP_MIC_LEN;
2008 				buf_len += IEEE80211_GCMP_MIC_LEN;
2009 			} else {
2010 				len += IEEE80211_CCMP_MIC_LEN;
2011 				buf_len += IEEE80211_CCMP_MIC_LEN;
2012 			}
2013 		}
2014 	}
2015 
2016 	len = round_up(len, 4);
2017 
2018 	skb = ath10k_wmi_alloc_skb(ar, len);
2019 	if (!skb)
2020 		return ERR_PTR(-ENOMEM);
2021 
2022 	cmd = (struct wmi_mgmt_tx_cmd *)skb->data;
2023 
2024 	cmd->hdr.vdev_id = __cpu_to_le32(vdev_id);
2025 	cmd->hdr.tx_rate = 0;
2026 	cmd->hdr.tx_power = 0;
2027 	cmd->hdr.buf_len = __cpu_to_le32(buf_len);
2028 
2029 	ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
2030 	memcpy(cmd->buf, msdu->data, msdu->len);
2031 
2032 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %pK len %d ftype %02x stype %02x\n",
2033 		   msdu, skb->len, fc & IEEE80211_FCTL_FTYPE,
2034 		   fc & IEEE80211_FCTL_STYPE);
2035 	trace_ath10k_tx_hdr(ar, skb->data, skb->len);
2036 	trace_ath10k_tx_payload(ar, skb->data, skb->len);
2037 
2038 	return skb;
2039 }
2040 
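/* The scan event helpers below run under ar->data_lock and move
 * ar->scan.state through the IDLE/STARTING/RUNNING/ABORTING state machine
 * in response to the corresponding WMI scan events.
 */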
2041 static void ath10k_wmi_event_scan_started(struct ath10k *ar)
2042 {
2043 	lockdep_assert_held(&ar->data_lock);
2044 
2045 	switch (ar->scan.state) {
2046 	case ATH10K_SCAN_IDLE:
2047 	case ATH10K_SCAN_RUNNING:
2048 	case ATH10K_SCAN_ABORTING:
2049 		ath10k_warn(ar, "received scan started event in an invalid scan state: %s (%d)\n",
2050 			    ath10k_scan_state_str(ar->scan.state),
2051 			    ar->scan.state);
2052 		break;
2053 	case ATH10K_SCAN_STARTING:
2054 		ar->scan.state = ATH10K_SCAN_RUNNING;
2055 
2056 		if (ar->scan.is_roc)
2057 			ieee80211_ready_on_channel(ar->hw);
2058 
2059 		complete(&ar->scan.started);
2060 		break;
2061 	}
2062 }
2063 
2064 static void ath10k_wmi_event_scan_start_failed(struct ath10k *ar)
2065 {
2066 	lockdep_assert_held(&ar->data_lock);
2067 
2068 	switch (ar->scan.state) {
2069 	case ATH10K_SCAN_IDLE:
2070 	case ATH10K_SCAN_RUNNING:
2071 	case ATH10K_SCAN_ABORTING:
2072 		ath10k_warn(ar, "received scan start failed event in an invalid scan state: %s (%d)\n",
2073 			    ath10k_scan_state_str(ar->scan.state),
2074 			    ar->scan.state);
2075 		break;
2076 	case ATH10K_SCAN_STARTING:
2077 		complete(&ar->scan.started);
2078 		__ath10k_scan_finish(ar);
2079 		break;
2080 	}
2081 }
2082 
2083 static void ath10k_wmi_event_scan_completed(struct ath10k *ar)
2084 {
2085 	lockdep_assert_held(&ar->data_lock);
2086 
2087 	switch (ar->scan.state) {
2088 	case ATH10K_SCAN_IDLE:
2089 	case ATH10K_SCAN_STARTING:
2090 		/* One suspected reason scan can be completed while starting is
2091 		 * if firmware fails to deliver all scan events to the host,
2092 		 * e.g. when transport pipe is full. This has been observed
2093 		 * with spectral scan phyerr events starving wmi transport
2094 		 * pipe. In such a case the "scan completed" event should be (and
2095 		 * is) ignored by the host as it may just be the firmware's scan
2096 		 * state machine recovering.
2097 		 */
2098 		ath10k_warn(ar, "received scan completed event in an invalid scan state: %s (%d)\n",
2099 			    ath10k_scan_state_str(ar->scan.state),
2100 			    ar->scan.state);
2101 		break;
2102 	case ATH10K_SCAN_RUNNING:
2103 	case ATH10K_SCAN_ABORTING:
2104 		__ath10k_scan_finish(ar);
2105 		break;
2106 	}
2107 }
2108 
2109 static void ath10k_wmi_event_scan_bss_chan(struct ath10k *ar)
2110 {
2111 	lockdep_assert_held(&ar->data_lock);
2112 
2113 	switch (ar->scan.state) {
2114 	case ATH10K_SCAN_IDLE:
2115 	case ATH10K_SCAN_STARTING:
2116 		ath10k_warn(ar, "received scan bss chan event in an invalid scan state: %s (%d)\n",
2117 			    ath10k_scan_state_str(ar->scan.state),
2118 			    ar->scan.state);
2119 		break;
2120 	case ATH10K_SCAN_RUNNING:
2121 	case ATH10K_SCAN_ABORTING:
2122 		ar->scan_channel = NULL;
2123 		break;
2124 	}
2125 }
2126 
2127 static void ath10k_wmi_event_scan_foreign_chan(struct ath10k *ar, u32 freq)
2128 {
2129 	lockdep_assert_held(&ar->data_lock);
2130 
2131 	switch (ar->scan.state) {
2132 	case ATH10K_SCAN_IDLE:
2133 	case ATH10K_SCAN_STARTING:
2134 		ath10k_warn(ar, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
2135 			    ath10k_scan_state_str(ar->scan.state),
2136 			    ar->scan.state);
2137 		break;
2138 	case ATH10K_SCAN_RUNNING:
2139 	case ATH10K_SCAN_ABORTING:
2140 		ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
2141 
2142 		if (ar->scan.is_roc && ar->scan.roc_freq == freq)
2143 			complete(&ar->scan.on_channel);
2144 		break;
2145 	}
2146 }
2147 
2148 static const char *
2149 ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
2150 			       enum wmi_scan_completion_reason reason)
2151 {
2152 	switch (type) {
2153 	case WMI_SCAN_EVENT_STARTED:
2154 		return "started";
2155 	case WMI_SCAN_EVENT_COMPLETED:
2156 		switch (reason) {
2157 		case WMI_SCAN_REASON_COMPLETED:
2158 			return "completed";
2159 		case WMI_SCAN_REASON_CANCELLED:
2160 			return "completed [cancelled]";
2161 		case WMI_SCAN_REASON_PREEMPTED:
2162 			return "completed [preempted]";
2163 		case WMI_SCAN_REASON_TIMEDOUT:
2164 			return "completed [timedout]";
2165 		case WMI_SCAN_REASON_INTERNAL_FAILURE:
2166 			return "completed [internal err]";
2167 		case WMI_SCAN_REASON_MAX:
2168 			break;
2169 		}
2170 		return "completed [unknown]";
2171 	case WMI_SCAN_EVENT_BSS_CHANNEL:
2172 		return "bss channel";
2173 	case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
2174 		return "foreign channel";
2175 	case WMI_SCAN_EVENT_DEQUEUED:
2176 		return "dequeued";
2177 	case WMI_SCAN_EVENT_PREEMPTED:
2178 		return "preempted";
2179 	case WMI_SCAN_EVENT_START_FAILED:
2180 		return "start failed";
2181 	case WMI_SCAN_EVENT_RESTARTED:
2182 		return "restarted";
2183 	case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
2184 		return "foreign channel exit";
2185 	default:
2186 		return "unknown";
2187 	}
2188 }
2189 
2190 static int ath10k_wmi_op_pull_scan_ev(struct ath10k *ar, struct sk_buff *skb,
2191 				      struct wmi_scan_ev_arg *arg)
2192 {
2193 	struct wmi_scan_event *ev = (void *)skb->data;
2194 
2195 	if (skb->len < sizeof(*ev))
2196 		return -EPROTO;
2197 
2198 	skb_pull(skb, sizeof(*ev));
2199 	arg->event_type = ev->event_type;
2200 	arg->reason = ev->reason;
2201 	arg->channel_freq = ev->channel_freq;
2202 	arg->scan_req_id = ev->scan_req_id;
2203 	arg->scan_id = ev->scan_id;
2204 	arg->vdev_id = ev->vdev_id;
2205 
2206 	return 0;
2207 }
2208 
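/* Top-level WMI scan event handler: parse the event, log it and dispatch to
 * the scan state helpers above under ar->data_lock.
 */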
2209 int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
2210 {
2211 	struct wmi_scan_ev_arg arg = {};
2212 	enum wmi_scan_event_type event_type;
2213 	enum wmi_scan_completion_reason reason;
2214 	u32 freq;
2215 	u32 req_id;
2216 	u32 scan_id;
2217 	u32 vdev_id;
2218 	int ret;
2219 
2220 	ret = ath10k_wmi_pull_scan(ar, skb, &arg);
2221 	if (ret) {
2222 		ath10k_warn(ar, "failed to parse scan event: %d\n", ret);
2223 		return ret;
2224 	}
2225 
2226 	event_type = __le32_to_cpu(arg.event_type);
2227 	reason = __le32_to_cpu(arg.reason);
2228 	freq = __le32_to_cpu(arg.channel_freq);
2229 	req_id = __le32_to_cpu(arg.scan_req_id);
2230 	scan_id = __le32_to_cpu(arg.scan_id);
2231 	vdev_id = __le32_to_cpu(arg.vdev_id);
2232 
2233 	spin_lock_bh(&ar->data_lock);
2234 
2235 	ath10k_dbg(ar, ATH10K_DBG_WMI,
2236 		   "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
2237 		   ath10k_wmi_event_scan_type_str(event_type, reason),
2238 		   event_type, reason, freq, req_id, scan_id, vdev_id,
2239 		   ath10k_scan_state_str(ar->scan.state), ar->scan.state);
2240 
2241 	switch (event_type) {
2242 	case WMI_SCAN_EVENT_STARTED:
2243 		ath10k_wmi_event_scan_started(ar);
2244 		break;
2245 	case WMI_SCAN_EVENT_COMPLETED:
2246 		ath10k_wmi_event_scan_completed(ar);
2247 		break;
2248 	case WMI_SCAN_EVENT_BSS_CHANNEL:
2249 		ath10k_wmi_event_scan_bss_chan(ar);
2250 		break;
2251 	case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
2252 		ath10k_wmi_event_scan_foreign_chan(ar, freq);
2253 		break;
2254 	case WMI_SCAN_EVENT_START_FAILED:
2255 		ath10k_warn(ar, "received scan start failure event\n");
2256 		ath10k_wmi_event_scan_start_failed(ar);
2257 		break;
2258 	case WMI_SCAN_EVENT_DEQUEUED:
2259 	case WMI_SCAN_EVENT_PREEMPTED:
2260 	case WMI_SCAN_EVENT_RESTARTED:
2261 	case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
2262 	default:
2263 		break;
2264 	}
2265 
2266 	spin_unlock_bh(&ar->data_lock);
2267 	return 0;
2268 }
2269 
2270 /* If keys are configured, HW decrypts all frames
2271  * with protected bit set. Mark such frames as decrypted.
2272  */
2273 static void ath10k_wmi_handle_wep_reauth(struct ath10k *ar,
2274 					 struct sk_buff *skb,
2275 					 struct ieee80211_rx_status *status)
2276 {
2277 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2278 	unsigned int hdrlen;
2279 	bool peer_key;
2280 	u8 *addr, keyidx;
2281 
2282 	if (!ieee80211_is_auth(hdr->frame_control) ||
2283 	    !ieee80211_has_protected(hdr->frame_control))
2284 		return;
2285 
2286 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
2287 	if (skb->len < (hdrlen + IEEE80211_WEP_IV_LEN))
2288 		return;
2289 
2290 	keyidx = skb->data[hdrlen + (IEEE80211_WEP_IV_LEN - 1)] >> WEP_KEYID_SHIFT;
2291 	addr = ieee80211_get_SA(hdr);
2292 
2293 	spin_lock_bh(&ar->data_lock);
2294 	peer_key = ath10k_mac_is_peer_wep_key_set(ar, addr, keyidx);
2295 	spin_unlock_bh(&ar->data_lock);
2296 
2297 	if (peer_key) {
2298 		ath10k_dbg(ar, ATH10K_DBG_MAC,
2299 			   "mac wep key present for peer %pM\n", addr);
2300 		status->flag |= RX_FLAG_DECRYPTED;
2301 	}
2302 }
2303 
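/* Parse a management RX event into wmi_mgmt_rx_ev_arg. Firmware advertising
 * ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX uses the v2 event layout, otherwise v1;
 * the optional ext_info block follows the 4-byte aligned frame buffer.
 */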
2304 static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
2305 					 struct wmi_mgmt_rx_ev_arg *arg)
2306 {
2307 	struct wmi_mgmt_rx_event_v1 *ev_v1;
2308 	struct wmi_mgmt_rx_event_v2 *ev_v2;
2309 	struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
2310 	struct wmi_mgmt_rx_ext_info *ext_info;
2311 	size_t pull_len;
2312 	u32 msdu_len;
2313 	u32 len;
2314 
2315 	if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX,
2316 		     ar->running_fw->fw_file.fw_features)) {
2317 		ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
2318 		ev_hdr = &ev_v2->hdr.v1;
2319 		pull_len = sizeof(*ev_v2);
2320 	} else {
2321 		ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
2322 		ev_hdr = &ev_v1->hdr;
2323 		pull_len = sizeof(*ev_v1);
2324 	}
2325 
2326 	if (skb->len < pull_len)
2327 		return -EPROTO;
2328 
2329 	skb_pull(skb, pull_len);
2330 	arg->channel = ev_hdr->channel;
2331 	arg->buf_len = ev_hdr->buf_len;
2332 	arg->status = ev_hdr->status;
2333 	arg->snr = ev_hdr->snr;
2334 	arg->phy_mode = ev_hdr->phy_mode;
2335 	arg->rate = ev_hdr->rate;
2336 
2337 	msdu_len = __le32_to_cpu(arg->buf_len);
2338 	if (skb->len < msdu_len)
2339 		return -EPROTO;
2340 
2341 	if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
2342 		len = ALIGN(le32_to_cpu(arg->buf_len), 4);
2343 		ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
2344 		memcpy(&arg->ext_info, ext_info,
2345 		       sizeof(struct wmi_mgmt_rx_ext_info));
2346 	}
2347 	/* The WMI buffer might have ended up being padded to 4 bytes due to an
2348 	 * HTC trailer with a credit update. Trim the excess garbage.
2349 	 */
2350 	skb_trim(skb, msdu_len);
2351 
2352 	return 0;
2353 }
2354 
2355 static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
2356 					      struct sk_buff *skb,
2357 					      struct wmi_mgmt_rx_ev_arg *arg)
2358 {
2359 	struct wmi_10_4_mgmt_rx_event *ev;
2360 	struct wmi_10_4_mgmt_rx_hdr *ev_hdr;
2361 	size_t pull_len;
2362 	u32 msdu_len;
2363 	struct wmi_mgmt_rx_ext_info *ext_info;
2364 	u32 len;
2365 
2366 	ev = (struct wmi_10_4_mgmt_rx_event *)skb->data;
2367 	ev_hdr = &ev->hdr;
2368 	pull_len = sizeof(*ev);
2369 
2370 	if (skb->len < pull_len)
2371 		return -EPROTO;
2372 
2373 	skb_pull(skb, pull_len);
2374 	arg->channel = ev_hdr->channel;
2375 	arg->buf_len = ev_hdr->buf_len;
2376 	arg->status = ev_hdr->status;
2377 	arg->snr = ev_hdr->snr;
2378 	arg->phy_mode = ev_hdr->phy_mode;
2379 	arg->rate = ev_hdr->rate;
2380 
2381 	msdu_len = __le32_to_cpu(arg->buf_len);
2382 	if (skb->len < msdu_len)
2383 		return -EPROTO;
2384 
2385 	if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
2386 		len = ALIGN(le32_to_cpu(arg->buf_len), 4);
2387 		ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
2388 		memcpy(&arg->ext_info, ext_info,
2389 		       sizeof(struct wmi_mgmt_rx_ext_info));
2390 	}
2391 
2392 	/* Make sure bytes added for padding are removed. */
2393 	skb_trim(skb, msdu_len);
2394 
2395 	return 0;
2396 }
2397 
2398 static bool ath10k_wmi_rx_is_decrypted(struct ath10k *ar,
2399 				       struct ieee80211_hdr *hdr)
2400 {
2401 	if (!ieee80211_has_protected(hdr->frame_control))
2402 		return false;
2403 
2404 	/* FW delivers WEP Shared Auth frame with Protected Bit set and
2405 	 * encrypted payload. However in case of PMF it delivers decrypted
2406 	 * frames with Protected Bit set.
2407 	 */
2408 	if (ieee80211_is_auth(hdr->frame_control))
2409 		return false;
2410 
2411 	/* qca99x0 based FW delivers broadcast or multicast management frames
2412 	 * (ex: group privacy action frames in mesh) as encrypted payload.
2413 	 */
2414 	if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) &&
2415 	    ar->hw_params.sw_decrypt_mcast_mgmt)
2416 		return false;
2417 
2418 	return true;
2419 }
2420 
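/* Complete a single management frame TX descriptor: look up the pending
 * skb by descriptor id in the mgmt_pending_tx IDR, unmap its DMA buffer,
 * translate the firmware status into mac80211 TX flags (including the ACK
 * RSSI when available) and hand the frame back via
 * ieee80211_tx_status_irqsafe().
 */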
2421 static int
2422 wmi_process_mgmt_tx_comp(struct ath10k *ar, struct mgmt_tx_compl_params *param)
2423 {
2424 	struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
2425 	struct ath10k_wmi *wmi = &ar->wmi;
2426 	struct ieee80211_tx_info *info;
2427 	struct sk_buff *msdu;
2428 	int ret;
2429 
2430 	spin_lock_bh(&ar->data_lock);
2431 
2432 	pkt_addr = idr_find(&wmi->mgmt_pending_tx, param->desc_id);
2433 	if (!pkt_addr) {
2434 		ath10k_warn(ar, "received mgmt tx completion for invalid msdu_id: %d\n",
2435 			    param->desc_id);
2436 		ret = -ENOENT;
2437 		goto out;
2438 	}
2439 
2440 	msdu = pkt_addr->vaddr;
2441 	dma_unmap_single(ar->dev, pkt_addr->paddr,
2442 			 msdu->len, DMA_TO_DEVICE);
2443 	info = IEEE80211_SKB_CB(msdu);
2444 	kfree(pkt_addr);
2445 
2446 	if (param->status) {
2447 		info->flags &= ~IEEE80211_TX_STAT_ACK;
2448 	} else {
2449 		info->flags |= IEEE80211_TX_STAT_ACK;
2450 		info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR +
2451 					  param->ack_rssi;
2452 		info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
2453 	}
2454 
2455 	ieee80211_tx_status_irqsafe(ar->hw, msdu);
2456 
2457 	ret = 0;
2458 
2459 out:
2460 	idr_remove(&wmi->mgmt_pending_tx, param->desc_id);
2461 	spin_unlock_bh(&ar->data_lock);
2462 	return ret;
2463 }
2464 
2465 int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb)
2466 {
2467 	struct wmi_tlv_mgmt_tx_compl_ev_arg arg;
2468 	struct mgmt_tx_compl_params param;
2469 	int ret;
2470 
2471 	ret = ath10k_wmi_pull_mgmt_tx_compl(ar, skb, &arg);
2472 	if (ret) {
2473 		ath10k_warn(ar, "failed to parse mgmt comp event: %d\n", ret);
2474 		return ret;
2475 	}
2476 
2477 	memset(&param, 0, sizeof(struct mgmt_tx_compl_params));
2478 	param.desc_id = __le32_to_cpu(arg.desc_id);
2479 	param.status = __le32_to_cpu(arg.status);
2480 
2481 	if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
2482 		param.ack_rssi = __le32_to_cpu(arg.ack_rssi);
2483 
2484 	wmi_process_mgmt_tx_comp(ar, &param);
2485 
2486 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv event mgmt tx completion\n");
2487 
2488 	return 0;
2489 }
2490 
2491 int ath10k_wmi_event_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb)
2492 {
2493 	struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg arg;
2494 	struct mgmt_tx_compl_params param;
2495 	u32 num_reports;
2496 	int i, ret;
2497 
2498 	ret = ath10k_wmi_pull_mgmt_tx_bundle_compl(ar, skb, &arg);
2499 	if (ret) {
2500 		ath10k_warn(ar, "failed to parse bundle mgmt compl event: %d\n", ret);
2501 		return ret;
2502 	}
2503 
2504 	num_reports = __le32_to_cpu(arg.num_reports);
2505 
2506 	for (i = 0; i < num_reports; i++) {
2507 		memset(&param, 0, sizeof(struct mgmt_tx_compl_params));
2508 		param.desc_id = __le32_to_cpu(arg.desc_ids[i]);
2509 		param.status = __le32_to_cpu(arg.status[i]);
2510 
2511 		if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
2512 			param.ack_rssi = __le32_to_cpu(arg.ack_rssi[i]);
2513 		wmi_process_mgmt_tx_comp(ar, &param);
2514 	}
2515 
2516 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv event bundle mgmt tx completion\n");
2517 
2518 	return 0;
2519 }
2520 
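/* WMI management RX path: parse the event, drop frames with decrypt/CRC
 * errors or while CAC is running, fill in mac80211's ieee80211_rx_status
 * (band, frequency, signal, per-chain RSSI, rate, decryption flags) and
 * deliver the frame with ieee80211_rx_ni().
 */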
2521 int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
2522 {
2523 	struct wmi_mgmt_rx_ev_arg arg = {};
2524 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2525 	struct ieee80211_hdr *hdr;
2526 	struct ieee80211_supported_band *sband;
2527 	u32 rx_status;
2528 	u32 channel;
2529 	u32 phy_mode;
2530 	u32 snr, rssi;
2531 	u32 rate;
2532 	u16 fc;
2533 	int ret, i;
2534 
2535 	ret = ath10k_wmi_pull_mgmt_rx(ar, skb, &arg);
2536 	if (ret) {
2537 		ath10k_warn(ar, "failed to parse mgmt rx event: %d\n", ret);
2538 		dev_kfree_skb(skb);
2539 		return ret;
2540 	}
2541 
2542 	channel = __le32_to_cpu(arg.channel);
2543 	rx_status = __le32_to_cpu(arg.status);
2544 	snr = __le32_to_cpu(arg.snr);
2545 	phy_mode = __le32_to_cpu(arg.phy_mode);
2546 	rate = __le32_to_cpu(arg.rate);
2547 
2548 	memset(status, 0, sizeof(*status));
2549 
2550 	ath10k_dbg(ar, ATH10K_DBG_MGMT,
2551 		   "event mgmt rx status %08x\n", rx_status);
2552 
2553 	if ((test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) ||
2554 	    (rx_status & (WMI_RX_STATUS_ERR_DECRYPT |
2555 	    WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) {
2556 		dev_kfree_skb(skb);
2557 		return 0;
2558 	}
2559 
2560 	if (rx_status & WMI_RX_STATUS_ERR_MIC)
2561 		status->flag |= RX_FLAG_MMIC_ERROR;
2562 
2563 	if (rx_status & WMI_RX_STATUS_EXT_INFO) {
2564 		status->mactime =
2565 			__le64_to_cpu(arg.ext_info.rx_mac_timestamp);
2566 		status->flag |= RX_FLAG_MACTIME_END;
2567 	}
2568 	/* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to
2569 	 * MODE_11B. This means phy_mode is not a reliable source for the band
2570 	 * of mgmt rx.
2571 	 */
2572 	if (channel >= 1 && channel <= 14) {
2573 		status->band = NL80211_BAND_2GHZ;
2574 	} else if (channel >= 36 && channel <= ATH10K_MAX_5G_CHAN) {
2575 		status->band = NL80211_BAND_5GHZ;
2576 	} else {
2577 		/* Shouldn't happen unless list of advertised channels to
2578 		 * mac80211 has been changed.
2579 		 */
2580 		WARN_ON_ONCE(1);
2581 		dev_kfree_skb(skb);
2582 		return 0;
2583 	}
2584 
2585 	if (phy_mode == MODE_11B && status->band == NL80211_BAND_5GHZ)
2586 		ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
2587 
2588 	sband = &ar->mac.sbands[status->band];
2589 
2590 	status->freq = ieee80211_channel_to_frequency(channel, status->band);
2591 	status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
2592 
2593 	BUILD_BUG_ON(ARRAY_SIZE(status->chain_signal) != ARRAY_SIZE(arg.rssi));
2594 
2595 	for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
2596 		status->chains &= ~BIT(i);
2597 		rssi = __le32_to_cpu(arg.rssi[i]);
2598 		ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt rssi[%d]:%d\n", i, rssi);
2599 
2600 		if (rssi != ATH10K_INVALID_RSSI && rssi != 0) {
2601 			status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR + rssi;
2602 			status->chains |= BIT(i);
2603 		}
2604 	}
2605 
2606 	status->rate_idx = ath10k_mac_bitrate_to_idx(sband, rate / 100);
2607 
2608 	hdr = (struct ieee80211_hdr *)skb->data;
2609 	fc = le16_to_cpu(hdr->frame_control);
2610 
2611 	/* Firmware is guaranteed to report all essential management frames via
2612 	 * WMI while it can deliver some extra via HTT. Since there can be
2613 	 * duplicates split the reporting wrt monitor/sniffing.
2614 	 */
2615 	status->flag |= RX_FLAG_SKIP_MONITOR;
2616 
2617 	ath10k_wmi_handle_wep_reauth(ar, skb, status);
2618 
2619 	if (ath10k_wmi_rx_is_decrypted(ar, hdr)) {
2620 		status->flag |= RX_FLAG_DECRYPTED;
2621 
2622 		if (!ieee80211_is_action(hdr->frame_control) &&
2623 		    !ieee80211_is_deauth(hdr->frame_control) &&
2624 		    !ieee80211_is_disassoc(hdr->frame_control)) {
2625 			status->flag |= RX_FLAG_IV_STRIPPED |
2626 					RX_FLAG_MMIC_STRIPPED;
2627 			hdr->frame_control = __cpu_to_le16(fc &
2628 					~IEEE80211_FCTL_PROTECTED);
2629 		}
2630 	}
2631 
2632 	if (ieee80211_is_beacon(hdr->frame_control))
2633 		ath10k_mac_handle_beacon(ar, skb);
2634 
2635 	if (ieee80211_is_beacon(hdr->frame_control) ||
2636 	    ieee80211_is_probe_resp(hdr->frame_control))
2637 		status->boottime_ns = ktime_get_boottime_ns();
2638 
2639 	ath10k_dbg(ar, ATH10K_DBG_MGMT,
2640 		   "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
2641 		   skb, skb->len,
2642 		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
2643 
2644 	ath10k_dbg(ar, ATH10K_DBG_MGMT,
2645 		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
2646 		   status->freq, status->band, status->signal,
2647 		   status->rate_idx);
2648 
2649 	ieee80211_rx_ni(ar->hw, skb);
2650 
2651 	return 0;
2652 }
2653 
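/* Map a channel center frequency to its index in ar->survey[]. The survey
 * array is laid out as the concatenation of all channels advertised to
 * mac80211, band by band; if the frequency is not found the returned index
 * is one past the last channel, so callers must bounds-check it.
 */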
2654 static int freq_to_idx(struct ath10k *ar, int freq)
2655 {
2656 	struct ieee80211_supported_band *sband;
2657 	int band, ch, idx = 0;
2658 
2659 	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
2660 		sband = ar->hw->wiphy->bands[band];
2661 		if (!sband)
2662 			continue;
2663 
2664 		for (ch = 0; ch < sband->n_channels; ch++, idx++)
2665 			if (sband->channels[ch].center_freq == freq)
2666 				goto exit;
2667 	}
2668 
2669 exit:
2670 	return idx;
2671 }
2672 
2673 static int ath10k_wmi_op_pull_ch_info_ev(struct ath10k *ar, struct sk_buff *skb,
2674 					 struct wmi_ch_info_ev_arg *arg)
2675 {
2676 	struct wmi_chan_info_event *ev = (void *)skb->data;
2677 
2678 	if (skb->len < sizeof(*ev))
2679 		return -EPROTO;
2680 
2681 	skb_pull(skb, sizeof(*ev));
2682 	arg->err_code = ev->err_code;
2683 	arg->freq = ev->freq;
2684 	arg->cmd_flags = ev->cmd_flags;
2685 	arg->noise_floor = ev->noise_floor;
2686 	arg->rx_clear_count = ev->rx_clear_count;
2687 	arg->cycle_count = ev->cycle_count;
2688 
2689 	return 0;
2690 }
2691 
2692 static int ath10k_wmi_10_4_op_pull_ch_info_ev(struct ath10k *ar,
2693 					      struct sk_buff *skb,
2694 					      struct wmi_ch_info_ev_arg *arg)
2695 {
2696 	struct wmi_10_4_chan_info_event *ev = (void *)skb->data;
2697 
2698 	if (skb->len < sizeof(*ev))
2699 		return -EPROTO;
2700 
2701 	skb_pull(skb, sizeof(*ev));
2702 	arg->err_code = ev->err_code;
2703 	arg->freq = ev->freq;
2704 	arg->cmd_flags = ev->cmd_flags;
2705 	arg->noise_floor = ev->noise_floor;
2706 	arg->rx_clear_count = ev->rx_clear_count;
2707 	arg->cycle_count = ev->cycle_count;
2708 	arg->chan_tx_pwr_range = ev->chan_tx_pwr_range;
2709 	arg->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
2710 	arg->rx_frame_count = ev->rx_frame_count;
2711 
2712 	return 0;
2713 }
2714 
2715 /*
2716  * Handle the channel info event for firmware which only sends one
2717  * chan_info event per scanned channel.
2718  */
2719 static void ath10k_wmi_event_chan_info_unpaired(struct ath10k *ar,
2720 						struct chan_info_params *params)
2721 {
2722 	struct survey_info *survey;
2723 	int idx;
2724 
2725 	if (params->cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
2726 		ath10k_dbg(ar, ATH10K_DBG_WMI, "chan info report completed\n");
2727 		return;
2728 	}
2729 
2730 	idx = freq_to_idx(ar, params->freq);
2731 	if (idx >= ARRAY_SIZE(ar->survey)) {
2732 		ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
2733 			    params->freq, idx);
2734 		return;
2735 	}
2736 
2737 	survey = &ar->survey[idx];
2738 
2739 	if (!params->mac_clk_mhz)
2740 		return;
2741 
2742 	memset(survey, 0, sizeof(*survey));
2743 
2744 	survey->noise = params->noise_floor;
2745 	survey->time = (params->cycle_count / params->mac_clk_mhz) / 1000;
2746 	survey->time_busy = (params->rx_clear_count / params->mac_clk_mhz) / 1000;
2747 	survey->filled |= SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
2748 			  SURVEY_INFO_TIME_BUSY;
2749 }
2750 
2751 /*
2752  * Handle the channel info event for firmware which sends chan_info
2753  * event in pairs (start and stop events) for every scanned channel.
2754  */
2755 static void ath10k_wmi_event_chan_info_paired(struct ath10k *ar,
2756 					      struct chan_info_params *params)
2757 {
2758 	struct survey_info *survey;
2759 	int idx;
2760 
2761 	idx = freq_to_idx(ar, params->freq);
2762 	if (idx >= ARRAY_SIZE(ar->survey)) {
2763 		ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
2764 			    params->freq, idx);
2765 		return;
2766 	}
2767 
2768 	if (params->cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
2769 		if (ar->ch_info_can_report_survey) {
2770 			survey = &ar->survey[idx];
2771 			survey->noise = params->noise_floor;
2772 			survey->filled = SURVEY_INFO_NOISE_DBM;
2773 
2774 			ath10k_hw_fill_survey_time(ar,
2775 						   survey,
2776 						   params->cycle_count,
2777 						   params->rx_clear_count,
2778 						   ar->survey_last_cycle_count,
2779 						   ar->survey_last_rx_clear_count);
2780 		}
2781 
2782 		ar->ch_info_can_report_survey = false;
2783 	} else {
2784 		ar->ch_info_can_report_survey = true;
2785 	}
2786 
2787 	if (!(params->cmd_flags & WMI_CHAN_INFO_FLAG_PRE_COMPLETE)) {
2788 		ar->survey_last_rx_clear_count = params->rx_clear_count;
2789 		ar->survey_last_cycle_count = params->cycle_count;
2790 	}
2791 }
2792 
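/* Common WMI_CHAN_INFO handler: parse the event and, while a scan is
 * active, update survey data using either the paired or the unpaired
 * accounting scheme depending on the firmware feature flags.
 */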
2793 void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
2794 {
2795 	struct chan_info_params ch_info_param;
2796 	struct wmi_ch_info_ev_arg arg = {};
2797 	int ret;
2798 
2799 	ret = ath10k_wmi_pull_ch_info(ar, skb, &arg);
2800 	if (ret) {
2801 		ath10k_warn(ar, "failed to parse chan info event: %d\n", ret);
2802 		return;
2803 	}
2804 
2805 	ch_info_param.err_code = __le32_to_cpu(arg.err_code);
2806 	ch_info_param.freq = __le32_to_cpu(arg.freq);
2807 	ch_info_param.cmd_flags = __le32_to_cpu(arg.cmd_flags);
2808 	ch_info_param.noise_floor = __le32_to_cpu(arg.noise_floor);
2809 	ch_info_param.rx_clear_count = __le32_to_cpu(arg.rx_clear_count);
2810 	ch_info_param.cycle_count = __le32_to_cpu(arg.cycle_count);
2811 	ch_info_param.mac_clk_mhz = __le32_to_cpu(arg.mac_clk_mhz);
2812 
2813 	ath10k_dbg(ar, ATH10K_DBG_WMI,
2814 		   "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
2815 		   ch_info_param.err_code, ch_info_param.freq, ch_info_param.cmd_flags,
2816 		   ch_info_param.noise_floor, ch_info_param.rx_clear_count,
2817 		   ch_info_param.cycle_count);
2818 
2819 	spin_lock_bh(&ar->data_lock);
2820 
2821 	switch (ar->scan.state) {
2822 	case ATH10K_SCAN_IDLE:
2823 	case ATH10K_SCAN_STARTING:
2824 		ath10k_dbg(ar, ATH10K_DBG_WMI, "received chan info event without a scan request, ignoring\n");
2825 		goto exit;
2826 	case ATH10K_SCAN_RUNNING:
2827 	case ATH10K_SCAN_ABORTING:
2828 		break;
2829 	}
2830 
2831 	if (test_bit(ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL,
2832 		     ar->running_fw->fw_file.fw_features))
2833 		ath10k_wmi_event_chan_info_unpaired(ar, &ch_info_param);
2834 	else
2835 		ath10k_wmi_event_chan_info_paired(ar, &ch_info_param);
2836 
2837 exit:
2838 	spin_unlock_bh(&ar->data_lock);
2839 }
2840 
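/* Echo events double as a command barrier: when the echoed value matches
 * ATH10K_WMI_BARRIER_ECHO_ID, wake up the waiter blocked on the WMI
 * barrier completion.
 */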
2841 void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
2842 {
2843 	struct wmi_echo_ev_arg arg = {};
2844 	int ret;
2845 
2846 	ret = ath10k_wmi_pull_echo_ev(ar, skb, &arg);
2847 	if (ret) {
2848 		ath10k_warn(ar, "failed to parse echo: %d\n", ret);
2849 		return;
2850 	}
2851 
2852 	ath10k_dbg(ar, ATH10K_DBG_WMI,
2853 		   "wmi event echo value 0x%08x\n",
2854 		   le32_to_cpu(arg.value));
2855 
2856 	if (le32_to_cpu(arg.value) == ATH10K_WMI_BARRIER_ECHO_ID)
2857 		complete(&ar->wmi.barrier);
2858 }
2859 
2860 int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
2861 {
2862 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
2863 		   skb->len);
2864 
2865 	trace_ath10k_wmi_dbglog(ar, skb->data, skb->len);
2866 
2867 	return 0;
2868 }
2869 
2870 void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
2871 				     struct ath10k_fw_stats_pdev *dst)
2872 {
2873 	dst->ch_noise_floor = __le32_to_cpu(src->chan_nf);
2874 	dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
2875 	dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
2876 	dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
2877 	dst->cycle_count = __le32_to_cpu(src->cycle_count);
2878 	dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
2879 	dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
2880 }
2881 
2882 void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
2883 				   struct ath10k_fw_stats_pdev *dst)
2884 {
2885 	dst->comp_queued = __le32_to_cpu(src->comp_queued);
2886 	dst->comp_delivered = __le32_to_cpu(src->comp_delivered);
2887 	dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued);
2888 	dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued);
2889 	dst->wmm_drop = __le32_to_cpu(src->wmm_drop);
2890 	dst->local_enqued = __le32_to_cpu(src->local_enqued);
2891 	dst->local_freed = __le32_to_cpu(src->local_freed);
2892 	dst->hw_queued = __le32_to_cpu(src->hw_queued);
2893 	dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
2894 	dst->underrun = __le32_to_cpu(src->underrun);
2895 	dst->tx_abort = __le32_to_cpu(src->tx_abort);
2896 	dst->mpdus_requeued = __le32_to_cpu(src->mpdus_requeued);
2897 	dst->tx_ko = __le32_to_cpu(src->tx_ko);
2898 	dst->data_rc = __le32_to_cpu(src->data_rc);
2899 	dst->self_triggers = __le32_to_cpu(src->self_triggers);
2900 	dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
2901 	dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
2902 	dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
2903 	dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
2904 	dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
2905 	dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
2906 	dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
2907 }
2908 
2909 static void
2910 ath10k_wmi_10_4_pull_pdev_stats_tx(const struct wmi_10_4_pdev_stats_tx *src,
2911 				   struct ath10k_fw_stats_pdev *dst)
2912 {
2913 	dst->comp_queued = __le32_to_cpu(src->comp_queued);
2914 	dst->comp_delivered = __le32_to_cpu(src->comp_delivered);
2915 	dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued);
2916 	dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued);
2917 	dst->wmm_drop = __le32_to_cpu(src->wmm_drop);
2918 	dst->local_enqued = __le32_to_cpu(src->local_enqued);
2919 	dst->local_freed = __le32_to_cpu(src->local_freed);
2920 	dst->hw_queued = __le32_to_cpu(src->hw_queued);
2921 	dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
2922 	dst->underrun = __le32_to_cpu(src->underrun);
2923 	dst->tx_abort = __le32_to_cpu(src->tx_abort);
2924 	dst->mpdus_requeued = __le32_to_cpu(src->mpdus_requeued);
2925 	dst->tx_ko = __le32_to_cpu(src->tx_ko);
2926 	dst->data_rc = __le32_to_cpu(src->data_rc);
2927 	dst->self_triggers = __le32_to_cpu(src->self_triggers);
2928 	dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
2929 	dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
2930 	dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
2931 	dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
2932 	dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
2933 	dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
2934 	dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
2935 	dst->hw_paused = __le32_to_cpu(src->hw_paused);
2936 	dst->seq_posted = __le32_to_cpu(src->seq_posted);
2937 	dst->seq_failed_queueing =
2938 		__le32_to_cpu(src->seq_failed_queueing);
2939 	dst->seq_completed = __le32_to_cpu(src->seq_completed);
2940 	dst->seq_restarted = __le32_to_cpu(src->seq_restarted);
2941 	dst->mu_seq_posted = __le32_to_cpu(src->mu_seq_posted);
2942 	dst->mpdus_sw_flush = __le32_to_cpu(src->mpdus_sw_flush);
2943 	dst->mpdus_hw_filter = __le32_to_cpu(src->mpdus_hw_filter);
2944 	dst->mpdus_truncated = __le32_to_cpu(src->mpdus_truncated);
2945 	dst->mpdus_ack_failed = __le32_to_cpu(src->mpdus_ack_failed);
2947 	dst->mpdus_expired = __le32_to_cpu(src->mpdus_expired);
2948 }
2949 
2950 void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
2951 				   struct ath10k_fw_stats_pdev *dst)
2952 {
2953 	dst->mid_ppdu_route_change = __le32_to_cpu(src->mid_ppdu_route_change);
2954 	dst->status_rcvd = __le32_to_cpu(src->status_rcvd);
2955 	dst->r0_frags = __le32_to_cpu(src->r0_frags);
2956 	dst->r1_frags = __le32_to_cpu(src->r1_frags);
2957 	dst->r2_frags = __le32_to_cpu(src->r2_frags);
2958 	dst->r3_frags = __le32_to_cpu(src->r3_frags);
2959 	dst->htt_msdus = __le32_to_cpu(src->htt_msdus);
2960 	dst->htt_mpdus = __le32_to_cpu(src->htt_mpdus);
2961 	dst->loc_msdus = __le32_to_cpu(src->loc_msdus);
2962 	dst->loc_mpdus = __le32_to_cpu(src->loc_mpdus);
2963 	dst->oversize_amsdu = __le32_to_cpu(src->oversize_amsdu);
2964 	dst->phy_errs = __le32_to_cpu(src->phy_errs);
2965 	dst->phy_err_drop = __le32_to_cpu(src->phy_err_drop);
2966 	dst->mpdu_errs = __le32_to_cpu(src->mpdu_errs);
2967 }
2968 
2969 void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src,
2970 				      struct ath10k_fw_stats_pdev *dst)
2971 {
2972 	dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad);
2973 	dst->rts_bad = __le32_to_cpu(src->rts_bad);
2974 	dst->rts_good = __le32_to_cpu(src->rts_good);
2975 	dst->fcs_bad = __le32_to_cpu(src->fcs_bad);
2976 	dst->no_beacons = __le32_to_cpu(src->no_beacons);
2977 	dst->mib_int_count = __le32_to_cpu(src->mib_int_count);
2978 }
2979 
2980 void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
2981 				struct ath10k_fw_stats_peer *dst)
2982 {
2983 	ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
2984 	dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
2985 	dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
2986 }
2987 
2988 static void
2989 ath10k_wmi_10_4_pull_peer_stats(const struct wmi_10_4_peer_stats *src,
2990 				struct ath10k_fw_stats_peer *dst)
2991 {
2992 	ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
2993 	dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
2994 	dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
2995 	dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
2996 }
2997 
2998 static void
2999 ath10k_wmi_10_4_pull_vdev_stats(const struct wmi_vdev_stats_extd *src,
3000 				struct ath10k_fw_stats_vdev_extd *dst)
3001 {
3002 	dst->vdev_id = __le32_to_cpu(src->vdev_id);
3003 	dst->ppdu_aggr_cnt = __le32_to_cpu(src->ppdu_aggr_cnt);
3004 	dst->ppdu_noack = __le32_to_cpu(src->ppdu_noack);
3005 	dst->mpdu_queued = __le32_to_cpu(src->mpdu_queued);
3006 	dst->ppdu_nonaggr_cnt = __le32_to_cpu(src->ppdu_nonaggr_cnt);
3007 	dst->mpdu_sw_requeued = __le32_to_cpu(src->mpdu_sw_requeued);
3008 	dst->mpdu_suc_retry = __le32_to_cpu(src->mpdu_suc_retry);
3009 	dst->mpdu_suc_multitry = __le32_to_cpu(src->mpdu_suc_multitry);
3010 	dst->mpdu_fail_retry = __le32_to_cpu(src->mpdu_fail_retry);
3011 	dst->tx_ftm_suc = __le32_to_cpu(src->tx_ftm_suc);
3012 	dst->tx_ftm_suc_retry = __le32_to_cpu(src->tx_ftm_suc_retry);
3013 	dst->tx_ftm_fail = __le32_to_cpu(src->tx_ftm_fail);
3014 	dst->rx_ftmr_cnt = __le32_to_cpu(src->rx_ftmr_cnt);
3015 	dst->rx_ftmr_dup_cnt = __le32_to_cpu(src->rx_ftmr_dup_cnt);
3016 	dst->rx_iftmr_cnt = __le32_to_cpu(src->rx_iftmr_cnt);
3017 	dst->rx_iftmr_dup_cnt = __le32_to_cpu(src->rx_iftmr_dup_cnt);
3018 }
3019 
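/* The firmware stats event is a packed sequence of sub-structures: pdev
 * stats first, followed (depending on the WMI ABI) by extended pdev, vdev,
 * peer and beacon-filter stats. Each pull helper below consumes its chunk
 * with skb_pull() and copies the counters it understands into host-endian
 * ath10k_fw_stats lists.
 */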
3020 static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar,
3021 					    struct sk_buff *skb,
3022 					    struct ath10k_fw_stats *stats)
3023 {
3024 	const struct wmi_stats_event *ev = (void *)skb->data;
3025 	u32 num_pdev_stats, num_peer_stats;
3026 	int i;
3027 
3028 	if (!skb_pull(skb, sizeof(*ev)))
3029 		return -EPROTO;
3030 
3031 	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
3032 	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3033 
3034 	for (i = 0; i < num_pdev_stats; i++) {
3035 		const struct wmi_pdev_stats *src;
3036 		struct ath10k_fw_stats_pdev *dst;
3037 
3038 		src = (void *)skb->data;
3039 		if (!skb_pull(skb, sizeof(*src)))
3040 			return -EPROTO;
3041 
3042 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3043 		if (!dst)
3044 			continue;
3045 
3046 		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3047 		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
3048 		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3049 
3050 		list_add_tail(&dst->list, &stats->pdevs);
3051 	}
3052 
3053 	/* fw doesn't implement vdev stats */
3054 
3055 	for (i = 0; i < num_peer_stats; i++) {
3056 		const struct wmi_peer_stats *src;
3057 		struct ath10k_fw_stats_peer *dst;
3058 
3059 		src = (void *)skb->data;
3060 		if (!skb_pull(skb, sizeof(*src)))
3061 			return -EPROTO;
3062 
3063 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3064 		if (!dst)
3065 			continue;
3066 
3067 		ath10k_wmi_pull_peer_stats(src, dst);
3068 		list_add_tail(&dst->list, &stats->peers);
3069 	}
3070 
3071 	return 0;
3072 }
3073 
3074 static int ath10k_wmi_10x_op_pull_fw_stats(struct ath10k *ar,
3075 					   struct sk_buff *skb,
3076 					   struct ath10k_fw_stats *stats)
3077 {
3078 	const struct wmi_stats_event *ev = (void *)skb->data;
3079 	u32 num_pdev_stats, num_peer_stats;
3080 	int i;
3081 
3082 	if (!skb_pull(skb, sizeof(*ev)))
3083 		return -EPROTO;
3084 
3085 	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
3086 	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3087 
3088 	for (i = 0; i < num_pdev_stats; i++) {
3089 		const struct wmi_10x_pdev_stats *src;
3090 		struct ath10k_fw_stats_pdev *dst;
3091 
3092 		src = (void *)skb->data;
3093 		if (!skb_pull(skb, sizeof(*src)))
3094 			return -EPROTO;
3095 
3096 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3097 		if (!dst)
3098 			continue;
3099 
3100 		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3101 		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
3102 		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3103 		ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
3104 
3105 		list_add_tail(&dst->list, &stats->pdevs);
3106 	}
3107 
3108 	/* fw doesn't implement vdev stats */
3109 
3110 	for (i = 0; i < num_peer_stats; i++) {
3111 		const struct wmi_10x_peer_stats *src;
3112 		struct ath10k_fw_stats_peer *dst;
3113 
3114 		src = (void *)skb->data;
3115 		if (!skb_pull(skb, sizeof(*src)))
3116 			return -EPROTO;
3117 
3118 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3119 		if (!dst)
3120 			continue;
3121 
3122 		ath10k_wmi_pull_peer_stats(&src->old, dst);
3123 
3124 		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
3125 
3126 		list_add_tail(&dst->list, &stats->peers);
3127 	}
3128 
3129 	return 0;
3130 }
3131 
3132 static int ath10k_wmi_10_2_op_pull_fw_stats(struct ath10k *ar,
3133 					    struct sk_buff *skb,
3134 					    struct ath10k_fw_stats *stats)
3135 {
3136 	const struct wmi_10_2_stats_event *ev = (void *)skb->data;
3137 	u32 num_pdev_stats;
3138 	u32 num_pdev_ext_stats;
3139 	u32 num_peer_stats;
3140 	int i;
3141 
3142 	if (!skb_pull(skb, sizeof(*ev)))
3143 		return -EPROTO;
3144 
3145 	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
3146 	num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
3147 	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3148 
3149 	for (i = 0; i < num_pdev_stats; i++) {
3150 		const struct wmi_10_2_pdev_stats *src;
3151 		struct ath10k_fw_stats_pdev *dst;
3152 
3153 		src = (void *)skb->data;
3154 		if (!skb_pull(skb, sizeof(*src)))
3155 			return -EPROTO;
3156 
3157 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3158 		if (!dst)
3159 			continue;
3160 
3161 		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3162 		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
3163 		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3164 		ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
3165 		/* FIXME: expose 10.2 specific values */
3166 
3167 		list_add_tail(&dst->list, &stats->pdevs);
3168 	}
3169 
3170 	for (i = 0; i < num_pdev_ext_stats; i++) {
3171 		const struct wmi_10_2_pdev_ext_stats *src;
3172 
3173 		src = (void *)skb->data;
3174 		if (!skb_pull(skb, sizeof(*src)))
3175 			return -EPROTO;
3176 
3177 		/* FIXME: expose values to userspace
3178 		 *
3179 		 * Note: Even though this loop seems to do nothing it is
3180 		 * required to parse following sub-structures properly.
3181 		 */
3182 	}
3183 
3184 	/* fw doesn't implement vdev stats */
3185 
3186 	for (i = 0; i < num_peer_stats; i++) {
3187 		const struct wmi_10_2_peer_stats *src;
3188 		struct ath10k_fw_stats_peer *dst;
3189 
3190 		src = (void *)skb->data;
3191 		if (!skb_pull(skb, sizeof(*src)))
3192 			return -EPROTO;
3193 
3194 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3195 		if (!dst)
3196 			continue;
3197 
3198 		ath10k_wmi_pull_peer_stats(&src->old, dst);
3199 
3200 		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
3201 		/* FIXME: expose 10.2 specific values */
3202 
3203 		list_add_tail(&dst->list, &stats->peers);
3204 	}
3205 
3206 	return 0;
3207 }
3208 
3209 static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
3210 					      struct sk_buff *skb,
3211 					      struct ath10k_fw_stats *stats)
3212 {
3213 	const struct wmi_10_2_stats_event *ev = (void *)skb->data;
3214 	u32 num_pdev_stats;
3215 	u32 num_pdev_ext_stats;
3216 	u32 num_peer_stats;
3217 	int i;
3218 
3219 	if (!skb_pull(skb, sizeof(*ev)))
3220 		return -EPROTO;
3221 
3222 	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
3223 	num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
3224 	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3225 
3226 	for (i = 0; i < num_pdev_stats; i++) {
3227 		const struct wmi_10_2_pdev_stats *src;
3228 		struct ath10k_fw_stats_pdev *dst;
3229 
3230 		src = (void *)skb->data;
3231 		if (!skb_pull(skb, sizeof(*src)))
3232 			return -EPROTO;
3233 
3234 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3235 		if (!dst)
3236 			continue;
3237 
3238 		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3239 		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
3240 		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3241 		ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
3242 		/* FIXME: expose 10.2 specific values */
3243 
3244 		list_add_tail(&dst->list, &stats->pdevs);
3245 	}
3246 
3247 	for (i = 0; i < num_pdev_ext_stats; i++) {
3248 		const struct wmi_10_2_pdev_ext_stats *src;
3249 
3250 		src = (void *)skb->data;
3251 		if (!skb_pull(skb, sizeof(*src)))
3252 			return -EPROTO;
3253 
3254 		/* FIXME: expose values to userspace
3255 		 *
3256 		 * Note: Even though this loop seems to do nothing it is
3257 		 * required to parse following sub-structures properly.
3258 		 */
3259 	}
3260 
3261 	/* fw doesn't implement vdev stats */
3262 
3263 	for (i = 0; i < num_peer_stats; i++) {
3264 		const struct wmi_10_2_4_ext_peer_stats *src;
3265 		struct ath10k_fw_stats_peer *dst;
3266 		int stats_len;
3267 
3268 		if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
3269 			stats_len = sizeof(struct wmi_10_2_4_ext_peer_stats);
3270 		else
3271 			stats_len = sizeof(struct wmi_10_2_4_peer_stats);
3272 
3273 		src = (void *)skb->data;
3274 		if (!skb_pull(skb, stats_len))
3275 			return -EPROTO;
3276 
3277 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3278 		if (!dst)
3279 			continue;
3280 
3281 		ath10k_wmi_pull_peer_stats(&src->common.old, dst);
3282 
3283 		dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate);
3284 
3285 		if (ath10k_peer_stats_enabled(ar))
3286 			dst->rx_duration = __le32_to_cpu(src->rx_duration);
3287 		/* FIXME: expose 10.2 specific values */
3288 
3289 		list_add_tail(&dst->list, &stats->peers);
3290 	}
3291 
3292 	return 0;
3293 }
3294 
3295 static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
3296 					    struct sk_buff *skb,
3297 					    struct ath10k_fw_stats *stats)
3298 {
3299 	const struct wmi_10_2_stats_event *ev = (void *)skb->data;
3300 	u32 num_pdev_stats;
3301 	u32 num_pdev_ext_stats;
3302 	u32 num_vdev_stats;
3303 	u32 num_peer_stats;
3304 	u32 num_bcnflt_stats;
3305 	u32 stats_id;
3306 	int i;
3307 
3308 	if (!skb_pull(skb, sizeof(*ev)))
3309 		return -EPROTO;
3310 
3311 	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
3312 	num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
3313 	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
3314 	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
3315 	num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
3316 	stats_id = __le32_to_cpu(ev->stats_id);
3317 
3318 	for (i = 0; i < num_pdev_stats; i++) {
3319 		const struct wmi_10_4_pdev_stats *src;
3320 		struct ath10k_fw_stats_pdev *dst;
3321 
3322 		src = (void *)skb->data;
3323 		if (!skb_pull(skb, sizeof(*src)))
3324 			return -EPROTO;
3325 
3326 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3327 		if (!dst)
3328 			continue;
3329 
3330 		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
3331 		ath10k_wmi_10_4_pull_pdev_stats_tx(&src->tx, dst);
3332 		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
3333 		dst->rx_ovfl_errs = __le32_to_cpu(src->rx_ovfl_errs);
3334 		ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
3335 
3336 		list_add_tail(&dst->list, &stats->pdevs);
3337 	}
3338 
3339 	for (i = 0; i < num_pdev_ext_stats; i++) {
3340 		const struct wmi_10_2_pdev_ext_stats *src;
3341 
3342 		src = (void *)skb->data;
3343 		if (!skb_pull(skb, sizeof(*src)))
3344 			return -EPROTO;
3345 
3346 		/* FIXME: expose values to userspace
3347 		 *
3348 		 * Note: Even though this loop seems to do nothing it is
3349 		 * required to parse following sub-structures properly.
3350 		 */
3351 	}
3352 
3353 	for (i = 0; i < num_vdev_stats; i++) {
3354 		const struct wmi_vdev_stats *src;
3355 
3356 		/* Ignore vdev stats here as it has only vdev id. Actual vdev
3357 		 * stats will be retrieved from vdev extended stats.
3358 		 */
3359 		src = (void *)skb->data;
3360 		if (!skb_pull(skb, sizeof(*src)))
3361 			return -EPROTO;
3362 	}
3363 
3364 	for (i = 0; i < num_peer_stats; i++) {
3365 		const struct wmi_10_4_peer_stats *src;
3366 		struct ath10k_fw_stats_peer *dst;
3367 
3368 		src = (void *)skb->data;
3369 		if (!skb_pull(skb, sizeof(*src)))
3370 			return -EPROTO;
3371 
3372 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3373 		if (!dst)
3374 			continue;
3375 
3376 		ath10k_wmi_10_4_pull_peer_stats(src, dst);
3377 		list_add_tail(&dst->list, &stats->peers);
3378 	}
3379 
3380 	for (i = 0; i < num_bcnflt_stats; i++) {
3381 		const struct wmi_10_4_bss_bcn_filter_stats *src;
3382 
3383 		src = (void *)skb->data;
3384 		if (!skb_pull(skb, sizeof(*src)))
3385 			return -EPROTO;
3386 
3387 		/* FIXME: expose values to userspace
3388 		 *
3389 		 * Note: Even though this loop seems to do nothing it is
3390 		 * required to parse following sub-structures properly.
3391 		 */
3392 	}
3393 
3394 	if (stats_id & WMI_10_4_STAT_PEER_EXTD) {
3395 		stats->extended = true;
3396 
3397 		for (i = 0; i < num_peer_stats; i++) {
3398 			const struct wmi_10_4_peer_extd_stats *src;
3399 			struct ath10k_fw_extd_stats_peer *dst;
3400 
3401 			src = (void *)skb->data;
3402 			if (!skb_pull(skb, sizeof(*src)))
3403 				return -EPROTO;
3404 
3405 			dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3406 			if (!dst)
3407 				continue;
3408 
3409 			ether_addr_copy(dst->peer_macaddr,
3410 					src->peer_macaddr.addr);
3411 			dst->rx_duration = __le32_to_cpu(src->rx_duration);
3412 			list_add_tail(&dst->list, &stats->peers_extd);
3413 		}
3414 	}
3415 
3416 	if (stats_id & WMI_10_4_STAT_VDEV_EXTD) {
3417 		for (i = 0; i < num_vdev_stats; i++) {
3418 			const struct wmi_vdev_stats_extd *src;
3419 			struct ath10k_fw_stats_vdev_extd *dst;
3420 
3421 			src = (void *)skb->data;
3422 			if (!skb_pull(skb, sizeof(*src)))
3423 				return -EPROTO;
3424 
3425 			dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
3426 			if (!dst)
3427 				continue;
3428 			ath10k_wmi_10_4_pull_vdev_stats(src, dst);
3429 			list_add_tail(&dst->list, &stats->vdevs);
3430 		}
3431 	}
3432 
3433 	return 0;
3434 }
3435 
3436 void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb)
3437 {
3438 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
3439 	ath10k_debug_fw_stats_process(ar, skb);
3440 }
3441 
3442 static int
3443 ath10k_wmi_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
3444 				 struct wmi_vdev_start_ev_arg *arg)
3445 {
3446 	struct wmi_vdev_start_response_event *ev = (void *)skb->data;
3447 
3448 	if (skb->len < sizeof(*ev))
3449 		return -EPROTO;
3450 
3451 	skb_pull(skb, sizeof(*ev));
3452 	arg->vdev_id = ev->vdev_id;
3453 	arg->req_id = ev->req_id;
3454 	arg->resp_type = ev->resp_type;
3455 	arg->status = ev->status;
3456 
3457 	return 0;
3458 }
3459 
3460 void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb)
3461 {
3462 	struct wmi_vdev_start_ev_arg arg = {};
3463 	int ret;
3464 	u32 status;
3465 
3466 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
3467 
3468 	ar->last_wmi_vdev_start_status = 0;
3469 
3470 	ret = ath10k_wmi_pull_vdev_start(ar, skb, &arg);
3471 	if (ret) {
3472 		ath10k_warn(ar, "failed to parse vdev start event: %d\n", ret);
3473 		ar->last_wmi_vdev_start_status = ret;
3474 		goto out;
3475 	}
3476 
3477 	status = __le32_to_cpu(arg.status);
3478 	if (WARN_ON_ONCE(status)) {
3479 		ath10k_warn(ar, "vdev-start-response reports status error: %d (%s)\n",
3480 			    status, (status == WMI_VDEV_START_CHAN_INVALID) ?
3481 			    "chan-invalid" : "unknown");
3482 		/* Setup is done one way or another though, so we should still
3483 		 * do the completion, so don't return here.
3484 		 */
3485 		ar->last_wmi_vdev_start_status = -EINVAL;
3486 	}
3487 
3488 out:
3489 	complete(&ar->vdev_setup_done);
3490 }
3491 
3492 void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb)
3493 {
3494 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
3495 	complete(&ar->vdev_setup_done);
3496 }
3497 
3498 static int
3499 ath10k_wmi_op_pull_peer_kick_ev(struct ath10k *ar, struct sk_buff *skb,
3500 				struct wmi_peer_kick_ev_arg *arg)
3501 {
3502 	struct wmi_peer_sta_kickout_event *ev = (void *)skb->data;
3503 
3504 	if (skb->len < sizeof(*ev))
3505 		return -EPROTO;
3506 
3507 	skb_pull(skb, sizeof(*ev));
3508 	arg->mac_addr = ev->peer_macaddr.addr;
3509 
3510 	return 0;
3511 }
3512 
3513 void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb)
3514 {
3515 	struct wmi_peer_kick_ev_arg arg = {};
3516 	struct ieee80211_sta *sta;
3517 	int ret;
3518 
3519 	ret = ath10k_wmi_pull_peer_kick(ar, skb, &arg);
3520 	if (ret) {
3521 		ath10k_warn(ar, "failed to parse peer kickout event: %d\n",
3522 			    ret);
3523 		return;
3524 	}
3525 
3526 	ath10k_dbg(ar, ATH10K_DBG_STA, "wmi event peer sta kickout %pM\n",
3527 		   arg.mac_addr);
3528 
3529 	rcu_read_lock();
3530 
3531 	sta = ieee80211_find_sta_by_ifaddr(ar->hw, arg.mac_addr, NULL);
3532 	if (!sta) {
3533 		ath10k_warn(ar, "Spurious quick kickout for STA %pM\n",
3534 			    arg.mac_addr);
3535 		goto exit;
3536 	}
3537 
3538 	ieee80211_report_low_ack(sta, 10);
3539 
3540 exit:
3541 	rcu_read_unlock();
3542 }
3543 
3544 /*
3545  * FIXME
3546  *
3547  * We don't report to mac80211 sleep state of connected
3548  * stations. Due to this mac80211 can't fill in TIM IE
3549  * correctly.
3550  *
3551  * I know of no way of getting nullfunc frames that contain
3552  * sleep transition from connected stations - these do not
3553  * seem to be sent from the target to the host. There also
3554  * doesn't seem to be a dedicated event for that. So the
3555  * only way left to do this would be to read tim_bitmap
3556  * during SWBA.
3557  *
3558  * We could probably try using tim_bitmap from SWBA to tell
3559  * mac80211 which stations are asleep and which are not. The
3560  * problem here is calling mac80211 functions so many times
3561  * could take too long and make us miss the time to submit
3562  * the beacon to the target.
3563  *
3564  * So as a workaround we try to extend the TIM IE if there
3565  * is unicast buffered for stations with aid > 7 and fill it
3566  * in ourselves.
3567  */
3568 static void ath10k_wmi_update_tim(struct ath10k *ar,
3569 				  struct ath10k_vif *arvif,
3570 				  struct sk_buff *bcn,
3571 				  const struct wmi_tim_info_arg *tim_info)
3572 {
3573 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
3574 	struct ieee80211_tim_ie *tim;
3575 	u8 *ies, *ie;
3576 	u8 ie_len, pvm_len;
3577 	__le32 t;
3578 	u32 v, tim_len;
3579 
3580 	/* When FW reports 0 in tim_len, ensure at least first byte
3581 	 * in tim_bitmap is considered for pvm calculation.
3582 	 */
3583 	tim_len = tim_info->tim_len ? __le32_to_cpu(tim_info->tim_len) : 1;
3584 
3585 	/* if next SWBA has no tim_changed the tim_bitmap is garbage.
3586 	 * we must copy the bitmap upon change and reuse it later
3587 	 */
3588 	if (__le32_to_cpu(tim_info->tim_changed)) {
3589 		int i;
3590 
3591 		if (sizeof(arvif->u.ap.tim_bitmap) < tim_len) {
3592 			ath10k_warn(ar, "SWBA TIM field is too big (%u), truncated it to %zu",
3593 				    tim_len, sizeof(arvif->u.ap.tim_bitmap));
3594 			tim_len = sizeof(arvif->u.ap.tim_bitmap);
3595 		}
3596 
3597 		for (i = 0; i < tim_len; i++) {
3598 			t = tim_info->tim_bitmap[i / 4];
3599 			v = __le32_to_cpu(t);
3600 			arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
3601 		}
3602 
3603 		/* FW reports either length 0 or length based on max supported
3604 		 * station. so we calculate this on our own
3605 		 */
3606 		arvif->u.ap.tim_len = 0;
3607 		for (i = 0; i < tim_len; i++)
3608 			if (arvif->u.ap.tim_bitmap[i])
3609 				arvif->u.ap.tim_len = i;
3610 
3611 		arvif->u.ap.tim_len++;
3612 	}
3613 
3614 	ies = bcn->data;
3615 	ies += ieee80211_hdrlen(hdr->frame_control);
3616 	ies += 12; /* fixed parameters */
3617 
3618 	ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
3619 				    (u8 *)skb_tail_pointer(bcn) - ies);
3620 	if (!ie) {
3621 		if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
3622 			ath10k_warn(ar, "no tim ie found;\n");
3623 		return;
3624 	}
3625 
3626 	tim = (void *)ie + 2;
3627 	ie_len = ie[1];
3628 	pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */
3629 
3630 	if (pvm_len < arvif->u.ap.tim_len) {
3631 		int expand_size = tim_len - pvm_len;
3632 		int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
3633 		void *next_ie = ie + 2 + ie_len;
3634 
3635 		if (skb_put(bcn, expand_size)) {
3636 			memmove(next_ie + expand_size, next_ie, move_size);
3637 
3638 			ie[1] += expand_size;
3639 			ie_len += expand_size;
3640 			pvm_len += expand_size;
3641 		} else {
3642 			ath10k_warn(ar, "tim expansion failed\n");
3643 		}
3644 	}
3645 
3646 	if (pvm_len > tim_len) {
3647 		ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len);
3648 		return;
3649 	}
3650 
3651 	tim->bitmap_ctrl = !!__le32_to_cpu(tim_info->tim_mcast);
3652 	memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);
3653 
3654 	if (tim->dtim_count == 0) {
3655 		ATH10K_SKB_CB(bcn)->flags |= ATH10K_SKB_F_DTIM_ZERO;
3656 
3657 		if (__le32_to_cpu(tim_info->tim_mcast) == 1)
3658 			ATH10K_SKB_CB(bcn)->flags |= ATH10K_SKB_F_DELIVER_CAB;
3659 	}
3660 
3661 	ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
3662 		   tim->dtim_count, tim->dtim_period,
3663 		   tim->bitmap_ctrl, pvm_len);
3664 }
3665 
3666 static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
3667 				  struct sk_buff *bcn,
3668 				  const struct wmi_p2p_noa_info *noa)
3669 {
3670 	if (!arvif->vif->p2p)
3671 		return;
3672 
3673 	ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
3674 
3675 	if (noa->changed & WMI_P2P_NOA_CHANGED_BIT)
3676 		ath10k_p2p_noa_update(arvif, noa);
3677 
3678 	if (arvif->u.ap.noa_data)
3679 		if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
3680 			skb_put_data(bcn, arvif->u.ap.noa_data,
3681 				     arvif->u.ap.noa_len);
3682 }
3683 
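/* Parse a host SWBA (software beacon alert) event: walk the vdev bitmap
 * and record per-vdev TIM and P2P notice-of-absence pointers for the
 * beacon update path.
 */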
3684 static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
3685 				      struct wmi_swba_ev_arg *arg)
3686 {
3687 	struct wmi_host_swba_event *ev = (void *)skb->data;
3688 	u32 map;
3689 	size_t i;
3690 
3691 	if (skb->len < sizeof(*ev))
3692 		return -EPROTO;
3693 
3694 	skb_pull(skb, sizeof(*ev));
3695 	arg->vdev_map = ev->vdev_map;
3696 
3697 	for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
3698 		if (!(map & BIT(0)))
3699 			continue;
3700 
3701 		/* If this happens there were some changes in firmware and
3702 		 * ath10k should update the max size of tim_info array.
3703 		 */
3704 		if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
3705 			break;
3706 
3707 		if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
3708 		     sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
3709 			ath10k_warn(ar, "refusing to parse invalid swba structure\n");
3710 			return -EPROTO;
3711 		}
3712 
3713 		arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len;
3714 		arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
3715 		arg->tim_info[i].tim_bitmap =
3716 				ev->bcn_info[i].tim_info.tim_bitmap;
3717 		arg->tim_info[i].tim_changed =
3718 				ev->bcn_info[i].tim_info.tim_changed;
3719 		arg->tim_info[i].tim_num_ps_pending =
3720 				ev->bcn_info[i].tim_info.tim_num_ps_pending;
3721 
3722 		arg->noa_info[i] = &ev->bcn_info[i].p2p_noa_info;
3723 		i++;
3724 	}
3725 
3726 	return 0;
3727 }
3728 
3729 static int ath10k_wmi_10_2_4_op_pull_swba_ev(struct ath10k *ar,
3730 					     struct sk_buff *skb,
3731 					     struct wmi_swba_ev_arg *arg)
3732 {
3733 	struct wmi_10_2_4_host_swba_event *ev = (void *)skb->data;
3734 	u32 map;
3735 	size_t i;
3736 
3737 	if (skb->len < sizeof(*ev))
3738 		return -EPROTO;
3739 
3740 	skb_pull(skb, sizeof(*ev));
3741 	arg->vdev_map = ev->vdev_map;
3742 
3743 	for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
3744 		if (!(map & BIT(0)))
3745 			continue;
3746 
3747 		/* If this happens there were some changes in firmware and
3748 		 * ath10k should update the max size of tim_info array.
3749 		 */
3750 		if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
3751 			break;
3752 
3753 		if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
3754 		     sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
3755 			ath10k_warn(ar, "refusing to parse invalid swba structure\n");
3756 			return -EPROTO;
3757 		}
3758 
3759 		arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len;
3760 		arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
3761 		arg->tim_info[i].tim_bitmap =
3762 				ev->bcn_info[i].tim_info.tim_bitmap;
3763 		arg->tim_info[i].tim_changed =
3764 				ev->bcn_info[i].tim_info.tim_changed;
3765 		arg->tim_info[i].tim_num_ps_pending =
3766 				ev->bcn_info[i].tim_info.tim_num_ps_pending;
3767 		i++;
3768 	}
3769 
3770 	return 0;
3771 }
3772 
3773 static int ath10k_wmi_10_4_op_pull_swba_ev(struct ath10k *ar,
3774 					   struct sk_buff *skb,
3775 					   struct wmi_swba_ev_arg *arg)
3776 {
3777 	struct wmi_10_4_host_swba_event *ev = (void *)skb->data;
3778 	u32 map, tim_len;
3779 	size_t i;
3780 
3781 	if (skb->len < sizeof(*ev))
3782 		return -EPROTO;
3783 
3784 	skb_pull(skb, sizeof(*ev));
3785 	arg->vdev_map = ev->vdev_map;
3786 
3787 	for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
3788 		if (!(map & BIT(0)))
3789 			continue;
3790 
3791 		/* If this happens there were some changes in firmware and
3792 		 * ath10k should update the max size of tim_info array.
3793 		 */
3794 		if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
3795 			break;
3796 
3797 		if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
3798 		      sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
3799 			ath10k_warn(ar, "refusing to parse invalid swba structure\n");
3800 			return -EPROTO;
3801 		}
3802 
3803 		tim_len = __le32_to_cpu(ev->bcn_info[i].tim_info.tim_len);
3804 		if (tim_len) {
3805 			/* Exclude 4 byte guard length */
3806 			tim_len -= 4;
3807 			arg->tim_info[i].tim_len = __cpu_to_le32(tim_len);
3808 		} else {
3809 			arg->tim_info[i].tim_len = 0;
3810 		}
3811 
3812 		arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
3813 		arg->tim_info[i].tim_bitmap =
3814 				ev->bcn_info[i].tim_info.tim_bitmap;
3815 		arg->tim_info[i].tim_changed =
3816 				ev->bcn_info[i].tim_info.tim_changed;
3817 		arg->tim_info[i].tim_num_ps_pending =
3818 				ev->bcn_info[i].tim_info.tim_num_ps_pending;
3819 
3820 		/* 10.4 firmware doesn't have p2p support. notice of absence
3821 		 * info can be ignored for now.
3822 		 */
3823 
3824 		i++;
3825 	}
3826 
3827 	return 0;
3828 }
3829 
3830 static enum wmi_txbf_conf ath10k_wmi_10_4_txbf_conf_scheme(struct ath10k *ar)
3831 {
3832 	return WMI_TXBF_CONF_BEFORE_ASSOC;
3833 }
3834 
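/* SWBA handler: for every vdev flagged in the event, fetch a fresh beacon
 * from mac80211, patch in the TIM and NoA information reported by the
 * firmware, map (or copy) the buffer for DMA and schedule it for
 * transmission.
 */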
3835 void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
3836 {
3837 	struct wmi_swba_ev_arg arg = {};
3838 	u32 map;
3839 	int i = -1;
3840 	const struct wmi_tim_info_arg *tim_info;
3841 	const struct wmi_p2p_noa_info *noa_info;
3842 	struct ath10k_vif *arvif;
3843 	struct sk_buff *bcn;
3844 	dma_addr_t paddr;
3845 	int ret, vdev_id = 0;
3846 
3847 	ret = ath10k_wmi_pull_swba(ar, skb, &arg);
3848 	if (ret) {
3849 		ath10k_warn(ar, "failed to parse swba event: %d\n", ret);
3850 		return;
3851 	}
3852 
3853 	map = __le32_to_cpu(arg.vdev_map);
3854 
3855 	ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
3856 		   map);
3857 
3858 	for (; map; map >>= 1, vdev_id++) {
3859 		if (!(map & 0x1))
3860 			continue;
3861 
3862 		i++;
3863 
3864 		if (i >= WMI_MAX_AP_VDEV) {
3865 			ath10k_warn(ar, "swba has corrupted vdev map\n");
3866 			break;
3867 		}
3868 
3869 		tim_info = &arg.tim_info[i];
3870 		noa_info = arg.noa_info[i];
3871 
3872 		ath10k_dbg(ar, ATH10K_DBG_MGMT,
3873 			   "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
3874 			   i,
3875 			   __le32_to_cpu(tim_info->tim_len),
3876 			   __le32_to_cpu(tim_info->tim_mcast),
3877 			   __le32_to_cpu(tim_info->tim_changed),
3878 			   __le32_to_cpu(tim_info->tim_num_ps_pending),
3879 			   __le32_to_cpu(tim_info->tim_bitmap[3]),
3880 			   __le32_to_cpu(tim_info->tim_bitmap[2]),
3881 			   __le32_to_cpu(tim_info->tim_bitmap[1]),
3882 			   __le32_to_cpu(tim_info->tim_bitmap[0]));
3883 
3884 		/* TODO: Only first 4 word from tim_bitmap is dumped.
3885 		 * Extend debug code to dump full tim_bitmap.
3886 		 */
3887 
3888 		arvif = ath10k_get_arvif(ar, vdev_id);
3889 		if (arvif == NULL) {
3890 			ath10k_warn(ar, "no vif for vdev_id %d found\n",
3891 				    vdev_id);
3892 			continue;
3893 		}
3894 
3895 		/* mac80211 would have already asked us to stop beaconing and
3896 		 * bring the vdev down, so continue in that case
3897 		 */
3898 		if (!arvif->is_up)
3899 			continue;
3900 
3901 		/* There are no completions for beacons so wait for next SWBA
3902 		 * before telling mac80211 to decrement CSA counter
3903 		 *
3904 		 * Once CSA counter is completed stop sending beacons until
3905 		 * actual channel switch is done
3906 		 */
3907 		if (arvif->vif->bss_conf.csa_active &&
3908 		    ieee80211_beacon_cntdwn_is_complete(arvif->vif, 0)) {
3909 			ieee80211_csa_finish(arvif->vif, 0);
3910 			continue;
3911 		}
3912 
3913 		bcn = ieee80211_beacon_get(ar->hw, arvif->vif, 0);
3914 		if (!bcn) {
3915 			ath10k_warn(ar, "could not get mac80211 beacon\n");
3916 			continue;
3917 		}
3918 
3919 		ath10k_tx_h_seq_no(arvif->vif, bcn);
3920 		ath10k_wmi_update_tim(ar, arvif, bcn, tim_info);
3921 		ath10k_wmi_update_noa(ar, arvif, bcn, noa_info);
3922 
3923 		spin_lock_bh(&ar->data_lock);
3924 
3925 		if (arvif->beacon) {
3926 			switch (arvif->beacon_state) {
3927 			case ATH10K_BEACON_SENT:
3928 				break;
3929 			case ATH10K_BEACON_SCHEDULED:
3930 				ath10k_warn(ar, "SWBA overrun on vdev %d, skipped old beacon\n",
3931 					    arvif->vdev_id);
3932 				break;
3933 			case ATH10K_BEACON_SENDING:
3934 				ath10k_warn(ar, "SWBA overrun on vdev %d, skipped new beacon\n",
3935 					    arvif->vdev_id);
3936 				dev_kfree_skb(bcn);
3937 				goto skip;
3938 			}
3939 
3940 			ath10k_mac_vif_beacon_free(arvif);
3941 		}
3942 
3943 		if (!arvif->beacon_buf) {
3944 			paddr = dma_map_single(arvif->ar->dev, bcn->data,
3945 					       bcn->len, DMA_TO_DEVICE);
3946 			ret = dma_mapping_error(arvif->ar->dev, paddr);
3947 			if (ret) {
3948 				ath10k_warn(ar, "failed to map beacon: %d\n",
3949 					    ret);
3950 				dev_kfree_skb_any(bcn);
3951 				goto skip;
3952 			}
3953 
3954 			ATH10K_SKB_CB(bcn)->paddr = paddr;
3955 		} else {
3956 			if (bcn->len > IEEE80211_MAX_FRAME_LEN) {
3957 				ath10k_warn(ar, "trimming beacon %d -> %d bytes!\n",
3958 					    bcn->len, IEEE80211_MAX_FRAME_LEN);
3959 				skb_trim(bcn, IEEE80211_MAX_FRAME_LEN);
3960 			}
3961 			memcpy(arvif->beacon_buf, bcn->data, bcn->len);
3962 			ATH10K_SKB_CB(bcn)->paddr = arvif->beacon_paddr;
3963 		}
3964 
3965 		arvif->beacon = bcn;
3966 		arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
3967 
3968 		trace_ath10k_tx_hdr(ar, bcn->data, bcn->len);
3969 		trace_ath10k_tx_payload(ar, bcn->data, bcn->len);
3970 
3971 skip:
3972 		spin_unlock_bh(&ar->data_lock);
3973 	}
3974 
3975 	ath10k_wmi_tx_beacons_nowait(ar);
3976 }
3977 
3978 void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb)
3979 {
3980 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
3981 }
3982 
3983 static void ath10k_radar_detected(struct ath10k *ar)
3984 {
3985 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n");
3986 	ATH10K_DFS_STAT_INC(ar, radar_detected);
3987 
3988 	/* Control radar events reporting in debugfs file
3989 	 * dfs_block_radar_events
3990 	 */
3991 	if (ar->dfs_block_radar_events)
3992 		ath10k_info(ar, "DFS Radar detected, but ignored as requested\n");
3993 	else
3994 		ieee80211_radar_detected(ar->hw, NULL);
3995 }
3996 
3997 static void ath10k_radar_confirmation_work(struct work_struct *work)
3998 {
3999 	struct ath10k *ar = container_of(work, struct ath10k,
4000 					 radar_confirmation_work);
4001 	struct ath10k_radar_found_info radar_info;
4002 	int ret, time_left;
4003 
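	/* Re-arm the completion before sending the report so a stale
	 * completion left over from an earlier confirmation cannot satisfy
	 * the wait below.
	 */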
4004 	reinit_completion(&ar->wmi.radar_confirm);
4005 
4006 	spin_lock_bh(&ar->data_lock);
4007 	memcpy(&radar_info, &ar->last_radar_info, sizeof(radar_info));
4008 	spin_unlock_bh(&ar->data_lock);
4009 
4010 	ret = ath10k_wmi_report_radar_found(ar, &radar_info);
4011 	if (ret) {
4012 		ath10k_warn(ar, "failed to send radar found %d\n", ret);
4013 		goto wait_complete;
4014 	}
4015 
4016 	time_left = wait_for_completion_timeout(&ar->wmi.radar_confirm,
4017 						ATH10K_WMI_DFS_CONF_TIMEOUT_HZ);
4018 	if (time_left) {
4019 		/* DFS Confirmation status event received and
4020 		 * necessary action completed.
4021 		 */
4022 		goto wait_complete;
4023 	} else {
4024 		/* DFS Confirmation event not received from FW. Considering this
4025 		 * as real radar.
4026 		 */
4027 		ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4028 			   "dfs confirmation not received from fw, considering as radar\n");
4029 		goto radar_detected;
4030 	}
4031 
4032 radar_detected:
4033 	ath10k_radar_detected(ar);
4034 
4035 	/* Reset state to allow sending confirmation on consecutive radar
4036 	 * detections, unless radar confirmation is disabled/stopped.
4037 	 */
4038 wait_complete:
4039 	spin_lock_bh(&ar->data_lock);
4040 	if (ar->radar_conf_state != ATH10K_RADAR_CONFIRMATION_STOPPED)
4041 		ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_IDLE;
4042 	spin_unlock_bh(&ar->data_lock);
4043 }
4044 
4045 static void ath10k_dfs_radar_report(struct ath10k *ar,
4046 				    struct wmi_phyerr_ev_arg *phyerr,
4047 				    const struct phyerr_radar_report *rr,
4048 				    u64 tsf)
4049 {
4050 	u32 reg0, reg1, tsf32l;
4051 	struct ieee80211_channel *ch;
4052 	struct pulse_event pe;
4053 	struct radar_detector_specs rs;
4054 	u64 tsf64;
4055 	u8 rssi, width;
4056 	struct ath10k_radar_found_info *radar_info;
4057 
4058 	reg0 = __le32_to_cpu(rr->reg0);
4059 	reg1 = __le32_to_cpu(rr->reg1);
4060 
4061 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4062 		   "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n",
4063 		   MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP),
4064 		   MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH),
4065 		   MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN),
4066 		   MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF));
4067 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4068 		   "wmi phyerr radar report pulse_delta_peak %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n",
4069 		   MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK),
4070 		   MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX),
4071 		   MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID),
4072 		   MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN),
4073 		   MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK));
4074 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4075 		   "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n",
4076 		   MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET),
4077 		   MS(reg1, RADAR_REPORT_REG1_PULSE_DUR));
4078 
4079 	if (!ar->dfs_detector)
4080 		return;
4081 
4082 	spin_lock_bh(&ar->data_lock);
4083 	ch = ar->rx_channel;
4084 
4085 	/* fetch target operating channel during channel change */
4086 	if (!ch)
4087 		ch = ar->tgt_oper_chan;
4088 
4089 	spin_unlock_bh(&ar->data_lock);
4090 
4091 	if (!ch) {
4092 		ath10k_warn(ar, "failed to derive channel for radar pulse, treating as radar\n");
4093 		goto radar_detected;
4094 	}
4095 
4096 	/* report event to DFS pattern detector */
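	/* The per-pulse header carries only the low 32 bits of the TSF;
	 * splice them into the upper 32 bits taken from the event-level TSF.
	 */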
4097 	tsf32l = phyerr->tsf_timestamp;
4098 	tsf64 = tsf & (~0xFFFFFFFFULL);
4099 	tsf64 |= tsf32l;
4100 
4101 	width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR);
4102 	rssi = phyerr->rssi_combined;
4103 
4104 	/* hardware stores this as an 8-bit signed value,
4105 	 * set to zero if it is negative
4106 	 */
4107 	if (rssi & 0x80)
4108 		rssi = 0;
4109 
4110 	pe.ts = tsf64;
4111 	pe.freq = ch->center_freq;
4112 	pe.width = width;
4113 	pe.rssi = rssi;
4114 	pe.chirp = (MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP) != 0);
4115 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4116 		   "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
4117 		   pe.freq, pe.width, pe.rssi, pe.ts);
4118 
4119 	ATH10K_DFS_STAT_INC(ar, pulses_detected);
4120 
4121 	if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe, &rs)) {
4122 		ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4123 			   "dfs no pulse pattern detected, yet\n");
4124 		return;
4125 	}
4126 
4127 	if ((test_bit(WMI_SERVICE_HOST_DFS_CHECK_SUPPORT, ar->wmi.svc_map)) &&
4128 	    ar->dfs_detector->region == NL80211_DFS_FCC) {
4129 		/* Consecutive radar indications need not be
4130 		 * sent to the firmware until we get confirmation
4131 		 * for the previous detected radar.
4132 		 */
4133 		spin_lock_bh(&ar->data_lock);
4134 		if (ar->radar_conf_state != ATH10K_RADAR_CONFIRMATION_IDLE) {
4135 			spin_unlock_bh(&ar->data_lock);
4136 			return;
4137 		}
4138 		ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_INPROGRESS;
4139 		radar_info = &ar->last_radar_info;
4140 
4141 		radar_info->pri_min = rs.pri_min;
4142 		radar_info->pri_max = rs.pri_max;
4143 		radar_info->width_min = rs.width_min;
4144 		radar_info->width_max = rs.width_max;
4145 		/*TODO Find sidx_min and sidx_max */
4146 		radar_info->sidx_min = MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX);
4147 		radar_info->sidx_max = MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX);
4148 
4149 		ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4150 			   "sending wmi radar found cmd pri_min %d pri_max %d width_min %d width_max %d sidx_min %d sidx_max %d\n",
4151 			   radar_info->pri_min, radar_info->pri_max,
4152 			   radar_info->width_min, radar_info->width_max,
4153 			   radar_info->sidx_min, radar_info->sidx_max);
4154 		ieee80211_queue_work(ar->hw, &ar->radar_confirmation_work);
4155 		spin_unlock_bh(&ar->data_lock);
4156 		return;
4157 	}
4158 
4159 radar_detected:
4160 	ath10k_radar_detected(ar);
4161 }
4162 
4163 static int ath10k_dfs_fft_report(struct ath10k *ar,
4164 				 struct wmi_phyerr_ev_arg *phyerr,
4165 				 const struct phyerr_fft_report *fftr,
4166 				 u64 tsf)
4167 {
4168 	u32 reg0, reg1;
4169 	u8 rssi, peak_mag;
4170 
4171 	reg0 = __le32_to_cpu(fftr->reg0);
4172 	reg1 = __le32_to_cpu(fftr->reg1);
4173 	rssi = phyerr->rssi_combined;
4174 
4175 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4176 		   "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n",
4177 		   MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB),
4178 		   MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB),
4179 		   MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX),
4180 		   MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX));
4181 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4182 		   "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n",
4183 		   MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB),
4184 		   MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB),
4185 		   MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG),
4186 		   MS(reg1, SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB));
4187 
4188 	peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);
4189 
4190 	/* false event detection */
4191 	if (rssi == DFS_RSSI_POSSIBLY_FALSE &&
4192 	    peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) {
4193 		ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs false pulse detected\n");
4194 		ATH10K_DFS_STAT_INC(ar, pulses_discarded);
4195 		return -EINVAL;
4196 	}
4197 
4198 	return 0;
4199 }
4200 
4201 void ath10k_wmi_event_dfs(struct ath10k *ar,
4202 			  struct wmi_phyerr_ev_arg *phyerr,
4203 			  u64 tsf)
4204 {
4205 	int buf_len, tlv_len, res, i = 0;
4206 	const struct phyerr_tlv *tlv;
4207 	const struct phyerr_radar_report *rr;
4208 	const struct phyerr_fft_report *fftr;
4209 	const u8 *tlv_buf;
4210 
4211 	buf_len = phyerr->buf_len;
4212 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4213 		   "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
4214 		   phyerr->phy_err_code, phyerr->rssi_combined,
4215 		   phyerr->tsf_timestamp, tsf, buf_len);
4216 
4217 	/* Skip event if DFS disabled */
4218 	if (!IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED))
4219 		return;
4220 
4221 	ATH10K_DFS_STAT_INC(ar, pulses_total);
4222 
4223 	while (i < buf_len) {
4224 		if (i + sizeof(*tlv) > buf_len) {
4225 			ath10k_warn(ar, "too short buf for tlv header (%d)\n",
4226 				    i);
4227 			return;
4228 		}
4229 
4230 		tlv = (struct phyerr_tlv *)&phyerr->buf[i];
4231 		tlv_len = __le16_to_cpu(tlv->len);
4232 		tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
4233 		ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4234 			   "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n",
4235 			   tlv_len, tlv->tag, tlv->sig);
4236 
4237 		switch (tlv->tag) {
4238 		case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY:
4239 			if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) {
4240 				ath10k_warn(ar, "too short radar pulse summary (%d)\n",
4241 					    i);
4242 				return;
4243 			}
4244 
4245 			rr = (struct phyerr_radar_report *)tlv_buf;
4246 			ath10k_dfs_radar_report(ar, phyerr, rr, tsf);
4247 			break;
4248 		case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
4249 			if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) {
4250 				ath10k_warn(ar, "too short fft report (%d)\n",
4251 					    i);
4252 				return;
4253 			}
4254 
4255 			fftr = (struct phyerr_fft_report *)tlv_buf;
4256 			res = ath10k_dfs_fft_report(ar, phyerr, fftr, tsf);
4257 			if (res)
4258 				return;
4259 			break;
4260 		}
4261 
4262 		i += sizeof(*tlv) + tlv_len;
4263 	}
4264 }
4265 
4266 void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
4267 				    struct wmi_phyerr_ev_arg *phyerr,
4268 				    u64 tsf)
4269 {
4270 	int buf_len, tlv_len, res, i = 0;
4271 	struct phyerr_tlv *tlv;
4272 	const void *tlv_buf;
4273 	const struct phyerr_fft_report *fftr;
4274 	size_t fftr_len;
4275 
4276 	buf_len = phyerr->buf_len;
4277 
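	/* The PHY error payload is a sequence of TLVs; validate each header
	 * and payload length before dereferencing the report it carries.
	 */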
4278 	while (i < buf_len) {
4279 		if (i + sizeof(*tlv) > buf_len) {
4280 			ath10k_warn(ar, "failed to parse phyerr tlv header at byte %d\n",
4281 				    i);
4282 			return;
4283 		}
4284 
4285 		tlv = (struct phyerr_tlv *)&phyerr->buf[i];
4286 		tlv_len = __le16_to_cpu(tlv->len);
4287 		tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
4288 
4289 		if (i + sizeof(*tlv) + tlv_len > buf_len) {
4290 			ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n",
4291 				    i);
4292 			return;
4293 		}
4294 
4295 		switch (tlv->tag) {
4296 		case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
4297 			if (sizeof(*fftr) > tlv_len) {
4298 				ath10k_warn(ar, "failed to parse fft report at byte %d\n",
4299 					    i);
4300 				return;
4301 			}
4302 
4303 			fftr_len = tlv_len - sizeof(*fftr);
4304 			fftr = tlv_buf;
4305 			res = ath10k_spectral_process_fft(ar, phyerr,
4306 							  fftr, fftr_len,
4307 							  tsf);
4308 			if (res < 0) {
4309 				ath10k_dbg(ar, ATH10K_DBG_WMI, "failed to process fft report: %d\n",
4310 					   res);
4311 				return;
4312 			}
4313 			break;
4314 		}
4315 
4316 		i += sizeof(*tlv) + tlv_len;
4317 	}
4318 }
4319 
4320 static int ath10k_wmi_op_pull_phyerr_ev_hdr(struct ath10k *ar,
4321 					    struct sk_buff *skb,
4322 					    struct wmi_phyerr_hdr_arg *arg)
4323 {
4324 	struct wmi_phyerr_event *ev = (void *)skb->data;
4325 
4326 	if (skb->len < sizeof(*ev))
4327 		return -EPROTO;
4328 
4329 	arg->num_phyerrs = __le32_to_cpu(ev->num_phyerrs);
4330 	arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
4331 	arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
4332 	arg->buf_len = skb->len - sizeof(*ev);
4333 	arg->phyerrs = ev->phyerrs;
4334 
4335 	return 0;
4336 }
4337 
4338 static int ath10k_wmi_10_4_op_pull_phyerr_ev_hdr(struct ath10k *ar,
4339 						 struct sk_buff *skb,
4340 						 struct wmi_phyerr_hdr_arg *arg)
4341 {
4342 	struct wmi_10_4_phyerr_event *ev = (void *)skb->data;
4343 
4344 	if (skb->len < sizeof(*ev))
4345 		return -EPROTO;
4346 
4347 	/* 10.4 firmware always reports only one phyerr */
4348 	arg->num_phyerrs = 1;
4349 
4350 	arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
4351 	arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
4352 	arg->buf_len = skb->len;
4353 	arg->phyerrs = skb->data;
4354 
4355 	return 0;
4356 }
4357 
4358 int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar,
4359 				 const void *phyerr_buf,
4360 				 int left_len,
4361 				 struct wmi_phyerr_ev_arg *arg)
4362 {
4363 	const struct wmi_phyerr *phyerr = phyerr_buf;
4364 	int i;
4365 
4366 	if (left_len < sizeof(*phyerr)) {
4367 		ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n",
4368 			    left_len, sizeof(*phyerr));
4369 		return -EINVAL;
4370 	}
4371 
4372 	arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp);
4373 	arg->freq1 = __le16_to_cpu(phyerr->freq1);
4374 	arg->freq2 = __le16_to_cpu(phyerr->freq2);
4375 	arg->rssi_combined = phyerr->rssi_combined;
4376 	arg->chan_width_mhz = phyerr->chan_width_mhz;
4377 	arg->buf_len = __le32_to_cpu(phyerr->buf_len);
4378 	arg->buf = phyerr->buf;
4379 	arg->hdr_len = sizeof(*phyerr);
4380 
4381 	for (i = 0; i < 4; i++)
4382 		arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]);
4383 
4384 	switch (phyerr->phy_err_code) {
4385 	case PHY_ERROR_GEN_SPECTRAL_SCAN:
4386 		arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN;
4387 		break;
4388 	case PHY_ERROR_GEN_FALSE_RADAR_EXT:
4389 		arg->phy_err_code = PHY_ERROR_FALSE_RADAR_EXT;
4390 		break;
4391 	case PHY_ERROR_GEN_RADAR:
4392 		arg->phy_err_code = PHY_ERROR_RADAR;
4393 		break;
4394 	default:
4395 		arg->phy_err_code = PHY_ERROR_UNKNOWN;
4396 		break;
4397 	}
4398 
4399 	return 0;
4400 }
4401 
4402 static int ath10k_wmi_10_4_op_pull_phyerr_ev(struct ath10k *ar,
4403 					     const void *phyerr_buf,
4404 					     int left_len,
4405 					     struct wmi_phyerr_ev_arg *arg)
4406 {
4407 	const struct wmi_10_4_phyerr_event *phyerr = phyerr_buf;
4408 	u32 phy_err_mask;
4409 	int i;
4410 
4411 	if (left_len < sizeof(*phyerr)) {
4412 		ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n",
4413 			    left_len, sizeof(*phyerr));
4414 		return -EINVAL;
4415 	}
4416 
4417 	arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp);
4418 	arg->freq1 = __le16_to_cpu(phyerr->freq1);
4419 	arg->freq2 = __le16_to_cpu(phyerr->freq2);
4420 	arg->rssi_combined = phyerr->rssi_combined;
4421 	arg->chan_width_mhz = phyerr->chan_width_mhz;
4422 	arg->buf_len = __le32_to_cpu(phyerr->buf_len);
4423 	arg->buf = phyerr->buf;
4424 	arg->hdr_len = sizeof(*phyerr);
4425 
4426 	for (i = 0; i < 4; i++)
4427 		arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]);
4428 
4429 	phy_err_mask = __le32_to_cpu(phyerr->phy_err_mask[0]);
4430 
4431 	if (phy_err_mask & PHY_ERROR_10_4_SPECTRAL_SCAN_MASK)
4432 		arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN;
4433 	else if (phy_err_mask & PHY_ERROR_10_4_RADAR_MASK)
4434 		arg->phy_err_code = PHY_ERROR_RADAR;
4435 	else
4436 		arg->phy_err_code = PHY_ERROR_UNKNOWN;
4437 
4438 	return 0;
4439 }
4440 
4441 void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
4442 {
4443 	struct wmi_phyerr_hdr_arg hdr_arg = {};
4444 	struct wmi_phyerr_ev_arg phyerr_arg = {};
4445 	const void *phyerr;
4446 	u32 count, i, buf_len, phy_err_code;
4447 	u64 tsf;
4448 	int left_len, ret;
4449 
4450 	ATH10K_DFS_STAT_INC(ar, phy_errors);
4451 
4452 	ret = ath10k_wmi_pull_phyerr_hdr(ar, skb, &hdr_arg);
4453 	if (ret) {
4454 		ath10k_warn(ar, "failed to parse phyerr event hdr: %d\n", ret);
4455 		return;
4456 	}
4457 
4458 	/* Check number of included events */
4459 	count = hdr_arg.num_phyerrs;
4460 
4461 	left_len = hdr_arg.buf_len;
4462 
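	/* Reassemble the 64-bit TSF from the two 32-bit halves reported in
	 * the event header.
	 */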
4463 	tsf = hdr_arg.tsf_u32;
4464 	tsf <<= 32;
4465 	tsf |= hdr_arg.tsf_l32;
4466 
4467 	ath10k_dbg(ar, ATH10K_DBG_WMI,
4468 		   "wmi event phyerr count %d tsf64 0x%llX\n",
4469 		   count, tsf);
4470 
4471 	phyerr = hdr_arg.phyerrs;
4472 	for (i = 0; i < count; i++) {
4473 		ret = ath10k_wmi_pull_phyerr(ar, phyerr, left_len, &phyerr_arg);
4474 		if (ret) {
4475 			ath10k_warn(ar, "failed to parse phyerr event (%d)\n",
4476 				    i);
4477 			return;
4478 		}
4479 
4480 		left_len -= phyerr_arg.hdr_len;
4481 		buf_len = phyerr_arg.buf_len;
4482 		phy_err_code = phyerr_arg.phy_err_code;
4483 
4484 		if (left_len < buf_len) {
4485 			ath10k_warn(ar, "single event (%d) wrong buf len\n", i);
4486 			return;
4487 		}
4488 
4489 		left_len -= buf_len;
4490 
4491 		switch (phy_err_code) {
4492 		case PHY_ERROR_RADAR:
4493 			ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf);
4494 			break;
4495 		case PHY_ERROR_SPECTRAL_SCAN:
4496 			ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf);
4497 			break;
4498 		case PHY_ERROR_FALSE_RADAR_EXT:
4499 			ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf);
4500 			ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf);
4501 			break;
4502 		default:
4503 			break;
4504 		}
4505 
4506 		phyerr = phyerr + phyerr_arg.hdr_len + buf_len;
4507 	}
4508 }
4509 
4510 static int
4511 ath10k_wmi_10_4_op_pull_dfs_status_ev(struct ath10k *ar, struct sk_buff *skb,
4512 				      struct wmi_dfs_status_ev_arg *arg)
4513 {
4514 	struct wmi_dfs_status_ev_arg *ev = (void *)skb->data;
4515 
4516 	if (skb->len < sizeof(*ev))
4517 		return -EPROTO;
4518 
4519 	arg->status = ev->status;
4520 
4521 	return 0;
4522 }
4523 
4524 static void
4525 ath10k_wmi_event_dfs_status_check(struct ath10k *ar, struct sk_buff *skb)
4526 {
4527 	struct wmi_dfs_status_ev_arg status_arg = {};
4528 	int ret;
4529 
4530 	ret = ath10k_wmi_pull_dfs_status(ar, skb, &status_arg);
4531 
4532 	if (ret) {
4533 		ath10k_warn(ar, "failed to parse dfs status event: %d\n", ret);
4534 		return;
4535 	}
4536 
4537 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
4538 		   "dfs status event received from fw: %d\n",
4539 		   status_arg.status);
4540 
4541 	/* Even in case of radar detection failure we follow the same
4542 	 * behaviour as if radar is detected, i.e. to switch to a different
4543 	 * channel.
4544 	 */
4545 	if (status_arg.status == WMI_HW_RADAR_DETECTED ||
4546 	    status_arg.status == WMI_RADAR_DETECTION_FAIL)
4547 		ath10k_radar_detected(ar);
4548 	complete(&ar->wmi.radar_confirm);
4549 }
4550 
4551 void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
4552 {
4553 	struct wmi_roam_ev_arg arg = {};
4554 	int ret;
4555 	u32 vdev_id;
4556 	u32 reason;
4557 	s32 rssi;
4558 
4559 	ret = ath10k_wmi_pull_roam_ev(ar, skb, &arg);
4560 	if (ret) {
4561 		ath10k_warn(ar, "failed to parse roam event: %d\n", ret);
4562 		return;
4563 	}
4564 
4565 	vdev_id = __le32_to_cpu(arg.vdev_id);
4566 	reason = __le32_to_cpu(arg.reason);
4567 	rssi = __le32_to_cpu(arg.rssi);
4568 	rssi += WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
4569 
4570 	ath10k_dbg(ar, ATH10K_DBG_WMI,
4571 		   "wmi roam event vdev %u reason 0x%08x rssi %d\n",
4572 		   vdev_id, reason, rssi);
4573 
4574 	if (reason >= WMI_ROAM_REASON_MAX)
4575 		ath10k_warn(ar, "ignoring unknown roam event reason %d on vdev %i\n",
4576 			    reason, vdev_id);
4577 
4578 	switch (reason) {
4579 	case WMI_ROAM_REASON_BEACON_MISS:
4580 		ath10k_mac_handle_beacon_miss(ar, vdev_id);
4581 		break;
4582 	case WMI_ROAM_REASON_BETTER_AP:
4583 	case WMI_ROAM_REASON_LOW_RSSI:
4584 	case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
4585 	case WMI_ROAM_REASON_HO_FAILED:
4586 		ath10k_warn(ar, "ignoring not implemented roam event reason %d on vdev %i\n",
4587 			    reason, vdev_id);
4588 		break;
4589 	}
4590 }
4591 
4592 void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb)
4593 {
4594 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
4595 }
4596 
4597 void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb)
4598 {
4599 	char buf[101], c;
4600 	int i;
4601 
4602 	for (i = 0; i < sizeof(buf) - 1; i++) {
4603 		if (i >= skb->len)
4604 			break;
4605 
4606 		c = skb->data[i];
4607 
4608 		if (c == '\0')
4609 			break;
4610 
4611 		if (isascii(c) && isprint(c))
4612 			buf[i] = c;
4613 		else
4614 			buf[i] = '.';
4615 	}
4616 
4617 	if (i == sizeof(buf) - 1)
4618 		ath10k_warn(ar, "wmi debug print truncated: %d\n", skb->len);
4619 
4620 	/* for some reason the debug prints end with \n, remove that */
4621 	if (skb->data[i - 1] == '\n')
4622 		i--;
4623 
4624 	/* the last byte is always reserved for the null character */
4625 	buf[i] = '\0';
4626 
4627 	ath10k_dbg(ar, ATH10K_DBG_WMI_PRINT, "wmi print '%s'\n", buf);
4628 }
4629 
4630 void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
4631 {
4632 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
4633 }
4634 
4635 void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb)
4636 {
4637 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
4638 }
4639 
4640 void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
4641 					     struct sk_buff *skb)
4642 {
4643 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
4644 }
4645 
4646 void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
4647 					     struct sk_buff *skb)
4648 {
4649 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
4650 }
4651 
4652 void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb)
4653 {
4654 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
4655 }
4656 
4657 void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb)
4658 {
4659 	struct wmi_wow_ev_arg ev = {};
4660 	int ret;
4661 
4662 	complete(&ar->wow.wakeup_completed);
4663 
4664 	ret = ath10k_wmi_pull_wow_event(ar, skb, &ev);
4665 	if (ret) {
4666 		ath10k_warn(ar, "failed to parse wow wakeup event: %d\n", ret);
4667 		return;
4668 	}
4669 
4670 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wow wakeup host reason %s\n",
4671 		   wow_reason(ev.wake_reason));
4672 }
4673 
4674 void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb)
4675 {
4676 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
4677 }
4678 
4679 static u8 ath10k_tpc_config_get_rate(struct ath10k *ar,
4680 				     struct wmi_pdev_tpc_config_event *ev,
4681 				     u32 rate_idx, u32 num_chains,
4682 				     u32 rate_code, u8 type)
4683 {
4684 	u8 tpc, num_streams, preamble, ch, stm_idx;
4685 
4686 	num_streams = ATH10K_HW_NSS(rate_code);
4687 	preamble = ATH10K_HW_PREAMBLE(rate_code);
4688 	ch = num_chains - 1;
4689 
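	/* Start from the per-rate target power, capped by the regulatory
	 * limit for this chain count.
	 */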
4690 	tpc = min_t(u8, ev->rates_array[rate_idx], ev->max_reg_allow_pow[ch]);
4691 
4692 	if (__le32_to_cpu(ev->num_tx_chain) <= 1)
4693 		goto out;
4694 
4695 	if (preamble == WMI_RATE_PREAMBLE_CCK)
4696 		goto out;
4697 
4698 	stm_idx = num_streams - 1;
4699 	if (num_chains <= num_streams)
4700 		goto out;
4701 
4702 	switch (type) {
4703 	case WMI_TPC_TABLE_TYPE_STBC:
4704 		tpc = min_t(u8, tpc,
4705 			    ev->max_reg_allow_pow_agstbc[ch - 1][stm_idx]);
4706 		break;
4707 	case WMI_TPC_TABLE_TYPE_TXBF:
4708 		tpc = min_t(u8, tpc,
4709 			    ev->max_reg_allow_pow_agtxbf[ch - 1][stm_idx]);
4710 		break;
4711 	case WMI_TPC_TABLE_TYPE_CDD:
4712 		tpc = min_t(u8, tpc,
4713 			    ev->max_reg_allow_pow_agcdd[ch - 1][stm_idx]);
4714 		break;
4715 	default:
4716 		ath10k_warn(ar, "unknown wmi tpc table type: %d\n", type);
4717 		tpc = 0;
4718 		break;
4719 	}
4720 
4721 out:
4722 	return tpc;
4723 }
4724 
4725 static void ath10k_tpc_config_disp_tables(struct ath10k *ar,
4726 					  struct wmi_pdev_tpc_config_event *ev,
4727 					  struct ath10k_tpc_stats *tpc_stats,
4728 					  u8 *rate_code, u16 *pream_table, u8 type)
4729 {
4730 	u32 i, j, pream_idx, flags;
4731 	u8 tpc[WMI_TPC_TX_N_CHAIN];
4732 	char tpc_value[WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE];
4733 	char buff[WMI_TPC_BUF_SIZE];
4734 
4735 	flags = __le32_to_cpu(ev->flags);
4736 
4737 	switch (type) {
4738 	case WMI_TPC_TABLE_TYPE_CDD:
4739 		if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD)) {
4740 			ath10k_dbg(ar, ATH10K_DBG_WMI, "CDD not supported\n");
4741 			tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
4742 			return;
4743 		}
4744 		break;
4745 	case WMI_TPC_TABLE_TYPE_STBC:
4746 		if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC)) {
4747 			ath10k_dbg(ar, ATH10K_DBG_WMI, "STBC not supported\n");
4748 			tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
4749 			return;
4750 		}
4751 		break;
4752 	case WMI_TPC_TABLE_TYPE_TXBF:
4753 		if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF)) {
4754 			ath10k_dbg(ar, ATH10K_DBG_WMI, "TXBF not supported\n");
4755 			tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
4756 			return;
4757 		}
4758 		break;
4759 	default:
4760 		ath10k_dbg(ar, ATH10K_DBG_WMI,
4761 			   "invalid table type in wmi tpc event: %d\n", type);
4762 		return;
4763 	}
4764 
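	/* Walk every rate, bumping the preamble index at each table boundary,
	 * and render one row of per-chain TPC values for this table type.
	 */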
4765 	pream_idx = 0;
4766 	for (i = 0; i < tpc_stats->rate_max; i++) {
4767 		memset(tpc_value, 0, sizeof(tpc_value));
4768 		memset(buff, 0, sizeof(buff));
4769 		if (i == pream_table[pream_idx])
4770 			pream_idx++;
4771 
4772 		for (j = 0; j < tpc_stats->num_tx_chain; j++) {
4773 			tpc[j] = ath10k_tpc_config_get_rate(ar, ev, i, j + 1,
4774 							    rate_code[i],
4775 							    type);
4776 			snprintf(buff, sizeof(buff), "%8d ", tpc[j]);
4777 			strlcat(tpc_value, buff, sizeof(tpc_value));
4778 		}
4779 		tpc_stats->tpc_table[type].pream_idx[i] = pream_idx;
4780 		tpc_stats->tpc_table[type].rate_code[i] = rate_code[i];
4781 		memcpy(tpc_stats->tpc_table[type].tpc_value[i],
4782 		       tpc_value, sizeof(tpc_value));
4783 	}
4784 }
4785 
4786 void ath10k_wmi_tpc_config_get_rate_code(u8 *rate_code, u16 *pream_table,
4787 					 u32 num_tx_chain)
4788 {
4789 	u32 i, j, pream_idx;
4790 	u8 rate_idx;
4791 
4792 	/* Create the rate code table based on the chains supported */
4793 	rate_idx = 0;
4794 	pream_idx = 0;
4795 
4796 	/* Fill CCK rate code */
4797 	for (i = 0; i < 4; i++) {
4798 		rate_code[rate_idx] =
4799 			ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_CCK);
4800 		rate_idx++;
4801 	}
4802 	pream_table[pream_idx] = rate_idx;
4803 	pream_idx++;
4804 
4805 	/* Fill OFDM rate code */
4806 	for (i = 0; i < 8; i++) {
4807 		rate_code[rate_idx] =
4808 			ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_OFDM);
4809 		rate_idx++;
4810 	}
4811 	pream_table[pream_idx] = rate_idx;
4812 	pream_idx++;
4813 
4814 	/* Fill HT20 rate code */
4815 	for (i = 0; i < num_tx_chain; i++) {
4816 		for (j = 0; j < 8; j++) {
4817 			rate_code[rate_idx] =
4818 			ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT);
4819 			rate_idx++;
4820 		}
4821 	}
4822 	pream_table[pream_idx] = rate_idx;
4823 	pream_idx++;
4824 
4825 	/* Fill HT40 rate code */
4826 	for (i = 0; i < num_tx_chain; i++) {
4827 		for (j = 0; j < 8; j++) {
4828 			rate_code[rate_idx] =
4829 			ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT);
4830 			rate_idx++;
4831 		}
4832 	}
4833 	pream_table[pream_idx] = rate_idx;
4834 	pream_idx++;
4835 
4836 	/* Fill VHT20 rate code */
4837 	for (i = 0; i < num_tx_chain; i++) {
4838 		for (j = 0; j < 10; j++) {
4839 			rate_code[rate_idx] =
4840 			ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
4841 			rate_idx++;
4842 		}
4843 	}
4844 	pream_table[pream_idx] = rate_idx;
4845 	pream_idx++;
4846 
4847 	/* Fill VHT40 rate code */
4848 	for (i = 0; i < num_tx_chain; i++) {
4849 		for (j = 0; j < 10; j++) {
4850 			rate_code[rate_idx] =
4851 			ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
4852 			rate_idx++;
4853 		}
4854 	}
4855 	pream_table[pream_idx] = rate_idx;
4856 	pream_idx++;
4857 
4858 	/* Fill VHT80 rate code */
4859 	for (i = 0; i < num_tx_chain; i++) {
4860 		for (j = 0; j < 10; j++) {
4861 			rate_code[rate_idx] =
4862 			ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
4863 			rate_idx++;
4864 		}
4865 	}
4866 	pream_table[pream_idx] = rate_idx;
4867 	pream_idx++;
4868 
4869 	rate_code[rate_idx++] =
4870 		ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK);
4871 	rate_code[rate_idx++] =
4872 		ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
4873 	rate_code[rate_idx++] =
4874 		ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK);
4875 	rate_code[rate_idx++] =
4876 		ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
4877 	rate_code[rate_idx++] =
4878 		ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
4879 
4880 	pream_table[pream_idx] = ATH10K_TPC_PREAM_TABLE_END;
4881 }
4882 
4883 void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
4884 {
4885 	u32 num_tx_chain, rate_max;
4886 	u8 rate_code[WMI_TPC_RATE_MAX];
4887 	u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
4888 	struct wmi_pdev_tpc_config_event *ev;
4889 	struct ath10k_tpc_stats *tpc_stats;
4890 
4891 	ev = (struct wmi_pdev_tpc_config_event *)skb->data;
4892 
4893 	num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
4894 
4895 	if (num_tx_chain > WMI_TPC_TX_N_CHAIN) {
4896 		ath10k_warn(ar, "number of tx chains (%d) exceeds the TPC configured tx chains (%d)\n",
4897 			    num_tx_chain, WMI_TPC_TX_N_CHAIN);
4898 		return;
4899 	}
4900 
4901 	rate_max = __le32_to_cpu(ev->rate_max);
4902 	if (rate_max > WMI_TPC_RATE_MAX) {
4903 		ath10k_warn(ar, "number of rates (%d) exceeds the TPC configured rates (%d)\n",
4904 			    rate_max, WMI_TPC_RATE_MAX);
4905 		rate_max = WMI_TPC_RATE_MAX;
4906 	}
4907 
4908 	tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
4909 	if (!tpc_stats)
4910 		return;
4911 
4912 	ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table,
4913 					    num_tx_chain);
4914 
4915 	tpc_stats->chan_freq = __le32_to_cpu(ev->chan_freq);
4916 	tpc_stats->phy_mode = __le32_to_cpu(ev->phy_mode);
4917 	tpc_stats->ctl = __le32_to_cpu(ev->ctl);
4918 	tpc_stats->reg_domain = __le32_to_cpu(ev->reg_domain);
4919 	tpc_stats->twice_antenna_gain = a_sle32_to_cpu(ev->twice_antenna_gain);
4920 	tpc_stats->twice_antenna_reduction =
4921 		__le32_to_cpu(ev->twice_antenna_reduction);
4922 	tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
4923 	tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
4924 	tpc_stats->num_tx_chain = num_tx_chain;
4925 	tpc_stats->rate_max = rate_max;
4926 
4927 	ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
4928 				      rate_code, pream_table,
4929 				      WMI_TPC_TABLE_TYPE_CDD);
4930 	ath10k_tpc_config_disp_tables(ar, ev,  tpc_stats,
4931 				      rate_code, pream_table,
4932 				      WMI_TPC_TABLE_TYPE_STBC);
4933 	ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
4934 				      rate_code, pream_table,
4935 				      WMI_TPC_TABLE_TYPE_TXBF);
4936 
4937 	ath10k_debug_tpc_stats_process(ar, tpc_stats);
4938 
4939 	ath10k_dbg(ar, ATH10K_DBG_WMI,
4940 		   "wmi event tpc config channel %d mode %d ctl %d regd %d gain %d %d limit %d max_power %d tx_chains %d rates %d\n",
4941 		   __le32_to_cpu(ev->chan_freq),
4942 		   __le32_to_cpu(ev->phy_mode),
4943 		   __le32_to_cpu(ev->ctl),
4944 		   __le32_to_cpu(ev->reg_domain),
4945 		   a_sle32_to_cpu(ev->twice_antenna_gain),
4946 		   __le32_to_cpu(ev->twice_antenna_reduction),
4947 		   __le32_to_cpu(ev->power_limit),
4948 		   __le32_to_cpu(ev->twice_max_rd_power) / 2,
4949 		   __le32_to_cpu(ev->num_tx_chain),
4950 		   __le32_to_cpu(ev->rate_max));
4951 }
4952 
4953 static u8
4954 ath10k_wmi_tpc_final_get_rate(struct ath10k *ar,
4955 			      struct wmi_pdev_tpc_final_table_event *ev,
4956 			      u32 rate_idx, u32 num_chains,
4957 			      u32 rate_code, u8 type, u32 pream_idx)
4958 {
4959 	u8 tpc, num_streams, preamble, ch, stm_idx;
4960 	s8 pow_agcdd, pow_agstbc, pow_agtxbf;
4961 	int pream;
4962 
4963 	num_streams = ATH10K_HW_NSS(rate_code);
4964 	preamble = ATH10K_HW_PREAMBLE(rate_code);
4965 	ch = num_chains - 1;
4966 	stm_idx = num_streams - 1;
4967 	pream = -1;
4968 
4969 	if (__le32_to_cpu(ev->chan_freq) <= 2483) {
4970 		switch (pream_idx) {
4971 		case WMI_TPC_PREAM_2GHZ_CCK:
4972 			pream = 0;
4973 			break;
4974 		case WMI_TPC_PREAM_2GHZ_OFDM:
4975 			pream = 1;
4976 			break;
4977 		case WMI_TPC_PREAM_2GHZ_HT20:
4978 		case WMI_TPC_PREAM_2GHZ_VHT20:
4979 			pream = 2;
4980 			break;
4981 		case WMI_TPC_PREAM_2GHZ_HT40:
4982 		case WMI_TPC_PREAM_2GHZ_VHT40:
4983 			pream = 3;
4984 			break;
4985 		case WMI_TPC_PREAM_2GHZ_VHT80:
4986 			pream = 4;
4987 			break;
4988 		default:
4989 			pream = -1;
4990 			break;
4991 		}
4992 	}
4993 
4994 	if (__le32_to_cpu(ev->chan_freq) >= 5180) {
4995 		switch (pream_idx) {
4996 		case WMI_TPC_PREAM_5GHZ_OFDM:
4997 			pream = 0;
4998 			break;
4999 		case WMI_TPC_PREAM_5GHZ_HT20:
5000 		case WMI_TPC_PREAM_5GHZ_VHT20:
5001 			pream = 1;
5002 			break;
5003 		case WMI_TPC_PREAM_5GHZ_HT40:
5004 		case WMI_TPC_PREAM_5GHZ_VHT40:
5005 			pream = 2;
5006 			break;
5007 		case WMI_TPC_PREAM_5GHZ_VHT80:
5008 			pream = 3;
5009 			break;
5010 		case WMI_TPC_PREAM_5GHZ_HTCUP:
5011 			pream = 4;
5012 			break;
5013 		default:
5014 			pream = -1;
5015 			break;
5016 		}
5017 	}
5018 
5019 	if (pream == -1) {
5020 		ath10k_warn(ar, "unknown wmi tpc final index and frequency: %u, %u\n",
5021 			    pream_idx, __le32_to_cpu(ev->chan_freq));
5022 		tpc = 0;
5023 		goto out;
5024 	}
5025 
5026 	if (pream == 4)
5027 		tpc = min_t(u8, ev->rates_array[rate_idx],
5028 			    ev->max_reg_allow_pow[ch]);
5029 	else
5030 		tpc = min_t(u8, min_t(u8, ev->rates_array[rate_idx],
5031 				      ev->max_reg_allow_pow[ch]),
5032 			    ev->ctl_power_table[0][pream][stm_idx]);
5033 
5034 	if (__le32_to_cpu(ev->num_tx_chain) <= 1)
5035 		goto out;
5036 
5037 	if (preamble == WMI_RATE_PREAMBLE_CCK)
5038 		goto out;
5039 
5040 	if (num_chains <= num_streams)
5041 		goto out;
5042 
5043 	switch (type) {
5044 	case WMI_TPC_TABLE_TYPE_STBC:
5045 		pow_agstbc = ev->max_reg_allow_pow_agstbc[ch - 1][stm_idx];
5046 		if (pream == 4)
5047 			tpc = min_t(u8, tpc, pow_agstbc);
5048 		else
5049 			tpc = min_t(u8, min_t(u8, tpc, pow_agstbc),
5050 				    ev->ctl_power_table[0][pream][stm_idx]);
5051 		break;
5052 	case WMI_TPC_TABLE_TYPE_TXBF:
5053 		pow_agtxbf = ev->max_reg_allow_pow_agtxbf[ch - 1][stm_idx];
5054 		if (pream == 4)
5055 			tpc = min_t(u8, tpc, pow_agtxbf);
5056 		else
5057 			tpc = min_t(u8, min_t(u8, tpc, pow_agtxbf),
5058 				    ev->ctl_power_table[1][pream][stm_idx]);
5059 		break;
5060 	case WMI_TPC_TABLE_TYPE_CDD:
5061 		pow_agcdd = ev->max_reg_allow_pow_agcdd[ch - 1][stm_idx];
5062 		if (pream == 4)
5063 			tpc = min_t(u8, tpc, pow_agcdd);
5064 		else
5065 			tpc = min_t(u8, min_t(u8, tpc, pow_agcdd),
5066 				    ev->ctl_power_table[0][pream][stm_idx]);
5067 		break;
5068 	default:
5069 		ath10k_warn(ar, "unknown wmi tpc final table type: %d\n", type);
5070 		tpc = 0;
5071 		break;
5072 	}
5073 
5074 out:
5075 	return tpc;
5076 }
5077 
5078 static void
5079 ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar,
5080 				       struct wmi_pdev_tpc_final_table_event *ev,
5081 				       struct ath10k_tpc_stats_final *tpc_stats,
5082 				       u8 *rate_code, u16 *pream_table, u8 type)
5083 {
5084 	u32 i, j, pream_idx, flags;
5085 	u8 tpc[WMI_TPC_TX_N_CHAIN];
5086 	char tpc_value[WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE];
5087 	char buff[WMI_TPC_BUF_SIZE];
5088 
5089 	flags = __le32_to_cpu(ev->flags);
5090 
5091 	switch (type) {
5092 	case WMI_TPC_TABLE_TYPE_CDD:
5093 		if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD)) {
5094 			ath10k_dbg(ar, ATH10K_DBG_WMI, "CDD not supported\n");
5095 			tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
5096 			return;
5097 		}
5098 		break;
5099 	case WMI_TPC_TABLE_TYPE_STBC:
5100 		if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC)) {
5101 			ath10k_dbg(ar, ATH10K_DBG_WMI, "STBC not supported\n");
5102 			tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
5103 			return;
5104 		}
5105 		break;
5106 	case WMI_TPC_TABLE_TYPE_TXBF:
5107 		if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF)) {
5108 			ath10k_dbg(ar, ATH10K_DBG_WMI, "TXBF not supported\n");
5109 			tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
5110 			return;
5111 		}
5112 		break;
5113 	default:
5114 		ath10k_dbg(ar, ATH10K_DBG_WMI,
5115 			   "invalid table type in wmi tpc event: %d\n", type);
5116 		return;
5117 	}
5118 
5119 	pream_idx = 0;
5120 	for (i = 0; i < tpc_stats->rate_max; i++) {
5121 		memset(tpc_value, 0, sizeof(tpc_value));
5122 		memset(buff, 0, sizeof(buff));
5123 		if (i == pream_table[pream_idx])
5124 			pream_idx++;
5125 
5126 		for (j = 0; j < tpc_stats->num_tx_chain; j++) {
5127 			tpc[j] = ath10k_wmi_tpc_final_get_rate(ar, ev, i, j + 1,
5128 							       rate_code[i],
5129 							       type, pream_idx);
5130 			snprintf(buff, sizeof(buff), "%8d ", tpc[j]);
5131 			strlcat(tpc_value, buff, sizeof(tpc_value));
5132 		}
5133 		tpc_stats->tpc_table_final[type].pream_idx[i] = pream_idx;
5134 		tpc_stats->tpc_table_final[type].rate_code[i] = rate_code[i];
5135 		memcpy(tpc_stats->tpc_table_final[type].tpc_value[i],
5136 		       tpc_value, sizeof(tpc_value));
5137 	}
5138 }
5139 
5140 void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
5141 {
5142 	u32 num_tx_chain, rate_max;
5143 	u8 rate_code[WMI_TPC_FINAL_RATE_MAX];
5144 	u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
5145 	struct wmi_pdev_tpc_final_table_event *ev;
5146 	struct ath10k_tpc_stats_final *tpc_stats;
5147 
5148 	ev = (struct wmi_pdev_tpc_final_table_event *)skb->data;
5149 
5150 	num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
5151 	if (num_tx_chain > WMI_TPC_TX_N_CHAIN) {
5152 		ath10k_warn(ar, "number of tx chains (%d) exceeds the TPC final configured tx chains (%d)\n",
5153 			    num_tx_chain, WMI_TPC_TX_N_CHAIN);
5154 		return;
5155 	}
5156 
5157 	rate_max = __le32_to_cpu(ev->rate_max);
5158 	if (rate_max > WMI_TPC_FINAL_RATE_MAX) {
5159 		ath10k_warn(ar, "number of rates (%d) exceeds the TPC final configured rates (%d)\n",
5160 			    rate_max, WMI_TPC_FINAL_RATE_MAX);
5161 		rate_max = WMI_TPC_FINAL_RATE_MAX;
5162 	}
5163 
5164 	tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
5165 	if (!tpc_stats)
5166 		return;
5167 
5168 	ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table,
5169 					    num_tx_chain);
5170 
5171 	tpc_stats->chan_freq = __le32_to_cpu(ev->chan_freq);
5172 	tpc_stats->phy_mode = __le32_to_cpu(ev->phy_mode);
5173 	tpc_stats->ctl = __le32_to_cpu(ev->ctl);
5174 	tpc_stats->reg_domain = __le32_to_cpu(ev->reg_domain);
5175 	tpc_stats->twice_antenna_gain = a_sle32_to_cpu(ev->twice_antenna_gain);
5176 	tpc_stats->twice_antenna_reduction =
5177 		__le32_to_cpu(ev->twice_antenna_reduction);
5178 	tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
5179 	tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
5180 	tpc_stats->num_tx_chain = num_tx_chain;
5181 	tpc_stats->rate_max = rate_max;
5182 
5183 	ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
5184 					       rate_code, pream_table,
5185 					       WMI_TPC_TABLE_TYPE_CDD);
5186 	ath10k_wmi_tpc_stats_final_disp_tables(ar, ev,  tpc_stats,
5187 					       rate_code, pream_table,
5188 					       WMI_TPC_TABLE_TYPE_STBC);
5189 	ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
5190 					       rate_code, pream_table,
5191 					       WMI_TPC_TABLE_TYPE_TXBF);
5192 
5193 	ath10k_debug_tpc_stats_final_process(ar, tpc_stats);
5194 
5195 	ath10k_dbg(ar, ATH10K_DBG_WMI,
5196 		   "wmi event tpc final table channel %d mode %d ctl %d regd %d gain %d %d limit %d max_power %d tx_chains %d rates %d\n",
5197 		   __le32_to_cpu(ev->chan_freq),
5198 		   __le32_to_cpu(ev->phy_mode),
5199 		   __le32_to_cpu(ev->ctl),
5200 		   __le32_to_cpu(ev->reg_domain),
5201 		   a_sle32_to_cpu(ev->twice_antenna_gain),
5202 		   __le32_to_cpu(ev->twice_antenna_reduction),
5203 		   __le32_to_cpu(ev->power_limit),
5204 		   __le32_to_cpu(ev->twice_max_rd_power) / 2,
5205 		   __le32_to_cpu(ev->num_tx_chain),
5206 		   __le32_to_cpu(ev->rate_max));
5207 }
5208 
5209 static void
5210 ath10k_wmi_handle_tdls_peer_event(struct ath10k *ar, struct sk_buff *skb)
5211 {
5212 	struct wmi_tdls_peer_event *ev;
5213 	struct ath10k_peer *peer;
5214 	struct ath10k_vif *arvif;
5215 	int vdev_id;
5216 	int peer_status;
5217 	int peer_reason;
5218 	u8 reason;
5219 
5220 	if (skb->len < sizeof(*ev)) {
5221 		ath10k_err(ar, "received tdls peer event with invalid size (%d bytes)\n",
5222 			   skb->len);
5223 		return;
5224 	}
5225 
5226 	ev = (struct wmi_tdls_peer_event *)skb->data;
5227 	vdev_id = __le32_to_cpu(ev->vdev_id);
5228 	peer_status = __le32_to_cpu(ev->peer_status);
5229 	peer_reason = __le32_to_cpu(ev->peer_reason);
5230 
5231 	spin_lock_bh(&ar->data_lock);
5232 	peer = ath10k_peer_find(ar, vdev_id, ev->peer_macaddr.addr);
5233 	spin_unlock_bh(&ar->data_lock);
5234 
5235 	if (!peer) {
5236 		ath10k_warn(ar, "failed to find peer entry for %pM\n",
5237 			    ev->peer_macaddr.addr);
5238 		return;
5239 	}
5240 
5241 	switch (peer_status) {
5242 	case WMI_TDLS_SHOULD_TEARDOWN:
5243 		switch (peer_reason) {
5244 		case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT:
5245 		case WMI_TDLS_TEARDOWN_REASON_NO_RESPONSE:
5246 		case WMI_TDLS_TEARDOWN_REASON_RSSI:
5247 			reason = WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE;
5248 			break;
5249 		default:
5250 			reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
5251 			break;
5252 		}
5253 
5254 		arvif = ath10k_get_arvif(ar, vdev_id);
5255 		if (!arvif) {
5256 			ath10k_warn(ar, "received tdls peer event for invalid vdev id %u\n",
5257 				    vdev_id);
5258 			return;
5259 		}
5260 
5261 		ieee80211_tdls_oper_request(arvif->vif, ev->peer_macaddr.addr,
5262 					    NL80211_TDLS_TEARDOWN, reason,
5263 					    GFP_ATOMIC);
5264 
5265 		ath10k_dbg(ar, ATH10K_DBG_WMI,
5266 			   "received tdls teardown event for peer %pM reason %u\n",
5267 			   ev->peer_macaddr.addr, peer_reason);
5268 		break;
5269 	default:
5270 		ath10k_dbg(ar, ATH10K_DBG_WMI,
5271 			   "received unknown tdls peer event %u\n",
5272 			   peer_status);
5273 		break;
5274 	}
5275 }
5276 
5277 static void
5278 ath10k_wmi_event_peer_sta_ps_state_chg(struct ath10k *ar, struct sk_buff *skb)
5279 {
5280 	struct wmi_peer_sta_ps_state_chg_event *ev;
5281 	struct ieee80211_sta *sta;
5282 	struct ath10k_sta *arsta;
5283 	u8 peer_addr[ETH_ALEN];
5284 
5285 	lockdep_assert_held(&ar->data_lock);
5286 
5287 	ev = (struct wmi_peer_sta_ps_state_chg_event *)skb->data;
5288 	ether_addr_copy(peer_addr, ev->peer_macaddr.addr);
5289 
5290 	rcu_read_lock();
5291 
5292 	sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer_addr, NULL);
5293 
5294 	if (!sta) {
5295 		ath10k_warn(ar, "failed to find station entry %pM\n",
5296 			    peer_addr);
5297 		goto exit;
5298 	}
5299 
5300 	arsta = (struct ath10k_sta *)sta->drv_priv;
5301 	arsta->peer_ps_state = __le32_to_cpu(ev->peer_ps_state);
5302 
5303 exit:
5304 	rcu_read_unlock();
5305 }
5306 
5307 void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb)
5308 {
5309 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
5310 }
5311 
5312 void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, struct sk_buff *skb)
5313 {
5314 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
5315 }
5316 
5317 void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb)
5318 {
5319 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
5320 }
5321 
5322 void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb)
5323 {
5324 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
5325 }
5326 
5327 void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb)
5328 {
5329 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
5330 }
5331 
5332 void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
5333 						struct sk_buff *skb)
5334 {
5335 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
5336 }
5337 
5338 void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb)
5339 {
5340 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
5341 }
5342 
5343 void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb)
5344 {
5345 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
5346 }
5347 
5348 void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb)
5349 {
5350 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
5351 }
5352 
5353 static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id,
5354 				  u32 num_units, u32 unit_len)
5355 {
5356 	dma_addr_t paddr;
5357 	u32 pool_size;
5358 	int idx = ar->wmi.num_mem_chunks;
5359 	void *vaddr;
5360 
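	/* Pad each unit to a 4-byte boundary before sizing the DMA pool. */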
5361 	pool_size = num_units * round_up(unit_len, 4);
5362 	vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL);
5363 
5364 	if (!vaddr)
5365 		return -ENOMEM;
5366 
5367 	ar->wmi.mem_chunks[idx].vaddr = vaddr;
5368 	ar->wmi.mem_chunks[idx].paddr = paddr;
5369 	ar->wmi.mem_chunks[idx].len = pool_size;
5370 	ar->wmi.mem_chunks[idx].req_id = req_id;
5371 	ar->wmi.num_mem_chunks++;
5372 
5373 	return num_units;
5374 }
5375 
5376 static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
5377 				     u32 num_units, u32 unit_len)
5378 {
5379 	int ret;
5380 
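	/* ath10k_wmi_alloc_chunk() returns how many units the new chunk
	 * covers; keep allocating until the full request is satisfied.
	 */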
5381 	while (num_units) {
5382 		ret = ath10k_wmi_alloc_chunk(ar, req_id, num_units, unit_len);
5383 		if (ret < 0)
5384 			return ret;
5385 
5386 		num_units -= ret;
5387 	}
5388 
5389 	return 0;
5390 }
5391 
5392 static bool
5393 ath10k_wmi_is_host_mem_allocated(struct ath10k *ar,
5394 				 const struct wlan_host_mem_req **mem_reqs,
5395 				 u32 num_mem_reqs)
5396 {
5397 	u32 req_id, num_units, unit_size, num_unit_info;
5398 	u32 pool_size;
5399 	int i, j;
5400 	bool found;
5401 
5402 	if (ar->wmi.num_mem_chunks != num_mem_reqs)
5403 		return false;
5404 
5405 	for (i = 0; i < num_mem_reqs; ++i) {
5406 		req_id = __le32_to_cpu(mem_reqs[i]->req_id);
5407 		num_units = __le32_to_cpu(mem_reqs[i]->num_units);
5408 		unit_size = __le32_to_cpu(mem_reqs[i]->unit_size);
5409 		num_unit_info = __le32_to_cpu(mem_reqs[i]->num_unit_info);
5410 
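		/* Translate the unit-count hints into absolute counts, mirroring
		 * the allocation path, so chunk sizes compare like-for-like.
		 */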
5411 		if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) {
5412 			if (ar->num_active_peers)
5413 				num_units = ar->num_active_peers + 1;
5414 			else
5415 				num_units = ar->max_num_peers + 1;
5416 		} else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
5417 			num_units = ar->max_num_peers + 1;
5418 		} else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
5419 			num_units = ar->max_num_vdevs + 1;
5420 		}
5421 
5422 		found = false;
5423 		for (j = 0; j < ar->wmi.num_mem_chunks; j++) {
5424 			if (ar->wmi.mem_chunks[j].req_id == req_id) {
5425 				pool_size = num_units * round_up(unit_size, 4);
5426 				if (ar->wmi.mem_chunks[j].len == pool_size) {
5427 					found = true;
5428 					break;
5429 				}
5430 			}
5431 		}
5432 		if (!found)
5433 			return false;
5434 	}
5435 
5436 	return true;
5437 }
5438 
5439 static int
5440 ath10k_wmi_main_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
5441 				   struct wmi_svc_rdy_ev_arg *arg)
5442 {
5443 	struct wmi_service_ready_event *ev;
5444 	size_t i, n;
5445 
5446 	if (skb->len < sizeof(*ev))
5447 		return -EPROTO;
5448 
5449 	ev = (void *)skb->data;
5450 	skb_pull(skb, sizeof(*ev));
5451 	arg->min_tx_power = ev->hw_min_tx_power;
5452 	arg->max_tx_power = ev->hw_max_tx_power;
5453 	arg->ht_cap = ev->ht_cap_info;
5454 	arg->vht_cap = ev->vht_cap_info;
5455 	arg->vht_supp_mcs = ev->vht_supp_mcs;
5456 	arg->sw_ver0 = ev->sw_version;
5457 	arg->sw_ver1 = ev->sw_version_1;
5458 	arg->phy_capab = ev->phy_capability;
5459 	arg->num_rf_chains = ev->num_rf_chains;
5460 	arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
5461 	arg->low_2ghz_chan = ev->hal_reg_capabilities.low_2ghz_chan;
5462 	arg->high_2ghz_chan = ev->hal_reg_capabilities.high_2ghz_chan;
5463 	arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan;
5464 	arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan;
5465 	arg->num_mem_reqs = ev->num_mem_reqs;
5466 	arg->service_map = ev->wmi_service_bitmap;
5467 	arg->service_map_len = sizeof(ev->wmi_service_bitmap);
5468 
5469 	n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
5470 		  ARRAY_SIZE(arg->mem_reqs));
5471 	for (i = 0; i < n; i++)
5472 		arg->mem_reqs[i] = &ev->mem_reqs[i];
5473 
5474 	if (skb->len <
5475 	    __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
5476 		return -EPROTO;
5477 
5478 	return 0;
5479 }
5480 
5481 static int
5482 ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
5483 				  struct wmi_svc_rdy_ev_arg *arg)
5484 {
5485 	struct wmi_10x_service_ready_event *ev;
5486 	int i, n;
5487 
5488 	if (skb->len < sizeof(*ev))
5489 		return -EPROTO;
5490 
5491 	ev = (void *)skb->data;
5492 	skb_pull(skb, sizeof(*ev));
5493 	arg->min_tx_power = ev->hw_min_tx_power;
5494 	arg->max_tx_power = ev->hw_max_tx_power;
5495 	arg->ht_cap = ev->ht_cap_info;
5496 	arg->vht_cap = ev->vht_cap_info;
5497 	arg->vht_supp_mcs = ev->vht_supp_mcs;
5498 	arg->sw_ver0 = ev->sw_version;
5499 	arg->phy_capab = ev->phy_capability;
5500 	arg->num_rf_chains = ev->num_rf_chains;
5501 	arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
5502 	arg->low_2ghz_chan = ev->hal_reg_capabilities.low_2ghz_chan;
5503 	arg->high_2ghz_chan = ev->hal_reg_capabilities.high_2ghz_chan;
5504 	arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan;
5505 	arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan;
5506 	arg->num_mem_reqs = ev->num_mem_reqs;
5507 	arg->service_map = ev->wmi_service_bitmap;
5508 	arg->service_map_len = sizeof(ev->wmi_service_bitmap);
5509 
5510 	/* Deliberately skipping ev->sys_cap_info as WMI and WMI-TLV have
5511 	 * different values. We would need a translation to handle that,
5512 	 * but as we don't currently need anything from sys_cap_info from
5513 	 * the WMI interface (only from WMI-TLV), it is safest to skip it.
5514 	 */
5515 
5516 	n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
5517 		  ARRAY_SIZE(arg->mem_reqs));
5518 	for (i = 0; i < n; i++)
5519 		arg->mem_reqs[i] = &ev->mem_reqs[i];
5520 
5521 	if (skb->len <
5522 	    __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
5523 		return -EPROTO;
5524 
5525 	return 0;
5526 }
5527 
5528 static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
5529 {
5530 	struct ath10k *ar = container_of(work, struct ath10k, svc_rdy_work);
5531 	struct sk_buff *skb = ar->svc_rdy_skb;
5532 	struct wmi_svc_rdy_ev_arg arg = {};
5533 	u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
5534 	int ret;
5535 	bool allocated;
5536 
5537 	if (!skb) {
5538 		ath10k_warn(ar, "invalid service ready event skb\n");
5539 		return;
5540 	}
5541 
5542 	ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg);
5543 	if (ret) {
5544 		ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
5545 		return;
5546 	}
5547 
5548 	ath10k_wmi_map_svc(ar, arg.service_map, ar->wmi.svc_map,
5549 			   arg.service_map_len);
5550 
5551 	ar->hw_min_tx_power = __le32_to_cpu(arg.min_tx_power);
5552 	ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power);
5553 	ar->ht_cap_info = __le32_to_cpu(arg.ht_cap);
5554 	ar->vht_cap_info = __le32_to_cpu(arg.vht_cap);
5555 	ar->vht_supp_mcs = __le32_to_cpu(arg.vht_supp_mcs);
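	/* sw_ver0 packs major (bits 31:24) and minor (bits 23:0); sw_ver1
	 * packs release (bits 31:16) and build (bits 15:0).
	 */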
5556 	ar->fw_version_major =
5557 		(__le32_to_cpu(arg.sw_ver0) & 0xff000000) >> 24;
5558 	ar->fw_version_minor = (__le32_to_cpu(arg.sw_ver0) & 0x00ffffff);
5559 	ar->fw_version_release =
5560 		(__le32_to_cpu(arg.sw_ver1) & 0xffff0000) >> 16;
5561 	ar->fw_version_build = (__le32_to_cpu(arg.sw_ver1) & 0x0000ffff);
5562 	ar->phy_capability = __le32_to_cpu(arg.phy_capab);
5563 	ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains);
5564 	ar->hw_eeprom_rd = __le32_to_cpu(arg.eeprom_rd);
5565 	ar->low_2ghz_chan = __le32_to_cpu(arg.low_2ghz_chan);
5566 	ar->high_2ghz_chan = __le32_to_cpu(arg.high_2ghz_chan);
5567 	ar->low_5ghz_chan = __le32_to_cpu(arg.low_5ghz_chan);
5568 	ar->high_5ghz_chan = __le32_to_cpu(arg.high_5ghz_chan);
5569 	ar->sys_cap_info = __le32_to_cpu(arg.sys_cap_info);
5570 
5571 	ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
5572 			arg.service_map, arg.service_map_len);
5573 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sys_cap_info 0x%x\n",
5574 		   ar->sys_cap_info);
5575 
5576 	if (ar->num_rf_chains > ar->max_spatial_stream) {
5577 		ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
5578 			    ar->num_rf_chains, ar->max_spatial_stream);
5579 		ar->num_rf_chains = ar->max_spatial_stream;
5580 	}
5581 
5582 	if (!ar->cfg_tx_chainmask) {
5583 		ar->cfg_tx_chainmask = (1 << ar->num_rf_chains) - 1;
5584 		ar->cfg_rx_chainmask = (1 << ar->num_rf_chains) - 1;
5585 	}
5586 
5587 	if (strlen(ar->hw->wiphy->fw_version) == 0) {
5588 		snprintf(ar->hw->wiphy->fw_version,
5589 			 sizeof(ar->hw->wiphy->fw_version),
5590 			 "%u.%u.%u.%u",
5591 			 ar->fw_version_major,
5592 			 ar->fw_version_minor,
5593 			 ar->fw_version_release,
5594 			 ar->fw_version_build);
5595 	}
5596 
5597 	num_mem_reqs = __le32_to_cpu(arg.num_mem_reqs);
5598 	if (num_mem_reqs > WMI_MAX_MEM_REQS) {
5599 		ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n",
5600 			    num_mem_reqs);
5601 		return;
5602 	}
5603 
5604 	if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) {
5605 		if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
5606 			     ar->running_fw->fw_file.fw_features))
5607 			ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC +
5608 					       ar->max_num_vdevs;
5609 		else
5610 			ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS +
5611 					       ar->max_num_vdevs;
5612 
5613 		ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX +
5614 				    ar->max_num_vdevs;
5615 		ar->num_tids = ar->num_active_peers * 2;
5616 		ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX;
5617 	}
5618 
5619 	/* TODO: Adjust max peer count for cases like WMI_SERVICE_RATECTRL_CACHE
5620 	 * and WMI_SERVICE_IRAM_TIDS, etc.
5621 	 */
5622 
5623 	allocated = ath10k_wmi_is_host_mem_allocated(ar, arg.mem_reqs,
5624 						     num_mem_reqs);
5625 	if (allocated)
5626 		goto skip_mem_alloc;
5627 
5628 	/* Either this event is received during boot time or there is a change
5629 	 * in memory requirement from firmware when compared to last request.
5630 	 * Free any old memory and do a fresh allocation based on the current
5631 	 * memory requirement.
5632 	 */
5633 	ath10k_wmi_free_host_mem(ar);
5634 
5635 	for (i = 0; i < num_mem_reqs; ++i) {
5636 		req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id);
5637 		num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units);
5638 		unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size);
5639 		num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info);
5640 
5641 		if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) {
5642 			if (ar->num_active_peers)
5643 				num_units = ar->num_active_peers + 1;
5644 			else
5645 				num_units = ar->max_num_peers + 1;
5646 		} else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
5647 			/* number of units to allocate is the number of
5648 			 * peers, plus 1 extra for the self peer on the
5649 			 * target. Host and target need to agree on this
5650 			 * value, otherwise they can get out of sync.
5651 			 */
5652 			num_units = ar->max_num_peers + 1;
5653 		} else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
5654 			num_units = ar->max_num_vdevs + 1;
5655 		}
5656 
5657 		ath10k_dbg(ar, ATH10K_DBG_WMI,
5658 			   "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
5659 			   req_id,
5660 			   __le32_to_cpu(arg.mem_reqs[i]->num_units),
5661 			   num_unit_info,
5662 			   unit_size,
5663 			   num_units);
5664 
5665 		ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units,
5666 						unit_size);
5667 		if (ret)
5668 			return;
5669 	}
5670 
5671 skip_mem_alloc:
5672 	ath10k_dbg(ar, ATH10K_DBG_WMI,
5673 		   "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_mcs 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x fw_build 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x low_2ghz_chan %d high_2ghz_chan %d low_5ghz_chan %d high_5ghz_chan %d num_mem_reqs 0x%08x\n",
5674 		   __le32_to_cpu(arg.min_tx_power),
5675 		   __le32_to_cpu(arg.max_tx_power),
5676 		   __le32_to_cpu(arg.ht_cap),
5677 		   __le32_to_cpu(arg.vht_cap),
5678 		   __le32_to_cpu(arg.vht_supp_mcs),
5679 		   __le32_to_cpu(arg.sw_ver0),
5680 		   __le32_to_cpu(arg.sw_ver1),
5681 		   __le32_to_cpu(arg.fw_build),
5682 		   __le32_to_cpu(arg.phy_capab),
5683 		   __le32_to_cpu(arg.num_rf_chains),
5684 		   __le32_to_cpu(arg.eeprom_rd),
5685 		   __le32_to_cpu(arg.low_2ghz_chan),
5686 		   __le32_to_cpu(arg.high_2ghz_chan),
5687 		   __le32_to_cpu(arg.low_5ghz_chan),
5688 		   __le32_to_cpu(arg.high_5ghz_chan),
5689 		   __le32_to_cpu(arg.num_mem_reqs));
5690 
5691 	dev_kfree_skb(skb);
5692 	ar->svc_rdy_skb = NULL;
5693 	complete(&ar->wmi.service_ready);
5694 }
5695 
5696 void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
5697 {
5698 	ar->svc_rdy_skb = skb;
5699 	queue_work(ar->workqueue_aux, &ar->svc_rdy_work);
5700 }
5701 
5702 static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
5703 				     struct wmi_rdy_ev_arg *arg)
5704 {
5705 	struct wmi_ready_event *ev = (void *)skb->data;
5706 
5707 	if (skb->len < sizeof(*ev))
5708 		return -EPROTO;
5709 
5710 	skb_pull(skb, sizeof(*ev));
5711 	arg->sw_version = ev->sw_version;
5712 	arg->abi_version = ev->abi_version;
5713 	arg->status = ev->status;
5714 	arg->mac_addr = ev->mac_addr.addr;
5715 
5716 	return 0;
5717 }
5718 
5719 static int ath10k_wmi_op_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
5720 				      struct wmi_roam_ev_arg *arg)
5721 {
5722 	struct wmi_roam_ev *ev = (void *)skb->data;
5723 
5724 	if (skb->len < sizeof(*ev))
5725 		return -EPROTO;
5726 
5727 	skb_pull(skb, sizeof(*ev));
5728 	arg->vdev_id = ev->vdev_id;
5729 	arg->reason = ev->reason;
5730 
5731 	return 0;
5732 }
5733 
5734 static int ath10k_wmi_op_pull_echo_ev(struct ath10k *ar,
5735 				      struct sk_buff *skb,
5736 				      struct wmi_echo_ev_arg *arg)
5737 {
5738 	struct wmi_echo_event *ev = (void *)skb->data;
5739 
5740 	arg->value = ev->value;
5741 
5742 	return 0;
5743 }
5744 
5745 int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
5746 {
5747 	struct wmi_rdy_ev_arg arg = {};
5748 	int ret;
5749 
5750 	ret = ath10k_wmi_pull_rdy(ar, skb, &arg);
5751 	if (ret) {
5752 		ath10k_warn(ar, "failed to parse ready event: %d\n", ret);
5753 		return ret;
5754 	}
5755 
5756 	ath10k_dbg(ar, ATH10K_DBG_WMI,
5757 		   "wmi event ready sw_version 0x%08x abi_version %u mac_addr %pM status %d\n",
5758 		   __le32_to_cpu(arg.sw_version),
5759 		   __le32_to_cpu(arg.abi_version),
5760 		   arg.mac_addr,
5761 		   __le32_to_cpu(arg.status));
5762 
5763 	if (is_zero_ether_addr(ar->mac_addr))
5764 		ether_addr_copy(ar->mac_addr, arg.mac_addr);
5765 	complete(&ar->wmi.unified_ready);
5766 	return 0;
5767 }
5768 
5769 void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb)
5770 {
5771 	int ret;
5772 	struct wmi_svc_avail_ev_arg arg = {};
5773 
5774 	ret = ath10k_wmi_pull_svc_avail(ar, skb, &arg);
5775 	if (ret) {
5776 		ath10k_warn(ar, "failed to parse service available event: %d\n",
5777 			    ret);
5778 	}
5779 
5780 	/* arg.service_map_ext_valid must start out as zero (guaranteed by the
5781 	 * empty initializer above) so the check below does the right thing
5782 	 * when parsing fails.
5783 	 */
5784 	if (arg.service_map_ext_valid)
5785 		ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map,
5786 				       __le32_to_cpu(arg.service_map_ext_len));
5787 }
5788 
5789 static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb)
5790 {
5791 	const struct wmi_pdev_temperature_event *ev;
5792 
5793 	ev = (struct wmi_pdev_temperature_event *)skb->data;
5794 	if (WARN_ON(skb->len < sizeof(*ev)))
5795 		return -EPROTO;
5796 
5797 	ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature));
5798 	return 0;
5799 }
5800 
5801 static int ath10k_wmi_event_pdev_bss_chan_info(struct ath10k *ar,
5802 					       struct sk_buff *skb)
5803 {
5804 	struct wmi_pdev_bss_chan_info_event *ev;
5805 	struct survey_info *survey;
5806 	u64 busy, total, tx, rx, rx_bss;
5807 	u32 freq, noise_floor;
5808 	u32 cc_freq_hz = ar->hw_params.channel_counters_freq_hz;
5809 	int idx;
5810 
5811 	ev = (struct wmi_pdev_bss_chan_info_event *)skb->data;
5812 	if (WARN_ON(skb->len < sizeof(*ev)))
5813 		return -EPROTO;
5814 
5815 	freq        = __le32_to_cpu(ev->freq);
5816 	noise_floor = __le32_to_cpu(ev->noise_floor);
5817 	busy        = __le64_to_cpu(ev->cycle_busy);
5818 	total       = __le64_to_cpu(ev->cycle_total);
5819 	tx          = __le64_to_cpu(ev->cycle_tx);
5820 	rx          = __le64_to_cpu(ev->cycle_rx);
5821 	rx_bss      = __le64_to_cpu(ev->cycle_rx_bss);
5822 
5823 	ath10k_dbg(ar, ATH10K_DBG_WMI,
5824 		   "wmi event pdev bss chan info:\n freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
5825 		   freq, noise_floor, busy, total, tx, rx, rx_bss);
5826 
5827 	spin_lock_bh(&ar->data_lock);
5828 	idx = freq_to_idx(ar, freq);
5829 	if (idx >= ARRAY_SIZE(ar->survey)) {
5830 		ath10k_warn(ar, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
5831 			    freq, idx);
5832 		goto exit;
5833 	}
5834 
5835 	survey = &ar->survey[idx];
5836 
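	/* Convert the raw cycle counters into the survey time fields using
	 * the hardware channel counter frequency.
	 */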
5837 	survey->noise     = noise_floor;
5838 	survey->time      = div_u64(total, cc_freq_hz);
5839 	survey->time_busy = div_u64(busy, cc_freq_hz);
5840 	survey->time_rx   = div_u64(rx_bss, cc_freq_hz);
5841 	survey->time_tx   = div_u64(tx, cc_freq_hz);
5842 	survey->filled   |= (SURVEY_INFO_NOISE_DBM |
5843 			     SURVEY_INFO_TIME |
5844 			     SURVEY_INFO_TIME_BUSY |
5845 			     SURVEY_INFO_TIME_RX |
5846 			     SURVEY_INFO_TIME_TX);
5847 exit:
5848 	spin_unlock_bh(&ar->data_lock);
5849 	complete(&ar->bss_survey_done);
5850 	return 0;
5851 }
5852 
5853 static inline void ath10k_wmi_queue_set_coverage_class_work(struct ath10k *ar)
5854 {
5855 	if (ar->hw_params.hw_ops->set_coverage_class) {
5856 		spin_lock_bh(&ar->data_lock);
5857 
5858 		/* This call only ensures that the modified coverage class
5859 		 * persists in case the firmware sets the registers back to
5860 		 * their default value. So calling it is only necessary if the
5861 		 * coverage class has a non-zero value.
5862 		 */
5863 		if (ar->fw_coverage.coverage_class)
5864 			queue_work(ar->workqueue, &ar->set_coverage_class_work);
5865 
5866 		spin_unlock_bh(&ar->data_lock);
5867 	}
5868 }
5869 
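/* WMI event dispatcher for the main firmware interface. Handlers borrow the
 * skb and it is freed at "out"; MGMT_RX and SERVICE_READY take ownership of
 * the skb and return early instead.
 */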
5870 static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
5871 {
5872 	struct wmi_cmd_hdr *cmd_hdr;
5873 	enum wmi_event_id id;
5874 
5875 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
5876 	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
5877 
5878 	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
5879 		goto out;
5880 
5881 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
5882 
5883 	switch (id) {
5884 	case WMI_MGMT_RX_EVENTID:
5885 		ath10k_wmi_event_mgmt_rx(ar, skb);
5886 		/* mgmt_rx() owns the skb now! */
5887 		return;
5888 	case WMI_SCAN_EVENTID:
5889 		ath10k_wmi_event_scan(ar, skb);
5890 		ath10k_wmi_queue_set_coverage_class_work(ar);
5891 		break;
5892 	case WMI_CHAN_INFO_EVENTID:
5893 		ath10k_wmi_event_chan_info(ar, skb);
5894 		break;
5895 	case WMI_ECHO_EVENTID:
5896 		ath10k_wmi_event_echo(ar, skb);
5897 		break;
5898 	case WMI_DEBUG_MESG_EVENTID:
5899 		ath10k_wmi_event_debug_mesg(ar, skb);
5900 		ath10k_wmi_queue_set_coverage_class_work(ar);
5901 		break;
5902 	case WMI_UPDATE_STATS_EVENTID:
5903 		ath10k_wmi_event_update_stats(ar, skb);
5904 		break;
5905 	case WMI_VDEV_START_RESP_EVENTID:
5906 		ath10k_wmi_event_vdev_start_resp(ar, skb);
5907 		ath10k_wmi_queue_set_coverage_class_work(ar);
5908 		break;
5909 	case WMI_VDEV_STOPPED_EVENTID:
5910 		ath10k_wmi_event_vdev_stopped(ar, skb);
5911 		ath10k_wmi_queue_set_coverage_class_work(ar);
5912 		break;
5913 	case WMI_PEER_STA_KICKOUT_EVENTID:
5914 		ath10k_wmi_event_peer_sta_kickout(ar, skb);
5915 		break;
5916 	case WMI_HOST_SWBA_EVENTID:
5917 		ath10k_wmi_event_host_swba(ar, skb);
5918 		break;
5919 	case WMI_TBTTOFFSET_UPDATE_EVENTID:
5920 		ath10k_wmi_event_tbttoffset_update(ar, skb);
5921 		break;
5922 	case WMI_PHYERR_EVENTID:
5923 		ath10k_wmi_event_phyerr(ar, skb);
5924 		break;
5925 	case WMI_ROAM_EVENTID:
5926 		ath10k_wmi_event_roam(ar, skb);
5927 		ath10k_wmi_queue_set_coverage_class_work(ar);
5928 		break;
5929 	case WMI_PROFILE_MATCH:
5930 		ath10k_wmi_event_profile_match(ar, skb);
5931 		break;
5932 	case WMI_DEBUG_PRINT_EVENTID:
5933 		ath10k_wmi_event_debug_print(ar, skb);
5934 		ath10k_wmi_queue_set_coverage_class_work(ar);
5935 		break;
5936 	case WMI_PDEV_QVIT_EVENTID:
5937 		ath10k_wmi_event_pdev_qvit(ar, skb);
5938 		break;
5939 	case WMI_WLAN_PROFILE_DATA_EVENTID:
5940 		ath10k_wmi_event_wlan_profile_data(ar, skb);
5941 		break;
5942 	case WMI_RTT_MEASUREMENT_REPORT_EVENTID:
5943 		ath10k_wmi_event_rtt_measurement_report(ar, skb);
5944 		break;
5945 	case WMI_TSF_MEASUREMENT_REPORT_EVENTID:
5946 		ath10k_wmi_event_tsf_measurement_report(ar, skb);
5947 		break;
5948 	case WMI_RTT_ERROR_REPORT_EVENTID:
5949 		ath10k_wmi_event_rtt_error_report(ar, skb);
5950 		break;
5951 	case WMI_WOW_WAKEUP_HOST_EVENTID:
5952 		ath10k_wmi_event_wow_wakeup_host(ar, skb);
5953 		break;
5954 	case WMI_DCS_INTERFERENCE_EVENTID:
5955 		ath10k_wmi_event_dcs_interference(ar, skb);
5956 		break;
5957 	case WMI_PDEV_TPC_CONFIG_EVENTID:
5958 		ath10k_wmi_event_pdev_tpc_config(ar, skb);
5959 		break;
5960 	case WMI_PDEV_FTM_INTG_EVENTID:
5961 		ath10k_wmi_event_pdev_ftm_intg(ar, skb);
5962 		break;
5963 	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
5964 		ath10k_wmi_event_gtk_offload_status(ar, skb);
5965 		break;
5966 	case WMI_GTK_REKEY_FAIL_EVENTID:
5967 		ath10k_wmi_event_gtk_rekey_fail(ar, skb);
5968 		break;
5969 	case WMI_TX_DELBA_COMPLETE_EVENTID:
5970 		ath10k_wmi_event_delba_complete(ar, skb);
5971 		break;
5972 	case WMI_TX_ADDBA_COMPLETE_EVENTID:
5973 		ath10k_wmi_event_addba_complete(ar, skb);
5974 		break;
5975 	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
5976 		ath10k_wmi_event_vdev_install_key_complete(ar, skb);
5977 		break;
5978 	case WMI_SERVICE_READY_EVENTID:
5979 		ath10k_wmi_event_service_ready(ar, skb);
5980 		return;
5981 	case WMI_READY_EVENTID:
5982 		ath10k_wmi_event_ready(ar, skb);
5983 		ath10k_wmi_queue_set_coverage_class_work(ar);
5984 		break;
5985 	case WMI_SERVICE_AVAILABLE_EVENTID:
5986 		ath10k_wmi_event_service_available(ar, skb);
5987 		break;
5988 	default:
5989 		ath10k_warn(ar, "Unknown eventid: %d\n", id);
5990 		break;
5991 	}
5992 
5993 out:
5994 	dev_kfree_skb(skb);
5995 }
5996 
5997 static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb)
5998 {
5999 	struct wmi_cmd_hdr *cmd_hdr;
6000 	enum wmi_10x_event_id id;
6001 	bool consumed;
6002 
6003 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
6004 	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
6005 
6006 	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
6007 		goto out;
6008 
6009 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
6010 
6011 	consumed = ath10k_tm_event_wmi(ar, id, skb);
6012 
6013 	/* The ready event must be handled normally even in UTF mode so that
6014 	 * we know the UTF firmware has booted; all other WMI events are
6015 	 * simply passed through to testmode.
6016 	 */
6017 	if (consumed && id != WMI_10X_READY_EVENTID) {
6018 		ath10k_dbg(ar, ATH10K_DBG_WMI,
6019 			   "wmi testmode consumed 0x%x\n", id);
6020 		goto out;
6021 	}
6022 
6023 	switch (id) {
6024 	case WMI_10X_MGMT_RX_EVENTID:
6025 		ath10k_wmi_event_mgmt_rx(ar, skb);
6026 		/* mgmt_rx() owns the skb now! */
6027 		return;
6028 	case WMI_10X_SCAN_EVENTID:
6029 		ath10k_wmi_event_scan(ar, skb);
6030 		ath10k_wmi_queue_set_coverage_class_work(ar);
6031 		break;
6032 	case WMI_10X_CHAN_INFO_EVENTID:
6033 		ath10k_wmi_event_chan_info(ar, skb);
6034 		break;
6035 	case WMI_10X_ECHO_EVENTID:
6036 		ath10k_wmi_event_echo(ar, skb);
6037 		break;
6038 	case WMI_10X_DEBUG_MESG_EVENTID:
6039 		ath10k_wmi_event_debug_mesg(ar, skb);
6040 		ath10k_wmi_queue_set_coverage_class_work(ar);
6041 		break;
6042 	case WMI_10X_UPDATE_STATS_EVENTID:
6043 		ath10k_wmi_event_update_stats(ar, skb);
6044 		break;
6045 	case WMI_10X_VDEV_START_RESP_EVENTID:
6046 		ath10k_wmi_event_vdev_start_resp(ar, skb);
6047 		ath10k_wmi_queue_set_coverage_class_work(ar);
6048 		break;
6049 	case WMI_10X_VDEV_STOPPED_EVENTID:
6050 		ath10k_wmi_event_vdev_stopped(ar, skb);
6051 		ath10k_wmi_queue_set_coverage_class_work(ar);
6052 		break;
6053 	case WMI_10X_PEER_STA_KICKOUT_EVENTID:
6054 		ath10k_wmi_event_peer_sta_kickout(ar, skb);
6055 		break;
6056 	case WMI_10X_HOST_SWBA_EVENTID:
6057 		ath10k_wmi_event_host_swba(ar, skb);
6058 		break;
6059 	case WMI_10X_TBTTOFFSET_UPDATE_EVENTID:
6060 		ath10k_wmi_event_tbttoffset_update(ar, skb);
6061 		break;
6062 	case WMI_10X_PHYERR_EVENTID:
6063 		ath10k_wmi_event_phyerr(ar, skb);
6064 		break;
6065 	case WMI_10X_ROAM_EVENTID:
6066 		ath10k_wmi_event_roam(ar, skb);
6067 		ath10k_wmi_queue_set_coverage_class_work(ar);
6068 		break;
6069 	case WMI_10X_PROFILE_MATCH:
6070 		ath10k_wmi_event_profile_match(ar, skb);
6071 		break;
6072 	case WMI_10X_DEBUG_PRINT_EVENTID:
6073 		ath10k_wmi_event_debug_print(ar, skb);
6074 		ath10k_wmi_queue_set_coverage_class_work(ar);
6075 		break;
6076 	case WMI_10X_PDEV_QVIT_EVENTID:
6077 		ath10k_wmi_event_pdev_qvit(ar, skb);
6078 		break;
6079 	case WMI_10X_WLAN_PROFILE_DATA_EVENTID:
6080 		ath10k_wmi_event_wlan_profile_data(ar, skb);
6081 		break;
6082 	case WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID:
6083 		ath10k_wmi_event_rtt_measurement_report(ar, skb);
6084 		break;
6085 	case WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID:
6086 		ath10k_wmi_event_tsf_measurement_report(ar, skb);
6087 		break;
6088 	case WMI_10X_RTT_ERROR_REPORT_EVENTID:
6089 		ath10k_wmi_event_rtt_error_report(ar, skb);
6090 		break;
6091 	case WMI_10X_WOW_WAKEUP_HOST_EVENTID:
6092 		ath10k_wmi_event_wow_wakeup_host(ar, skb);
6093 		break;
6094 	case WMI_10X_DCS_INTERFERENCE_EVENTID:
6095 		ath10k_wmi_event_dcs_interference(ar, skb);
6096 		break;
6097 	case WMI_10X_PDEV_TPC_CONFIG_EVENTID:
6098 		ath10k_wmi_event_pdev_tpc_config(ar, skb);
6099 		break;
6100 	case WMI_10X_INST_RSSI_STATS_EVENTID:
6101 		ath10k_wmi_event_inst_rssi_stats(ar, skb);
6102 		break;
6103 	case WMI_10X_VDEV_STANDBY_REQ_EVENTID:
6104 		ath10k_wmi_event_vdev_standby_req(ar, skb);
6105 		break;
6106 	case WMI_10X_VDEV_RESUME_REQ_EVENTID:
6107 		ath10k_wmi_event_vdev_resume_req(ar, skb);
6108 		break;
6109 	case WMI_10X_SERVICE_READY_EVENTID:
6110 		ath10k_wmi_event_service_ready(ar, skb);
6111 		return;
6112 	case WMI_10X_READY_EVENTID:
6113 		ath10k_wmi_event_ready(ar, skb);
6114 		ath10k_wmi_queue_set_coverage_class_work(ar);
6115 		break;
6116 	case WMI_10X_PDEV_UTF_EVENTID:
6117 		/* ignore utf events */
6118 		break;
6119 	default:
6120 		ath10k_warn(ar, "Unknown eventid: %d\n", id);
6121 		break;
6122 	}
6123 
6124 out:
6125 	dev_kfree_skb(skb);
6126 }
6127 
6128 static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
6129 {
6130 	struct wmi_cmd_hdr *cmd_hdr;
6131 	enum wmi_10_2_event_id id;
6132 	bool consumed;
6133 
6134 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
6135 	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
6136 
6137 	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
6138 		goto out;
6139 
6140 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
6141 
6142 	consumed = ath10k_tm_event_wmi(ar, id, skb);
6143 
6144 	/* The ready event must be handled normally even in UTF mode so that
6145 	 * we know the UTF firmware has booted; all other WMI events are
6146 	 * simply passed through to testmode.
6147 	 */
6148 	if (consumed && id != WMI_10_2_READY_EVENTID) {
6149 		ath10k_dbg(ar, ATH10K_DBG_WMI,
6150 			   "wmi testmode consumed 0x%x\n", id);
6151 		goto out;
6152 	}
6153 
6154 	switch (id) {
6155 	case WMI_10_2_MGMT_RX_EVENTID:
6156 		ath10k_wmi_event_mgmt_rx(ar, skb);
6157 		/* mgmt_rx() owns the skb now! */
6158 		return;
6159 	case WMI_10_2_SCAN_EVENTID:
6160 		ath10k_wmi_event_scan(ar, skb);
6161 		ath10k_wmi_queue_set_coverage_class_work(ar);
6162 		break;
6163 	case WMI_10_2_CHAN_INFO_EVENTID:
6164 		ath10k_wmi_event_chan_info(ar, skb);
6165 		break;
6166 	case WMI_10_2_ECHO_EVENTID:
6167 		ath10k_wmi_event_echo(ar, skb);
6168 		break;
6169 	case WMI_10_2_DEBUG_MESG_EVENTID:
6170 		ath10k_wmi_event_debug_mesg(ar, skb);
6171 		ath10k_wmi_queue_set_coverage_class_work(ar);
6172 		break;
6173 	case WMI_10_2_UPDATE_STATS_EVENTID:
6174 		ath10k_wmi_event_update_stats(ar, skb);
6175 		break;
6176 	case WMI_10_2_VDEV_START_RESP_EVENTID:
6177 		ath10k_wmi_event_vdev_start_resp(ar, skb);
6178 		ath10k_wmi_queue_set_coverage_class_work(ar);
6179 		break;
6180 	case WMI_10_2_VDEV_STOPPED_EVENTID:
6181 		ath10k_wmi_event_vdev_stopped(ar, skb);
6182 		ath10k_wmi_queue_set_coverage_class_work(ar);
6183 		break;
6184 	case WMI_10_2_PEER_STA_KICKOUT_EVENTID:
6185 		ath10k_wmi_event_peer_sta_kickout(ar, skb);
6186 		break;
6187 	case WMI_10_2_HOST_SWBA_EVENTID:
6188 		ath10k_wmi_event_host_swba(ar, skb);
6189 		break;
6190 	case WMI_10_2_TBTTOFFSET_UPDATE_EVENTID:
6191 		ath10k_wmi_event_tbttoffset_update(ar, skb);
6192 		break;
6193 	case WMI_10_2_PHYERR_EVENTID:
6194 		ath10k_wmi_event_phyerr(ar, skb);
6195 		break;
6196 	case WMI_10_2_ROAM_EVENTID:
6197 		ath10k_wmi_event_roam(ar, skb);
6198 		ath10k_wmi_queue_set_coverage_class_work(ar);
6199 		break;
6200 	case WMI_10_2_PROFILE_MATCH:
6201 		ath10k_wmi_event_profile_match(ar, skb);
6202 		break;
6203 	case WMI_10_2_DEBUG_PRINT_EVENTID:
6204 		ath10k_wmi_event_debug_print(ar, skb);
6205 		ath10k_wmi_queue_set_coverage_class_work(ar);
6206 		break;
6207 	case WMI_10_2_PDEV_QVIT_EVENTID:
6208 		ath10k_wmi_event_pdev_qvit(ar, skb);
6209 		break;
6210 	case WMI_10_2_WLAN_PROFILE_DATA_EVENTID:
6211 		ath10k_wmi_event_wlan_profile_data(ar, skb);
6212 		break;
6213 	case WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID:
6214 		ath10k_wmi_event_rtt_measurement_report(ar, skb);
6215 		break;
6216 	case WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID:
6217 		ath10k_wmi_event_tsf_measurement_report(ar, skb);
6218 		break;
6219 	case WMI_10_2_RTT_ERROR_REPORT_EVENTID:
6220 		ath10k_wmi_event_rtt_error_report(ar, skb);
6221 		break;
6222 	case WMI_10_2_WOW_WAKEUP_HOST_EVENTID:
6223 		ath10k_wmi_event_wow_wakeup_host(ar, skb);
6224 		break;
6225 	case WMI_10_2_DCS_INTERFERENCE_EVENTID:
6226 		ath10k_wmi_event_dcs_interference(ar, skb);
6227 		break;
6228 	case WMI_10_2_PDEV_TPC_CONFIG_EVENTID:
6229 		ath10k_wmi_event_pdev_tpc_config(ar, skb);
6230 		break;
6231 	case WMI_10_2_INST_RSSI_STATS_EVENTID:
6232 		ath10k_wmi_event_inst_rssi_stats(ar, skb);
6233 		break;
6234 	case WMI_10_2_VDEV_STANDBY_REQ_EVENTID:
6235 		ath10k_wmi_event_vdev_standby_req(ar, skb);
6236 		ath10k_wmi_queue_set_coverage_class_work(ar);
6237 		break;
6238 	case WMI_10_2_VDEV_RESUME_REQ_EVENTID:
6239 		ath10k_wmi_event_vdev_resume_req(ar, skb);
6240 		ath10k_wmi_queue_set_coverage_class_work(ar);
6241 		break;
6242 	case WMI_10_2_SERVICE_READY_EVENTID:
6243 		ath10k_wmi_event_service_ready(ar, skb);
6244 		return;
6245 	case WMI_10_2_READY_EVENTID:
6246 		ath10k_wmi_event_ready(ar, skb);
6247 		ath10k_wmi_queue_set_coverage_class_work(ar);
6248 		break;
6249 	case WMI_10_2_PDEV_TEMPERATURE_EVENTID:
6250 		ath10k_wmi_event_temperature(ar, skb);
6251 		break;
6252 	case WMI_10_2_PDEV_BSS_CHAN_INFO_EVENTID:
6253 		ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
6254 		break;
6255 	case WMI_10_2_RTT_KEEPALIVE_EVENTID:
6256 	case WMI_10_2_GPIO_INPUT_EVENTID:
6257 	case WMI_10_2_PEER_RATECODE_LIST_EVENTID:
6258 	case WMI_10_2_GENERIC_BUFFER_EVENTID:
6259 	case WMI_10_2_MCAST_BUF_RELEASE_EVENTID:
6260 	case WMI_10_2_MCAST_LIST_AGEOUT_EVENTID:
6261 	case WMI_10_2_WDS_PEER_EVENTID:
6262 		ath10k_dbg(ar, ATH10K_DBG_WMI,
6263 			   "received event id %d not implemented\n", id);
6264 		break;
6265 	case WMI_10_2_PEER_STA_PS_STATECHG_EVENTID:
6266 		ath10k_wmi_event_peer_sta_ps_state_chg(ar, skb);
6267 		break;
6268 	default:
6269 		ath10k_warn(ar, "Unknown eventid: %d\n", id);
6270 		break;
6271 	}
6272 
6273 out:
6274 	dev_kfree_skb(skb);
6275 }
6276 
6277 static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
6278 {
6279 	struct wmi_cmd_hdr *cmd_hdr;
6280 	enum wmi_10_4_event_id id;
6281 	bool consumed;
6282 
6283 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
6284 	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
6285 
6286 	if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
6287 		goto out;
6288 
6289 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
6290 
6291 	consumed = ath10k_tm_event_wmi(ar, id, skb);
6292 
6293 	/* The ready event must be handled normally even in UTF mode so that
6294 	 * we know the UTF firmware has booted; all other WMI events are
6295 	 * simply passed through to testmode.
6296 	 */
6297 	if (consumed && id != WMI_10_4_READY_EVENTID) {
6298 		ath10k_dbg(ar, ATH10K_DBG_WMI,
6299 			   "wmi testmode consumed 0x%x\n", id);
6300 		goto out;
6301 	}
6302 
6303 	switch (id) {
6304 	case WMI_10_4_MGMT_RX_EVENTID:
6305 		ath10k_wmi_event_mgmt_rx(ar, skb);
6306 		/* mgmt_rx() owns the skb now! */
6307 		return;
6308 	case WMI_10_4_ECHO_EVENTID:
6309 		ath10k_wmi_event_echo(ar, skb);
6310 		break;
6311 	case WMI_10_4_DEBUG_MESG_EVENTID:
6312 		ath10k_wmi_event_debug_mesg(ar, skb);
6313 		ath10k_wmi_queue_set_coverage_class_work(ar);
6314 		break;
6315 	case WMI_10_4_SERVICE_READY_EVENTID:
6316 		ath10k_wmi_event_service_ready(ar, skb);
6317 		return;
6318 	case WMI_10_4_SCAN_EVENTID:
6319 		ath10k_wmi_event_scan(ar, skb);
6320 		ath10k_wmi_queue_set_coverage_class_work(ar);
6321 		break;
6322 	case WMI_10_4_CHAN_INFO_EVENTID:
6323 		ath10k_wmi_event_chan_info(ar, skb);
6324 		break;
6325 	case WMI_10_4_PHYERR_EVENTID:
6326 		ath10k_wmi_event_phyerr(ar, skb);
6327 		break;
6328 	case WMI_10_4_READY_EVENTID:
6329 		ath10k_wmi_event_ready(ar, skb);
6330 		ath10k_wmi_queue_set_coverage_class_work(ar);
6331 		break;
6332 	case WMI_10_4_PEER_STA_KICKOUT_EVENTID:
6333 		ath10k_wmi_event_peer_sta_kickout(ar, skb);
6334 		break;
6335 	case WMI_10_4_ROAM_EVENTID:
6336 		ath10k_wmi_event_roam(ar, skb);
6337 		ath10k_wmi_queue_set_coverage_class_work(ar);
6338 		break;
6339 	case WMI_10_4_HOST_SWBA_EVENTID:
6340 		ath10k_wmi_event_host_swba(ar, skb);
6341 		break;
6342 	case WMI_10_4_TBTTOFFSET_UPDATE_EVENTID:
6343 		ath10k_wmi_event_tbttoffset_update(ar, skb);
6344 		break;
6345 	case WMI_10_4_DEBUG_PRINT_EVENTID:
6346 		ath10k_wmi_event_debug_print(ar, skb);
6347 		ath10k_wmi_queue_set_coverage_class_work(ar);
6348 		break;
6349 	case WMI_10_4_VDEV_START_RESP_EVENTID:
6350 		ath10k_wmi_event_vdev_start_resp(ar, skb);
6351 		ath10k_wmi_queue_set_coverage_class_work(ar);
6352 		break;
6353 	case WMI_10_4_VDEV_STOPPED_EVENTID:
6354 		ath10k_wmi_event_vdev_stopped(ar, skb);
6355 		ath10k_wmi_queue_set_coverage_class_work(ar);
6356 		break;
6357 	case WMI_10_4_WOW_WAKEUP_HOST_EVENTID:
6358 	case WMI_10_4_PEER_RATECODE_LIST_EVENTID:
6359 	case WMI_10_4_WDS_PEER_EVENTID:
6360 	case WMI_10_4_DEBUG_FATAL_CONDITION_EVENTID:
6361 		ath10k_dbg(ar, ATH10K_DBG_WMI,
6362 			   "received event id %d not implemented\n", id);
6363 		break;
6364 	case WMI_10_4_UPDATE_STATS_EVENTID:
6365 		ath10k_wmi_event_update_stats(ar, skb);
6366 		break;
6367 	case WMI_10_4_PDEV_TEMPERATURE_EVENTID:
6368 		ath10k_wmi_event_temperature(ar, skb);
6369 		break;
6370 	case WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID:
6371 		ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
6372 		break;
6373 	case WMI_10_4_PDEV_TPC_CONFIG_EVENTID:
6374 		ath10k_wmi_event_pdev_tpc_config(ar, skb);
6375 		break;
6376 	case WMI_10_4_TDLS_PEER_EVENTID:
6377 		ath10k_wmi_handle_tdls_peer_event(ar, skb);
6378 		break;
6379 	case WMI_10_4_PDEV_TPC_TABLE_EVENTID:
6380 		ath10k_wmi_event_tpc_final_table(ar, skb);
6381 		break;
6382 	case WMI_10_4_DFS_STATUS_CHECK_EVENTID:
6383 		ath10k_wmi_event_dfs_status_check(ar, skb);
6384 		break;
6385 	case WMI_10_4_PEER_STA_PS_STATECHG_EVENTID:
6386 		ath10k_wmi_event_peer_sta_ps_state_chg(ar, skb);
6387 		break;
6388 	default:
6389 		ath10k_warn(ar, "Unknown eventid: %d\n", id);
6390 		break;
6391 	}
6392 
6393 out:
6394 	dev_kfree_skb(skb);
6395 }
6396 
6397 static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
6398 {
6399 	int ret;
6400 
6401 	ret = ath10k_wmi_rx(ar, skb);
6402 	if (ret)
6403 		ath10k_warn(ar, "failed to process wmi rx: %d\n", ret);
6404 }
6405 
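/* Connect the WMI control service over HTC and remember the endpoint id
 * used for sending WMI commands.
 */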
6406 int ath10k_wmi_connect(struct ath10k *ar)
6407 {
6408 	int status;
6409 	struct ath10k_htc_svc_conn_req conn_req;
6410 	struct ath10k_htc_svc_conn_resp conn_resp;
6411 
6412 	memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map));
6413 
6414 	memset(&conn_req, 0, sizeof(conn_req));
6415 	memset(&conn_resp, 0, sizeof(conn_resp));
6416 
6417 	/* these fields are the same for all service endpoints */
6418 	conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
6419 	conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
6420 	conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits;
6421 
6422 	/* connect to control service */
6423 	conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
6424 
6425 	status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
6426 	if (status) {
6427 		ath10k_warn(ar, "failed to connect to WMI CONTROL service status: %d\n",
6428 			    status);
6429 		return status;
6430 	}
6431 
6432 	ar->wmi.eid = conn_resp.eid;
6433 	return 0;
6434 }
6435 
6436 static struct sk_buff *
6437 ath10k_wmi_op_gen_pdev_set_base_macaddr(struct ath10k *ar,
6438 					const u8 macaddr[ETH_ALEN])
6439 {
6440 	struct wmi_pdev_set_base_macaddr_cmd *cmd;
6441 	struct sk_buff *skb;
6442 
6443 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6444 	if (!skb)
6445 		return ERR_PTR(-ENOMEM);
6446 
6447 	cmd = (struct wmi_pdev_set_base_macaddr_cmd *)skb->data;
6448 	ether_addr_copy(cmd->mac_addr.addr, macaddr);
6449 
6450 	ath10k_dbg(ar, ATH10K_DBG_WMI,
6451 		   "wmi pdev basemac %pM\n", macaddr);
6452 	return skb;
6453 }
6454 
6455 static struct sk_buff *
6456 ath10k_wmi_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
6457 			      u16 ctl2g, u16 ctl5g,
6458 			      enum wmi_dfs_region dfs_reg)
6459 {
6460 	struct wmi_pdev_set_regdomain_cmd *cmd;
6461 	struct sk_buff *skb;
6462 
6463 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6464 	if (!skb)
6465 		return ERR_PTR(-ENOMEM);
6466 
6467 	cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
6468 	cmd->reg_domain = __cpu_to_le32(rd);
6469 	cmd->reg_domain_2G = __cpu_to_le32(rd2g);
6470 	cmd->reg_domain_5G = __cpu_to_le32(rd5g);
6471 	cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
6472 	cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
6473 
6474 	ath10k_dbg(ar, ATH10K_DBG_WMI,
6475 		   "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
6476 		   rd, rd2g, rd5g, ctl2g, ctl5g);
6477 	return skb;
6478 }
6479 
6480 static struct sk_buff *
6481 ath10k_wmi_10x_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16
6482 				  rd5g, u16 ctl2g, u16 ctl5g,
6483 				  enum wmi_dfs_region dfs_reg)
6484 {
6485 	struct wmi_pdev_set_regdomain_cmd_10x *cmd;
6486 	struct sk_buff *skb;
6487 
6488 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6489 	if (!skb)
6490 		return ERR_PTR(-ENOMEM);
6491 
6492 	cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
6493 	cmd->reg_domain = __cpu_to_le32(rd);
6494 	cmd->reg_domain_2G = __cpu_to_le32(rd2g);
6495 	cmd->reg_domain_5G = __cpu_to_le32(rd5g);
6496 	cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
6497 	cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
6498 	cmd->dfs_domain = __cpu_to_le32(dfs_reg);
6499 
6500 	ath10k_dbg(ar, ATH10K_DBG_WMI,
6501 		   "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
6502 		   rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
6503 	return skb;
6504 }
6505 
6506 static struct sk_buff *
6507 ath10k_wmi_op_gen_pdev_suspend(struct ath10k *ar, u32 suspend_opt)
6508 {
6509 	struct wmi_pdev_suspend_cmd *cmd;
6510 	struct sk_buff *skb;
6511 
6512 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6513 	if (!skb)
6514 		return ERR_PTR(-ENOMEM);
6515 
6516 	cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
6517 	cmd->suspend_opt = __cpu_to_le32(suspend_opt);
6518 
6519 	return skb;
6520 }
6521 
6522 static struct sk_buff *
6523 ath10k_wmi_op_gen_pdev_resume(struct ath10k *ar)
6524 {
6525 	struct sk_buff *skb;
6526 
6527 	skb = ath10k_wmi_alloc_skb(ar, 0);
6528 	if (!skb)
6529 		return ERR_PTR(-ENOMEM);
6530 
6531 	return skb;
6532 }
6533 
6534 static struct sk_buff *
6535 ath10k_wmi_op_gen_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
6536 {
6537 	struct wmi_pdev_set_param_cmd *cmd;
6538 	struct sk_buff *skb;
6539 
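	/* Params that have no mapping for the running firmware branch are
	 * marked WMI_PDEV_PARAM_UNSUPPORTED in the per-branch param tables.
	 */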
6540 	if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
6541 		ath10k_warn(ar, "pdev param %d not supported by firmware\n",
6542 			    id);
6543 		return ERR_PTR(-EOPNOTSUPP);
6544 	}
6545 
6546 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
6547 	if (!skb)
6548 		return ERR_PTR(-ENOMEM);
6549 
6550 	cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
6551 	cmd->param_id    = __cpu_to_le32(id);
6552 	cmd->param_value = __cpu_to_le32(value);
6553 
6554 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
6555 		   id, value);
6556 	return skb;
6557 }
6558 
6559 void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
6560 				    struct wmi_host_mem_chunks *chunks)
6561 {
6562 	struct host_memory_chunk *chunk;
6563 	int i;
6564 
6565 	chunks->count = __cpu_to_le32(ar->wmi.num_mem_chunks);
6566 
6567 	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
6568 		chunk = &chunks->items[i];
6569 		chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
6570 		chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
6571 		chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
6572 
6573 		ath10k_dbg(ar, ATH10K_DBG_WMI,
6574 			   "wmi chunk %d len %d requested, addr 0x%llx\n",
6575 			   i,
6576 			   ar->wmi.mem_chunks[i].len,
6577 			   (unsigned long long)ar->wmi.mem_chunks[i].paddr);
6578 	}
6579 }
6580 
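/* The INIT command pairs the resource configuration for this firmware
 * branch with the host memory chunks allocated in response to the service
 * ready event.
 */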
6581 static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar)
6582 {
6583 	struct wmi_init_cmd *cmd;
6584 	struct sk_buff *buf;
6585 	struct wmi_resource_config config = {};
6586 	u32 val;
6587 
6588 	config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
6589 	config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS);
6590 	config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS);
6591 
6592 	config.num_offload_reorder_bufs =
6593 		__cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS);
6594 
6595 	config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS);
6596 	config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS);
6597 	config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT);
6598 	config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK);
6599 	config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK);
6600 	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
6601 	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
6602 	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
6603 	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
6604 	config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
6605 	config.scan_max_pending_reqs =
6606 		__cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);
6607 
6608 	config.bmiss_offload_max_vdev =
6609 		__cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV);
6610 
6611 	config.roam_offload_max_vdev =
6612 		__cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV);
6613 
6614 	config.roam_offload_max_ap_profiles =
6615 		__cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES);
6616 
6617 	config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS);
6618 	config.num_mcast_table_elems =
6619 		__cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS);
6620 
6621 	config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE);
6622 	config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE);
6623 	config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES);
6624 	config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE);
6625 	config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM);
6626 
6627 	val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
6628 	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
6629 
6630 	config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG);
6631 
6632 	config.gtk_offload_max_vdev =
6633 		__cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV);
6634 
6635 	config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
6636 	config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);
6637 
6638 	buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
6639 						   ar->wmi.num_mem_chunks));
6640 	if (!buf)
6641 		return ERR_PTR(-ENOMEM);
6642 
6643 	cmd = (struct wmi_init_cmd *)buf->data;
6644 
6645 	memcpy(&cmd->resource_config, &config, sizeof(config));
6646 	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
6647 
6648 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n");
6649 	return buf;
6650 }
6651 
6652 static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar)
6653 {
6654 	struct wmi_init_cmd_10x *cmd;
6655 	struct sk_buff *buf;
6656 	struct wmi_resource_config_10x config = {};
6657 	u32 val;
6658 
6659 	config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
6660 	config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
6661 	config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
6662 	config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
6663 	config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
6664 	config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
6665 	config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
6666 	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6667 	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6668 	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6669 	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
6670 	config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
6671 	config.scan_max_pending_reqs =
6672 		__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
6673 
6674 	config.bmiss_offload_max_vdev =
6675 		__cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
6676 
6677 	config.roam_offload_max_vdev =
6678 		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
6679 
6680 	config.roam_offload_max_ap_profiles =
6681 		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
6682 
6683 	config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
6684 	config.num_mcast_table_elems =
6685 		__cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
6686 
6687 	config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
6688 	config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
6689 	config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
6690 	config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
6691 	config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
6692 
6693 	val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
6694 	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
6695 
6696 	config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
6697 
6698 	config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
6699 	config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
6700 
6701 	buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
6702 						   ar->wmi.num_mem_chunks));
6703 	if (!buf)
6704 		return ERR_PTR(-ENOMEM);
6705 
6706 	cmd = (struct wmi_init_cmd_10x *)buf->data;
6707 
6708 	memcpy(&cmd->resource_config, &config, sizeof(config));
6709 	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
6710 
6711 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n");
6712 	return buf;
6713 }
6714 
6715 static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
6716 {
6717 	struct wmi_init_cmd_10_2 *cmd;
6718 	struct sk_buff *buf;
6719 	struct wmi_resource_config_10x config = {};
6720 	u32 val, features;
6721 
6722 	config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
6723 	config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
6724 
6725 	if (ath10k_peer_stats_enabled(ar)) {
6726 		config.num_peers = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_PEERS);
6727 		config.num_tids = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_TIDS);
6728 	} else {
6729 		config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
6730 		config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
6731 	}
6732 
6733 	config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
6734 	config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
6735 	config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
6736 	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6737 	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6738 	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
6739 	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
6740 	config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
6741 
6742 	config.scan_max_pending_reqs =
6743 		__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
6744 
6745 	config.bmiss_offload_max_vdev =
6746 		__cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
6747 
6748 	config.roam_offload_max_vdev =
6749 		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
6750 
6751 	config.roam_offload_max_ap_profiles =
6752 		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
6753 
6754 	config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
6755 	config.num_mcast_table_elems =
6756 		__cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
6757 
6758 	config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
6759 	config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
6760 	config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
6761 	config.dma_burst_size = __cpu_to_le32(TARGET_10_2_DMA_BURST_SIZE);
6762 	config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
6763 
6764 	val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
6765 	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
6766 
6767 	config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
6768 
6769 	config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
6770 	config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
6771 
6772 	buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
6773 						   ar->wmi.num_mem_chunks));
6774 	if (!buf)
6775 		return ERR_PTR(-ENOMEM);
6776 
6777 	cmd = (struct wmi_init_cmd_10_2 *)buf->data;
6778 
6779 	features = WMI_10_2_RX_BATCH_MODE;
6780 
6781 	if (test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) &&
6782 	    test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
6783 		features |= WMI_10_2_COEX_GPIO;
6784 
6785 	if (ath10k_peer_stats_enabled(ar))
6786 		features |= WMI_10_2_PEER_STATS;
6787 
6788 	if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map))
6789 		features |= WMI_10_2_BSS_CHAN_INFO;
6790 
6791 	cmd->resource_config.feature_mask = __cpu_to_le32(features);
6792 
6793 	memcpy(&cmd->resource_config.common, &config, sizeof(config));
6794 	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
6795 
6796 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n");
6797 	return buf;
6798 }
6799 
6800 static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar)
6801 {
6802 	struct wmi_init_cmd_10_4 *cmd;
6803 	struct sk_buff *buf;
6804 	struct wmi_resource_config_10_4 config = {};
6805 
6806 	config.num_vdevs = __cpu_to_le32(ar->max_num_vdevs);
6807 	config.num_peers = __cpu_to_le32(ar->max_num_peers);
6808 	config.num_active_peers = __cpu_to_le32(ar->num_active_peers);
6809 	config.num_tids = __cpu_to_le32(ar->num_tids);
6810 
6811 	config.num_offload_peers = __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_PEERS);
6812 	config.num_offload_reorder_buffs =
6813 			__cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS);
6814 	config.num_peer_keys  = __cpu_to_le32(TARGET_10_4_NUM_PEER_KEYS);
6815 	config.ast_skid_limit = __cpu_to_le32(TARGET_10_4_AST_SKID_LIMIT);
6816 	config.tx_chain_mask  = __cpu_to_le32(ar->hw_params.tx_chain_mask);
6817 	config.rx_chain_mask  = __cpu_to_le32(ar->hw_params.rx_chain_mask);
6818 
6819 	config.rx_timeout_pri[0] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
6820 	config.rx_timeout_pri[1] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
6821 	config.rx_timeout_pri[2] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
6822 	config.rx_timeout_pri[3] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_HI_PRI);
6823 
6824 	config.rx_decap_mode	    = __cpu_to_le32(ar->wmi.rx_decap_mode);
6825 	config.scan_max_pending_req = __cpu_to_le32(TARGET_10_4_SCAN_MAX_REQS);
6826 	config.bmiss_offload_max_vdev =
6827 			__cpu_to_le32(TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV);
6828 	config.roam_offload_max_vdev  =
6829 			__cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV);
6830 	config.roam_offload_max_ap_profiles =
6831 			__cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES);
6832 	config.num_mcast_groups = __cpu_to_le32(TARGET_10_4_NUM_MCAST_GROUPS);
6833 	config.num_mcast_table_elems =
6834 			__cpu_to_le32(TARGET_10_4_NUM_MCAST_TABLE_ELEMS);
6835 
6836 	config.mcast2ucast_mode = __cpu_to_le32(TARGET_10_4_MCAST2UCAST_MODE);
6837 	config.tx_dbg_log_size  = __cpu_to_le32(TARGET_10_4_TX_DBG_LOG_SIZE);
6838 	config.num_wds_entries  = __cpu_to_le32(TARGET_10_4_NUM_WDS_ENTRIES);
6839 	config.dma_burst_size   = __cpu_to_le32(TARGET_10_4_DMA_BURST_SIZE);
6840 	config.mac_aggr_delim   = __cpu_to_le32(TARGET_10_4_MAC_AGGR_DELIM);
6841 
6842 	config.rx_skip_defrag_timeout_dup_detection_check =
6843 	  __cpu_to_le32(TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK);
6844 
6845 	config.vow_config = __cpu_to_le32(TARGET_10_4_VOW_CONFIG);
6846 	config.gtk_offload_max_vdev =
6847 			__cpu_to_le32(TARGET_10_4_GTK_OFFLOAD_MAX_VDEV);
6848 	config.num_msdu_desc = __cpu_to_le32(ar->htt.max_num_pending_tx);
6849 	config.max_frag_entries = __cpu_to_le32(TARGET_10_4_11AC_TX_MAX_FRAGS);
6850 	config.max_peer_ext_stats =
6851 			__cpu_to_le32(TARGET_10_4_MAX_PEER_EXT_STATS);
6852 	config.smart_ant_cap = __cpu_to_le32(TARGET_10_4_SMART_ANT_CAP);
6853 
6854 	config.bk_minfree = __cpu_to_le32(TARGET_10_4_BK_MIN_FREE);
6855 	config.be_minfree = __cpu_to_le32(TARGET_10_4_BE_MIN_FREE);
6856 	config.vi_minfree = __cpu_to_le32(TARGET_10_4_VI_MIN_FREE);
6857 	config.vo_minfree = __cpu_to_le32(TARGET_10_4_VO_MIN_FREE);
6858 
6859 	config.rx_batchmode = __cpu_to_le32(TARGET_10_4_RX_BATCH_MODE);
6860 	config.tt_support =
6861 			__cpu_to_le32(TARGET_10_4_THERMAL_THROTTLING_CONFIG);
6862 	config.atf_config = __cpu_to_le32(TARGET_10_4_ATF_CONFIG);
6863 	config.iphdr_pad_config = __cpu_to_le32(TARGET_10_4_IPHDR_PAD_CONFIG);
6864 	config.qwrap_config = __cpu_to_le32(TARGET_10_4_QWRAP_CONFIG);
6865 
6866 	buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
6867 						   ar->wmi.num_mem_chunks));
6868 	if (!buf)
6869 		return ERR_PTR(-ENOMEM);
6870 
6871 	cmd = (struct wmi_init_cmd_10_4 *)buf->data;
6872 	memcpy(&cmd->resource_config, &config, sizeof(config));
6873 	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
6874 
6875 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.4\n");
6876 	return buf;
6877 }
6878 
6879 int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
6880 {
6881 	if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
6882 		return -EINVAL;
6883 	if (arg->n_channels > ARRAY_SIZE(arg->channels))
6884 		return -EINVAL;
6885 	if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
6886 		return -EINVAL;
6887 	if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
6888 		return -EINVAL;
6889 
6890 	return 0;
6891 }
6892 
6893 static size_t
6894 ath10k_wmi_start_scan_tlvs_len(const struct wmi_start_scan_arg *arg)
6895 {
6896 	int len = 0;
6897 
6898 	if (arg->ie_len) {
6899 		len += sizeof(struct wmi_ie_data);
6900 		len += roundup(arg->ie_len, 4);
6901 	}
6902 
6903 	if (arg->n_channels) {
6904 		len += sizeof(struct wmi_chan_list);
6905 		len += sizeof(__le32) * arg->n_channels;
6906 	}
6907 
6908 	if (arg->n_ssids) {
6909 		len += sizeof(struct wmi_ssid_list);
6910 		len += sizeof(struct wmi_ssid) * arg->n_ssids;
6911 	}
6912 
6913 	if (arg->n_bssids) {
6914 		len += sizeof(struct wmi_bssid_list);
6915 		len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
6916 	}
6917 
6918 	return len;
6919 }
6920 
6921 void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
6922 				      const struct wmi_start_scan_arg *arg)
6923 {
6924 	u32 scan_id;
6925 	u32 scan_req_id;
6926 
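	/* OR in the host prefixes so the scan and requestor ids are marked
	 * as host-generated.
	 */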
6927 	scan_id  = WMI_HOST_SCAN_REQ_ID_PREFIX;
6928 	scan_id |= arg->scan_id;
6929 
6930 	scan_req_id  = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
6931 	scan_req_id |= arg->scan_req_id;
6932 
6933 	cmn->scan_id            = __cpu_to_le32(scan_id);
6934 	cmn->scan_req_id        = __cpu_to_le32(scan_req_id);
6935 	cmn->vdev_id            = __cpu_to_le32(arg->vdev_id);
6936 	cmn->scan_priority      = __cpu_to_le32(arg->scan_priority);
6937 	cmn->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
6938 	cmn->dwell_time_active  = __cpu_to_le32(arg->dwell_time_active);
6939 	cmn->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
6940 	cmn->min_rest_time      = __cpu_to_le32(arg->min_rest_time);
6941 	cmn->max_rest_time      = __cpu_to_le32(arg->max_rest_time);
6942 	cmn->repeat_probe_time  = __cpu_to_le32(arg->repeat_probe_time);
6943 	cmn->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
6944 	cmn->idle_time          = __cpu_to_le32(arg->idle_time);
6945 	cmn->max_scan_time      = __cpu_to_le32(arg->max_scan_time);
6946 	cmn->probe_delay        = __cpu_to_le32(arg->probe_delay);
6947 	cmn->scan_ctrl_flags    = __cpu_to_le32(arg->scan_ctrl_flags);
6948 }
6949 
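/* Append the optional scan TLVs (channel list, SSID list, BSSID list, IEs).
 * The layout written here must match the length computed by
 * ath10k_wmi_start_scan_tlvs_len().
 */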
6950 static void
6951 ath10k_wmi_put_start_scan_tlvs(u8 *tlvs,
6952 			       const struct wmi_start_scan_arg *arg)
6953 {
6954 	struct wmi_ie_data *ie;
6955 	struct wmi_chan_list *channels;
6956 	struct wmi_ssid_list *ssids;
6957 	struct wmi_bssid_list *bssids;
6958 	void *ptr = tlvs;
6959 	int i;
6960 
6961 	if (arg->n_channels) {
6962 		channels = ptr;
6963 		channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
6964 		channels->num_chan = __cpu_to_le32(arg->n_channels);
6965 
6966 		for (i = 0; i < arg->n_channels; i++)
6967 			channels->channel_list[i].freq =
6968 				__cpu_to_le16(arg->channels[i]);
6969 
6970 		ptr += sizeof(*channels);
6971 		ptr += sizeof(__le32) * arg->n_channels;
6972 	}
6973 
6974 	if (arg->n_ssids) {
6975 		ssids = ptr;
6976 		ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
6977 		ssids->num_ssids = __cpu_to_le32(arg->n_ssids);
6978 
6979 		for (i = 0; i < arg->n_ssids; i++) {
6980 			ssids->ssids[i].ssid_len =
6981 				__cpu_to_le32(arg->ssids[i].len);
6982 			memcpy(&ssids->ssids[i].ssid,
6983 			       arg->ssids[i].ssid,
6984 			       arg->ssids[i].len);
6985 		}
6986 
6987 		ptr += sizeof(*ssids);
6988 		ptr += sizeof(struct wmi_ssid) * arg->n_ssids;
6989 	}
6990 
6991 	if (arg->n_bssids) {
6992 		bssids = ptr;
6993 		bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
6994 		bssids->num_bssid = __cpu_to_le32(arg->n_bssids);
6995 
6996 		for (i = 0; i < arg->n_bssids; i++)
6997 			ether_addr_copy(bssids->bssid_list[i].addr,
6998 					arg->bssids[i].bssid);
6999 
7000 		ptr += sizeof(*bssids);
7001 		ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids;
7002 	}
7003 
7004 	if (arg->ie_len) {
7005 		ie = ptr;
7006 		ie->tag = __cpu_to_le32(WMI_IE_TAG);
7007 		ie->ie_len = __cpu_to_le32(arg->ie_len);
7008 		memcpy(ie->ie_data, arg->ie, arg->ie_len);
7009 
7010 		ptr += sizeof(*ie);
7011 		ptr += roundup(arg->ie_len, 4);
7012 	}
7013 }
7014 
7015 static struct sk_buff *
7016 ath10k_wmi_op_gen_start_scan(struct ath10k *ar,
7017 			     const struct wmi_start_scan_arg *arg)
7018 {
7019 	struct wmi_start_scan_cmd *cmd;
7020 	struct sk_buff *skb;
7021 	size_t len;
7022 	int ret;
7023 
7024 	ret = ath10k_wmi_start_scan_verify(arg);
7025 	if (ret)
7026 		return ERR_PTR(ret);
7027 
7028 	len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
7029 	skb = ath10k_wmi_alloc_skb(ar, len);
7030 	if (!skb)
7031 		return ERR_PTR(-ENOMEM);
7032 
7033 	cmd = (struct wmi_start_scan_cmd *)skb->data;
7034 
7035 	ath10k_wmi_put_start_scan_common(&cmd->common, arg);
7036 	ath10k_wmi_put_start_scan_tlvs(cmd->tlvs, arg);
7037 
7038 	cmd->burst_duration_ms = __cpu_to_le32(0);
7039 
7040 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n");
7041 	return skb;
7042 }
7043 
7044 static struct sk_buff *
7045 ath10k_wmi_10x_op_gen_start_scan(struct ath10k *ar,
7046 				 const struct wmi_start_scan_arg *arg)
7047 {
7048 	struct wmi_10x_start_scan_cmd *cmd;
7049 	struct sk_buff *skb;
7050 	size_t len;
7051 	int ret;
7052 
7053 	ret = ath10k_wmi_start_scan_verify(arg);
7054 	if (ret)
7055 		return ERR_PTR(ret);
7056 
7057 	len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
7058 	skb = ath10k_wmi_alloc_skb(ar, len);
7059 	if (!skb)
7060 		return ERR_PTR(-ENOMEM);
7061 
7062 	cmd = (struct wmi_10x_start_scan_cmd *)skb->data;
7063 
7064 	ath10k_wmi_put_start_scan_common(&cmd->common, arg);
7065 	ath10k_wmi_put_start_scan_tlvs(cmd->tlvs, arg);
7066 
7067 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi 10x start scan\n");
7068 	return skb;
7069 }
7070 
7071 void ath10k_wmi_start_scan_init(struct ath10k *ar,
7072 				struct wmi_start_scan_arg *arg)
7073 {
7074 	/* setup commonly used values */
7075 	arg->scan_req_id = 1;
7076 	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
7077 	arg->dwell_time_active = 50;
7078 	arg->dwell_time_passive = 150;
7079 	arg->min_rest_time = 50;
7080 	arg->max_rest_time = 500;
7081 	arg->repeat_probe_time = 0;
7082 	arg->probe_spacing_time = 0;
7083 	arg->idle_time = 0;
7084 	arg->max_scan_time = 20000;
7085 	arg->probe_delay = 5;
7086 	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
7087 		| WMI_SCAN_EVENT_COMPLETED
7088 		| WMI_SCAN_EVENT_BSS_CHANNEL
7089 		| WMI_SCAN_EVENT_FOREIGN_CHANNEL
7090 		| WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT
7091 		| WMI_SCAN_EVENT_DEQUEUED;
7092 	arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
7093 	arg->n_bssids = 1;
7094 	arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
7095 }
7096 
7097 static struct sk_buff *
7098 ath10k_wmi_op_gen_stop_scan(struct ath10k *ar,
7099 			    const struct wmi_stop_scan_arg *arg)
7100 {
7101 	struct wmi_stop_scan_cmd *cmd;
7102 	struct sk_buff *skb;
7103 	u32 scan_id;
7104 	u32 req_id;
7105 
7106 	if (arg->req_id > 0xFFF)
7107 		return ERR_PTR(-EINVAL);
7108 	if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
7109 		return ERR_PTR(-EINVAL);
7110 
7111 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7112 	if (!skb)
7113 		return ERR_PTR(-ENOMEM);
7114 
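	/* Tag both IDs with the host prefixes so the target can tell
	 * host-issued scan/requestor IDs apart from its internal ones.
	 */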
7115 	scan_id = arg->u.scan_id;
7116 	scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
7117 
7118 	req_id = arg->req_id;
7119 	req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
7120 
7121 	cmd = (struct wmi_stop_scan_cmd *)skb->data;
7122 	cmd->req_type    = __cpu_to_le32(arg->req_type);
7123 	cmd->vdev_id     = __cpu_to_le32(arg->u.vdev_id);
7124 	cmd->scan_id     = __cpu_to_le32(scan_id);
7125 	cmd->scan_req_id = __cpu_to_le32(req_id);
7126 
7127 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7128 		   "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
7129 		   arg->req_id, arg->req_type, arg->u.scan_id);
7130 	return skb;
7131 }
7132 
7133 static struct sk_buff *
7134 ath10k_wmi_op_gen_vdev_create(struct ath10k *ar, u32 vdev_id,
7135 			      enum wmi_vdev_type type,
7136 			      enum wmi_vdev_subtype subtype,
7137 			      const u8 macaddr[ETH_ALEN])
7138 {
7139 	struct wmi_vdev_create_cmd *cmd;
7140 	struct sk_buff *skb;
7141 
7142 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7143 	if (!skb)
7144 		return ERR_PTR(-ENOMEM);
7145 
7146 	cmd = (struct wmi_vdev_create_cmd *)skb->data;
7147 	cmd->vdev_id      = __cpu_to_le32(vdev_id);
7148 	cmd->vdev_type    = __cpu_to_le32(type);
7149 	cmd->vdev_subtype = __cpu_to_le32(subtype);
7150 	ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
7151 
7152 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7153 		   "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
7154 		   vdev_id, type, subtype, macaddr);
7155 	return skb;
7156 }
7157 
7158 static struct sk_buff *
7159 ath10k_wmi_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
7160 {
7161 	struct wmi_vdev_delete_cmd *cmd;
7162 	struct sk_buff *skb;
7163 
7164 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7165 	if (!skb)
7166 		return ERR_PTR(-ENOMEM);
7167 
7168 	cmd = (struct wmi_vdev_delete_cmd *)skb->data;
7169 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7170 
7171 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7172 		   "WMI vdev delete id %d\n", vdev_id);
7173 	return skb;
7174 }
7175 
7176 static struct sk_buff *
7177 ath10k_wmi_op_gen_vdev_start(struct ath10k *ar,
7178 			     const struct wmi_vdev_start_request_arg *arg,
7179 			     bool restart)
7180 {
7181 	struct wmi_vdev_start_request_cmd *cmd;
7182 	struct sk_buff *skb;
7183 	const char *cmdname;
7184 	u32 flags = 0;
7185 
7186 	if (WARN_ON(arg->hidden_ssid && !arg->ssid))
7187 		return ERR_PTR(-EINVAL);
7188 	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
7189 		return ERR_PTR(-EINVAL);
7190 
7191 	if (restart)
7192 		cmdname = "restart";
7193 	else
7194 		cmdname = "start";
7195 
7196 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7197 	if (!skb)
7198 		return ERR_PTR(-ENOMEM);
7199 
7200 	if (arg->hidden_ssid)
7201 		flags |= WMI_VDEV_START_HIDDEN_SSID;
7202 	if (arg->pmf_enabled)
7203 		flags |= WMI_VDEV_START_PMF_ENABLED;
7204 
7205 	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
7206 	cmd->vdev_id         = __cpu_to_le32(arg->vdev_id);
7207 	cmd->disable_hw_ack  = __cpu_to_le32(arg->disable_hw_ack);
7208 	cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval);
7209 	cmd->dtim_period     = __cpu_to_le32(arg->dtim_period);
7210 	cmd->flags           = __cpu_to_le32(flags);
7211 	cmd->bcn_tx_rate     = __cpu_to_le32(arg->bcn_tx_rate);
7212 	cmd->bcn_tx_power    = __cpu_to_le32(arg->bcn_tx_power);
7213 
7214 	if (arg->ssid) {
7215 		cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
7216 		memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
7217 	}
7218 
7219 	ath10k_wmi_put_wmi_channel(ar, &cmd->chan, &arg->channel);
7220 
7221 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7222 		   "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, ch_flags: 0x%0X, max_power: %d\n",
7223 		   cmdname, arg->vdev_id,
7224 		   flags, arg->channel.freq, arg->channel.mode,
7225 		   cmd->chan.flags, arg->channel.max_power);
7226 
7227 	return skb;
7228 }
7229 
7230 static struct sk_buff *
7231 ath10k_wmi_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
7232 {
7233 	struct wmi_vdev_stop_cmd *cmd;
7234 	struct sk_buff *skb;
7235 
7236 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7237 	if (!skb)
7238 		return ERR_PTR(-ENOMEM);
7239 
7240 	cmd = (struct wmi_vdev_stop_cmd *)skb->data;
7241 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7242 
7243 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
7244 	return skb;
7245 }
7246 
7247 static struct sk_buff *
7248 ath10k_wmi_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
7249 			  const u8 *bssid)
7250 {
7251 	struct wmi_vdev_up_cmd *cmd;
7252 	struct sk_buff *skb;
7253 
7254 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7255 	if (!skb)
7256 		return ERR_PTR(-ENOMEM);
7257 
7258 	cmd = (struct wmi_vdev_up_cmd *)skb->data;
7259 	cmd->vdev_id       = __cpu_to_le32(vdev_id);
7260 	cmd->vdev_assoc_id = __cpu_to_le32(aid);
7261 	ether_addr_copy(cmd->vdev_bssid.addr, bssid);
7262 
7263 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7264 		   "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
7265 		   vdev_id, aid, bssid);
7266 	return skb;
7267 }
7268 
7269 static struct sk_buff *
7270 ath10k_wmi_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
7271 {
7272 	struct wmi_vdev_down_cmd *cmd;
7273 	struct sk_buff *skb;
7274 
7275 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7276 	if (!skb)
7277 		return ERR_PTR(-ENOMEM);
7278 
7279 	cmd = (struct wmi_vdev_down_cmd *)skb->data;
7280 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7281 
7282 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7283 		   "wmi mgmt vdev down id 0x%x\n", vdev_id);
7284 	return skb;
7285 }
7286 
7287 static struct sk_buff *
7288 ath10k_wmi_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
7289 				 u32 param_id, u32 param_value)
7290 {
7291 	struct wmi_vdev_set_param_cmd *cmd;
7292 	struct sk_buff *skb;
7293 
7294 	if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) {
7295 		ath10k_dbg(ar, ATH10K_DBG_WMI,
7296 			   "vdev param %d not supported by firmware\n",
7297 			    param_id);
7298 		return ERR_PTR(-EOPNOTSUPP);
7299 	}
7300 
7301 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7302 	if (!skb)
7303 		return ERR_PTR(-ENOMEM);
7304 
7305 	cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
7306 	cmd->vdev_id     = __cpu_to_le32(vdev_id);
7307 	cmd->param_id    = __cpu_to_le32(param_id);
7308 	cmd->param_value = __cpu_to_le32(param_value);
7309 
7310 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7311 		   "wmi vdev id 0x%x set param %d value %d\n",
7312 		   vdev_id, param_id, param_value);
7313 	return skb;
7314 }
7315 
7316 static struct sk_buff *
7317 ath10k_wmi_op_gen_vdev_install_key(struct ath10k *ar,
7318 				   const struct wmi_vdev_install_key_arg *arg)
7319 {
7320 	struct wmi_vdev_install_key_cmd *cmd;
7321 	struct sk_buff *skb;
7322 
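	/* A WMI_CIPHER_NONE install must not carry key material and any
	 * real cipher must; reject inconsistent requests up front.
	 */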
7323 	if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
7324 		return ERR_PTR(-EINVAL);
7325 	if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
7326 		return ERR_PTR(-EINVAL);
7327 
7328 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len);
7329 	if (!skb)
7330 		return ERR_PTR(-ENOMEM);
7331 
7332 	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
7333 	cmd->vdev_id       = __cpu_to_le32(arg->vdev_id);
7334 	cmd->key_idx       = __cpu_to_le32(arg->key_idx);
7335 	cmd->key_flags     = __cpu_to_le32(arg->key_flags);
7336 	cmd->key_cipher    = __cpu_to_le32(arg->key_cipher);
7337 	cmd->key_len       = __cpu_to_le32(arg->key_len);
7338 	cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
7339 	cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
7340 
7341 	if (arg->macaddr)
7342 		ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
7343 	if (arg->key_data)
7344 		memcpy(cmd->key_data, arg->key_data, arg->key_len);
7345 
7346 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7347 		   "wmi vdev install key idx %d cipher %d len %d\n",
7348 		   arg->key_idx, arg->key_cipher, arg->key_len);
7349 	return skb;
7350 }
7351 
7352 static struct sk_buff *
7353 ath10k_wmi_op_gen_vdev_spectral_conf(struct ath10k *ar,
7354 				     const struct wmi_vdev_spectral_conf_arg *arg)
7355 {
7356 	struct wmi_vdev_spectral_conf_cmd *cmd;
7357 	struct sk_buff *skb;
7358 
7359 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7360 	if (!skb)
7361 		return ERR_PTR(-ENOMEM);
7362 
7363 	cmd = (struct wmi_vdev_spectral_conf_cmd *)skb->data;
7364 	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
7365 	cmd->scan_count = __cpu_to_le32(arg->scan_count);
7366 	cmd->scan_period = __cpu_to_le32(arg->scan_period);
7367 	cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
7368 	cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
7369 	cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
7370 	cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
7371 	cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
7372 	cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
7373 	cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
7374 	cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
7375 	cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
7376 	cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
7377 	cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
7378 	cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
7379 	cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
7380 	cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
7381 	cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
7382 	cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);
7383 
7384 	return skb;
7385 }
7386 
7387 static struct sk_buff *
7388 ath10k_wmi_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
7389 				       u32 trigger, u32 enable)
7390 {
7391 	struct wmi_vdev_spectral_enable_cmd *cmd;
7392 	struct sk_buff *skb;
7393 
7394 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7395 	if (!skb)
7396 		return ERR_PTR(-ENOMEM);
7397 
7398 	cmd = (struct wmi_vdev_spectral_enable_cmd *)skb->data;
7399 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7400 	cmd->trigger_cmd = __cpu_to_le32(trigger);
7401 	cmd->enable_cmd = __cpu_to_le32(enable);
7402 
7403 	return skb;
7404 }
7405 
7406 static struct sk_buff *
7407 ath10k_wmi_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
7408 			      const u8 peer_addr[ETH_ALEN],
7409 			      enum wmi_peer_type peer_type)
7410 {
7411 	struct wmi_peer_create_cmd *cmd;
7412 	struct sk_buff *skb;
7413 
7414 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7415 	if (!skb)
7416 		return ERR_PTR(-ENOMEM);
7417 
7418 	cmd = (struct wmi_peer_create_cmd *)skb->data;
7419 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7420 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
7421 	cmd->peer_type = __cpu_to_le32(peer_type);
7422 
7423 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7424 		   "wmi peer create vdev_id %d peer_addr %pM\n",
7425 		   vdev_id, peer_addr);
7426 	return skb;
7427 }
7428 
7429 static struct sk_buff *
7430 ath10k_wmi_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
7431 			      const u8 peer_addr[ETH_ALEN])
7432 {
7433 	struct wmi_peer_delete_cmd *cmd;
7434 	struct sk_buff *skb;
7435 
7436 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7437 	if (!skb)
7438 		return ERR_PTR(-ENOMEM);
7439 
7440 	cmd = (struct wmi_peer_delete_cmd *)skb->data;
7441 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7442 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
7443 
7444 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7445 		   "wmi peer delete vdev_id %d peer_addr %pM\n",
7446 		   vdev_id, peer_addr);
7447 	return skb;
7448 }
7449 
7450 static struct sk_buff *
7451 ath10k_wmi_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
7452 			     const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
7453 {
7454 	struct wmi_peer_flush_tids_cmd *cmd;
7455 	struct sk_buff *skb;
7456 
7457 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7458 	if (!skb)
7459 		return ERR_PTR(-ENOMEM);
7460 
7461 	cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
7462 	cmd->vdev_id         = __cpu_to_le32(vdev_id);
7463 	cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
7464 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
7465 
7466 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7467 		   "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
7468 		   vdev_id, peer_addr, tid_bitmap);
7469 	return skb;
7470 }
7471 
7472 static struct sk_buff *
7473 ath10k_wmi_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
7474 				 const u8 *peer_addr,
7475 				 enum wmi_peer_param param_id,
7476 				 u32 param_value)
7477 {
7478 	struct wmi_peer_set_param_cmd *cmd;
7479 	struct sk_buff *skb;
7480 
7481 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7482 	if (!skb)
7483 		return ERR_PTR(-ENOMEM);
7484 
7485 	cmd = (struct wmi_peer_set_param_cmd *)skb->data;
7486 	cmd->vdev_id     = __cpu_to_le32(vdev_id);
7487 	cmd->param_id    = __cpu_to_le32(param_id);
7488 	cmd->param_value = __cpu_to_le32(param_value);
7489 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
7490 
7491 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7492 		   "wmi vdev %d peer 0x%pM set param %d value %d\n",
7493 		   vdev_id, peer_addr, param_id, param_value);
7494 	return skb;
7495 }
7496 
7497 static struct sk_buff *ath10k_wmi_op_gen_gpio_config(struct ath10k *ar,
7498 						     u32 gpio_num, u32 input,
7499 						     u32 pull_type, u32 intr_mode)
7500 {
7501 	struct wmi_gpio_config_cmd *cmd;
7502 	struct sk_buff *skb;
7503 
7504 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7505 	if (!skb)
7506 		return ERR_PTR(-ENOMEM);
7507 
7508 	cmd = (struct wmi_gpio_config_cmd *)skb->data;
7509 	cmd->pull_type = __cpu_to_le32(pull_type);
7510 	cmd->gpio_num = __cpu_to_le32(gpio_num);
7511 	cmd->input = __cpu_to_le32(input);
7512 	cmd->intr_mode = __cpu_to_le32(intr_mode);
7513 
7514 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi gpio_config gpio_num 0x%08x input 0x%08x pull_type 0x%08x intr_mode 0x%08x\n",
7515 		   gpio_num, input, pull_type, intr_mode);
7516 
7517 	return skb;
7518 }
7519 
7520 static struct sk_buff *ath10k_wmi_op_gen_gpio_output(struct ath10k *ar,
7521 						     u32 gpio_num, u32 set)
7522 {
7523 	struct wmi_gpio_output_cmd *cmd;
7524 	struct sk_buff *skb;
7525 
7526 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7527 	if (!skb)
7528 		return ERR_PTR(-ENOMEM);
7529 
7530 	cmd = (struct wmi_gpio_output_cmd *)skb->data;
7531 	cmd->gpio_num = __cpu_to_le32(gpio_num);
7532 	cmd->set = __cpu_to_le32(set);
7533 
7534 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi gpio_output gpio_num 0x%08x set 0x%08x\n",
7535 		   gpio_num, set);
7536 
7537 	return skb;
7538 }
7539 
7540 static struct sk_buff *
7541 ath10k_wmi_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
7542 			     enum wmi_sta_ps_mode psmode)
7543 {
7544 	struct wmi_sta_powersave_mode_cmd *cmd;
7545 	struct sk_buff *skb;
7546 
7547 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7548 	if (!skb)
7549 		return ERR_PTR(-ENOMEM);
7550 
7551 	cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
7552 	cmd->vdev_id     = __cpu_to_le32(vdev_id);
7553 	cmd->sta_ps_mode = __cpu_to_le32(psmode);
7554 
7555 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7556 		   "wmi set powersave id 0x%x mode %d\n",
7557 		   vdev_id, psmode);
7558 	return skb;
7559 }
7560 
7561 static struct sk_buff *
7562 ath10k_wmi_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
7563 			     enum wmi_sta_powersave_param param_id,
7564 			     u32 value)
7565 {
7566 	struct wmi_sta_powersave_param_cmd *cmd;
7567 	struct sk_buff *skb;
7568 
7569 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7570 	if (!skb)
7571 		return ERR_PTR(-ENOMEM);
7572 
7573 	cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
7574 	cmd->vdev_id     = __cpu_to_le32(vdev_id);
7575 	cmd->param_id    = __cpu_to_le32(param_id);
7576 	cmd->param_value = __cpu_to_le32(value);
7577 
7578 	ath10k_dbg(ar, ATH10K_DBG_STA,
7579 		   "wmi sta ps param vdev_id 0x%x param %d value %d\n",
7580 		   vdev_id, param_id, value);
7581 	return skb;
7582 }
7583 
7584 static struct sk_buff *
7585 ath10k_wmi_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
7586 			    enum wmi_ap_ps_peer_param param_id, u32 value)
7587 {
7588 	struct wmi_ap_ps_peer_cmd *cmd;
7589 	struct sk_buff *skb;
7590 
7591 	if (!mac)
7592 		return ERR_PTR(-EINVAL);
7593 
7594 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7595 	if (!skb)
7596 		return ERR_PTR(-ENOMEM);
7597 
7598 	cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
7599 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7600 	cmd->param_id = __cpu_to_le32(param_id);
7601 	cmd->param_value = __cpu_to_le32(value);
7602 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
7603 
7604 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7605 		   "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
7606 		   vdev_id, param_id, value, mac);
7607 	return skb;
7608 }
7609 
7610 static struct sk_buff *
7611 ath10k_wmi_op_gen_scan_chan_list(struct ath10k *ar,
7612 				 const struct wmi_scan_chan_list_arg *arg)
7613 {
7614 	struct wmi_scan_chan_list_cmd *cmd;
7615 	struct sk_buff *skb;
7616 	struct wmi_channel_arg *ch;
7617 	struct wmi_channel *ci;
7618 	int i;
7619 
7620 	skb = ath10k_wmi_alloc_skb(ar, struct_size(cmd, chan_info, arg->n_channels));
7621 	if (!skb)
7622 		return ERR_PTR(-ENOMEM);
7623 
7624 	cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
7625 	cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
7626 
7627 	for (i = 0; i < arg->n_channels; i++) {
7628 		ch = &arg->channels[i];
7629 		ci = &cmd->chan_info[i];
7630 
7631 		ath10k_wmi_put_wmi_channel(ar, ci, ch);
7632 	}
7633 
7634 	return skb;
7635 }
7636 
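/* Fill the peer-assoc fields common to all firmware branches; the
 * per-version wrappers below add their own extras on top of this.
 */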
7637 static void
7638 ath10k_wmi_peer_assoc_fill(struct ath10k *ar, void *buf,
7639 			   const struct wmi_peer_assoc_complete_arg *arg)
7640 {
7641 	struct wmi_common_peer_assoc_complete_cmd *cmd = buf;
7642 
7643 	cmd->vdev_id            = __cpu_to_le32(arg->vdev_id);
7644 	cmd->peer_new_assoc     = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
7645 	cmd->peer_associd       = __cpu_to_le32(arg->peer_aid);
7646 	cmd->peer_flags         = __cpu_to_le32(arg->peer_flags);
7647 	cmd->peer_caps          = __cpu_to_le32(arg->peer_caps);
7648 	cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval);
7649 	cmd->peer_ht_caps       = __cpu_to_le32(arg->peer_ht_caps);
7650 	cmd->peer_max_mpdu      = __cpu_to_le32(arg->peer_max_mpdu);
7651 	cmd->peer_mpdu_density  = __cpu_to_le32(arg->peer_mpdu_density);
7652 	cmd->peer_rate_caps     = __cpu_to_le32(arg->peer_rate_caps);
7653 	cmd->peer_nss           = __cpu_to_le32(arg->peer_num_spatial_streams);
7654 	cmd->peer_vht_caps      = __cpu_to_le32(arg->peer_vht_caps);
7655 	cmd->peer_phymode       = __cpu_to_le32(arg->peer_phymode);
7656 
7657 	ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
7658 
7659 	cmd->peer_legacy_rates.num_rates =
7660 		__cpu_to_le32(arg->peer_legacy_rates.num_rates);
7661 	memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates,
7662 	       arg->peer_legacy_rates.num_rates);
7663 
7664 	cmd->peer_ht_rates.num_rates =
7665 		__cpu_to_le32(arg->peer_ht_rates.num_rates);
7666 	memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates,
7667 	       arg->peer_ht_rates.num_rates);
7668 
7669 	cmd->peer_vht_rates.rx_max_rate =
7670 		__cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
7671 	cmd->peer_vht_rates.rx_mcs_set =
7672 		__cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
7673 	cmd->peer_vht_rates.tx_max_rate =
7674 		__cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
7675 	cmd->peer_vht_rates.tx_mcs_set =
7676 		__cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
7677 }
7678 
7679 static void
7680 ath10k_wmi_peer_assoc_fill_main(struct ath10k *ar, void *buf,
7681 				const struct wmi_peer_assoc_complete_arg *arg)
7682 {
7683 	struct wmi_main_peer_assoc_complete_cmd *cmd = buf;
7684 
7685 	ath10k_wmi_peer_assoc_fill(ar, buf, arg);
7686 	memset(cmd->peer_ht_info, 0, sizeof(cmd->peer_ht_info));
7687 }
7688 
7689 static void
7690 ath10k_wmi_peer_assoc_fill_10_1(struct ath10k *ar, void *buf,
7691 				const struct wmi_peer_assoc_complete_arg *arg)
7692 {
7693 	ath10k_wmi_peer_assoc_fill(ar, buf, arg);
7694 }
7695 
7696 static void
7697 ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf,
7698 				const struct wmi_peer_assoc_complete_arg *arg)
7699 {
7700 	struct wmi_10_2_peer_assoc_complete_cmd *cmd = buf;
7701 	int max_mcs, max_nss;
7702 	u32 info0;
7703 
7704 	/* TODO: Is using max values okay with firmware? */
7705 	max_mcs = 0xf;
7706 	max_nss = 0xf;
7707 
7708 	info0 = SM(max_mcs, WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX) |
7709 		SM(max_nss, WMI_PEER_ASSOC_INFO0_MAX_NSS);
7710 
7711 	ath10k_wmi_peer_assoc_fill(ar, buf, arg);
7712 	cmd->info0 = __cpu_to_le32(info0);
7713 }
7714 
7715 static void
7716 ath10k_wmi_peer_assoc_fill_10_4(struct ath10k *ar, void *buf,
7717 				const struct wmi_peer_assoc_complete_arg *arg)
7718 {
7719 	struct wmi_10_4_peer_assoc_complete_cmd *cmd = buf;
7720 
7721 	ath10k_wmi_peer_assoc_fill_10_2(ar, buf, arg);
7722 	cmd->peer_bw_rxnss_override =
7723 		__cpu_to_le32(arg->peer_bw_rxnss_override);
7724 }
7725 
7726 static int
7727 ath10k_wmi_peer_assoc_check_arg(const struct wmi_peer_assoc_complete_arg *arg)
7728 {
7729 	if (arg->peer_mpdu_density > 16)
7730 		return -EINVAL;
7731 	if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
7732 		return -EINVAL;
7733 	if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
7734 		return -EINVAL;
7735 
7736 	return 0;
7737 }
7738 
7739 static struct sk_buff *
7740 ath10k_wmi_op_gen_peer_assoc(struct ath10k *ar,
7741 			     const struct wmi_peer_assoc_complete_arg *arg)
7742 {
7743 	size_t len = sizeof(struct wmi_main_peer_assoc_complete_cmd);
7744 	struct sk_buff *skb;
7745 	int ret;
7746 
7747 	ret = ath10k_wmi_peer_assoc_check_arg(arg);
7748 	if (ret)
7749 		return ERR_PTR(ret);
7750 
7751 	skb = ath10k_wmi_alloc_skb(ar, len);
7752 	if (!skb)
7753 		return ERR_PTR(-ENOMEM);
7754 
7755 	ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg);
7756 
7757 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7758 		   "wmi peer assoc vdev %d addr %pM (%s)\n",
7759 		   arg->vdev_id, arg->addr,
7760 		   arg->peer_reassoc ? "reassociate" : "new");
7761 	return skb;
7762 }
7763 
7764 static struct sk_buff *
7765 ath10k_wmi_10_1_op_gen_peer_assoc(struct ath10k *ar,
7766 				  const struct wmi_peer_assoc_complete_arg *arg)
7767 {
7768 	size_t len = sizeof(struct wmi_10_1_peer_assoc_complete_cmd);
7769 	struct sk_buff *skb;
7770 	int ret;
7771 
7772 	ret = ath10k_wmi_peer_assoc_check_arg(arg);
7773 	if (ret)
7774 		return ERR_PTR(ret);
7775 
7776 	skb = ath10k_wmi_alloc_skb(ar, len);
7777 	if (!skb)
7778 		return ERR_PTR(-ENOMEM);
7779 
7780 	ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg);
7781 
7782 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7783 		   "wmi peer assoc vdev %d addr %pM (%s)\n",
7784 		   arg->vdev_id, arg->addr,
7785 		   arg->peer_reassoc ? "reassociate" : "new");
7786 	return skb;
7787 }
7788 
7789 static struct sk_buff *
7790 ath10k_wmi_10_2_op_gen_peer_assoc(struct ath10k *ar,
7791 				  const struct wmi_peer_assoc_complete_arg *arg)
7792 {
7793 	size_t len = sizeof(struct wmi_10_2_peer_assoc_complete_cmd);
7794 	struct sk_buff *skb;
7795 	int ret;
7796 
7797 	ret = ath10k_wmi_peer_assoc_check_arg(arg);
7798 	if (ret)
7799 		return ERR_PTR(ret);
7800 
7801 	skb = ath10k_wmi_alloc_skb(ar, len);
7802 	if (!skb)
7803 		return ERR_PTR(-ENOMEM);
7804 
7805 	ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg);
7806 
7807 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7808 		   "wmi peer assoc vdev %d addr %pM (%s)\n",
7809 		   arg->vdev_id, arg->addr,
7810 		   arg->peer_reassoc ? "reassociate" : "new");
7811 	return skb;
7812 }
7813 
7814 static struct sk_buff *
7815 ath10k_wmi_10_4_op_gen_peer_assoc(struct ath10k *ar,
7816 				  const struct wmi_peer_assoc_complete_arg *arg)
7817 {
7818 	size_t len = sizeof(struct wmi_10_4_peer_assoc_complete_cmd);
7819 	struct sk_buff *skb;
7820 	int ret;
7821 
7822 	ret = ath10k_wmi_peer_assoc_check_arg(arg);
7823 	if (ret)
7824 		return ERR_PTR(ret);
7825 
7826 	skb = ath10k_wmi_alloc_skb(ar, len);
7827 	if (!skb)
7828 		return ERR_PTR(-ENOMEM);
7829 
7830 	ath10k_wmi_peer_assoc_fill_10_4(ar, skb->data, arg);
7831 
7832 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7833 		   "wmi peer assoc vdev %d addr %pM (%s)\n",
7834 		   arg->vdev_id, arg->addr,
7835 		   arg->peer_reassoc ? "reassociate" : "new");
7836 	return skb;
7837 }
7838 
7839 static struct sk_buff *
7840 ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar)
7841 {
7842 	struct sk_buff *skb;
7843 
7844 	skb = ath10k_wmi_alloc_skb(ar, 0);
7845 	if (!skb)
7846 		return ERR_PTR(-ENOMEM);
7847 
7848 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature\n");
7849 	return skb;
7850 }
7851 
7852 static struct sk_buff *
7853 ath10k_wmi_10_2_op_gen_pdev_bss_chan_info(struct ath10k *ar,
7854 					  enum wmi_bss_survey_req_type type)
7855 {
7856 	struct wmi_pdev_chan_info_req_cmd *cmd;
7857 	struct sk_buff *skb;
7858 
7859 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7860 	if (!skb)
7861 		return ERR_PTR(-ENOMEM);
7862 
7863 	cmd = (struct wmi_pdev_chan_info_req_cmd *)skb->data;
7864 	cmd->type = __cpu_to_le32(type);
7865 
7866 	ath10k_dbg(ar, ATH10K_DBG_WMI,
7867 		   "wmi pdev bss info request type %d\n", type);
7868 
7869 	return skb;
7870 }
7871 
7872 /* This function assumes the beacon is already DMA mapped */
7873 static struct sk_buff *
7874 ath10k_wmi_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, const void *bcn,
7875 			     size_t bcn_len, u32 bcn_paddr, bool dtim_zero,
7876 			     bool deliver_cab)
7877 {
7878 	struct wmi_bcn_tx_ref_cmd *cmd;
7879 	struct sk_buff *skb;
7880 	struct ieee80211_hdr *hdr;
7881 	u16 fc;
7882 
7883 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7884 	if (!skb)
7885 		return ERR_PTR(-ENOMEM);
7886 
7887 	hdr = (struct ieee80211_hdr *)bcn;
7888 	fc = le16_to_cpu(hdr->frame_control);
7889 
7890 	cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data;
7891 	cmd->vdev_id = __cpu_to_le32(vdev_id);
7892 	cmd->data_len = __cpu_to_le32(bcn_len);
7893 	cmd->data_ptr = __cpu_to_le32(bcn_paddr);
7894 	cmd->msdu_id = 0;
7895 	cmd->frame_control = __cpu_to_le32(fc);
7896 	cmd->flags = 0;
7897 	cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA);
7898 
7899 	if (dtim_zero)
7900 		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
7901 
7902 	if (deliver_cab)
7903 		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
7904 
7905 	return skb;
7906 }
7907 
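/* Convert host-order WMM AC parameters into the little-endian layout
 * expected on the WMI wire.
 */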
7908 void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params,
7909 			      const struct wmi_wmm_params_arg *arg)
7910 {
7911 	params->cwmin  = __cpu_to_le32(arg->cwmin);
7912 	params->cwmax  = __cpu_to_le32(arg->cwmax);
7913 	params->aifs   = __cpu_to_le32(arg->aifs);
7914 	params->txop   = __cpu_to_le32(arg->txop);
7915 	params->acm    = __cpu_to_le32(arg->acm);
7916 	params->no_ack = __cpu_to_le32(arg->no_ack);
7917 }
7918 
7919 static struct sk_buff *
7920 ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k *ar,
7921 			       const struct wmi_wmm_params_all_arg *arg)
7922 {
7923 	struct wmi_pdev_set_wmm_params *cmd;
7924 	struct sk_buff *skb;
7925 
7926 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7927 	if (!skb)
7928 		return ERR_PTR(-ENOMEM);
7929 
7930 	cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
7931 	ath10k_wmi_set_wmm_param(&cmd->ac_be, &arg->ac_be);
7932 	ath10k_wmi_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
7933 	ath10k_wmi_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
7934 	ath10k_wmi_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
7935 
7936 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
7937 	return skb;
7938 }
7939 
7940 static struct sk_buff *
7941 ath10k_wmi_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
7942 {
7943 	struct wmi_request_stats_cmd *cmd;
7944 	struct sk_buff *skb;
7945 
7946 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7947 	if (!skb)
7948 		return ERR_PTR(-ENOMEM);
7949 
7950 	cmd = (struct wmi_request_stats_cmd *)skb->data;
7951 	cmd->stats_id = __cpu_to_le32(stats_mask);
7952 
7953 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats 0x%08x\n",
7954 		   stats_mask);
7955 	return skb;
7956 }
7957 
7958 static struct sk_buff *
7959 ath10k_wmi_op_gen_force_fw_hang(struct ath10k *ar,
7960 				enum wmi_force_fw_hang_type type, u32 delay_ms)
7961 {
7962 	struct wmi_force_fw_hang_cmd *cmd;
7963 	struct sk_buff *skb;
7964 
7965 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7966 	if (!skb)
7967 		return ERR_PTR(-ENOMEM);
7968 
7969 	cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
7970 	cmd->type = __cpu_to_le32(type);
7971 	cmd->delay_ms = __cpu_to_le32(delay_ms);
7972 
7973 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
7974 		   type, delay_ms);
7975 	return skb;
7976 }
7977 
7978 static struct sk_buff *
7979 ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
7980 			     u32 log_level)
7981 {
7982 	struct wmi_dbglog_cfg_cmd *cmd;
7983 	struct sk_buff *skb;
7984 	u32 cfg;
7985 
7986 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
7987 	if (!skb)
7988 		return ERR_PTR(-ENOMEM);
7989 
7990 	cmd = (struct wmi_dbglog_cfg_cmd *)skb->data;
7991 
7992 	if (module_enable) {
7993 		cfg = SM(log_level,
7994 			 ATH10K_DBGLOG_CFG_LOG_LVL);
7995 	} else {
7996 		/* set back defaults, all modules with WARN level */
7997 		cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
7998 			 ATH10K_DBGLOG_CFG_LOG_LVL);
7999 		module_enable = ~0;
8000 	}
8001 
8002 	cmd->module_enable = __cpu_to_le32(module_enable);
8003 	cmd->module_valid = __cpu_to_le32(~0);
8004 	cmd->config_enable = __cpu_to_le32(cfg);
8005 	cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
8006 
8007 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8008 		   "wmi dbglog cfg modules %08x %08x config %08x %08x\n",
8009 		   __le32_to_cpu(cmd->module_enable),
8010 		   __le32_to_cpu(cmd->module_valid),
8011 		   __le32_to_cpu(cmd->config_enable),
8012 		   __le32_to_cpu(cmd->config_valid));
8013 	return skb;
8014 }
8015 
8016 static struct sk_buff *
8017 ath10k_wmi_10_4_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
8018 				  u32 log_level)
8019 {
8020 	struct wmi_10_4_dbglog_cfg_cmd *cmd;
8021 	struct sk_buff *skb;
8022 	u32 cfg;
8023 
8024 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8025 	if (!skb)
8026 		return ERR_PTR(-ENOMEM);
8027 
8028 	cmd = (struct wmi_10_4_dbglog_cfg_cmd *)skb->data;
8029 
8030 	if (module_enable) {
8031 		cfg = SM(log_level,
8032 			 ATH10K_DBGLOG_CFG_LOG_LVL);
8033 	} else {
8034 		/* set back defaults, all modules with WARN level */
8035 		cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
8036 			 ATH10K_DBGLOG_CFG_LOG_LVL);
8037 		module_enable = ~0;
8038 	}
8039 
8040 	cmd->module_enable = __cpu_to_le64(module_enable);
8041 	cmd->module_valid = __cpu_to_le64(~0);
8042 	cmd->config_enable = __cpu_to_le32(cfg);
8043 	cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
8044 
8045 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8046 		   "wmi dbglog cfg modules 0x%016llx 0x%016llx config %08x %08x\n",
8047 		   __le64_to_cpu(cmd->module_enable),
8048 		   __le64_to_cpu(cmd->module_valid),
8049 		   __le32_to_cpu(cmd->config_enable),
8050 		   __le32_to_cpu(cmd->config_valid));
8051 	return skb;
8052 }
8053 
8054 static struct sk_buff *
8055 ath10k_wmi_op_gen_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
8056 {
8057 	struct wmi_pdev_pktlog_enable_cmd *cmd;
8058 	struct sk_buff *skb;
8059 
8060 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8061 	if (!skb)
8062 		return ERR_PTR(-ENOMEM);
8063 
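	/* Clamp the requested filter to the pktlog event bits the driver
	 * supports.
	 */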
8064 	ev_bitmap &= ATH10K_PKTLOG_ANY;
8065 
8066 	cmd = (struct wmi_pdev_pktlog_enable_cmd *)skb->data;
8067 	cmd->ev_bitmap = __cpu_to_le32(ev_bitmap);
8068 
8069 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi enable pktlog filter 0x%08x\n",
8070 		   ev_bitmap);
8071 	return skb;
8072 }
8073 
8074 static struct sk_buff *
8075 ath10k_wmi_op_gen_pktlog_disable(struct ath10k *ar)
8076 {
8077 	struct sk_buff *skb;
8078 
8079 	skb = ath10k_wmi_alloc_skb(ar, 0);
8080 	if (!skb)
8081 		return ERR_PTR(-ENOMEM);
8082 
8083 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi disable pktlog\n");
8084 	return skb;
8085 }
8086 
8087 static struct sk_buff *
8088 ath10k_wmi_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period,
8089 				      u32 duration, u32 next_offset,
8090 				      u32 enabled)
8091 {
8092 	struct wmi_pdev_set_quiet_cmd *cmd;
8093 	struct sk_buff *skb;
8094 
8095 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8096 	if (!skb)
8097 		return ERR_PTR(-ENOMEM);
8098 
8099 	cmd = (struct wmi_pdev_set_quiet_cmd *)skb->data;
8100 	cmd->period = __cpu_to_le32(period);
8101 	cmd->duration = __cpu_to_le32(duration);
8102 	cmd->next_start = __cpu_to_le32(next_offset);
8103 	cmd->enabled = __cpu_to_le32(enabled);
8104 
8105 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8106 		   "wmi quiet param: period %u duration %u enabled %d\n",
8107 		   period, duration, enabled);
8108 	return skb;
8109 }
8110 
8111 static struct sk_buff *
8112 ath10k_wmi_op_gen_addba_clear_resp(struct ath10k *ar, u32 vdev_id,
8113 				   const u8 *mac)
8114 {
8115 	struct wmi_addba_clear_resp_cmd *cmd;
8116 	struct sk_buff *skb;
8117 
8118 	if (!mac)
8119 		return ERR_PTR(-EINVAL);
8120 
8121 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8122 	if (!skb)
8123 		return ERR_PTR(-ENOMEM);
8124 
8125 	cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
8126 	cmd->vdev_id = __cpu_to_le32(vdev_id);
8127 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
8128 
8129 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8130 		   "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
8131 		   vdev_id, mac);
8132 	return skb;
8133 }
8134 
8135 static struct sk_buff *
8136 ath10k_wmi_op_gen_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
8137 			     u32 tid, u32 buf_size)
8138 {
8139 	struct wmi_addba_send_cmd *cmd;
8140 	struct sk_buff *skb;
8141 
8142 	if (!mac)
8143 		return ERR_PTR(-EINVAL);
8144 
8145 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8146 	if (!skb)
8147 		return ERR_PTR(-ENOMEM);
8148 
8149 	cmd = (struct wmi_addba_send_cmd *)skb->data;
8150 	cmd->vdev_id = __cpu_to_le32(vdev_id);
8151 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
8152 	cmd->tid = __cpu_to_le32(tid);
8153 	cmd->buffersize = __cpu_to_le32(buf_size);
8154 
8155 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8156 		   "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
8157 		   vdev_id, mac, tid, buf_size);
8158 	return skb;
8159 }
8160 
8161 static struct sk_buff *
8162 ath10k_wmi_op_gen_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
8163 				 u32 tid, u32 status)
8164 {
8165 	struct wmi_addba_setresponse_cmd *cmd;
8166 	struct sk_buff *skb;
8167 
8168 	if (!mac)
8169 		return ERR_PTR(-EINVAL);
8170 
8171 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8172 	if (!skb)
8173 		return ERR_PTR(-ENOMEM);
8174 
8175 	cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
8176 	cmd->vdev_id = __cpu_to_le32(vdev_id);
8177 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
8178 	cmd->tid = __cpu_to_le32(tid);
8179 	cmd->statuscode = __cpu_to_le32(status);
8180 
8181 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8182 		   "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
8183 		   vdev_id, mac, tid, status);
8184 	return skb;
8185 }
8186 
8187 static struct sk_buff *
8188 ath10k_wmi_op_gen_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
8189 			     u32 tid, u32 initiator, u32 reason)
8190 {
8191 	struct wmi_delba_send_cmd *cmd;
8192 	struct sk_buff *skb;
8193 
8194 	if (!mac)
8195 		return ERR_PTR(-EINVAL);
8196 
8197 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8198 	if (!skb)
8199 		return ERR_PTR(-ENOMEM);
8200 
8201 	cmd = (struct wmi_delba_send_cmd *)skb->data;
8202 	cmd->vdev_id = __cpu_to_le32(vdev_id);
8203 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
8204 	cmd->tid = __cpu_to_le32(tid);
8205 	cmd->initiator = __cpu_to_le32(initiator);
8206 	cmd->reasoncode = __cpu_to_le32(reason);
8207 
8208 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8209 		   "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
8210 		   vdev_id, mac, tid, initiator, reason);
8211 	return skb;
8212 }
8213 
8214 static struct sk_buff *
8215 ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config(struct ath10k *ar, u32 param)
8216 {
8217 	struct wmi_pdev_get_tpc_config_cmd *cmd;
8218 	struct sk_buff *skb;
8219 
8220 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8221 	if (!skb)
8222 		return ERR_PTR(-ENOMEM);
8223 
8224 	cmd = (struct wmi_pdev_get_tpc_config_cmd *)skb->data;
8225 	cmd->param = __cpu_to_le32(param);
8226 
8227 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8228 		   "wmi pdev get tpc config param %d\n", param);
8229 	return skb;
8230 }
8231 
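/* The fill helpers below render firmware stats into the debugfs fw_stats
 * buffer. Each appends at *length and relies on scnprintf() clamping
 * against ATH10K_FW_STATS_BUF_SIZE, so an overfull buffer is truncated
 * rather than overrun.
 */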
8232 static void
8233 ath10k_wmi_fw_pdev_base_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
8234 				   char *buf, u32 *length)
8235 {
8236 	u32 len = *length;
8237 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8238 
8239 	len += scnprintf(buf + len, buf_len - len, "\n");
8240 	len += scnprintf(buf + len, buf_len - len, "%30s\n",
8241 			"ath10k PDEV stats");
8242 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8243 			"=================");
8244 
8245 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8246 			"Channel noise floor", pdev->ch_noise_floor);
8247 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8248 			"Channel TX power", pdev->chan_tx_power);
8249 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8250 			"TX frame count", pdev->tx_frame_count);
8251 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8252 			"RX frame count", pdev->rx_frame_count);
8253 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8254 			"RX clear count", pdev->rx_clear_count);
8255 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8256 			"Cycle count", pdev->cycle_count);
8257 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8258 			"PHY error count", pdev->phy_err_count);
8259 
8260 	*length = len;
8261 }
8262 
8263 static void
8264 ath10k_wmi_fw_pdev_extra_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
8265 				    char *buf, u32 *length)
8266 {
8267 	u32 len = *length;
8268 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8269 
8270 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8271 			"RTS bad count", pdev->rts_bad);
8272 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8273 			"RTS good count", pdev->rts_good);
8274 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8275 			"FCS bad count", pdev->fcs_bad);
8276 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8277 			"No beacon count", pdev->no_beacons);
8278 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8279 			"MIB int count", pdev->mib_int_count);
8280 
8281 	len += scnprintf(buf + len, buf_len - len, "\n");
8282 	*length = len;
8283 }
8284 
8285 static void
8286 ath10k_wmi_fw_pdev_tx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
8287 				 char *buf, u32 *length)
8288 {
8289 	u32 len = *length;
8290 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8291 
8292 	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
8293 			 "ath10k PDEV TX stats");
8294 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8295 				 "=================");
8296 
8297 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8298 			 "HTT cookies queued", pdev->comp_queued);
8299 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8300 			 "HTT cookies disp.", pdev->comp_delivered);
8301 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8302 			 "MSDU queued", pdev->msdu_enqued);
8303 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8304 			 "MPDU queued", pdev->mpdu_enqued);
8305 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8306 			 "MSDUs dropped", pdev->wmm_drop);
8307 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8308 			 "Local enqued", pdev->local_enqued);
8309 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8310 			 "Local freed", pdev->local_freed);
8311 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8312 			 "HW queued", pdev->hw_queued);
8313 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8314 			 "PPDUs reaped", pdev->hw_reaped);
8315 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8316 			 "Num underruns", pdev->underrun);
8317 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8318 			 "PPDUs cleaned", pdev->tx_abort);
8319 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8320 			 "MPDUs requeued", pdev->mpdus_requeued);
8321 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8322 			 "Excessive retries", pdev->tx_ko);
8323 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8324 			 "HW rate", pdev->data_rc);
8325 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8326 			 "Sched self triggers", pdev->self_triggers);
8327 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8328 			 "Dropped due to SW retries",
8329 			 pdev->sw_retry_failure);
8330 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8331 			 "Illegal rate phy errors",
8332 			 pdev->illgl_rate_phy_err);
8333 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8334 			 "Pdev continuous xretry", pdev->pdev_cont_xretry);
8335 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8336 			 "TX timeout", pdev->pdev_tx_timeout);
8337 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8338 			 "PDEV resets", pdev->pdev_resets);
8339 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8340 			 "PHY underrun", pdev->phy_underrun);
8341 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8342 			 "MPDU is more than txop limit", pdev->txop_ovf);
8343 	*length = len;
8344 }
8345 
8346 static void
8347 ath10k_wmi_fw_pdev_rx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
8348 				 char *buf, u32 *length)
8349 {
8350 	u32 len = *length;
8351 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8352 
8353 	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
8354 			 "ath10k PDEV RX stats");
8355 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8356 				 "=================");
8357 
8358 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8359 			 "Mid PPDU route change",
8360 			 pdev->mid_ppdu_route_change);
8361 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8362 			 "Tot. number of statuses", pdev->status_rcvd);
8363 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8364 			 "Extra frags on rings 0", pdev->r0_frags);
8365 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8366 			 "Extra frags on rings 1", pdev->r1_frags);
8367 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8368 			 "Extra frags on rings 2", pdev->r2_frags);
8369 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8370 			 "Extra frags on rings 3", pdev->r3_frags);
8371 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8372 			 "MSDUs delivered to HTT", pdev->htt_msdus);
8373 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8374 			 "MPDUs delivered to HTT", pdev->htt_mpdus);
8375 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8376 			 "MSDUs delivered to stack", pdev->loc_msdus);
8377 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8378 			 "MPDUs delivered to stack", pdev->loc_mpdus);
8379 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8380 			 "Oversized AMSDUs", pdev->oversize_amsdu);
8381 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8382 			 "PHY errors", pdev->phy_errs);
8383 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8384 			 "PHY errors drops", pdev->phy_err_drop);
8385 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8386 			 "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
8387 	*length = len;
8388 }
8389 
8390 static void
8391 ath10k_wmi_fw_vdev_stats_fill(const struct ath10k_fw_stats_vdev *vdev,
8392 			      char *buf, u32 *length)
8393 {
8394 	u32 len = *length;
8395 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8396 	int i;
8397 
8398 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8399 			"vdev id", vdev->vdev_id);
8400 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8401 			"beacon snr", vdev->beacon_snr);
8402 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8403 			"data snr", vdev->data_snr);
8404 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8405 			"num rx frames", vdev->num_rx_frames);
8406 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8407 			"num rts fail", vdev->num_rts_fail);
8408 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8409 			"num rts success", vdev->num_rts_success);
8410 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8411 			"num rx err", vdev->num_rx_err);
8412 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8413 			"num rx discard", vdev->num_rx_discard);
8414 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8415 			"num tx not acked", vdev->num_tx_not_acked);
8416 
8417 	for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
8418 		len += scnprintf(buf + len, buf_len - len,
8419 				"%25s [%02d] %u\n",
8420 				"num tx frames", i,
8421 				vdev->num_tx_frames[i]);
8422 
8423 	for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
8424 		len += scnprintf(buf + len, buf_len - len,
8425 				"%25s [%02d] %u\n",
8426 				"num tx frames retries", i,
8427 				vdev->num_tx_frames_retries[i]);
8428 
8429 	for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
8430 		len += scnprintf(buf + len, buf_len - len,
8431 				"%25s [%02d] %u\n",
8432 				"num tx frames failures", i,
8433 				vdev->num_tx_frames_failures[i]);
8434 
8435 	for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
8436 		len += scnprintf(buf + len, buf_len - len,
8437 				"%25s [%02d] 0x%08x\n",
8438 				"tx rate history", i,
8439 				vdev->tx_rate_history[i]);
8440 
8441 	for (i = 0 ; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
8442 		len += scnprintf(buf + len, buf_len - len,
8443 				"%25s [%02d] %u\n",
8444 				"beacon rssi history", i,
8445 				vdev->beacon_rssi_history[i]);
8446 
8447 	len += scnprintf(buf + len, buf_len - len, "\n");
8448 	*length = len;
8449 }
8450 
8451 static void
8452 ath10k_wmi_fw_peer_stats_fill(const struct ath10k_fw_stats_peer *peer,
8453 			      char *buf, u32 *length, bool extended_peer)
8454 {
8455 	u32 len = *length;
8456 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8457 
8458 	len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
8459 			"Peer MAC address", peer->peer_macaddr);
8460 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8461 			"Peer RSSI", peer->peer_rssi);
8462 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8463 			"Peer TX rate", peer->peer_tx_rate);
8464 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8465 			"Peer RX rate", peer->peer_rx_rate);
8466 	if (!extended_peer)
8467 		len += scnprintf(buf + len, buf_len - len, "%30s %llu\n",
8468 				"Peer RX duration", peer->rx_duration);
8469 
8470 	len += scnprintf(buf + len, buf_len - len, "\n");
8471 	*length = len;
8472 }
8473 
8474 static void
8475 ath10k_wmi_fw_extd_peer_stats_fill(const struct ath10k_fw_extd_stats_peer *peer,
8476 				   char *buf, u32 *length)
8477 {
8478 	u32 len = *length;
8479 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8480 
8481 	len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
8482 			"Peer MAC address", peer->peer_macaddr);
8483 	len += scnprintf(buf + len, buf_len - len, "%30s %llu\n",
8484 			"Peer RX duration", peer->rx_duration);
	*length = len;
8485 }
8486 
8487 void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar,
8488 				      struct ath10k_fw_stats *fw_stats,
8489 				      char *buf)
8490 {
8491 	u32 len = 0;
8492 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8493 	const struct ath10k_fw_stats_pdev *pdev;
8494 	const struct ath10k_fw_stats_vdev *vdev;
8495 	const struct ath10k_fw_stats_peer *peer;
8496 	size_t num_peers;
8497 	size_t num_vdevs;
8498 
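	/* The stats lists are updated asynchronously from the WMI event
	 * path, so hold data_lock for the whole walk.
	 */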
8499 	spin_lock_bh(&ar->data_lock);
8500 
8501 	pdev = list_first_entry_or_null(&fw_stats->pdevs,
8502 					struct ath10k_fw_stats_pdev, list);
8503 	if (!pdev) {
8504 		ath10k_warn(ar, "failed to get pdev stats\n");
8505 		goto unlock;
8506 	}
8507 
8508 	num_peers = list_count_nodes(&fw_stats->peers);
8509 	num_vdevs = list_count_nodes(&fw_stats->vdevs);
8510 
8511 	ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
8512 	ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
8513 	ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
8514 
8515 	len += scnprintf(buf + len, buf_len - len, "\n");
8516 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8517 			 "ath10k VDEV stats", num_vdevs);
8518 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8519 				 "=================");
8520 
8521 	list_for_each_entry(vdev, &fw_stats->vdevs, list) {
8522 		ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len);
8523 	}
8524 
8525 	len += scnprintf(buf + len, buf_len - len, "\n");
8526 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8527 			 "ath10k PEER stats", num_peers);
8528 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8529 				 "=================");
8530 
8531 	list_for_each_entry(peer, &fw_stats->peers, list) {
8532 		ath10k_wmi_fw_peer_stats_fill(peer, buf, &len,
8533 					      fw_stats->extended);
8534 	}
8535 
8536 unlock:
8537 	spin_unlock_bh(&ar->data_lock);
8538 
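	/* Always NUL terminate; if the buffer filled up, terminate in place
	 * of the last byte written.
	 */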
8539 	if (len >= buf_len)
8540 		buf[len - 1] = 0;
8541 	else
8542 		buf[len] = 0;
8543 }
8544 
8545 void ath10k_wmi_10x_op_fw_stats_fill(struct ath10k *ar,
8546 				     struct ath10k_fw_stats *fw_stats,
8547 				     char *buf)
8548 {
8549 	unsigned int len = 0;
8550 	unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE;
8551 	const struct ath10k_fw_stats_pdev *pdev;
8552 	const struct ath10k_fw_stats_vdev *vdev;
8553 	const struct ath10k_fw_stats_peer *peer;
8554 	size_t num_peers;
8555 	size_t num_vdevs;
8556 
8557 	spin_lock_bh(&ar->data_lock);
8558 
8559 	pdev = list_first_entry_or_null(&fw_stats->pdevs,
8560 					struct ath10k_fw_stats_pdev, list);
8561 	if (!pdev) {
8562 		ath10k_warn(ar, "failed to get pdev stats\n");
8563 		goto unlock;
8564 	}
8565 
8566 	num_peers = list_count_nodes(&fw_stats->peers);
8567 	num_vdevs = list_count_nodes(&fw_stats->vdevs);
8568 
8569 	ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
8570 	ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len);
8571 	ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
8572 	ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
8573 
8574 	len += scnprintf(buf + len, buf_len - len, "\n");
8575 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8576 			 "ath10k VDEV stats", num_vdevs);
8577 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8578 				 "=================");
8579 
8580 	list_for_each_entry(vdev, &fw_stats->vdevs, list) {
8581 		ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len);
8582 	}
8583 
8584 	len += scnprintf(buf + len, buf_len - len, "\n");
8585 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8586 			 "ath10k PEER stats", num_peers);
8587 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8588 				 "=================");
8589 
8590 	list_for_each_entry(peer, &fw_stats->peers, list) {
8591 		ath10k_wmi_fw_peer_stats_fill(peer, buf, &len,
8592 					      fw_stats->extended);
8593 	}
8594 
8595 unlock:
8596 	spin_unlock_bh(&ar->data_lock);
8597 
8598 	if (len >= buf_len)
8599 		buf[len - 1] = 0;
8600 	else
8601 		buf[len] = 0;
8602 }
8603 
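/* Build the pdev adaptive-CCA command, enabling or disabling adaptive
 * CCA with the given detection level and margin.
 */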
8604 static struct sk_buff *
8605 ath10k_wmi_op_gen_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
8606 					   u32 detect_level, u32 detect_margin)
8607 {
8608 	struct wmi_pdev_set_adaptive_cca_params *cmd;
8609 	struct sk_buff *skb;
8610 
8611 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8612 	if (!skb)
8613 		return ERR_PTR(-ENOMEM);
8614 
8615 	cmd = (struct wmi_pdev_set_adaptive_cca_params *)skb->data;
8616 	cmd->enable = __cpu_to_le32(enable);
8617 	cmd->cca_detect_level = __cpu_to_le32(detect_level);
8618 	cmd->cca_detect_margin = __cpu_to_le32(detect_margin);
8619 
8620 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8621 		   "wmi pdev set adaptive cca params enable:%d detection level:%d detection margin:%d\n",
8622 		   enable, detect_level, detect_margin);
8623 	return skb;
8624 }
8625 
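/* Print one extended vdev stats entry (10.4 firmware).  FTM counters
 * are only emitted when firmware marks them valid via
 * WMI_VDEV_STATS_FTM_COUNT_VALID.
 */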
8626 static void
8627 ath10k_wmi_fw_vdev_stats_extd_fill(const struct ath10k_fw_stats_vdev_extd *vdev,
8628 				   char *buf, u32 *length)
8629 {
8630 	u32 len = *length;
8631 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8632 	u32 val;
8633 
8634 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8635 			 "vdev id", vdev->vdev_id);
8636 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8637 			 "ppdu aggr count", vdev->ppdu_aggr_cnt);
8638 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8639 			 "ppdu noack", vdev->ppdu_noack);
8640 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8641 			 "mpdu queued", vdev->mpdu_queued);
8642 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8643 			 "ppdu nonaggr count", vdev->ppdu_nonaggr_cnt);
8644 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8645 			 "mpdu sw requeued", vdev->mpdu_sw_requeued);
8646 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8647 			 "mpdu success retry", vdev->mpdu_suc_retry);
8648 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8649 			 "mpdu success multitry", vdev->mpdu_suc_multitry);
8650 	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8651 			 "mpdu fail retry", vdev->mpdu_fail_retry);
8652 	val = vdev->tx_ftm_suc;
8653 	if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8654 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8655 				 "tx ftm success",
8656 				 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8657 	val = vdev->tx_ftm_suc_retry;
8658 	if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8659 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8660 				 "tx ftm success retry",
8661 				 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8662 	val = vdev->tx_ftm_fail;
8663 	if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8664 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8665 				 "tx ftm fail",
8666 				 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8667 	val = vdev->rx_ftmr_cnt;
8668 	if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8669 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8670 				 "rx ftm request count",
8671 				 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8672 	val = vdev->rx_ftmr_dup_cnt;
8673 	if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8674 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8675 				 "rx ftm request dup count",
8676 				 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8677 	val = vdev->rx_iftmr_cnt;
8678 	if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8679 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8680 				 "rx initial ftm req count",
8681 				 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8682 	val = vdev->rx_iftmr_dup_cnt;
8683 	if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
8684 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8685 				 "rx initial ftm req dup cnt",
8686 				 MS(val, WMI_VDEV_STATS_FTM_COUNT));
8687 	len += scnprintf(buf + len, buf_len - len, "\n");
8688 
8689 	*length = len;
8690 }
8691 
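/* 10.4 firmware stats fill: adds the HW pause/sequence/MPDU counters,
 * uses the extended vdev stats entries and, when extended stats are
 * enabled, also prints the per-peer extended entries.
 */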
8692 void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar,
8693 				      struct ath10k_fw_stats *fw_stats,
8694 				      char *buf)
8695 {
8696 	u32 len = 0;
8697 	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
8698 	const struct ath10k_fw_stats_pdev *pdev;
8699 	const struct ath10k_fw_stats_vdev_extd *vdev;
8700 	const struct ath10k_fw_stats_peer *peer;
8701 	const struct ath10k_fw_extd_stats_peer *extd_peer;
8702 	size_t num_peers;
8703 	size_t num_vdevs;
8704 
8705 	spin_lock_bh(&ar->data_lock);
8706 
8707 	pdev = list_first_entry_or_null(&fw_stats->pdevs,
8708 					struct ath10k_fw_stats_pdev, list);
8709 	if (!pdev) {
8710 		ath10k_warn(ar, "failed to get pdev stats\n");
8711 		goto unlock;
8712 	}
8713 
8714 	num_peers = list_count_nodes(&fw_stats->peers);
8715 	num_vdevs = list_count_nodes(&fw_stats->vdevs);
8716 
8717 	ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
8718 	ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len);
8719 	ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
8720 
8721 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8722 			"HW paused", pdev->hw_paused);
8723 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8724 			"Seqs posted", pdev->seq_posted);
8725 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8726 			"Seqs failed queueing", pdev->seq_failed_queueing);
8727 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8728 			"Seqs completed", pdev->seq_completed);
8729 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8730 			"Seqs restarted", pdev->seq_restarted);
8731 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8732 			"MU Seqs posted", pdev->mu_seq_posted);
8733 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8734 			"MPDUs SW flushed", pdev->mpdus_sw_flush);
8735 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8736 			"MPDUs HW filtered", pdev->mpdus_hw_filter);
8737 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8738 			"MPDUs truncated", pdev->mpdus_truncated);
8739 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8740 			"MPDUs receive no ACK", pdev->mpdus_ack_failed);
8741 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8742 			"MPDUs expired", pdev->mpdus_expired);
8743 
8744 	ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
8745 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8746 			"Num Rx Overflow errors", pdev->rx_ovfl_errs);
8747 
8748 	len += scnprintf(buf + len, buf_len - len, "\n");
8749 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8750 			"ath10k VDEV stats", num_vdevs);
8751 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8752 				"=================");
8753 	list_for_each_entry(vdev, &fw_stats->vdevs, list) {
8754 		ath10k_wmi_fw_vdev_stats_extd_fill(vdev, buf, &len);
8755 	}
8756 
8757 	len += scnprintf(buf + len, buf_len - len, "\n");
8758 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8759 			"ath10k PEER stats", num_peers);
8760 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8761 				"=================");
8762 
8763 	list_for_each_entry(peer, &fw_stats->peers, list) {
8764 		ath10k_wmi_fw_peer_stats_fill(peer, buf, &len,
8765 					      fw_stats->extended);
8766 	}
8767 
8768 	if (fw_stats->extended) {
8769 		list_for_each_entry(extd_peer, &fw_stats->peers_extd, list) {
8770 			ath10k_wmi_fw_extd_peer_stats_fill(extd_peer, buf,
8771 							   &len);
8772 		}
8773 	}
8774 
8775 unlock:
8776 	spin_unlock_bh(&ar->data_lock);
8777 
8778 	if (len >= buf_len)
8779 		buf[len - 1] = 0;
8780 	else
8781 		buf[len] = 0;
8782 }
8783 
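/* Map the generic vdev subtype to the legacy (main firmware) WMI
 * encoding; mesh subtypes are not supported by that interface.
 */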
8784 int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar,
8785 				   enum wmi_vdev_subtype subtype)
8786 {
8787 	switch (subtype) {
8788 	case WMI_VDEV_SUBTYPE_NONE:
8789 		return WMI_VDEV_SUBTYPE_LEGACY_NONE;
8790 	case WMI_VDEV_SUBTYPE_P2P_DEVICE:
8791 		return WMI_VDEV_SUBTYPE_LEGACY_P2P_DEV;
8792 	case WMI_VDEV_SUBTYPE_P2P_CLIENT:
8793 		return WMI_VDEV_SUBTYPE_LEGACY_P2P_CLI;
8794 	case WMI_VDEV_SUBTYPE_P2P_GO:
8795 		return WMI_VDEV_SUBTYPE_LEGACY_P2P_GO;
8796 	case WMI_VDEV_SUBTYPE_PROXY_STA:
8797 		return WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA;
8798 	case WMI_VDEV_SUBTYPE_MESH_11S:
8799 	case WMI_VDEV_SUBTYPE_MESH_NON_11S:
8800 		return -EOPNOTSUPP;
8801 	}
8802 	return -EOPNOTSUPP;
8803 }
8804 
8805 static int ath10k_wmi_10_2_4_op_get_vdev_subtype(struct ath10k *ar,
8806 						 enum wmi_vdev_subtype subtype)
8807 {
8808 	switch (subtype) {
8809 	case WMI_VDEV_SUBTYPE_NONE:
8810 		return WMI_VDEV_SUBTYPE_10_2_4_NONE;
8811 	case WMI_VDEV_SUBTYPE_P2P_DEVICE:
8812 		return WMI_VDEV_SUBTYPE_10_2_4_P2P_DEV;
8813 	case WMI_VDEV_SUBTYPE_P2P_CLIENT:
8814 		return WMI_VDEV_SUBTYPE_10_2_4_P2P_CLI;
8815 	case WMI_VDEV_SUBTYPE_P2P_GO:
8816 		return WMI_VDEV_SUBTYPE_10_2_4_P2P_GO;
8817 	case WMI_VDEV_SUBTYPE_PROXY_STA:
8818 		return WMI_VDEV_SUBTYPE_10_2_4_PROXY_STA;
8819 	case WMI_VDEV_SUBTYPE_MESH_11S:
8820 		return WMI_VDEV_SUBTYPE_10_2_4_MESH_11S;
8821 	case WMI_VDEV_SUBTYPE_MESH_NON_11S:
8822 		return -EOPNOTSUPP;
8823 	}
8824 	return -EOPNOTSUPP;
8825 }
8826 
8827 static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar,
8828 					       enum wmi_vdev_subtype subtype)
8829 {
8830 	switch (subtype) {
8831 	case WMI_VDEV_SUBTYPE_NONE:
8832 		return WMI_VDEV_SUBTYPE_10_4_NONE;
8833 	case WMI_VDEV_SUBTYPE_P2P_DEVICE:
8834 		return WMI_VDEV_SUBTYPE_10_4_P2P_DEV;
8835 	case WMI_VDEV_SUBTYPE_P2P_CLIENT:
8836 		return WMI_VDEV_SUBTYPE_10_4_P2P_CLI;
8837 	case WMI_VDEV_SUBTYPE_P2P_GO:
8838 		return WMI_VDEV_SUBTYPE_10_4_P2P_GO;
8839 	case WMI_VDEV_SUBTYPE_PROXY_STA:
8840 		return WMI_VDEV_SUBTYPE_10_4_PROXY_STA;
8841 	case WMI_VDEV_SUBTYPE_MESH_11S:
8842 		return WMI_VDEV_SUBTYPE_10_4_MESH_11S;
8843 	case WMI_VDEV_SUBTYPE_MESH_NON_11S:
8844 		return WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S;
8845 	}
8846 	return -EOPNOTSUPP;
8847 }
8848 
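/* Build the 10.4 extended resource config command describing the host
 * platform type, firmware feature bitmap and TDLS/coex resources.
 * TDLS sleep-STA entries are only requested when firmware advertises
 * WMI_SERVICE_TDLS_UAPSD_SLEEP_STA.
 */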
8849 static struct sk_buff *
8850 ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar,
8851 				    enum wmi_host_platform_type type,
8852 				    u32 fw_feature_bitmap)
8853 {
8854 	struct wmi_ext_resource_config_10_4_cmd *cmd;
8855 	struct sk_buff *skb;
8856 	u32 num_tdls_sleep_sta = 0;
8857 
8858 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8859 	if (!skb)
8860 		return ERR_PTR(-ENOMEM);
8861 
8862 	if (test_bit(WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, ar->wmi.svc_map))
8863 		num_tdls_sleep_sta = TARGET_10_4_NUM_TDLS_SLEEP_STA;
8864 
8865 	cmd = (struct wmi_ext_resource_config_10_4_cmd *)skb->data;
8866 	cmd->host_platform_config = __cpu_to_le32(type);
8867 	cmd->fw_feature_bitmap = __cpu_to_le32(fw_feature_bitmap);
8868 	cmd->wlan_gpio_priority = __cpu_to_le32(ar->coex_gpio_pin);
8869 	cmd->coex_version = __cpu_to_le32(WMI_NO_COEX_VERSION_SUPPORT);
8870 	cmd->coex_gpio_pin1 = __cpu_to_le32(-1);
8871 	cmd->coex_gpio_pin2 = __cpu_to_le32(-1);
8872 	cmd->coex_gpio_pin3 = __cpu_to_le32(-1);
8873 	cmd->num_tdls_vdevs = __cpu_to_le32(TARGET_10_4_NUM_TDLS_VDEVS);
8874 	cmd->num_tdls_conn_table_entries = __cpu_to_le32(20);
8875 	cmd->max_tdls_concurrent_sleep_sta = __cpu_to_le32(num_tdls_sleep_sta);
8876 	cmd->max_tdls_concurrent_buffer_sta =
8877 			__cpu_to_le32(TARGET_10_4_NUM_TDLS_BUFFER_STA);
8878 
8879 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8880 		   "wmi ext resource config host type %d firmware feature bitmap %08x\n",
8881 		   type, fw_feature_bitmap);
8882 	return skb;
8883 }
8884 
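/* Build the 10.4 TDLS set-state command with the driver's default
 * thresholds and timeouts.  Firmware that only supports explicit TDLS
 * mode is downgraded from active to passive discovery, and buffer-STA
 * support is enabled when the corresponding service bit is set.
 */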
8885 static struct sk_buff *
8886 ath10k_wmi_10_4_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
8887 					 enum wmi_tdls_state state)
8888 {
8889 	struct wmi_10_4_tdls_set_state_cmd *cmd;
8890 	struct sk_buff *skb;
8891 	u32 options = 0;
8892 
8893 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8894 	if (!skb)
8895 		return ERR_PTR(-ENOMEM);
8896 
8897 	if (test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map) &&
8898 	    state == WMI_TDLS_ENABLE_ACTIVE)
8899 		state = WMI_TDLS_ENABLE_PASSIVE;
8900 
8901 	if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
8902 		options |= WMI_TDLS_BUFFER_STA_EN;
8903 
8904 	cmd = (struct wmi_10_4_tdls_set_state_cmd *)skb->data;
8905 	cmd->vdev_id = __cpu_to_le32(vdev_id);
8906 	cmd->state = __cpu_to_le32(state);
8907 	cmd->notification_interval_ms = __cpu_to_le32(5000);
8908 	cmd->tx_discovery_threshold = __cpu_to_le32(100);
8909 	cmd->tx_teardown_threshold = __cpu_to_le32(5);
8910 	cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
8911 	cmd->rssi_delta = __cpu_to_le32(-20);
8912 	cmd->tdls_options = __cpu_to_le32(options);
8913 	cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
8914 	cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
8915 	cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
8916 	cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
8917 	cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);
8918 	cmd->teardown_notification_ms = __cpu_to_le32(10);
8919 	cmd->tdls_peer_kickout_threshold = __cpu_to_le32(96);
8920 
8921 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi update fw tdls state %d for vdev %i\n",
8922 		   state, vdev_id);
8923 	return skb;
8924 }
8925 
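/* Convert U-APSD queue flags and the max service period into the WMI
 * TDLS peer_qos bitmap.
 */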
8926 static u32 ath10k_wmi_prepare_peer_qos(u8 uapsd_queues, u8 sp)
8927 {
8928 	u32 peer_qos = 0;
8929 
8930 	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
8931 		peer_qos |= WMI_TDLS_PEER_QOS_AC_VO;
8932 	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
8933 		peer_qos |= WMI_TDLS_PEER_QOS_AC_VI;
8934 	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
8935 		peer_qos |= WMI_TDLS_PEER_QOS_AC_BK;
8936 	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
8937 		peer_qos |= WMI_TDLS_PEER_QOS_AC_BE;
8938 
8939 	peer_qos |= SM(sp, WMI_TDLS_PEER_SP);
8940 
8941 	return peer_qos;
8942 }
8943 
8944 static struct sk_buff *
8945 ath10k_wmi_10_4_op_gen_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
8946 {
8947 	struct wmi_pdev_get_tpc_table_cmd *cmd;
8948 	struct sk_buff *skb;
8949 
8950 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
8951 	if (!skb)
8952 		return ERR_PTR(-ENOMEM);
8953 
8954 	cmd = (struct wmi_pdev_get_tpc_table_cmd *)skb->data;
8955 	cmd->param = __cpu_to_le32(param);
8956 
8957 	ath10k_dbg(ar, ATH10K_DBG_WMI,
8958 		   "wmi pdev get tpc table param:%d\n", param);
8959 	return skb;
8960 }
8961 
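/* Build the 10.4 TDLS peer update command.  The command struct already
 * carries one channel placeholder, so only peer_chan_len - 1 extra
 * channel entries are appended to the allocation.
 */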
8962 static struct sk_buff *
8963 ath10k_wmi_10_4_gen_tdls_peer_update(struct ath10k *ar,
8964 				     const struct wmi_tdls_peer_update_cmd_arg *arg,
8965 				     const struct wmi_tdls_peer_capab_arg *cap,
8966 				     const struct wmi_channel_arg *chan_arg)
8967 {
8968 	struct wmi_10_4_tdls_peer_update_cmd *cmd;
8969 	struct wmi_tdls_peer_capabilities *peer_cap;
8970 	struct wmi_channel *chan;
8971 	struct sk_buff *skb;
8972 	u32 peer_qos;
8973 	int len, chan_len;
8974 	int i;
8975 
8976 	/* tdls peer update cmd has a placeholder for one channel */
8977 	chan_len = cap->peer_chan_len ? (cap->peer_chan_len - 1) : 0;
8978 
8979 	len = sizeof(*cmd) + chan_len * sizeof(*chan);
8980 
8981 	skb = ath10k_wmi_alloc_skb(ar, len);
8982 	if (!skb)
8983 		return ERR_PTR(-ENOMEM);
8984 
8985 	cmd = (struct wmi_10_4_tdls_peer_update_cmd *)skb->data;
8986 	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
8987 	ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
8988 	cmd->peer_state = __cpu_to_le32(arg->peer_state);
8989 
8990 	peer_qos = ath10k_wmi_prepare_peer_qos(cap->peer_uapsd_queues,
8991 					       cap->peer_max_sp);
8992 
8993 	peer_cap = &cmd->peer_capab;
8994 	peer_cap->peer_qos = __cpu_to_le32(peer_qos);
8995 	peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
8996 	peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
8997 	peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
8998 	peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
8999 	peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
9000 	peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);
9001 
9002 	for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
9003 		peer_cap->peer_operclass[i] = cap->peer_operclass[i];
9004 
9005 	peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
9006 	peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
9007 	peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);
9008 
9009 	for (i = 0; i < cap->peer_chan_len; i++) {
9010 		chan = (struct wmi_channel *)&peer_cap->peer_chan_list[i];
9011 		ath10k_wmi_put_wmi_channel(ar, chan, &chan_arg[i]);
9012 	}
9013 
9014 	ath10k_dbg(ar, ATH10K_DBG_WMI,
9015 		   "wmi tdls peer update vdev %i state %d n_chans %u\n",
9016 		   arg->vdev_id, arg->peer_state, cap->peer_chan_len);
9017 	return skb;
9018 }
9019 
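/* Build the radar-found confirmation carrying the PRI, pulse width and
 * sidx ranges of the detected pattern back to firmware.
 */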
9020 static struct sk_buff *
9021 ath10k_wmi_10_4_gen_radar_found(struct ath10k *ar,
9022 				const struct ath10k_radar_found_info *arg)
9023 {
9024 	struct wmi_radar_found_info *cmd;
9025 	struct sk_buff *skb;
9026 
9027 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
9028 	if (!skb)
9029 		return ERR_PTR(-ENOMEM);
9030 
9031 	cmd = (struct wmi_radar_found_info *)skb->data;
9032 	cmd->pri_min   = __cpu_to_le32(arg->pri_min);
9033 	cmd->pri_max   = __cpu_to_le32(arg->pri_max);
9034 	cmd->width_min = __cpu_to_le32(arg->width_min);
9035 	cmd->width_max = __cpu_to_le32(arg->width_max);
9036 	cmd->sidx_min  = __cpu_to_le32(arg->sidx_min);
9037 	cmd->sidx_max  = __cpu_to_le32(arg->sidx_max);
9038 
9039 	ath10k_dbg(ar, ATH10K_DBG_WMI,
9040 		   "wmi radar found pri_min %d pri_max %d width_min %d width_max %d sidx_min %d sidx_max %d\n",
9041 		   arg->pri_min, arg->pri_max, arg->width_min,
9042 		   arg->width_max, arg->sidx_min, arg->sidx_max);
9043 	return skb;
9044 }
9045 
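/* Build a 10.4 per-peer, per-TID configuration command (ack policy,
 * aggregation control, rate control, retry count and RTS/CTS control).
 */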
9046 static struct sk_buff *
9047 ath10k_wmi_10_4_gen_per_peer_per_tid_cfg(struct ath10k *ar,
9048 					 const struct wmi_per_peer_per_tid_cfg_arg *arg)
9049 {
9050 	struct wmi_peer_per_tid_cfg_cmd *cmd;
9051 	struct sk_buff *skb;
9052 
9053 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
9054 	if (!skb)
9055 		return ERR_PTR(-ENOMEM);
9056 
9057 	memset(skb->data, 0, sizeof(*cmd));
9058 
9059 	cmd = (struct wmi_peer_per_tid_cfg_cmd *)skb->data;
9060 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
9061 	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr.addr);
9062 	cmd->tid = cpu_to_le32(arg->tid);
9063 	cmd->ack_policy = cpu_to_le32(arg->ack_policy);
9064 	cmd->aggr_control = cpu_to_le32(arg->aggr_control);
9065 	cmd->rate_control = cpu_to_le32(arg->rate_ctrl);
9066 	cmd->retry_count = cpu_to_le32(arg->retry_count);
9067 	cmd->rcode_flags = cpu_to_le32(arg->rcode_flags);
9068 	cmd->ext_tid_cfg_bitmap = cpu_to_le32(arg->ext_tid_cfg_bitmap);
9069 	cmd->rtscts_ctrl = cpu_to_le32(arg->rtscts_ctrl);
9070 
9071 	ath10k_dbg(ar, ATH10K_DBG_WMI,
9072 		   "wmi noack tid %d vdev id %d ack_policy %d aggr %u rate_ctrl %u rcflag %u retry_count %d rtscts %d ext_tid_cfg_bitmap %d mac_addr %pM\n",
9073 		   arg->tid, arg->vdev_id, arg->ack_policy, arg->aggr_control,
9074 		   arg->rate_ctrl, arg->rcode_flags, arg->retry_count,
9075 		   arg->rtscts_ctrl, arg->ext_tid_cfg_bitmap, arg->peer_macaddr.addr);
9076 	return skb;
9077 }
9078 
9079 static struct sk_buff *
9080 ath10k_wmi_op_gen_echo(struct ath10k *ar, u32 value)
9081 {
9082 	struct wmi_echo_cmd *cmd;
9083 	struct sk_buff *skb;
9084 
9085 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
9086 	if (!skb)
9087 		return ERR_PTR(-ENOMEM);
9088 
9089 	cmd = (struct wmi_echo_cmd *)skb->data;
9090 	cmd->value = cpu_to_le32(value);
9091 
9092 	ath10k_dbg(ar, ATH10K_DBG_WMI,
9093 		   "wmi echo value 0x%08x\n", value);
9094 	return skb;
9095 }
9096 
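/* Send an echo command with a well-known cookie and wait for its
 * completion, effectively acting as a barrier for previously queued WMI
 * commands.  Returns 0 on success, a negative errno if the echo could
 * not be sent, or -ETIMEDOUT if no reply arrives within
 * ATH10K_WMI_BARRIER_TIMEOUT_HZ.
 */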
9097 int
9098 ath10k_wmi_barrier(struct ath10k *ar)
9099 {
9100 	int ret;
9101 	int time_left;
9102 
9103 	spin_lock_bh(&ar->data_lock);
9104 	reinit_completion(&ar->wmi.barrier);
9105 	spin_unlock_bh(&ar->data_lock);
9106 
9107 	ret = ath10k_wmi_echo(ar, ATH10K_WMI_BARRIER_ECHO_ID);
9108 	if (ret) {
9109 		ath10k_warn(ar, "failed to submit wmi echo: %d\n", ret);
9110 		return ret;
9111 	}
9112 
9113 	time_left = wait_for_completion_timeout(&ar->wmi.barrier,
9114 						ATH10K_WMI_BARRIER_TIMEOUT_HZ);
9115 	if (!time_left)
9116 		return -ETIMEDOUT;
9117 
9118 	return 0;
9119 }
9120 
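/* Build the 10.2.4 baseband/XPA timing configuration command. */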
9121 static struct sk_buff *
9122 ath10k_wmi_10_2_4_op_gen_bb_timing(struct ath10k *ar,
9123 				   const struct wmi_bb_timing_cfg_arg *arg)
9124 {
9125 	struct wmi_pdev_bb_timing_cfg_cmd *cmd;
9126 	struct sk_buff *skb;
9127 
9128 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
9129 	if (!skb)
9130 		return ERR_PTR(-ENOMEM);
9131 
9132 	cmd = (struct wmi_pdev_bb_timing_cfg_cmd *)skb->data;
9133 	cmd->bb_tx_timing = __cpu_to_le32(arg->bb_tx_timing);
9134 	cmd->bb_xpa_timing = __cpu_to_le32(arg->bb_xpa_timing);
9135 
9136 	ath10k_dbg(ar, ATH10K_DBG_WMI,
9137 		   "wmi pdev bb_tx_timing 0x%x bb_xpa_timing 0x%x\n",
9138 		   arg->bb_tx_timing, arg->bb_xpa_timing);
9139 	return skb;
9140 }
9141 
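/* Per-WMI-revision op tables follow (main, 10.1, 10.2, 10.2.4, 10.4),
 * binding each interface to its event parsers and command generators.
 * Ops a given revision does not implement are left unset and noted in
 * the comments below.
 */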
9142 static const struct wmi_ops wmi_ops = {
9143 	.rx = ath10k_wmi_op_rx,
9144 	.map_svc = wmi_main_svc_map,
9145 
9146 	.pull_scan = ath10k_wmi_op_pull_scan_ev,
9147 	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
9148 	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
9149 	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
9150 	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
9151 	.pull_swba = ath10k_wmi_op_pull_swba_ev,
9152 	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
9153 	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
9154 	.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
9155 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
9156 	.pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
9157 	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
9158 	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
9159 
9160 	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
9161 	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
9162 	.gen_pdev_set_rd = ath10k_wmi_op_gen_pdev_set_rd,
9163 	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
9164 	.gen_init = ath10k_wmi_op_gen_init,
9165 	.gen_start_scan = ath10k_wmi_op_gen_start_scan,
9166 	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
9167 	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
9168 	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
9169 	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
9170 	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
9171 	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
9172 	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
9173 	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
9174 	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
9175 	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
9176 	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
9177 	/* .gen_vdev_wmm_conf not implemented */
9178 	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
9179 	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
9180 	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
9181 	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
9182 	.gen_peer_assoc = ath10k_wmi_op_gen_peer_assoc,
9183 	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
9184 	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
9185 	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
9186 	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
9187 	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
9188 	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
9189 	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
9190 	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
9191 	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
9192 	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
9193 	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
9194 	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
9195 	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
9196 	/* .gen_pdev_get_temperature not implemented */
9197 	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
9198 	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
9199 	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
9200 	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
9201 	.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
9202 	.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
9203 	.gen_echo = ath10k_wmi_op_gen_echo,
9204 	.gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
9205 	.gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
9206 
9207 	/* .gen_bcn_tmpl not implemented */
9208 	/* .gen_prb_tmpl not implemented */
9209 	/* .gen_p2p_go_bcn_ie not implemented */
9210 	/* .gen_adaptive_qcs not implemented */
9211 	/* .gen_pdev_enable_adaptive_cca not implemented */
9212 };
9213 
9214 static const struct wmi_ops wmi_10_1_ops = {
9215 	.rx = ath10k_wmi_10_1_op_rx,
9216 	.map_svc = wmi_10x_svc_map,
9217 	.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
9218 	.pull_fw_stats = ath10k_wmi_10x_op_pull_fw_stats,
9219 	.gen_init = ath10k_wmi_10_1_op_gen_init,
9220 	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
9221 	.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
9222 	.gen_peer_assoc = ath10k_wmi_10_1_op_gen_peer_assoc,
9223 	/* .gen_pdev_get_temperature not implemented */
9224 
9225 	/* shared with main branch */
9226 	.pull_scan = ath10k_wmi_op_pull_scan_ev,
9227 	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
9228 	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
9229 	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
9230 	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
9231 	.pull_swba = ath10k_wmi_op_pull_swba_ev,
9232 	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
9233 	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
9234 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
9235 	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
9236 	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
9237 
9238 	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
9239 	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
9240 	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
9241 	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
9242 	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
9243 	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
9244 	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
9245 	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
9246 	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
9247 	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
9248 	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
9249 	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
9250 	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
9251 	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
9252 	/* .gen_vdev_wmm_conf not implemented */
9253 	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
9254 	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
9255 	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
9256 	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
9257 	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
9258 	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
9259 	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
9260 	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
9261 	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
9262 	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
9263 	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
9264 	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
9265 	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
9266 	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
9267 	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
9268 	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
9269 	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
9270 	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
9271 	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
9272 	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
9273 	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
9274 	.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
9275 	.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
9276 	.gen_echo = ath10k_wmi_op_gen_echo,
9277 	.gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
9278 	.gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
9279 	/* .gen_bcn_tmpl not implemented */
9280 	/* .gen_prb_tmpl not implemented */
9281 	/* .gen_p2p_go_bcn_ie not implemented */
9282 	/* .gen_adaptive_qcs not implemented */
9283 	/* .gen_pdev_enable_adaptive_cca not implemented */
9284 };
9285 
9286 static const struct wmi_ops wmi_10_2_ops = {
9287 	.rx = ath10k_wmi_10_2_op_rx,
9288 	.pull_fw_stats = ath10k_wmi_10_2_op_pull_fw_stats,
9289 	.gen_init = ath10k_wmi_10_2_op_gen_init,
9290 	.gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
9291 	/* .gen_pdev_get_temperature not implemented */
9292 
9293 	/* shared with 10.1 */
9294 	.map_svc = wmi_10x_svc_map,
9295 	.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
9296 	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
9297 	.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
9298 	.gen_echo = ath10k_wmi_op_gen_echo,
9299 
9300 	.pull_scan = ath10k_wmi_op_pull_scan_ev,
9301 	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
9302 	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
9303 	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
9304 	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
9305 	.pull_swba = ath10k_wmi_op_pull_swba_ev,
9306 	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
9307 	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
9308 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
9309 	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
9310 	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
9311 
9312 	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
9313 	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
9314 	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
9315 	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
9316 	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
9317 	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
9318 	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
9319 	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
9320 	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
9321 	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
9322 	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
9323 	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
9324 	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
9325 	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
9326 	/* .gen_vdev_wmm_conf not implemented */
9327 	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
9328 	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
9329 	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
9330 	.gen_pdev_set_base_macaddr = ath10k_wmi_op_gen_pdev_set_base_macaddr,
9331 	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
9332 	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
9333 	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
9334 	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
9335 	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
9336 	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
9337 	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
9338 	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
9339 	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
9340 	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
9341 	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
9342 	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
9343 	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
9344 	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
9345 	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
9346 	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
9347 	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
9348 	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
9349 	.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
9350 	.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
9351 	.gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
9352 	.gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
9353 	/* .gen_pdev_enable_adaptive_cca not implemented */
9354 };
9355 
9356 static const struct wmi_ops wmi_10_2_4_ops = {
9357 	.rx = ath10k_wmi_10_2_op_rx,
9358 	.pull_fw_stats = ath10k_wmi_10_2_4_op_pull_fw_stats,
9359 	.gen_init = ath10k_wmi_10_2_op_gen_init,
9360 	.gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
9361 	.gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
9362 	.gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
9363 
9364 	/* shared with 10.1 */
9365 	.map_svc = wmi_10x_svc_map,
9366 	.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
9367 	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
9368 	.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
9369 	.gen_echo = ath10k_wmi_op_gen_echo,
9370 
9371 	.pull_scan = ath10k_wmi_op_pull_scan_ev,
9372 	.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
9373 	.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
9374 	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
9375 	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
9376 	.pull_swba = ath10k_wmi_10_2_4_op_pull_swba_ev,
9377 	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
9378 	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
9379 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
9380 	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
9381 	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
9382 
9383 	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
9384 	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
9385 	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
9386 	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
9387 	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
9388 	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
9389 	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
9390 	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
9391 	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
9392 	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
9393 	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
9394 	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
9395 	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
9396 	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
9397 	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
9398 	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
9399 	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
9400 	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
9401 	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
9402 	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
9403 	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
9404 	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
9405 	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
9406 	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
9407 	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
9408 	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
9409 	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
9410 	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
9411 	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
9412 	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
9413 	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
9414 	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
9415 	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
9416 	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
9417 	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
9418 	.gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config,
9419 	.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
9420 	.gen_pdev_enable_adaptive_cca =
9421 		ath10k_wmi_op_gen_pdev_enable_adaptive_cca,
9422 	.get_vdev_subtype = ath10k_wmi_10_2_4_op_get_vdev_subtype,
9423 	.gen_bb_timing = ath10k_wmi_10_2_4_op_gen_bb_timing,
9424 	.gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
9425 	.gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
9426 	/* .gen_bcn_tmpl not implemented */
9427 	/* .gen_prb_tmpl not implemented */
9428 	/* .gen_p2p_go_bcn_ie not implemented */
9429 	/* .gen_adaptive_qcs not implemented */
9430 };
9431 
9432 static const struct wmi_ops wmi_10_4_ops = {
9433 	.rx = ath10k_wmi_10_4_op_rx,
9434 	.map_svc = wmi_10_4_svc_map,
9435 
9436 	.pull_fw_stats = ath10k_wmi_10_4_op_pull_fw_stats,
9437 	.pull_scan = ath10k_wmi_op_pull_scan_ev,
9438 	.pull_mgmt_rx = ath10k_wmi_10_4_op_pull_mgmt_rx_ev,
9439 	.pull_ch_info = ath10k_wmi_10_4_op_pull_ch_info_ev,
9440 	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
9441 	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
9442 	.pull_swba = ath10k_wmi_10_4_op_pull_swba_ev,
9443 	.pull_phyerr_hdr = ath10k_wmi_10_4_op_pull_phyerr_ev_hdr,
9444 	.pull_phyerr = ath10k_wmi_10_4_op_pull_phyerr_ev,
9445 	.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
9446 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
9447 	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
9448 	.pull_dfs_status_ev = ath10k_wmi_10_4_op_pull_dfs_status_ev,
9449 	.get_txbf_conf_scheme = ath10k_wmi_10_4_txbf_conf_scheme,
9450 
9451 	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
9452 	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
9453 	.gen_pdev_set_base_macaddr = ath10k_wmi_op_gen_pdev_set_base_macaddr,
9454 	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
9455 	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
9456 	.gen_init = ath10k_wmi_10_4_op_gen_init,
9457 	.gen_start_scan = ath10k_wmi_op_gen_start_scan,
9458 	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
9459 	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
9460 	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
9461 	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
9462 	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
9463 	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
9464 	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
9465 	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
9466 	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
9467 	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
9468 	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
9469 	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
9470 	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
9471 	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
9472 	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
9473 	.gen_peer_assoc = ath10k_wmi_10_4_op_gen_peer_assoc,
9474 	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
9475 	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
9476 	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
9477 	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
9478 	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
9479 	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
9480 	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
9481 	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
9482 	.gen_dbglog_cfg = ath10k_wmi_10_4_op_gen_dbglog_cfg,
9483 	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
9484 	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
9485 	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
9486 	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
9487 	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
9488 	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
9489 	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
9490 	.fw_stats_fill = ath10k_wmi_10_4_op_fw_stats_fill,
9491 	.ext_resource_config = ath10k_wmi_10_4_ext_resource_config,
9492 	.gen_update_fw_tdls_state = ath10k_wmi_10_4_gen_update_fw_tdls_state,
9493 	.gen_tdls_peer_update = ath10k_wmi_10_4_gen_tdls_peer_update,
9494 	.gen_pdev_get_tpc_table_cmdid =
9495 			ath10k_wmi_10_4_op_gen_pdev_get_tpc_table_cmdid,
9496 	.gen_radar_found = ath10k_wmi_10_4_gen_radar_found,
9497 	.gen_per_peer_per_tid_cfg = ath10k_wmi_10_4_gen_per_peer_per_tid_cfg,
9498 
9499 	/* shared with 10.2 */
9500 	.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
9501 	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
9502 	.gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
9503 	.get_vdev_subtype = ath10k_wmi_10_4_op_get_vdev_subtype,
9504 	.gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
9505 	.gen_echo = ath10k_wmi_op_gen_echo,
9506 	.gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config,
9507 	.gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
9508 	.gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
9509 };
9510 
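/* Select the WMI ops, command and parameter maps matching the running
 * firmware's WMI op version (TLV firmware installs its own via
 * ath10k_wmi_tlv_attach()) and initialize the WMI completions, work
 * items and, for mgmt-tx-by-ref firmware, the pending mgmt tx IDR.
 */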
9511 int ath10k_wmi_attach(struct ath10k *ar)
9512 {
9513 	switch (ar->running_fw->fw_file.wmi_op_version) {
9514 	case ATH10K_FW_WMI_OP_VERSION_10_4:
9515 		ar->wmi.ops = &wmi_10_4_ops;
9516 		ar->wmi.cmd = &wmi_10_4_cmd_map;
9517 		ar->wmi.vdev_param = &wmi_10_4_vdev_param_map;
9518 		ar->wmi.pdev_param = &wmi_10_4_pdev_param_map;
9519 		ar->wmi.peer_param = &wmi_peer_param_map;
9520 		ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
9521 		ar->wmi_key_cipher = wmi_key_cipher_suites;
9522 		break;
9523 	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
9524 		ar->wmi.cmd = &wmi_10_2_4_cmd_map;
9525 		ar->wmi.ops = &wmi_10_2_4_ops;
9526 		ar->wmi.vdev_param = &wmi_10_2_4_vdev_param_map;
9527 		ar->wmi.pdev_param = &wmi_10_2_4_pdev_param_map;
9528 		ar->wmi.peer_param = &wmi_peer_param_map;
9529 		ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
9530 		ar->wmi_key_cipher = wmi_key_cipher_suites;
9531 		break;
9532 	case ATH10K_FW_WMI_OP_VERSION_10_2:
9533 		ar->wmi.cmd = &wmi_10_2_cmd_map;
9534 		ar->wmi.ops = &wmi_10_2_ops;
9535 		ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
9536 		ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
9537 		ar->wmi.peer_param = &wmi_peer_param_map;
9538 		ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
9539 		ar->wmi_key_cipher = wmi_key_cipher_suites;
9540 		break;
9541 	case ATH10K_FW_WMI_OP_VERSION_10_1:
9542 		ar->wmi.cmd = &wmi_10x_cmd_map;
9543 		ar->wmi.ops = &wmi_10_1_ops;
9544 		ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
9545 		ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
9546 		ar->wmi.peer_param = &wmi_peer_param_map;
9547 		ar->wmi.peer_flags = &wmi_10x_peer_flags_map;
9548 		ar->wmi_key_cipher = wmi_key_cipher_suites;
9549 		break;
9550 	case ATH10K_FW_WMI_OP_VERSION_MAIN:
9551 		ar->wmi.cmd = &wmi_cmd_map;
9552 		ar->wmi.ops = &wmi_ops;
9553 		ar->wmi.vdev_param = &wmi_vdev_param_map;
9554 		ar->wmi.pdev_param = &wmi_pdev_param_map;
9555 		ar->wmi.peer_param = &wmi_peer_param_map;
9556 		ar->wmi.peer_flags = &wmi_peer_flags_map;
9557 		ar->wmi_key_cipher = wmi_key_cipher_suites;
9558 		break;
9559 	case ATH10K_FW_WMI_OP_VERSION_TLV:
9560 		ath10k_wmi_tlv_attach(ar);
9561 		ar->wmi_key_cipher = wmi_tlv_key_cipher_suites;
9562 		break;
9563 	case ATH10K_FW_WMI_OP_VERSION_UNSET:
9564 	case ATH10K_FW_WMI_OP_VERSION_MAX:
9565 		ath10k_err(ar, "unsupported WMI op version: %d\n",
9566 			   ar->running_fw->fw_file.wmi_op_version);
9567 		return -EINVAL;
9568 	}
9569 
9570 	init_completion(&ar->wmi.service_ready);
9571 	init_completion(&ar->wmi.unified_ready);
9572 	init_completion(&ar->wmi.barrier);
9573 	init_completion(&ar->wmi.radar_confirm);
9574 
9575 	INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work);
9576 	INIT_WORK(&ar->radar_confirmation_work,
9577 		  ath10k_radar_confirmation_work);
9578 
9579 	if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
9580 		     ar->running_fw->fw_file.fw_features)) {
9581 		idr_init(&ar->wmi.mgmt_pending_tx);
9582 	}
9583 
9584 	return 0;
9585 }
9586 
9587 void ath10k_wmi_free_host_mem(struct ath10k *ar)
9588 {
9589 	int i;
9590 
9591 	/* free the host memory chunks requested by firmware */
9592 	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
9593 		dma_free_coherent(ar->dev,
9594 				  ar->wmi.mem_chunks[i].len,
9595 				  ar->wmi.mem_chunks[i].vaddr,
9596 				  ar->wmi.mem_chunks[i].paddr);
9597 	}
9598 
9599 	ar->wmi.num_mem_chunks = 0;
9600 }
9601 
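/* IDR iterator used on teardown: unmap and free any management frame
 * still awaiting a tx completion from firmware.
 */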
9602 static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr,
9603 					       void *ctx)
9604 {
9605 	struct ath10k_mgmt_tx_pkt_addr *pkt_addr = ptr;
9606 	struct ath10k *ar = ctx;
9607 	struct sk_buff *msdu;
9608 
9609 	ath10k_dbg(ar, ATH10K_DBG_WMI,
9610 		   "force cleanup mgmt msdu_id %u\n", msdu_id);
9611 
9612 	msdu = pkt_addr->vaddr;
9613 	dma_unmap_single(ar->dev, pkt_addr->paddr,
9614 			 msdu->len, DMA_TO_DEVICE);
9615 	ieee80211_free_txskb(ar->hw, msdu);
9616 	kfree(pkt_addr);
9617 
9618 	return 0;
9619 }
9620 
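/* Release WMI state: drop any pending management frames, cancel the
 * service-ready work and free a cached service-ready event skb.
 */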
9621 void ath10k_wmi_detach(struct ath10k *ar)
9622 {
9623 	if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
9624 		     ar->running_fw->fw_file.fw_features)) {
9625 		spin_lock_bh(&ar->data_lock);
9626 		idr_for_each(&ar->wmi.mgmt_pending_tx,
9627 			     ath10k_wmi_mgmt_tx_clean_up_pending, ar);
9628 		idr_destroy(&ar->wmi.mgmt_pending_tx);
9629 		spin_unlock_bh(&ar->data_lock);
9630 	}
9631 
9632 	cancel_work_sync(&ar->svc_rdy_work);
9633 	dev_kfree_skb(ar->svc_rdy_skb);
9634 }
9635