// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/mac80211.h>
#include <net/cfg80211.h>
#include <linux/completion.h>
#include <linux/if_ether.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/time.h>
#include <linux/of.h>
#include "core.h"
#include "debug.h"
#include "mac.h"
#include "hw.h"
#include "peer.h"
#include "p2p.h"

struct ath12k_wmi_svc_ready_parse {
	bool wmi_svc_bitmap_done;
};

struct ath12k_wmi_dma_ring_caps_parse {
	struct ath12k_wmi_dma_ring_caps_params *dma_ring_caps;
	u32 n_dma_ring_caps;
};

struct ath12k_wmi_service_ext_arg {
	u32 default_conc_scan_config_bits;
	u32 default_fw_config_bits;
	struct ath12k_wmi_ppe_threshold_arg ppet;
	u32 he_cap_info;
	u32 mpdu_density;
	u32 max_bssid_rx_filters;
	u32 num_hw_modes;
	u32 num_phy;
};

struct ath12k_wmi_svc_rdy_ext_parse {
	struct ath12k_wmi_service_ext_arg arg;
	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps;
	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
	u32 n_hw_mode_caps;
	u32 tot_phy_id;
	struct ath12k_wmi_hw_mode_cap_params pref_hw_mode_caps;
	struct ath12k_wmi_mac_phy_caps_params *mac_phy_caps;
	u32 n_mac_phy_caps;
	const struct ath12k_wmi_soc_hal_reg_caps_params *soc_hal_reg_caps;
	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_hal_reg_caps;
	u32 n_ext_hal_reg_caps;
	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
	bool hw_mode_done;
	bool mac_phy_done;
	bool ext_hal_reg_done;
	bool mac_phy_chainmask_combo_done;
	bool mac_phy_chainmask_cap_done;
	bool oem_dma_ring_cap_done;
	bool dma_ring_cap_done;
};

struct ath12k_wmi_svc_rdy_ext2_arg {
	u32 reg_db_version;
	u32 hw_min_max_tx_power_2ghz;
	u32 hw_min_max_tx_power_5ghz;
	u32 chwidth_num_peer_caps;
	u32 preamble_puncture_bw;
	u32 max_user_per_ppdu_ofdma;
	u32 max_user_per_ppdu_mumimo;
	u32 target_cap_flags;
	u32 eht_cap_mac_info[WMI_MAX_EHTCAP_MAC_SIZE];
	u32 max_num_linkview_peers;
	u32 max_num_msduq_supported_per_tid;
	u32 default_num_msduq_supported_per_tid;
};

struct ath12k_wmi_svc_rdy_ext2_parse {
	struct ath12k_wmi_svc_rdy_ext2_arg arg;
	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
	bool dma_ring_cap_done;
	bool spectral_bin_scaling_done;
	bool mac_phy_caps_ext_done;
};

struct ath12k_wmi_rdy_parse {
	u32 num_extra_mac_addr;
};

struct ath12k_wmi_dma_buf_release_arg {
	struct ath12k_wmi_dma_buf_release_fixed_params fixed;
	const struct ath12k_wmi_dma_buf_release_entry_params *buf_entry;
	const struct ath12k_wmi_dma_buf_release_meta_data_params *meta_data;
	u32 num_buf_entry;
	u32 num_meta;
	bool buf_entry_done;
	bool meta_data_done;
};

struct ath12k_wmi_tlv_policy {
	size_t min_len;
};

struct wmi_tlv_mgmt_rx_parse {
	const struct ath12k_wmi_mgmt_rx_params *fixed;
	const u8 *frame_buf;
	bool frame_buf_done;
};

static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
	[WMI_TAG_ARRAY_BYTE] = { .min_len = 0 },
	[WMI_TAG_ARRAY_UINT32] = { .min_len = 0 },
	[WMI_TAG_SERVICE_READY_EVENT] = {
		.min_len = sizeof(struct wmi_service_ready_event) },
	[WMI_TAG_SERVICE_READY_EXT_EVENT] = {
		.min_len = sizeof(struct wmi_service_ready_ext_event) },
	[WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS] = {
		.min_len = sizeof(struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params) },
	[WMI_TAG_SOC_HAL_REG_CAPABILITIES] = {
		.min_len = sizeof(struct ath12k_wmi_soc_hal_reg_caps_params) },
	[WMI_TAG_VDEV_START_RESPONSE_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_start_resp_event) },
	[WMI_TAG_PEER_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_peer_delete_resp_event) },
	[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_bcn_tx_status_event) },
	[WMI_TAG_VDEV_STOPPED_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_stopped_event) },
	[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT] = {
		.min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) },
	[WMI_TAG_MGMT_RX_HDR] = {
		.min_len = sizeof(struct ath12k_wmi_mgmt_rx_params) },
	[WMI_TAG_MGMT_TX_COMPL_EVENT] = {
		.min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
	[WMI_TAG_SCAN_EVENT] = {
		.min_len = sizeof(struct wmi_scan_event) },
	[WMI_TAG_PEER_STA_KICKOUT_EVENT] = {
		.min_len = sizeof(struct wmi_peer_sta_kickout_event) },
	[WMI_TAG_ROAM_EVENT] = {
		.min_len = sizeof(struct wmi_roam_event) },
	[WMI_TAG_CHAN_INFO_EVENT] = {
		.min_len = sizeof(struct wmi_chan_info_event) },
	[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT] = {
		.min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
	[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
	[WMI_TAG_READY_EVENT] = {
		.min_len = sizeof(struct ath12k_wmi_ready_event_min_params) },
	[WMI_TAG_SERVICE_AVAILABLE_EVENT] = {
		.min_len = sizeof(struct wmi_service_available_event) },
	[WMI_TAG_PEER_ASSOC_CONF_EVENT] = {
		.min_len = sizeof(struct wmi_peer_assoc_conf_event) },
	[WMI_TAG_RFKILL_EVENT] = {
		.min_len = sizeof(struct wmi_rfkill_state_change_event) },
	[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = {
		.min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
	[WMI_TAG_HOST_SWFDA_EVENT] = {
		.min_len = sizeof(struct wmi_fils_discovery_event) },
	[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
	[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
	[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_twt_enable_event) },
	[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_twt_disable_event) },
	[WMI_TAG_P2P_NOA_INFO] = {
		.min_len = sizeof(struct ath12k_wmi_p2p_noa_info) },
	[WMI_TAG_P2P_NOA_EVENT] = {
		.min_len = sizeof(struct wmi_p2p_noa_event) },
};

static __le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
{
	return le32_encode_bits(cmd, WMI_TLV_TAG) |
		le32_encode_bits(len, WMI_TLV_LEN);
}

static __le32 ath12k_wmi_tlv_cmd_hdr(u32 cmd, u32 len)
{
	return ath12k_wmi_tlv_hdr(cmd, len - TLV_HDR_SIZE);
}
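
/* The helpers above build the 32-bit TLV header word: the tag goes into
 * the WMI_TLV_TAG field and the payload length into WMI_TLV_LEN.
 * ath12k_wmi_tlv_cmd_hdr() exists so callers can pass the total TLV
 * size (header included, e.g. sizeof(*cmd)) and have TLV_HDR_SIZE
 * subtracted, since the encoded length covers only the payload.
 */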

void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
			     struct ath12k_wmi_resource_config_arg *config)
{
	config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;
	config->num_peers = ab->num_radios *
		ath12k_core_get_max_peers_per_radio(ab);
	config->num_tids = ath12k_core_get_max_num_tids(ab);
	config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
	config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
	config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;

	if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
		config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
	else
		config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;

	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
	config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
	config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
	config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
	config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
	config->dma_burst_size = TARGET_DMA_BURST_SIZE;
	config->rx_skip_defrag_timeout_dup_detection_check =
		TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
	config->vow_config = TARGET_VOW_CONFIG;
	config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
	config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
	config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
	config->rx_batchmode = TARGET_RX_BATCHMODE;
	/* Indicates that the host supports peer map v3 and unmap v2 */
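	/* The value 0x32 below appears to pack the map version in the
	 * high nibble and the unmap version in the low nibble.
	 */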
	config->peer_map_unmap_version = 0x32;
	config->twt_ap_pdev_count = ab->num_radios;
	config->twt_ap_sta_count = 1000;
	config->ema_max_vap_cnt = ab->num_radios;
	config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD;
	config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;

	if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map))
		config->peer_metadata_ver = ATH12K_PEER_METADATA_V1B;
}

void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
			     struct ath12k_wmi_resource_config_arg *config)
{
	config->num_vdevs = 4;
	config->num_peers = 16;
	config->num_tids = 32;

	config->num_offload_peers = 3;
	config->num_offload_reorder_buffs = 3;
	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
	config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
	config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
	config->num_mcast_groups = 0;
	config->num_mcast_table_elems = 0;
	config->mcast2ucast_mode = 0;
	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
	config->num_wds_entries = 0;
	config->dma_burst_size = 0;
	config->rx_skip_defrag_timeout_dup_detection_check = 0;
	config->vow_config = TARGET_VOW_CONFIG;
	config->gtk_offload_max_vdev = 2;
	config->num_msdu_desc = 0x400;
	config->beacon_tx_offload_max_vdev = 2;
	config->rx_batchmode = TARGET_RX_BATCHMODE;

	config->peer_map_unmap_version = 0x1;
	config->use_pdev_id = 1;
	config->max_frag_entries = 0xa;
	config->num_tdls_vdevs = 0x1;
	config->num_tdls_conn_table_entries = 8;
	config->beacon_tx_offload_max_vdev = 0x2;
	config->num_multicast_filter_entries = 0x20;
	config->num_wow_filters = 0x16;
	config->num_keep_alive_pattern = 0;
}

#define PRIMAP(_hw_mode_) \
	[_hw_mode_] = _hw_mode_##_PRI

static const int ath12k_hw_mode_pri_map[] = {
	PRIMAP(WMI_HOST_HW_MODE_SINGLE),
	PRIMAP(WMI_HOST_HW_MODE_DBS),
	PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
	PRIMAP(WMI_HOST_HW_MODE_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
	/* keep last */
	PRIMAP(WMI_HOST_HW_MODE_MAX),
};
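
/* PRIMAP(WMI_HOST_HW_MODE_DBS), for example, expands to
 * [WMI_HOST_HW_MODE_DBS] = WMI_HOST_HW_MODE_DBS_PRI, i.e. the table
 * maps each host hw mode to its priority constant.
 */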

static int
ath12k_wmi_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
		    int (*iter)(struct ath12k_base *ab, u16 tag, u16 len,
				const void *ptr, void *data),
		    void *data)
{
	const void *begin = ptr;
	const struct wmi_tlv *tlv;
	u16 tlv_tag, tlv_len;
	int ret;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath12k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}

		tlv = ptr;
		tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
		tlv_len = le32_get_bits(tlv->header, WMI_TLV_LEN);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}

		if (tlv_tag < ARRAY_SIZE(ath12k_wmi_tlv_policies) &&
		    ath12k_wmi_tlv_policies[tlv_tag].min_len &&
		    ath12k_wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
				   tlv_tag, ptr - begin, tlv_len,
				   ath12k_wmi_tlv_policies[tlv_tag].min_len);
			return -EINVAL;
		}

		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}

	return 0;
}
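
/* A WMI TLV stream, as walked by ath12k_wmi_tlv_iter() above, is a flat
 * sequence of { header | payload } records:
 *
 *   [tag|len][payload][tag|len][payload]...
 *
 * Payloads of tags listed in ath12k_wmi_tlv_policies are additionally
 * checked against their minimum length before the callback runs.
 */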

static int ath12k_wmi_tlv_iter_parse(struct ath12k_base *ab, u16 tag, u16 len,
				     const void *ptr, void *data)
{
	const void **tb = data;

	if (tag < WMI_TAG_MAX)
		tb[tag] = ptr;

	return 0;
}

static int ath12k_wmi_tlv_parse(struct ath12k_base *ar, const void **tb,
				const void *ptr, size_t len)
{
	return ath12k_wmi_tlv_iter(ar, ptr, len, ath12k_wmi_tlv_iter_parse,
				   (void *)tb);
}

static const void **
ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab,
			   struct sk_buff *skb, gfp_t gfp)
{
	const void **tb;
	int ret;

	tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
	if (!tb)
		return ERR_PTR(-ENOMEM);

	ret = ath12k_wmi_tlv_parse(ab, tb, skb->data, skb->len);
	if (ret) {
		kfree(tb);
		return ERR_PTR(ret);
	}

	return tb;
}

static int ath12k_wmi_cmd_send_nowait(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
				      u32 cmd_id)
{
	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_cmd_hdr *cmd_hdr;
	int ret;

	if (!skb_push(skb, sizeof(struct wmi_cmd_hdr)))
		return -ENOMEM;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	cmd_hdr->cmd_id = le32_encode_bits(cmd_id, WMI_CMD_HDR_CMD_ID);

	memset(skb_cb, 0, sizeof(*skb_cb));
	ret = ath12k_htc_send(&ab->htc, wmi->eid, skb);

	if (ret)
		goto err_pull;

	return 0;

err_pull:
	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
	return ret;
}

int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
			u32 cmd_id)
{
	struct ath12k_wmi_base *wmi_ab = wmi->wmi_ab;
	int ret = -EOPNOTSUPP;

	might_sleep();

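	/* The wait condition below is a GCC statement expression: every
	 * wakeup on tx_credits_wq retries the send, and the wait
	 * completes once HTC stops returning -EAGAIN (i.e. tx credits
	 * became available) or after WMI_SEND_TIMEOUT_HZ.
	 */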
	wait_event_timeout(wmi_ab->tx_credits_wq, ({
		ret = ath12k_wmi_cmd_send_nowait(wmi, skb, cmd_id);

		if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH, &wmi_ab->ab->dev_flags))
			ret = -ESHUTDOWN;

		(ret != -EAGAIN);
	}), WMI_SEND_TIMEOUT_HZ);

	if (ret == -EAGAIN)
		ath12k_warn(wmi_ab->ab, "wmi command %d timeout\n", cmd_id);

	return ret;
}

static int ath12k_pull_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
				     const void *ptr,
				     struct ath12k_wmi_service_ext_arg *arg)
{
	const struct wmi_service_ready_ext_event *ev = ptr;
	int i;

	if (!ev)
		return -EINVAL;

	/* Move this to host based bitmap */
	arg->default_conc_scan_config_bits =
		le32_to_cpu(ev->default_conc_scan_config_bits);
	arg->default_fw_config_bits = le32_to_cpu(ev->default_fw_config_bits);
	arg->he_cap_info = le32_to_cpu(ev->he_cap_info);
	arg->mpdu_density = le32_to_cpu(ev->mpdu_density);
	arg->max_bssid_rx_filters = le32_to_cpu(ev->max_bssid_rx_filters);
	arg->ppet.numss_m1 = le32_to_cpu(ev->ppet.numss_m1);
	arg->ppet.ru_bit_mask = le32_to_cpu(ev->ppet.ru_info);

	for (i = 0; i < WMI_MAX_NUM_SS; i++)
		arg->ppet.ppet16_ppet8_ru3_ru0[i] =
			le32_to_cpu(ev->ppet.ppet16_ppet8_ru3_ru0[i]);

	return 0;
}

static int
ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
				      struct ath12k_wmi_svc_rdy_ext_parse *svc,
				      u8 hw_mode_id, u8 phy_id,
				      struct ath12k_pdev *pdev)
{
	const struct ath12k_wmi_mac_phy_caps_params *mac_caps;
	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps = svc->hw_caps;
	const struct ath12k_wmi_hw_mode_cap_params *wmi_hw_mode_caps = svc->hw_mode_caps;
	const struct ath12k_wmi_mac_phy_caps_params *wmi_mac_phy_caps = svc->mac_phy_caps;
	struct ath12k_base *ab = wmi_handle->wmi_ab->ab;
	struct ath12k_band_cap *cap_band;
	struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
	struct ath12k_fw_pdev *fw_pdev;
	u32 phy_map;
	u32 hw_idx, phy_idx = 0;
	int i;

	if (!hw_caps || !wmi_hw_mode_caps || !svc->soc_hal_reg_caps)
		return -EINVAL;

	for (hw_idx = 0; hw_idx < le32_to_cpu(hw_caps->num_hw_modes); hw_idx++) {
		if (hw_mode_id == le32_to_cpu(wmi_hw_mode_caps[hw_idx].hw_mode_id))
			break;

		phy_map = le32_to_cpu(wmi_hw_mode_caps[hw_idx].phy_id_map);
		phy_idx = fls(phy_map);
	}

	if (hw_idx == le32_to_cpu(hw_caps->num_hw_modes))
		return -EINVAL;

	phy_idx += phy_id;
	if (phy_id >= le32_to_cpu(svc->soc_hal_reg_caps->num_phy))
		return -EINVAL;

	mac_caps = wmi_mac_phy_caps + phy_idx;

	pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
	pdev->hw_link_id = ath12k_wmi_mac_phy_get_hw_link_id(mac_caps);
	pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
	pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);

	fw_pdev = &ab->fw_pdev[ab->fw_pdev_count];
	fw_pdev->supported_bands = le32_to_cpu(mac_caps->supported_bands);
	fw_pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
	fw_pdev->phy_id = le32_to_cpu(mac_caps->phy_id);
	ab->fw_pdev_count++;

	/* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
	 * band to band for a single radio, need to see how this should be
	 * handled.
	 */
	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) {
		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_2g);
		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_2g);
	} else if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) {
		pdev_cap->vht_cap = le32_to_cpu(mac_caps->vht_cap_info_5g);
		pdev_cap->vht_mcs = le32_to_cpu(mac_caps->vht_supp_mcs_5g);
		pdev_cap->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_5g);
		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_5g);
	} else {
		return -EINVAL;
	}

	/* The tx/rx chainmask reported by fw depends on the actual hw chains
	 * used. For example, for 4x4 capable macphys, the first 4 chains can be
	 * used for the first mac and the remaining 4 chains for the second mac,
	 * or vice-versa. In this case, tx/rx chainmask 0xf will be advertised
	 * for the first mac and 0xf0 for the second mac, or vice-versa. Compute
	 * the shift value for the tx/rx chainmask which will be used to
	 * advertise supported ht/vht rates to mac80211.
	 */
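	/* For example, a mac reporting chainmask 0xf0 gets a shift of 4,
	 * while one reporting 0xf gets 0.
	 */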
	pdev_cap->tx_chain_mask_shift =
			find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32);
	pdev_cap->rx_chain_mask_shift =
			find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);

	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) {
		cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
		cap_band->max_bw_supported = le32_to_cpu(mac_caps->max_bw_supported_2g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_2g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_2g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_2g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_2g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_2g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet2g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet2g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet2g.ppet16_ppet8_ru3_ru0[i]);
	}

	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) {
		cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
		cap_band->max_bw_supported =
			le32_to_cpu(mac_caps->max_bw_supported_5g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);

		cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
		cap_band->max_bw_supported =
			le32_to_cpu(mac_caps->max_bw_supported_5g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);
	}

	return 0;
}

static int
ath12k_pull_reg_cap_svc_rdy_ext(struct ath12k_wmi_pdev *wmi_handle,
				const struct ath12k_wmi_soc_hal_reg_caps_params *reg_caps,
				const struct ath12k_wmi_hal_reg_caps_ext_params *ext_caps,
				u8 phy_idx,
				struct ath12k_wmi_hal_reg_capabilities_ext_arg *param)
{
	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_reg_cap;

	if (!reg_caps || !ext_caps)
		return -EINVAL;

	if (phy_idx >= le32_to_cpu(reg_caps->num_phy))
		return -EINVAL;

	ext_reg_cap = &ext_caps[phy_idx];

	param->phy_id = le32_to_cpu(ext_reg_cap->phy_id);
	param->eeprom_reg_domain = le32_to_cpu(ext_reg_cap->eeprom_reg_domain);
	param->eeprom_reg_domain_ext =
		le32_to_cpu(ext_reg_cap->eeprom_reg_domain_ext);
	param->regcap1 = le32_to_cpu(ext_reg_cap->regcap1);
	param->regcap2 = le32_to_cpu(ext_reg_cap->regcap2);
	/* check if param->wireless_mode is needed */
	param->low_2ghz_chan = le32_to_cpu(ext_reg_cap->low_2ghz_chan);
	param->high_2ghz_chan = le32_to_cpu(ext_reg_cap->high_2ghz_chan);
	param->low_5ghz_chan = le32_to_cpu(ext_reg_cap->low_5ghz_chan);
	param->high_5ghz_chan = le32_to_cpu(ext_reg_cap->high_5ghz_chan);

	return 0;
}

static int ath12k_pull_service_ready_tlv(struct ath12k_base *ab,
					 const void *evt_buf,
					 struct ath12k_wmi_target_cap_arg *cap)
{
	const struct wmi_service_ready_event *ev = evt_buf;

	if (!ev) {
		ath12k_err(ab, "%s: failed by NULL param\n",
			   __func__);
		return -EINVAL;
	}

	cap->phy_capability = le32_to_cpu(ev->phy_capability);
	cap->max_frag_entry = le32_to_cpu(ev->max_frag_entry);
	cap->num_rf_chains = le32_to_cpu(ev->num_rf_chains);
	cap->ht_cap_info = le32_to_cpu(ev->ht_cap_info);
	cap->vht_cap_info = le32_to_cpu(ev->vht_cap_info);
	cap->vht_supp_mcs = le32_to_cpu(ev->vht_supp_mcs);
	cap->hw_min_tx_power = le32_to_cpu(ev->hw_min_tx_power);
	cap->hw_max_tx_power = le32_to_cpu(ev->hw_max_tx_power);
	cap->sys_cap_info = le32_to_cpu(ev->sys_cap_info);
	cap->min_pkt_size_enable = le32_to_cpu(ev->min_pkt_size_enable);
	cap->max_bcn_ie_size = le32_to_cpu(ev->max_bcn_ie_size);
	cap->max_num_scan_channels = le32_to_cpu(ev->max_num_scan_channels);
	cap->max_supported_macs = le32_to_cpu(ev->max_supported_macs);
	cap->wmi_fw_sub_feat_caps = le32_to_cpu(ev->wmi_fw_sub_feat_caps);
	cap->txrx_chainmask = le32_to_cpu(ev->txrx_chainmask);
	cap->default_dbs_hw_mode_index = le32_to_cpu(ev->default_dbs_hw_mode_index);
	cap->num_msdu_desc = le32_to_cpu(ev->num_msdu_desc);

	return 0;
}

/* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
 * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
 * 4-byte word.
 */
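/* For example, assuming WMI_SERVICE_BITS_IN_SIZE32 is 4 as the comment
 * above implies, word 0 carries services 0-3 in its low nibble and
 * word 1 carries services 4-7.
 */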
static void ath12k_wmi_service_bitmap_copy(struct ath12k_wmi_pdev *wmi,
					   const u32 *wmi_svc_bm)
{
	int i, j;

	for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
		do {
			if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
				set_bit(j, wmi->wmi_ab->svc_map);
		} while (++j % WMI_SERVICE_BITS_IN_SIZE32);
	}
}

static int ath12k_wmi_svc_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
				    const void *ptr, void *data)
{
	struct ath12k_wmi_svc_ready_parse *svc_ready = data;
	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
	u16 expect_len;

	switch (tag) {
	case WMI_TAG_SERVICE_READY_EVENT:
		if (ath12k_pull_service_ready_tlv(ab, ptr, &ab->target_caps))
			return -EINVAL;
		break;

	case WMI_TAG_ARRAY_UINT32:
		if (!svc_ready->wmi_svc_bitmap_done) {
			expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32);
			if (len < expect_len) {
				ath12k_warn(ab, "invalid len %d for the tag 0x%x\n",
					    len, tag);
				return -EINVAL;
			}

			ath12k_wmi_service_bitmap_copy(wmi_handle, ptr);

			svc_ready->wmi_svc_bitmap_done = true;
		}
		break;
	default:
		break;
	}

	return 0;
}

static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct ath12k_wmi_svc_ready_parse svc_ready = { };
	int ret;

	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_svc_rdy_parse,
				  &svc_ready);
	if (ret) {
		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
		return ret;
	}

	return 0;
}

static u32 ath12k_wmi_mgmt_get_freq(struct ath12k *ar,
				    struct ieee80211_tx_info *info)
{
	struct ath12k_base *ab = ar->ab;
	u32 freq = 0;

	if (ab->hw_params->single_pdev_only &&
	    ar->scan.is_roc &&
	    (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
		freq = ar->scan.roc_freq;

	return freq;
}

struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_ab, u32 len)
{
	struct sk_buff *skb;
	struct ath12k_base *ab = wmi_ab->ab;
	u32 round_len = roundup(len, 4);

	skb = ath12k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len);
	if (!skb)
		return NULL;

	skb_reserve(skb, WMI_SKB_HEADROOM);
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath12k_warn(ab, "unaligned WMI skb data\n");

	skb_put(skb, round_len);
	memset(skb->data, 0, round_len);

	return skb;
}

int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
			 struct sk_buff *frame)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_mgmt_send_cmd *cmd;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
	struct wmi_tlv *frame_tlv;
	struct sk_buff *skb;
	u32 buf_len;
	int ret, len;

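	/* Only the first WMI_MGMT_SEND_DOWNLD_LEN bytes of the frame are
	 * copied inline into the command TLV; the full frame is also
	 * described by DMA address (paddr_lo/paddr_hi) and frame_len,
	 * presumably so firmware can fetch frames larger than the inline
	 * download length itself.
	 */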
	buf_len = min_t(int, frame->len, WMI_MGMT_SEND_DOWNLD_LEN);

	len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_mgmt_send_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MGMT_TX_SEND_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->desc_id = cpu_to_le32(buf_id);
	cmd->chanfreq = cpu_to_le32(ath12k_wmi_mgmt_get_freq(ar, info));
	cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr));
	cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr));
	cmd->frame_len = cpu_to_le32(frame->len);
	cmd->buf_len = cpu_to_le32(buf_len);
	cmd->tx_params_valid = 0;

	frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
	frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len);

	memcpy(frame_tlv->value, frame->data, buf_len);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
			   struct ath12k_wmi_vdev_create_arg *args)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_create_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams;
	struct wmi_tlv *tlv;
	int ret, len;
	void *ptr;

	/* This can be optimized by sending tx/rx chain configuration
	 * only for supported bands instead of always sending it for
	 * both bands.
	 */
	len = sizeof(*cmd) + TLV_HDR_SIZE +
		(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_create_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CREATE_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(args->if_id);
	cmd->vdev_type = cpu_to_le32(args->type);
	cmd->vdev_subtype = cpu_to_le32(args->subtype);
	cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX);
	cmd->pdev_id = cpu_to_le32(args->pdev_id);
	cmd->mbssid_flags = cpu_to_le32(args->mbssid_flags);
	cmd->mbssid_tx_vdev_id = cpu_to_le32(args->mbssid_tx_vdev_id);
	cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
	ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);

	if (args->if_stats_id != ATH12K_INVAL_VDEV_STATS_ID)
		cmd->vdev_stats_id_valid = cpu_to_le32(BIT(0));

	ptr = skb->data + sizeof(*cmd);
	len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);

	ptr += TLV_HDR_SIZE;
	txrx_streams = ptr;
	len = sizeof(*txrx_streams);
	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
							  len);
	txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_2G);
	txrx_streams->supported_tx_streams =
				cpu_to_le32(args->chains[NL80211_BAND_2GHZ].tx);
	txrx_streams->supported_rx_streams =
				cpu_to_le32(args->chains[NL80211_BAND_2GHZ].rx);

	txrx_streams++;
	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
							  len);
	txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_5G);
	txrx_streams->supported_tx_streams =
				cpu_to_le32(args->chains[NL80211_BAND_5GHZ].tx);
	txrx_streams->supported_rx_streams =
				cpu_to_le32(args->chains[NL80211_BAND_5GHZ].rx);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
		   args->if_id, args->type, args->subtype,
		   macaddr, args->pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to submit WMI_VDEV_CREATE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_delete(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_delete_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_delete_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DELETE_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_stop_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_stop_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_STOP_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_down_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_down_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DOWN_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
				       struct wmi_vdev_start_req_arg *arg)
{
	memset(chan, 0, sizeof(*chan));

	chan->mhz = cpu_to_le32(arg->freq);
	chan->band_center_freq1 = cpu_to_le32(arg->band_center_freq1);
	if (arg->mode == MODE_11AC_VHT80_80)
		chan->band_center_freq2 = cpu_to_le32(arg->band_center_freq2);
	else
		chan->band_center_freq2 = 0;

	chan->info |= le32_encode_bits(arg->mode, WMI_CHAN_INFO_MODE);
	if (arg->passive)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
	if (arg->allow_ibss)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ADHOC_ALLOWED);
	if (arg->allow_ht)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
	if (arg->allow_vht)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
	if (arg->allow_he)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
	if (arg->ht40plus)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_HT40_PLUS);
	if (arg->chan_radar)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
	if (arg->freq2_radar)
		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS_FREQ2);

	chan->reg_info_1 = le32_encode_bits(arg->max_power,
					    WMI_CHAN_REG_INFO1_MAX_PWR) |
		le32_encode_bits(arg->max_reg_power,
				 WMI_CHAN_REG_INFO1_MAX_REG_PWR);

	chan->reg_info_2 = le32_encode_bits(arg->max_antenna_gain,
					    WMI_CHAN_REG_INFO2_ANT_MAX) |
		le32_encode_bits(arg->max_power, WMI_CHAN_REG_INFO2_MAX_TX_PWR);
}

int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
			  bool restart)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_start_request_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_channel_params *chan;
	struct wmi_tlv *tlv;
	void *ptr;
	int ret, len;

	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
		return -EINVAL;

	len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_START_REQUEST_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->beacon_interval = cpu_to_le32(arg->bcn_intval);
	cmd->bcn_tx_rate = cpu_to_le32(arg->bcn_tx_rate);
	cmd->dtim_period = cpu_to_le32(arg->dtim_period);
	cmd->num_noa_descriptors = cpu_to_le32(arg->num_noa_descriptors);
	cmd->preferred_rx_streams = cpu_to_le32(arg->pref_rx_streams);
	cmd->preferred_tx_streams = cpu_to_le32(arg->pref_tx_streams);
	cmd->cac_duration_ms = cpu_to_le32(arg->cac_duration_ms);
	cmd->regdomain = cpu_to_le32(arg->regdomain);
	cmd->he_ops = cpu_to_le32(arg->he_ops);
	cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
	cmd->mbssid_flags = cpu_to_le32(arg->mbssid_flags);
	cmd->mbssid_tx_vdev_id = cpu_to_le32(arg->mbssid_tx_vdev_id);

	if (!restart) {
		if (arg->ssid) {
			cmd->ssid.ssid_len = cpu_to_le32(arg->ssid_len);
			memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
		}
		if (arg->hidden_ssid)
			cmd->flags |= cpu_to_le32(WMI_VDEV_START_HIDDEN_SSID);
		if (arg->pmf_enabled)
			cmd->flags |= cpu_to_le32(WMI_VDEV_START_PMF_ENABLED);
	}

	cmd->flags |= cpu_to_le32(WMI_VDEV_START_LDPC_RX_ENABLED);

	ptr = skb->data + sizeof(*cmd);
	chan = ptr;

	ath12k_wmi_put_wmi_channel(chan, arg);

	chan->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
						  sizeof(*chan));
	ptr += sizeof(*chan);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);

	/* Note: This is a nested TLV containing:
	 * [wmi_tlv][ath12k_wmi_p2p_noa_descriptor][wmi_tlv]..
	 */
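	/* The array TLV above is emitted with zero length since no NOA
	 * descriptors are supplied by the host on this path.
	 */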

	ptr += sizeof(*tlv);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
		   restart ? "restart" : "start", arg->vdev_id,
		   arg->freq, arg->mode);

	if (restart)
		ret = ath12k_wmi_cmd_send(wmi, skb,
					  WMI_VDEV_RESTART_REQUEST_CMDID);
	else
		ret = ath12k_wmi_cmd_send(wmi, skb,
					  WMI_VDEV_START_REQUEST_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit vdev_%s cmd\n",
			    restart ? "restart" : "start");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_up(struct ath12k *ar, struct ath12k_wmi_vdev_up_params *params)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_up_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_up_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(params->vdev_id);
	cmd->vdev_assoc_id = cpu_to_le32(params->aid);

	ether_addr_copy(cmd->vdev_bssid.addr, params->bssid);

	if (params->tx_bssid) {
		ether_addr_copy(cmd->tx_vdev_bssid.addr, params->tx_bssid);
		cmd->nontx_profile_idx = cpu_to_le32(params->nontx_profile_idx);
		cmd->nontx_profile_cnt = cpu_to_le32(params->nontx_profile_cnt);
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
		   params->vdev_id, params->aid, params->bssid);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
				    struct ath12k_wmi_peer_create_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_create_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_create_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_CREATE_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_addr);
	cmd->peer_type = cpu_to_le32(arg->peer_type);
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer create vdev_id %d peer_addr %pM\n",
		   arg->vdev_id, arg->peer_addr);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_peer_delete_cmd(struct ath12k *ar,
				    const u8 *peer_addr, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_delete_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_delete_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_DELETE_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer delete vdev_id %d peer_addr %pM\n",
		   vdev_id,  peer_addr);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_pdev_set_regdomain(struct ath12k *ar,
				       struct ath12k_wmi_pdev_set_regdomain_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_set_regdomain_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_REGDOMAIN_CMD,
						 sizeof(*cmd));

	cmd->reg_domain = cpu_to_le32(arg->current_rd_in_use);
	cmd->reg_domain_2g = cpu_to_le32(arg->current_rd_2g);
	cmd->reg_domain_5g = cpu_to_le32(arg->current_rd_5g);
	cmd->conformance_test_limit_2g = cpu_to_le32(arg->ctl_2g);
	cmd->conformance_test_limit_5g = cpu_to_le32(arg->ctl_5g);
	cmd->dfs_domain = cpu_to_le32(arg->dfs_domain);
	cmd->pdev_id = cpu_to_le32(arg->pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
		   arg->current_rd_in_use, arg->current_rd_2g,
		   arg->current_rd_5g, arg->dfs_domain, arg->pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_set_peer_param(struct ath12k *ar, const u8 *peer_addr,
			      u32 vdev_id, u32 param_id, u32 param_val)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_set_param_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_set_param_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_SET_PARAM_CMD,
						 sizeof(*cmd));
	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->param_id = cpu_to_le32(param_id);
	cmd->param_value = cpu_to_le32(param_val);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %d peer 0x%pM set param %d value %d\n",
		   vdev_id, peer_addr, param_id, param_val);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_send_peer_flush_tids_cmd(struct ath12k *ar,
					u8 peer_addr[ETH_ALEN],
					u32 peer_tid_bitmap,
					u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_flush_tids_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_FLUSH_TIDS_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->peer_tid_bitmap = cpu_to_le32(peer_tid_bitmap);
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n",
		   vdev_id, peer_addr, peer_tid_bitmap);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_FLUSH_TIDS cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_peer_rx_reorder_queue_setup(struct ath12k *ar,
					   int vdev_id, const u8 *addr,
					   dma_addr_t paddr, u8 tid,
					   u8 ba_window_size_valid,
					   u32 ba_window_size)
{
	struct wmi_peer_reorder_queue_setup_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_SETUP_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, addr);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->tid = cpu_to_le32(tid);
	cmd->queue_ptr_lo = cpu_to_le32(lower_32_bits(paddr));
	cmd->queue_ptr_hi = cpu_to_le32(upper_32_bits(paddr));
	cmd->queue_no = cpu_to_le32(tid);
	cmd->ba_window_size_valid = cpu_to_le32(ba_window_size_valid);
	cmd->ba_window_size = cpu_to_le32(ba_window_size);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n",
		   addr, vdev_id, tid);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
				  WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int
ath12k_wmi_rx_reord_queue_remove(struct ath12k *ar,
				 struct ath12k_wmi_rx_reorder_queue_remove_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_reorder_queue_remove_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_REMOVE_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr);
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->tid_mask = cpu_to_le32(arg->peer_tid_bitmap);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "%s: peer_macaddr %pM vdev_id %d, tid_map %d", __func__,
		   arg->peer_macaddr, arg->vdev_id, arg->peer_tid_bitmap);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_pdev_set_param(struct ath12k *ar, u32 param_id,
			      u32 param_value, u8 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_set_param_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_PARAM_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(pdev_id);
	cmd->param_id = cpu_to_le32(param_id);
	cmd->param_value = cpu_to_le32(param_value);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev set param %d pdev id %d value %d\n",
		   param_id, pdev_id, param_value);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_pdev_set_ps_mode(struct ath12k *ar, int vdev_id, u32 enable)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_set_ps_mode_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_MODE_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->sta_ps_mode = cpu_to_le32(enable);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev set psmode %d vdev id %d\n",
		   enable, vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_MODE cmd\n");
1453  		dev_kfree_skb(skb);
1454  	}
1455  
1456  	return ret;
1457  }
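
/* Example (illustrative sketch): toggling station power save when bringing
 * a vdev up. WMI_STA_PS_MODE_ENABLED/WMI_STA_PS_MODE_DISABLED are assumed
 * to be the usual enum wmi_sta_ps_mode values (1/0).
 *
 *	ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id,
 *					  WMI_STA_PS_MODE_DISABLED);
 *	if (ret)
 *		ath12k_warn(ar->ab, "failed to disable PS: %d\n", ret);
 */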
1458  
1459  int ath12k_wmi_pdev_suspend(struct ath12k *ar, u32 suspend_opt,
1460  			    u32 pdev_id)
1461  {
1462  	struct ath12k_wmi_pdev *wmi = ar->wmi;
1463  	struct wmi_pdev_suspend_cmd *cmd;
1464  	struct sk_buff *skb;
1465  	int ret;
1466  
1467  	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1468  	if (!skb)
1469  		return -ENOMEM;
1470  
1471  	cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
1472  
1473  	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SUSPEND_CMD,
1474  						 sizeof(*cmd));
1475  
1476  	cmd->suspend_opt = cpu_to_le32(suspend_opt);
1477  	cmd->pdev_id = cpu_to_le32(pdev_id);
1478  
1479  	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1480  		   "WMI pdev suspend pdev_id %d\n", pdev_id);
1481  
1482  	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID);
1483  	if (ret) {
1484  		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n");
1485  		dev_kfree_skb(skb);
1486  	}
1487  
1488  	return ret;
1489  }
1490  
1491  int ath12k_wmi_pdev_resume(struct ath12k *ar, u32 pdev_id)
1492  {
1493  	struct ath12k_wmi_pdev *wmi = ar->wmi;
1494  	struct wmi_pdev_resume_cmd *cmd;
1495  	struct sk_buff *skb;
1496  	int ret;
1497  
1498  	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1499  	if (!skb)
1500  		return -ENOMEM;
1501  
1502  	cmd = (struct wmi_pdev_resume_cmd *)skb->data;
1503  
1504  	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_RESUME_CMD,
1505  						 sizeof(*cmd));
1506  	cmd->pdev_id = cpu_to_le32(pdev_id);
1507  
1508  	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1509  		   "WMI pdev resume pdev id %d\n", pdev_id);
1510  
1511  	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID);
1512  	if (ret) {
1513  		ath12k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n");
1514  		dev_kfree_skb(skb);
1515  	}
1516  
1517  	return ret;
1518  }
1519  
1520  /* TODO FW Support for the cmd is not available yet.
1521   * Can be tested once the command and corresponding
1522   * event is implemented in FW
1523   */
1524  int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar,
1525  					  enum wmi_bss_chan_info_req_type type)
1526  {
1527  	struct ath12k_wmi_pdev *wmi = ar->wmi;
1528  	struct wmi_pdev_bss_chan_info_req_cmd *cmd;
1529  	struct sk_buff *skb;
1530  	int ret;
1531  
1532  	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1533  	if (!skb)
1534  		return -ENOMEM;
1535  
1536  	cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data;
1537  
1538  	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
1539  						 sizeof(*cmd));
1540  	cmd->req_type = cpu_to_le32(type);
1541  	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
1542  
1543  	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1544  		   "WMI bss chan info req type %d\n", type);
1545  
1546  	ret = ath12k_wmi_cmd_send(wmi, skb,
1547  				  WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
1548  	if (ret) {
1549  		ath12k_warn(ar->ab,
1550  			    "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
1551  		dev_kfree_skb(skb);
1552  	}
1553  
1554  	return ret;
1555  }
1556  
1557  int ath12k_wmi_send_set_ap_ps_param_cmd(struct ath12k *ar, u8 *peer_addr,
1558  					struct ath12k_wmi_ap_ps_arg *arg)
1559  {
1560  	struct ath12k_wmi_pdev *wmi = ar->wmi;
1561  	struct wmi_ap_ps_peer_cmd *cmd;
1562  	struct sk_buff *skb;
1563  	int ret;
1564  
1565  	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1566  	if (!skb)
1567  		return -ENOMEM;
1568  
1569  	cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
1570  	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_AP_PS_PEER_CMD,
1571  						 sizeof(*cmd));
1572  
1573  	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1574  	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1575  	cmd->param = cpu_to_le32(arg->param);
1576  	cmd->value = cpu_to_le32(arg->value);
1577  
1578  	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1579  		   "WMI set ap ps vdev id %d peer %pM param %d value %d\n",
1580  		   arg->vdev_id, peer_addr, arg->param, arg->value);
1581  
1582  	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID);
1583  	if (ret) {
1584  		ath12k_warn(ar->ab,
1585  			    "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
1586  		dev_kfree_skb(skb);
1587  	}
1588  
1589  	return ret;
1590  }
1591  
1592  int ath12k_wmi_set_sta_ps_param(struct ath12k *ar, u32 vdev_id,
1593  				u32 param, u32 param_value)
1594  {
1595  	struct ath12k_wmi_pdev *wmi = ar->wmi;
1596  	struct wmi_sta_powersave_param_cmd *cmd;
1597  	struct sk_buff *skb;
1598  	int ret;
1599  
1600  	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1601  	if (!skb)
1602  		return -ENOMEM;
1603  
1604  	cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
1605  	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_PARAM_CMD,
1606  						 sizeof(*cmd));
1607  
1608  	cmd->vdev_id = cpu_to_le32(vdev_id);
1609  	cmd->param = cpu_to_le32(param);
1610  	cmd->value = cpu_to_le32(param_value);
1611  
1612  	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1613  		   "WMI set sta ps vdev_id %d param %d value %d\n",
1614  		   vdev_id, param, param_value);
1615  
1616  	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
1617  	if (ret) {
1618  		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID\n");
1619  		dev_kfree_skb(skb);
1620  	}
1621  
1622  	return ret;
1623  }
1624  
1625  int ath12k_wmi_force_fw_hang_cmd(struct ath12k *ar, u32 type, u32 delay_time_ms)
1626  {
1627  	struct ath12k_wmi_pdev *wmi = ar->wmi;
1628  	struct wmi_force_fw_hang_cmd *cmd;
1629  	struct sk_buff *skb;
1630  	int ret, len;
1631  
1632  	len = sizeof(*cmd);
1633  
1634  	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1635  	if (!skb)
1636  		return -ENOMEM;
1637  
1638  	cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
1639  	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FORCE_FW_HANG_CMD,
1640  						 len);
1641  
1642  	cmd->type = cpu_to_le32(type);
1643  	cmd->delay_time_ms = cpu_to_le32(delay_time_ms);
1644  
1645  	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID);
1646  
1647  	if (ret) {
1648  		ath12k_warn(ar->ab, "failed to send WMI_FORCE_FW_HANG_CMDID\n");
1649  		dev_kfree_skb(skb);
1650  	}
1651  	return ret;
1652  }
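
/* Example (debug-only sketch): a debugfs hook could trigger a firmware
 * recovery test with a delayed assert. The type/delay constants mirror the
 * ath11k debugfs counterpart and are assumptions for ath12k.
 *
 *	ret = ath12k_wmi_force_fw_hang_cmd(ar, ATH12K_WMI_FW_HANG_ASSERT_TYPE,
 *					   ATH12K_WMI_FW_HANG_DELAY);
 */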
1653  
1654  int ath12k_wmi_vdev_set_param_cmd(struct ath12k *ar, u32 vdev_id,
1655  				  u32 param_id, u32 param_value)
1656  {
1657  	struct ath12k_wmi_pdev *wmi = ar->wmi;
1658  	struct wmi_vdev_set_param_cmd *cmd;
1659  	struct sk_buff *skb;
1660  	int ret;
1661  
1662  	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1663  	if (!skb)
1664  		return -ENOMEM;
1665  
1666  	cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
1667  	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_PARAM_CMD,
1668  						 sizeof(*cmd));
1669  
1670  	cmd->vdev_id = cpu_to_le32(vdev_id);
1671  	cmd->param_id = cpu_to_le32(param_id);
1672  	cmd->param_value = cpu_to_le32(param_value);
1673  
1674  	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1675  		   "WMI vdev id 0x%x set param %d value %d\n",
1676  		   vdev_id, param_id, param_value);
1677  
1678  	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID);
1679  	if (ret) {
1680  		ath12k_warn(ar->ab,
1681  			    "failed to send WMI_VDEV_SET_PARAM_CMDID\n");
1682  		dev_kfree_skb(skb);
1683  	}
1684  
1685  	return ret;
1686  }
1687  
1688  int ath12k_wmi_send_pdev_temperature_cmd(struct ath12k *ar)
1689  {
1690  	struct ath12k_wmi_pdev *wmi = ar->wmi;
1691  	struct wmi_get_pdev_temperature_cmd *cmd;
1692  	struct sk_buff *skb;
1693  	int ret;
1694  
1695  	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1696  	if (!skb)
1697  		return -ENOMEM;
1698  
1699  	cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data;
1700  	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_GET_TEMPERATURE_CMD,
1701  						 sizeof(*cmd));
1702  	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
1703  
1704  	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1705  		   "WMI pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id);
1706  
1707  	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID);
1708  	if (ret) {
1709  		ath12k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n");
1710  		dev_kfree_skb(skb);
1711  	}
1712  
1713  	return ret;
1714  }
1715  
1716  int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar,
1717  					    u32 vdev_id, u32 bcn_ctrl_op)
1718  {
1719  	struct ath12k_wmi_pdev *wmi = ar->wmi;
1720  	struct wmi_bcn_offload_ctrl_cmd *cmd;
1721  	struct sk_buff *skb;
1722  	int ret;
1723  
1724  	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1725  	if (!skb)
1726  		return -ENOMEM;
1727  
1728  	cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data;
1729  	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_OFFLOAD_CTRL_CMD,
1730  						 sizeof(*cmd));
1731  
1732  	cmd->vdev_id = cpu_to_le32(vdev_id);
1733  	cmd->bcn_ctrl_op = cpu_to_le32(bcn_ctrl_op);
1734  
1735  	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1736  		   "WMI bcn ctrl offload vdev id %d ctrl_op %d\n",
1737  		   vdev_id, bcn_ctrl_op);
1738  
1739  	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID);
1740  	if (ret) {
1741  		ath12k_warn(ar->ab,
1742  			    "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");
1743  		dev_kfree_skb(skb);
1744  	}
1745  
1746  	return ret;
1747  }
1748  
1749  int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
1750  			     const u8 *p2p_ie)
1751  {
1752  	struct ath12k_wmi_pdev *wmi = ar->wmi;
1753  	struct wmi_p2p_go_set_beacon_ie_cmd *cmd;
1754  	size_t p2p_ie_len, aligned_len;
1755  	struct wmi_tlv *tlv;
1756  	struct sk_buff *skb;
1757  	void *ptr;
1758  	int ret, len;
1759  
1760  	p2p_ie_len = p2p_ie[1] + 2;
1761  	aligned_len = roundup(p2p_ie_len, sizeof(u32));
1762  
1763  	len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
1764  
1765  	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1766  	if (!skb)
1767  		return -ENOMEM;
1768  
1769  	ptr = skb->data;
1770  	cmd = ptr;
1771  	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_P2P_GO_SET_BEACON_IE,
1772  						 sizeof(*cmd));
1773  	cmd->vdev_id = cpu_to_le32(vdev_id);
1774  	cmd->ie_buf_len = cpu_to_le32(p2p_ie_len);
1775  
1776  	ptr += sizeof(*cmd);
1777  	tlv = ptr;
1778  	tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE,
1779  					     aligned_len);
1780  	memcpy(tlv->value, p2p_ie, p2p_ie_len);
1781  
1782  	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_P2P_GO_SET_BEACON_IE);
1783  	if (ret) {
1784  		ath12k_warn(ar->ab, "failed to send WMI_P2P_GO_SET_BEACON_IE\n");
1785  		dev_kfree_skb(skb);
1786  	}
1787  
1788  	return ret;
1789  }
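
/* Worked example of the length math above (illustrative): a P2P IE whose
 * length octet p2p_ie[1] is 13 gives p2p_ie_len = 13 + 2 = 15 bytes (tag
 * and length octets included); aligned_len rounds that up to 16, so the
 * command occupies sizeof(*cmd) + TLV_HDR_SIZE + 16 bytes, and the single
 * trailing pad byte is expected to be zero from the freshly allocated
 * WMI skb.
 */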
1790  
1791  int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
1792  			struct ieee80211_mutable_offsets *offs,
1793  			struct sk_buff *bcn,
1794  			struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args)
1795  {
1796  	struct ath12k_wmi_pdev *wmi = ar->wmi;
1797  	struct wmi_bcn_tmpl_cmd *cmd;
1798  	struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info;
1799  	struct wmi_tlv *tlv;
1800  	struct sk_buff *skb;
1801  	u32 ema_params = 0;
1802  	void *ptr;
1803  	int ret, len;
1804  	size_t aligned_len = roundup(bcn->len, 4);
1805  
1806  	len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;
1807  
1808  	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1809  	if (!skb)
1810  		return -ENOMEM;
1811  
1812  	cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
1813  	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_TMPL_CMD,
1814  						 sizeof(*cmd));
1815  	cmd->vdev_id = cpu_to_le32(vdev_id);
1816  	cmd->tim_ie_offset = cpu_to_le32(offs->tim_offset);
1817  	cmd->csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[0]);
1818  	cmd->ext_csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[1]);
1819  	cmd->buf_len = cpu_to_le32(bcn->len);
1820  	cmd->mbssid_ie_offset = cpu_to_le32(offs->mbssid_off);
1821  	if (ema_args) {
1822  		u32p_replace_bits(&ema_params, ema_args->bcn_cnt, WMI_EMA_BEACON_CNT);
1823  		u32p_replace_bits(&ema_params, ema_args->bcn_index, WMI_EMA_BEACON_IDX);
1824  		if (ema_args->bcn_index == 0)
1825  			u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_FIRST);
1826  		if (ema_args->bcn_index + 1 == ema_args->bcn_cnt)
1827  			u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_LAST);
1828  		cmd->ema_params = cpu_to_le32(ema_params);
1829  	}
1830  
1831  	ptr = skb->data + sizeof(*cmd);
1832  
1833  	bcn_prb_info = ptr;
1834  	len = sizeof(*bcn_prb_info);
1835  	bcn_prb_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
1836  							  len);
1837  	bcn_prb_info->caps = 0;
1838  	bcn_prb_info->erp = 0;
1839  
1840  	ptr += sizeof(*bcn_prb_info);
1841  
1842  	tlv = ptr;
1843  	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
1844  	memcpy(tlv->value, bcn->data, bcn->len);
1845  
1846  	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
1847  	if (ret) {
1848  		ath12k_warn(ar->ab, "failed to send WMI_BCN_TMPL_CMDID\n");
1849  		dev_kfree_skb(skb);
1850  	}
1851  
1852  	return ret;
1853  }
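
/* Worked example of the EMA bit-packing above (illustrative): for
 * ema_args->bcn_cnt == 4 and ema_args->bcn_index == 3, ema_params carries
 * count 4 in WMI_EMA_BEACON_CNT and index 3 in WMI_EMA_BEACON_IDX, with
 * WMI_EMA_BEACON_FIRST left clear and WMI_EMA_BEACON_LAST set, since
 * index 3 is the final template of the four-beacon EMA set.
 */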
1854  
1855  int ath12k_wmi_vdev_install_key(struct ath12k *ar,
1856  				struct wmi_vdev_install_key_arg *arg)
1857  {
1858  	struct ath12k_wmi_pdev *wmi = ar->wmi;
1859  	struct wmi_vdev_install_key_cmd *cmd;
1860  	struct wmi_tlv *tlv;
1861  	struct sk_buff *skb;
1862  	int ret, len, key_len_aligned;
1863  
1864  	/* WMI_TAG_ARRAY_BYTE needs to be aligned with 4, the actual key
1865  	 * length is specified in cmd->key_len.
1866  	 */
1867  	key_len_aligned = roundup(arg->key_len, 4);
1868  
1869  	len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;
1870  
1871  	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1872  	if (!skb)
1873  		return -ENOMEM;
1874  
1875  	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
1876  	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_INSTALL_KEY_CMD,
1877  						 sizeof(*cmd));
1878  	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1879  	ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
1880  	cmd->key_idx = cpu_to_le32(arg->key_idx);
1881  	cmd->key_flags = cpu_to_le32(arg->key_flags);
1882  	cmd->key_cipher = cpu_to_le32(arg->key_cipher);
1883  	cmd->key_len = cpu_to_le32(arg->key_len);
1884  	cmd->key_txmic_len = cpu_to_le32(arg->key_txmic_len);
1885  	cmd->key_rxmic_len = cpu_to_le32(arg->key_rxmic_len);
1886  
1887  	if (arg->key_rsc_counter)
1888  		cmd->key_rsc_counter = cpu_to_le64(arg->key_rsc_counter);
1889  
1890  	tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
1891  	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, key_len_aligned);
1892  	memcpy(tlv->value, arg->key_data, arg->key_len);
1893  
1894  	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1895  		   "WMI vdev install key idx %d cipher %d len %d\n",
1896  		   arg->key_idx, arg->key_cipher, arg->key_len);
1897  
1898  	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
1899  	if (ret) {
1900  		ath12k_warn(ar->ab,
1901  			    "failed to send WMI_VDEV_INSTALL_KEY cmd\n");
1902  		dev_kfree_skb(skb);
1903  	}
1904  
1905  	return ret;
1906  }
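
/* Example (illustrative sketch): installing a pairwise CCMP key. All field
 * values are placeholders that a real caller would derive from
 * struct ieee80211_key_conf.
 *
 *	struct wmi_vdev_install_key_arg arg = {
 *		.vdev_id = arvif->vdev_id,
 *		.macaddr = sta->addr,
 *		.key_idx = key->keyidx,
 *		.key_len = key->keylen,
 *		.key_data = key->key,
 *		.key_cipher = WMI_CIPHER_AES_CCM,
 *	};
 *
 *	ret = ath12k_wmi_vdev_install_key(ar, &arg);
 */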
1907  
1908  static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
1909  				       struct ath12k_wmi_peer_assoc_arg *arg,
1910  				       bool hw_crypto_disabled)
1911  {
1912  	cmd->peer_flags = 0;
1913  	cmd->peer_flags_ext = 0;
1914  
1915  	if (arg->is_wme_set) {
1916  		if (arg->qos_flag)
1917  			cmd->peer_flags |= cpu_to_le32(WMI_PEER_QOS);
1918  		if (arg->apsd_flag)
1919  			cmd->peer_flags |= cpu_to_le32(WMI_PEER_APSD);
1920  		if (arg->ht_flag)
1921  			cmd->peer_flags |= cpu_to_le32(WMI_PEER_HT);
1922  		if (arg->bw_40)
1923  			cmd->peer_flags |= cpu_to_le32(WMI_PEER_40MHZ);
1924  		if (arg->bw_80)
1925  			cmd->peer_flags |= cpu_to_le32(WMI_PEER_80MHZ);
1926  		if (arg->bw_160)
1927  			cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ);
1928  		if (arg->bw_320)
1929  			cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_320MHZ);
1930  
1931  		/* Typically if STBC is enabled for VHT it should be enabled
1932  		 * for HT as well
1933  		 */
1934  		if (arg->stbc_flag)
1935  			cmd->peer_flags |= cpu_to_le32(WMI_PEER_STBC);
1936  
1937  		/* Typically if LDPC is enabled for VHT it should be enabled
1938  		 * for HT as well
1939  		 */
1940  		if (arg->ldpc_flag)
1941  			cmd->peer_flags |= cpu_to_le32(WMI_PEER_LDPC);
1942  
1943  		if (arg->static_mimops_flag)
1944  			cmd->peer_flags |= cpu_to_le32(WMI_PEER_STATIC_MIMOPS);
1945  		if (arg->dynamic_mimops_flag)
1946  			cmd->peer_flags |= cpu_to_le32(WMI_PEER_DYN_MIMOPS);
1947  		if (arg->spatial_mux_flag)
1948  			cmd->peer_flags |= cpu_to_le32(WMI_PEER_SPATIAL_MUX);
1949  		if (arg->vht_flag)
1950  			cmd->peer_flags |= cpu_to_le32(WMI_PEER_VHT);
1951  		if (arg->he_flag)
1952  			cmd->peer_flags |= cpu_to_le32(WMI_PEER_HE);
1953  		if (arg->twt_requester)
1954  			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_REQ);
1955  		if (arg->twt_responder)
1956  			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_RESP);
1957  		if (arg->eht_flag)
1958  			cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_EHT);
1959  	}
1960  
1961  	/* Suppress authorization for all AUTH modes that need 4-way handshake
1962  	 * (during re-association).
1963  	 * Authorization will be done for these modes on key installation.
1964  	 */
1965  	if (arg->auth_flag)
1966  		cmd->peer_flags |= cpu_to_le32(WMI_PEER_AUTH);
1967  	if (arg->need_ptk_4_way) {
1968  		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_PTK_4_WAY);
1969  		if (!hw_crypto_disabled)
1970  			cmd->peer_flags &= cpu_to_le32(~WMI_PEER_AUTH);
1971  	}
1972  	if (arg->need_gtk_2_way)
1973  		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_GTK_2_WAY);
1974  	/* safe mode bypass the 4-way handshake */
1975  	if (arg->safe_mode_enabled)
1976  		cmd->peer_flags &= cpu_to_le32(~(WMI_PEER_NEED_PTK_4_WAY |
1977  						 WMI_PEER_NEED_GTK_2_WAY));
1978  
1979  	if (arg->is_pmf_enabled)
1980  		cmd->peer_flags |= cpu_to_le32(WMI_PEER_PMF);
1981  
1982  	/* Disable AMSDU for station transmit, if user configures it.
1983  	 * Disable AMSDU for AP transmit to 11n stations, if user configures
1984  	 * it.
1985  	 * if (arg->amsdu_disable) Add after FW support
1986  	 */
1987  
1988  	/* Target asserts if node is marked HT and all MCS rates are set to 0.
1989  	 * Mark the node as non-HT if all the MCS rates are disabled through
1990  	 * iwpriv.
1991  	 */
1992  	if (arg->peer_ht_rates.num_rates == 0)
1993  		cmd->peer_flags &= cpu_to_le32(~WMI_PEER_HT);
1994  }
1995  
1996  int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
1997  				   struct ath12k_wmi_peer_assoc_arg *arg)
1998  {
1999  	struct ath12k_wmi_pdev *wmi = ar->wmi;
2000  	struct wmi_peer_assoc_complete_cmd *cmd;
2001  	struct ath12k_wmi_vht_rate_set_params *mcs;
2002  	struct ath12k_wmi_he_rate_set_params *he_mcs;
2003  	struct ath12k_wmi_eht_rate_set_params *eht_mcs;
2004  	struct sk_buff *skb;
2005  	struct wmi_tlv *tlv;
2006  	void *ptr;
2007  	u32 peer_legacy_rates_align;
2008  	u32 peer_ht_rates_align;
2009  	int i, ret, len;
2010  
2011  	peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates,
2012  					  sizeof(u32));
2013  	peer_ht_rates_align = roundup(arg->peer_ht_rates.num_rates,
2014  				      sizeof(u32));
2015  
2016  	len = sizeof(*cmd) +
2017  	      TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) +
2018  	      TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
2019  	      sizeof(*mcs) + TLV_HDR_SIZE +
2020  	      (sizeof(*he_mcs) * arg->peer_he_mcs_count) +
2021  	      TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count) +
2022  	      TLV_HDR_SIZE + TLV_HDR_SIZE;
2023  
2024  	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2025  	if (!skb)
2026  		return -ENOMEM;
2027  
2028  	ptr = skb->data;
2029  
2030  	cmd = ptr;
2031  	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_ASSOC_COMPLETE_CMD,
2032  						 sizeof(*cmd));
2033  
2034  	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2035  
2036  	cmd->peer_new_assoc = cpu_to_le32(arg->peer_new_assoc);
2037  	cmd->peer_associd = cpu_to_le32(arg->peer_associd);
2038  	cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
2039  
2040  	ath12k_wmi_copy_peer_flags(cmd, arg,
2041  				   test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED,
2042  					    &ar->ab->dev_flags));
2043  
2044  	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_mac);
2045  
2046  	cmd->peer_rate_caps = cpu_to_le32(arg->peer_rate_caps);
2047  	cmd->peer_caps = cpu_to_le32(arg->peer_caps);
2048  	cmd->peer_listen_intval = cpu_to_le32(arg->peer_listen_intval);
2049  	cmd->peer_ht_caps = cpu_to_le32(arg->peer_ht_caps);
2050  	cmd->peer_max_mpdu = cpu_to_le32(arg->peer_max_mpdu);
2051  	cmd->peer_mpdu_density = cpu_to_le32(arg->peer_mpdu_density);
2052  	cmd->peer_vht_caps = cpu_to_le32(arg->peer_vht_caps);
2053  	cmd->peer_phymode = cpu_to_le32(arg->peer_phymode);
2054  
2055  	/* Update 11ax capabilities */
2056  	cmd->peer_he_cap_info = cpu_to_le32(arg->peer_he_cap_macinfo[0]);
2057  	cmd->peer_he_cap_info_ext = cpu_to_le32(arg->peer_he_cap_macinfo[1]);
2058  	cmd->peer_he_cap_info_internal = cpu_to_le32(arg->peer_he_cap_macinfo_internal);
2059  	cmd->peer_he_caps_6ghz = cpu_to_le32(arg->peer_he_caps_6ghz);
2060  	cmd->peer_he_ops = cpu_to_le32(arg->peer_he_ops);
2061  	for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
2062  		cmd->peer_he_cap_phy[i] =
2063  			cpu_to_le32(arg->peer_he_cap_phyinfo[i]);
2064  	cmd->peer_ppet.numss_m1 = cpu_to_le32(arg->peer_ppet.numss_m1);
2065  	cmd->peer_ppet.ru_info = cpu_to_le32(arg->peer_ppet.ru_bit_mask);
2066  	for (i = 0; i < WMI_MAX_NUM_SS; i++)
2067  		cmd->peer_ppet.ppet16_ppet8_ru3_ru0[i] =
2068  			cpu_to_le32(arg->peer_ppet.ppet16_ppet8_ru3_ru0[i]);
2069  
2070  	/* Update 11be capabilities */
2071  	memcpy_and_pad(cmd->peer_eht_cap_mac, sizeof(cmd->peer_eht_cap_mac),
2072  		       arg->peer_eht_cap_mac, sizeof(arg->peer_eht_cap_mac),
2073  		       0);
2074  	memcpy_and_pad(cmd->peer_eht_cap_phy, sizeof(cmd->peer_eht_cap_phy),
2075  		       arg->peer_eht_cap_phy, sizeof(arg->peer_eht_cap_phy),
2076  		       0);
2077  	memcpy_and_pad(&cmd->peer_eht_ppet, sizeof(cmd->peer_eht_ppet),
2078  		       &arg->peer_eht_ppet, sizeof(arg->peer_eht_ppet), 0);
2079  
2080  	/* Update peer legacy rate information */
2081  	ptr += sizeof(*cmd);
2082  
2083  	tlv = ptr;
2084  	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_legacy_rates_align);
2085  
2086  	ptr += TLV_HDR_SIZE;
2087  
2088  	cmd->num_peer_legacy_rates = cpu_to_le32(arg->peer_legacy_rates.num_rates);
2089  	memcpy(ptr, arg->peer_legacy_rates.rates,
2090  	       arg->peer_legacy_rates.num_rates);
2091  
2092  	/* Update peer HT rate information */
2093  	ptr += peer_legacy_rates_align;
2094  
2095  	tlv = ptr;
2096  	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_ht_rates_align);
2097  	ptr += TLV_HDR_SIZE;
2098  	cmd->num_peer_ht_rates = cpu_to_le32(arg->peer_ht_rates.num_rates);
2099  	memcpy(ptr, arg->peer_ht_rates.rates,
2100  	       arg->peer_ht_rates.num_rates);
2101  
2102  	/* VHT Rates */
2103  	ptr += peer_ht_rates_align;
2104  
2105  	mcs = ptr;
2106  
2107  	mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VHT_RATE_SET,
2108  						 sizeof(*mcs));
2109  
2110  	cmd->peer_nss = cpu_to_le32(arg->peer_nss);
2111  
2112  	/* Update bandwidth-NSS mapping */
2113  	cmd->peer_bw_rxnss_override = 0;
2114  	cmd->peer_bw_rxnss_override |= cpu_to_le32(arg->peer_bw_rxnss_override);
2115  
2116  	if (arg->vht_capable) {
2117  		mcs->rx_max_rate = cpu_to_le32(arg->rx_max_rate);
2118  		mcs->rx_mcs_set = cpu_to_le32(arg->rx_mcs_set);
2119  		mcs->tx_max_rate = cpu_to_le32(arg->tx_max_rate);
2120  		mcs->tx_mcs_set = cpu_to_le32(arg->tx_mcs_set);
2121  	}
2122  
2123  	/* HE Rates */
2124  	cmd->peer_he_mcs = cpu_to_le32(arg->peer_he_mcs_count);
2125  	cmd->min_data_rate = cpu_to_le32(arg->min_data_rate);
2126  
2127  	ptr += sizeof(*mcs);
2128  
2129  	len = arg->peer_he_mcs_count * sizeof(*he_mcs);
2130  
2131  	tlv = ptr;
2132  	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2133  	ptr += TLV_HDR_SIZE;
2134  
2135  	/* Loop through the HE rate set */
2136  	for (i = 0; i < arg->peer_he_mcs_count; i++) {
2137  		he_mcs = ptr;
2138  		he_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
2139  							    sizeof(*he_mcs));
2140  
2141  		he_mcs->rx_mcs_set = cpu_to_le32(arg->peer_he_rx_mcs_set[i]);
2142  		he_mcs->tx_mcs_set = cpu_to_le32(arg->peer_he_tx_mcs_set[i]);
2143  		ptr += sizeof(*he_mcs);
2144  	}
2145  
2146  	/* MLO header tag with 0 length */
2147  	len = 0;
2148  	tlv = ptr;
2149  	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2150  	ptr += TLV_HDR_SIZE;
2151  
2152  	/* Loop through the EHT rate set */
2153  	len = arg->peer_eht_mcs_count * sizeof(*eht_mcs);
2154  	tlv = ptr;
2155  	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2156  	ptr += TLV_HDR_SIZE;
2157  
2158  	for (i = 0; i < arg->peer_eht_mcs_count; i++) {
2159  		eht_mcs = ptr;
2160  		eht_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
2161  							     sizeof(*eht_mcs));
2162  
2163  		eht_mcs->rx_mcs_set = cpu_to_le32(arg->peer_eht_rx_mcs_set[i]);
2164  		eht_mcs->tx_mcs_set = cpu_to_le32(arg->peer_eht_tx_mcs_set[i]);
2165  		ptr += sizeof(*eht_mcs);
2166  	}
2167  
2168  	/* ML partner links tag with 0 length */
2169  	len = 0;
2170  	tlv = ptr;
2171  	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
2172  	ptr += TLV_HDR_SIZE;
2173  
2174  	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2175  		   "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x\n",
2176  		   cmd->vdev_id, cmd->peer_associd, arg->peer_mac,
2177  		   cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
2178  		   cmd->peer_listen_intval, cmd->peer_ht_caps,
2179  		   cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
2180  		   cmd->peer_mpdu_density,
2181  		   cmd->peer_vht_caps, cmd->peer_he_cap_info,
2182  		   cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
2183  		   cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
2184  		   cmd->peer_he_cap_phy[2],
2185  		   cmd->peer_bw_rxnss_override, cmd->peer_flags_ext,
2186  		   cmd->peer_eht_cap_mac[0], cmd->peer_eht_cap_mac[1],
2187  		   cmd->peer_eht_cap_phy[0], cmd->peer_eht_cap_phy[1],
2188  		   cmd->peer_eht_cap_phy[2]);
2189  
2190  	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
2191  	if (ret) {
2192  		ath12k_warn(ar->ab,
2193  			    "failed to send WMI_PEER_ASSOC_CMDID\n");
2194  		dev_kfree_skb(skb);
2195  	}
2196  
2197  	return ret;
2198  }
2199  
2200  void ath12k_wmi_start_scan_init(struct ath12k *ar,
2201  				struct ath12k_wmi_scan_req_arg *arg)
2202  {
2203  	/* setup commonly used values */
2204  	arg->scan_req_id = 1;
2205  	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
2206  	arg->dwell_time_active = 50;
2207  	arg->dwell_time_active_2g = 0;
2208  	arg->dwell_time_passive = 150;
2209  	arg->dwell_time_active_6g = 40;
2210  	arg->dwell_time_passive_6g = 30;
2211  	arg->min_rest_time = 50;
2212  	arg->max_rest_time = 500;
2213  	arg->repeat_probe_time = 0;
2214  	arg->probe_spacing_time = 0;
2215  	arg->idle_time = 0;
2216  	arg->max_scan_time = 20000;
2217  	arg->probe_delay = 5;
2218  	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
2219  				  WMI_SCAN_EVENT_COMPLETED |
2220  				  WMI_SCAN_EVENT_BSS_CHANNEL |
2221  				  WMI_SCAN_EVENT_FOREIGN_CHAN |
2222  				  WMI_SCAN_EVENT_DEQUEUED;
2223  	arg->scan_f_chan_stat_evnt = 1;
2224  	arg->num_bssid = 1;
2225  
2226  	/* fill bssid_list[0] with 0xff, otherwise bssid and RA will be
2227  	 * ZEROs in probe request
2228  	 */
2229  	eth_broadcast_addr(arg->bssid_list[0].addr);
2230  }
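
/* Example (illustrative sketch): a scan request typically starts from these
 * defaults and overrides only what it needs before sending. ATH12K_SCAN_ID
 * is assumed to be the driver-wide scan id used by the mac layer.
 *
 *	struct ath12k_wmi_scan_req_arg *arg = kzalloc(sizeof(*arg), GFP_KERNEL);
 *
 *	ath12k_wmi_start_scan_init(ar, arg);
 *	arg->vdev_id = arvif->vdev_id;
 *	arg->scan_id = ATH12K_SCAN_ID;
 *	arg->num_chan = 1;
 *	arg->chan_list[0] = 2412;	(2412 MHz == 2 GHz channel 1)
 *
 *	ret = ath12k_wmi_send_scan_start_cmd(ar, arg);
 */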
2231  
2232  static void ath12k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
2233  						   struct ath12k_wmi_scan_req_arg *arg)
2234  {
2235  	/* Scan events subscription */
2236  	if (arg->scan_ev_started)
2237  		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_STARTED);
2238  	if (arg->scan_ev_completed)
2239  		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_COMPLETED);
2240  	if (arg->scan_ev_bss_chan)
2241  		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_BSS_CHANNEL);
2242  	if (arg->scan_ev_foreign_chan)
2243  		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN);
2244  	if (arg->scan_ev_dequeued)
2245  		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_DEQUEUED);
2246  	if (arg->scan_ev_preempted)
2247  		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_PREEMPTED);
2248  	if (arg->scan_ev_start_failed)
2249  		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_START_FAILED);
2250  	if (arg->scan_ev_restarted)
2251  		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESTARTED);
2252  	if (arg->scan_ev_foreign_chn_exit)
2253  		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT);
2254  	if (arg->scan_ev_suspended)
2255  		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_SUSPENDED);
2256  	if (arg->scan_ev_resumed)
2257  		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESUMED);
2258  
2259  	/* Set scan control flags */
2260  	cmd->scan_ctrl_flags = 0;
2261  	if (arg->scan_f_passive)
2262  		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_PASSIVE);
2263  	if (arg->scan_f_strict_passive_pch)
2264  		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN);
2265  	if (arg->scan_f_promisc_mode)
2266  		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROMISCUOS);
2267  	if (arg->scan_f_capture_phy_err)
2268  		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CAPTURE_PHY_ERROR);
2269  	if (arg->scan_f_half_rate)
2270  		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_HALF_RATE_SUPPORT);
2271  	if (arg->scan_f_quarter_rate)
2272  		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT);
2273  	if (arg->scan_f_cck_rates)
2274  		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_CCK_RATES);
2275  	if (arg->scan_f_ofdm_rates)
2276  		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_OFDM_RATES);
2277  	if (arg->scan_f_chan_stat_evnt)
2278  		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CHAN_STAT_EVENT);
2279  	if (arg->scan_f_filter_prb_req)
2280  		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
2281  	if (arg->scan_f_bcast_probe)
2282  		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_BCAST_PROBE_REQ);
2283  	if (arg->scan_f_offchan_mgmt_tx)
2284  		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_MGMT_TX);
2285  	if (arg->scan_f_offchan_data_tx)
2286  		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_DATA_TX);
2287  	if (arg->scan_f_force_active_dfs_chn)
2288  		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS);
2289  	if (arg->scan_f_add_tpc_ie_in_probe)
2290  		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ);
2291  	if (arg->scan_f_add_ds_ie_in_probe)
2292  		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ);
2293  	if (arg->scan_f_add_spoofed_mac_in_probe)
2294  		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ);
2295  	if (arg->scan_f_add_rand_seq_in_probe)
2296  		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ);
2297  	if (arg->scan_f_en_ie_whitelist_in_probe)
2298  		cmd->scan_ctrl_flags |=
2299  			cpu_to_le32(WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ);
2300  
2301  	cmd->scan_ctrl_flags |= le32_encode_bits(arg->adaptive_dwell_time_mode,
2302  						 WMI_SCAN_DWELL_MODE_MASK);
2303  }
2304  
2305  int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar,
2306  				   struct ath12k_wmi_scan_req_arg *arg)
2307  {
2308  	struct ath12k_wmi_pdev *wmi = ar->wmi;
2309  	struct wmi_start_scan_cmd *cmd;
2310  	struct ath12k_wmi_ssid_params *ssid = NULL;
2311  	struct ath12k_wmi_mac_addr_params *bssid;
2312  	struct sk_buff *skb;
2313  	struct wmi_tlv *tlv;
2314  	void *ptr;
2315  	int i, ret, len;
2316  	u32 *tmp_ptr, extraie_len_with_pad = 0;
2317  	struct ath12k_wmi_hint_short_ssid_arg *s_ssid = NULL;
2318  	struct ath12k_wmi_hint_bssid_arg *hint_bssid = NULL;
2319  
2320  	len = sizeof(*cmd);
2321  
2322  	len += TLV_HDR_SIZE;
2323  	if (arg->num_chan)
2324  		len += arg->num_chan * sizeof(u32);
2325  
2326  	len += TLV_HDR_SIZE;
2327  	if (arg->num_ssids)
2328  		len += arg->num_ssids * sizeof(*ssid);
2329  
2330  	len += TLV_HDR_SIZE;
2331  	if (arg->num_bssid)
2332  		len += sizeof(*bssid) * arg->num_bssid;
2333  
2334  	if (arg->num_hint_bssid)
2335  		len += TLV_HDR_SIZE +
2336  		       arg->num_hint_bssid * sizeof(*hint_bssid);
2337  
2338  	if (arg->num_hint_s_ssid)
2339  		len += TLV_HDR_SIZE +
2340  		       arg->num_hint_s_ssid * sizeof(*s_ssid);
2341  
2342  	len += TLV_HDR_SIZE;
2343  	if (arg->extraie.len)
2344  		extraie_len_with_pad =
2345  			roundup(arg->extraie.len, sizeof(u32));
2346  	if (extraie_len_with_pad <= (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len)) {
2347  		len += extraie_len_with_pad;
2348  	} else {
2349  		ath12k_warn(ar->ab, "discard large size %d bytes extraie for scan start\n",
2350  			    arg->extraie.len);
2351  		extraie_len_with_pad = 0;
2352  	}
2353  
2354  	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2355  	if (!skb)
2356  		return -ENOMEM;
2357  
2358  	ptr = skb->data;
2359  
2360  	cmd = ptr;
2361  	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_START_SCAN_CMD,
2362  						 sizeof(*cmd));
2363  
2364  	cmd->scan_id = cpu_to_le32(arg->scan_id);
2365  	cmd->scan_req_id = cpu_to_le32(arg->scan_req_id);
2366  	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2367  	cmd->scan_priority = cpu_to_le32(arg->scan_priority);
2368  	cmd->notify_scan_events = cpu_to_le32(arg->notify_scan_events);
2369  
2370  	ath12k_wmi_copy_scan_event_cntrl_flags(cmd, arg);
2371  
2372  	cmd->dwell_time_active = cpu_to_le32(arg->dwell_time_active);
2373  	cmd->dwell_time_active_2g = cpu_to_le32(arg->dwell_time_active_2g);
2374  	cmd->dwell_time_passive = cpu_to_le32(arg->dwell_time_passive);
2375  	cmd->dwell_time_active_6g = cpu_to_le32(arg->dwell_time_active_6g);
2376  	cmd->dwell_time_passive_6g = cpu_to_le32(arg->dwell_time_passive_6g);
2377  	cmd->min_rest_time = cpu_to_le32(arg->min_rest_time);
2378  	cmd->max_rest_time = cpu_to_le32(arg->max_rest_time);
2379  	cmd->repeat_probe_time = cpu_to_le32(arg->repeat_probe_time);
2380  	cmd->probe_spacing_time = cpu_to_le32(arg->probe_spacing_time);
2381  	cmd->idle_time = cpu_to_le32(arg->idle_time);
2382  	cmd->max_scan_time = cpu_to_le32(arg->max_scan_time);
2383  	cmd->probe_delay = cpu_to_le32(arg->probe_delay);
2384  	cmd->burst_duration = cpu_to_le32(arg->burst_duration);
2385  	cmd->num_chan = cpu_to_le32(arg->num_chan);
2386  	cmd->num_bssid = cpu_to_le32(arg->num_bssid);
2387  	cmd->num_ssids = cpu_to_le32(arg->num_ssids);
2388  	cmd->ie_len = cpu_to_le32(arg->extraie.len);
2389  	cmd->n_probes = cpu_to_le32(arg->n_probes);
2390  
2391  	ptr += sizeof(*cmd);
2392  
2393  	len = arg->num_chan * sizeof(u32);
2394  
2395  	tlv = ptr;
2396  	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, len);
2397  	ptr += TLV_HDR_SIZE;
2398  	tmp_ptr = (u32 *)ptr;
2399  
2400  	memcpy(tmp_ptr, arg->chan_list, len);
2401  
2402  	ptr += len;
2403  
2404  	len = arg->num_ssids * sizeof(*ssid);
2405  	tlv = ptr;
2406  	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2407  
2408  	ptr += TLV_HDR_SIZE;
2409  
2410  	if (arg->num_ssids) {
2411  		ssid = ptr;
2412  		for (i = 0; i < arg->num_ssids; ++i) {
2413  			ssid->ssid_len = cpu_to_le32(arg->ssid[i].ssid_len);
2414  			memcpy(ssid->ssid, arg->ssid[i].ssid,
2415  			       arg->ssid[i].ssid_len);
2416  			ssid++;
2417  		}
2418  	}
2419  
2420  	ptr += (arg->num_ssids * sizeof(*ssid));
2421  	len = arg->num_bssid * sizeof(*bssid);
2422  	tlv = ptr;
2423  	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2424  
2425  	ptr += TLV_HDR_SIZE;
2426  	bssid = ptr;
2427  
2428  	if (arg->num_bssid) {
2429  		for (i = 0; i < arg->num_bssid; ++i) {
2430  			ether_addr_copy(bssid->addr,
2431  					arg->bssid_list[i].addr);
2432  			bssid++;
2433  		}
2434  	}
2435  
2436  	ptr += arg->num_bssid * sizeof(*bssid);
2437  
2438  	len = extraie_len_with_pad;
2439  	tlv = ptr;
2440  	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len);
2441  	ptr += TLV_HDR_SIZE;
2442  
2443  	if (extraie_len_with_pad)
2444  		memcpy(ptr, arg->extraie.ptr,
2445  		       arg->extraie.len);
2446  
2447  	ptr += extraie_len_with_pad;
2448  
2449  	if (arg->num_hint_s_ssid) {
2450  		len = arg->num_hint_s_ssid * sizeof(*s_ssid);
2451  		tlv = ptr;
2452  		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2453  		ptr += TLV_HDR_SIZE;
2454  		s_ssid = ptr;
2455  		for (i = 0; i < arg->num_hint_s_ssid; ++i) {
2456  			s_ssid->freq_flags = arg->hint_s_ssid[i].freq_flags;
2457  			s_ssid->short_ssid = arg->hint_s_ssid[i].short_ssid;
2458  			s_ssid++;
2459  		}
2460  		ptr += len;
2461  	}
2462  
2463  	if (arg->num_hint_bssid) {
2464  		len = arg->num_hint_bssid * sizeof(struct ath12k_wmi_hint_bssid_arg);
2465  		tlv = ptr;
2466  		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2467  		ptr += TLV_HDR_SIZE;
2468  		hint_bssid = ptr;
2469  		for (i = 0; i < arg->num_hint_bssid; ++i) {
2470  			hint_bssid->freq_flags =
2471  				arg->hint_bssid[i].freq_flags;
2472  			ether_addr_copy(&hint_bssid->bssid.addr[0],
2473  					&arg->hint_bssid[i].bssid.addr[0]);
2474  			hint_bssid++;
2475  		}
2476  	}
2477  
2478  	ret = ath12k_wmi_cmd_send(wmi, skb,
2479  				  WMI_START_SCAN_CMDID);
2480  	if (ret) {
2481  		ath12k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n");
2482  		dev_kfree_skb(skb);
2483  	}
2484  
2485  	return ret;
2486  }
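
/* For reference, the scan command buffer assembled above is laid out as
 * follows (matching the len computation at the top of the function):
 *
 *	struct wmi_start_scan_cmd
 *	TLV  ARRAY_UINT32        channel frequency list (num_chan words)
 *	TLV  ARRAY_FIXED_STRUCT  SSID list (num_ssids entries)
 *	TLV  ARRAY_FIXED_STRUCT  BSSID list (num_bssid entries)
 *	TLV  ARRAY_BYTE          extra IEs, padded to a 4-byte boundary
 *	TLV  ARRAY_FIXED_STRUCT  short-SSID hints (only if num_hint_s_ssid)
 *	TLV  ARRAY_FIXED_STRUCT  BSSID hints (only if num_hint_bssid)
 */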
2487  
2488  int ath12k_wmi_send_scan_stop_cmd(struct ath12k *ar,
2489  				  struct ath12k_wmi_scan_cancel_arg *arg)
2490  {
2491  	struct ath12k_wmi_pdev *wmi = ar->wmi;
2492  	struct wmi_stop_scan_cmd *cmd;
2493  	struct sk_buff *skb;
2494  	int ret;
2495  
2496  	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2497  	if (!skb)
2498  		return -ENOMEM;
2499  
2500  	cmd = (struct wmi_stop_scan_cmd *)skb->data;
2501  
2502  	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STOP_SCAN_CMD,
2503  						 sizeof(*cmd));
2504  
2505  	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2506  	cmd->requestor = cpu_to_le32(arg->requester);
2507  	cmd->scan_id = cpu_to_le32(arg->scan_id);
2508  	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
2509  	/* stop the scan with the corresponding scan_id */
2510  	if (arg->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
2511  		/* Cancelling all scans */
2512  		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ALL);
2513  	} else if (arg->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
2514  		/* Cancelling VAP scans */
2515  		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_VAP_ALL);
2516  	} else if (arg->req_type == WLAN_SCAN_CANCEL_SINGLE) {
2517  		/* Cancelling specific scan */
2518  		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ONE);
2519  	} else {
2520  		ath12k_warn(ar->ab, "invalid scan cancel req_type %d\n",
2521  			    arg->req_type);
2522  		dev_kfree_skb(skb);
2523  		return -EINVAL;
2524  	}
2525  
2526  	ret = ath12k_wmi_cmd_send(wmi, skb,
2527  				  WMI_STOP_SCAN_CMDID);
2528  	if (ret) {
2529  		ath12k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n");
2530  		dev_kfree_skb(skb);
2531  	}
2532  
2533  	return ret;
2534  }
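
/* Example (illustrative sketch): cancelling one outstanding scan, as a
 * scan-timeout handler might. ATH12K_SCAN_ID is again an assumed
 * driver-wide scan id.
 *
 *	struct ath12k_wmi_scan_cancel_arg arg = {
 *		.req_type = WLAN_SCAN_CANCEL_SINGLE,
 *		.scan_id = ATH12K_SCAN_ID,
 *		.vdev_id = arvif->vdev_id,
 *		.pdev_id = ar->pdev->pdev_id,
 *	};
 *
 *	ret = ath12k_wmi_send_scan_stop_cmd(ar, &arg);
 */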
2535  
2536  int ath12k_wmi_send_scan_chan_list_cmd(struct ath12k *ar,
2537  				       struct ath12k_wmi_scan_chan_list_arg *arg)
2538  {
2539  	struct ath12k_wmi_pdev *wmi = ar->wmi;
2540  	struct wmi_scan_chan_list_cmd *cmd;
2541  	struct sk_buff *skb;
2542  	struct ath12k_wmi_channel_params *chan_info;
2543  	struct ath12k_wmi_channel_arg *channel_arg;
2544  	struct wmi_tlv *tlv;
2545  	void *ptr;
2546  	int i, ret, len;
2547  	u16 num_send_chans, num_sends = 0, max_chan_limit = 0;
2548  	__le32 *reg1, *reg2;
2549  
2550  	channel_arg = &arg->channel[0];
2551  	while (arg->nallchans) {
2552  		len = sizeof(*cmd) + TLV_HDR_SIZE;
2553  		max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
2554  			sizeof(*chan_info);
2555  
2556  		num_send_chans = min(arg->nallchans, max_chan_limit);
2557  
2558  		arg->nallchans -= num_send_chans;
2559  		len += sizeof(*chan_info) * num_send_chans;
2560  
2561  		skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2562  		if (!skb)
2563  			return -ENOMEM;
2564  
2565  		cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
2566  		cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SCAN_CHAN_LIST_CMD,
2567  							 sizeof(*cmd));
2568  		cmd->pdev_id = cpu_to_le32(arg->pdev_id);
2569  		cmd->num_scan_chans = cpu_to_le32(num_send_chans);
2570  		if (num_sends)
2571  			cmd->flags |= cpu_to_le32(WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG);
2572  
2573  		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2574  			   "WMI no.of chan = %d len = %d pdev_id = %d num_sends = %d\n",
2575  			   num_send_chans, len, cmd->pdev_id, num_sends);
2576  
2577  		ptr = skb->data + sizeof(*cmd);
2578  
2579  		len = sizeof(*chan_info) * num_send_chans;
2580  		tlv = ptr;
2581  		tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_STRUCT,
2582  						     len);
2583  		ptr += TLV_HDR_SIZE;
2584  
2585  		for (i = 0; i < num_send_chans; ++i) {
2586  			chan_info = ptr;
2587  			memset(chan_info, 0, sizeof(*chan_info));
2588  			len = sizeof(*chan_info);
2589  			chan_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
2590  								       len);
2591  
2592  			reg1 = &chan_info->reg_info_1;
2593  			reg2 = &chan_info->reg_info_2;
2594  			chan_info->mhz = cpu_to_le32(channel_arg->mhz);
2595  			chan_info->band_center_freq1 = cpu_to_le32(channel_arg->cfreq1);
2596  			chan_info->band_center_freq2 = cpu_to_le32(channel_arg->cfreq2);
2597  
2598  			if (channel_arg->is_chan_passive)
2599  				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
2600  			if (channel_arg->allow_he)
2601  				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
2602  			else if (channel_arg->allow_vht)
2603  				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
2604  			else if (channel_arg->allow_ht)
2605  				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
2606  			if (channel_arg->half_rate)
2607  				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_HALF_RATE);
2608  			if (channel_arg->quarter_rate)
2609  				chan_info->info |=
2610  					cpu_to_le32(WMI_CHAN_INFO_QUARTER_RATE);
2611  
2612  			if (channel_arg->psc_channel)
2613  				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PSC);
2614  
2615  			if (channel_arg->dfs_set)
2616  				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
2617  
2618  			chan_info->info |= le32_encode_bits(channel_arg->phy_mode,
2619  							    WMI_CHAN_INFO_MODE);
2620  			*reg1 |= le32_encode_bits(channel_arg->minpower,
2621  						  WMI_CHAN_REG_INFO1_MIN_PWR);
2622  			*reg1 |= le32_encode_bits(channel_arg->maxpower,
2623  						  WMI_CHAN_REG_INFO1_MAX_PWR);
2624  			*reg1 |= le32_encode_bits(channel_arg->maxregpower,
2625  						  WMI_CHAN_REG_INFO1_MAX_REG_PWR);
2626  			*reg1 |= le32_encode_bits(channel_arg->reg_class_id,
2627  						  WMI_CHAN_REG_INFO1_REG_CLS);
2628  			*reg2 |= le32_encode_bits(channel_arg->antennamax,
2629  						  WMI_CHAN_REG_INFO2_ANT_MAX);
2630  
2631  			ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2632  				   "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
2633  				   i, chan_info->mhz, chan_info->info);
2634  
2635  			ptr += sizeof(*chan_info);
2636  
2637  			channel_arg++;
2638  		}
2639  
2640  		ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
2641  		if (ret) {
2642  			ath12k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
2643  			dev_kfree_skb(skb);
2644  			return ret;
2645  		}
2646  
2647  		num_sends++;
2648  	}
2649  
2650  	return 0;
2651  }
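
/* Worked example of the chunking above (illustrative): with a 4096-byte
 * max_msg_len, each message fits roughly (4096 - sizeof(*cmd) -
 * TLV_HDR_SIZE) / sizeof(*chan_info) channels, so a long regulatory
 * channel list is split across several WMI_SCAN_CHAN_LIST commands; every
 * message after the first carries WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG so
 * firmware appends to, rather than replaces, its channel list.
 */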
2652  
2653  int ath12k_wmi_send_wmm_update_cmd(struct ath12k *ar, u32 vdev_id,
2654  				   struct wmi_wmm_params_all_arg *param)
2655  {
2656  	struct ath12k_wmi_pdev *wmi = ar->wmi;
2657  	struct wmi_vdev_set_wmm_params_cmd *cmd;
2658  	struct wmi_wmm_params *wmm_param;
2659  	struct wmi_wmm_params_arg *wmi_wmm_arg;
2660  	struct sk_buff *skb;
2661  	int ret, ac;
2662  
2663  	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2664  	if (!skb)
2665  		return -ENOMEM;
2666  
2667  	cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data;
2668  	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
2669  						 sizeof(*cmd));
2670  
2671  	cmd->vdev_id = cpu_to_le32(vdev_id);
2672  	cmd->wmm_param_type = 0;
2673  
2674  	for (ac = 0; ac < WME_NUM_AC; ac++) {
2675  		switch (ac) {
2676  		case WME_AC_BE:
2677  			wmi_wmm_arg = &param->ac_be;
2678  			break;
2679  		case WME_AC_BK:
2680  			wmi_wmm_arg = &param->ac_bk;
2681  			break;
2682  		case WME_AC_VI:
2683  			wmi_wmm_arg = &param->ac_vi;
2684  			break;
2685  		case WME_AC_VO:
2686  			wmi_wmm_arg = &param->ac_vo;
2687  			break;
2688  		}
2689  
2690  		wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac];
2691  		wmm_param->tlv_header =
2692  			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
2693  					       sizeof(*wmm_param));
2694  
2695  		wmm_param->aifs = cpu_to_le32(wmi_wmm_arg->aifs);
2696  		wmm_param->cwmin = cpu_to_le32(wmi_wmm_arg->cwmin);
2697  		wmm_param->cwmax = cpu_to_le32(wmi_wmm_arg->cwmax);
2698  		wmm_param->txoplimit = cpu_to_le32(wmi_wmm_arg->txop);
2699  		wmm_param->acm = cpu_to_le32(wmi_wmm_arg->acm);
2700  		wmm_param->no_ack = cpu_to_le32(wmi_wmm_arg->no_ack);
2701  
2702  		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2703  			   "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
2704  			   ac, wmm_param->aifs, wmm_param->cwmin,
2705  			   wmm_param->cwmax, wmm_param->txoplimit,
2706  			   wmm_param->acm, wmm_param->no_ack);
2707  	}
2708  	ret = ath12k_wmi_cmd_send(wmi, skb,
2709  				  WMI_VDEV_SET_WMM_PARAMS_CMDID);
2710  	if (ret) {
2711  		ath12k_warn(ar->ab,
2712  			    "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID\n");
2713  		dev_kfree_skb(skb);
2714  	}
2715  
2716  	return ret;
2717  }
2718  
2719  int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar,
2720  						  u32 pdev_id)
2721  {
2722  	struct ath12k_wmi_pdev *wmi = ar->wmi;
2723  	struct wmi_dfs_phyerr_offload_cmd *cmd;
2724  	struct sk_buff *skb;
2725  	int ret;
2726  
2727  	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2728  	if (!skb)
2729  		return -ENOMEM;
2730  
2731  	cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data;
2732  	cmd->tlv_header =
2733  		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
2734  				       sizeof(*cmd));
2735  
2736  	cmd->pdev_id = cpu_to_le32(pdev_id);
2737  
2738  	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2739  		   "WMI dfs phy err offload enable pdev id %d\n", pdev_id);
2740  
2741  	ret = ath12k_wmi_cmd_send(wmi, skb,
2742  				  WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
2743  	if (ret) {
2744  		ath12k_warn(ar->ab,
2745  			    "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
2746  		dev_kfree_skb(skb);
2747  	}
2748  
2749  	return ret;
2750  }
2751  
2752  int ath12k_wmi_set_bios_cmd(struct ath12k_base *ab, u32 param_id,
2753  			    const u8 *buf, size_t buf_len)
2754  {
2755  	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
2756  	struct wmi_pdev_set_bios_interface_cmd *cmd;
2757  	struct wmi_tlv *tlv;
2758  	struct sk_buff *skb;
2759  	u8 *ptr;
2760  	u32 len, len_aligned;
2761  	int ret;
2762  
2763  	len_aligned = roundup(buf_len, sizeof(u32));
2764  	len = sizeof(*cmd) + TLV_HDR_SIZE + len_aligned;
2765  
2766  	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
2767  	if (!skb)
2768  		return -ENOMEM;
2769  
2770  	cmd = (struct wmi_pdev_set_bios_interface_cmd *)skb->data;
2771  	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD,
2772  						 sizeof(*cmd));
2773  	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
2774  	cmd->param_type_id = cpu_to_le32(param_id);
2775  	cmd->length = cpu_to_le32(buf_len);
2776  
2777  	ptr = skb->data + sizeof(*cmd);
2778  	tlv = (struct wmi_tlv *)ptr;
2779  	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len_aligned);
2780  	ptr += TLV_HDR_SIZE;
2781  	memcpy(ptr, buf, buf_len);
2782  
2783  	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
2784  				  skb,
2785  				  WMI_PDEV_SET_BIOS_INTERFACE_CMDID);
2786  	if (ret) {
2787  		ath12k_warn(ab,
2788  			    "failed to send WMI_PDEV_SET_BIOS_INTERFACE_CMDID parameter id %d: %d\n",
2789  			    param_id, ret);
2790  		dev_kfree_skb(skb);
2791  	}
2792  
2793  	return ret;
2794  }
2795  
2796  int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table)
2797  {
2798  	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
2799  	struct wmi_pdev_set_bios_sar_table_cmd *cmd;
2800  	struct wmi_tlv *tlv;
2801  	struct sk_buff *skb;
2802  	int ret;
2803  	u8 *buf_ptr;
2804  	u32 len, sar_table_len_aligned, sar_dbs_backoff_len_aligned;
2805  	const u8 *psar_value = psar_table + ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET;
2806  	const u8 *pdbs_value = psar_table + ATH12K_ACPI_DBS_BACKOFF_DATA_OFFSET;
2807  
2808  	sar_table_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_TABLE_LEN, sizeof(u32));
2809  	sar_dbs_backoff_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN,
2810  					      sizeof(u32));
2811  	len = sizeof(*cmd) + TLV_HDR_SIZE + sar_table_len_aligned +
2812  		TLV_HDR_SIZE + sar_dbs_backoff_len_aligned;
2813  
2814  	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
2815  	if (!skb)
2816  		return -ENOMEM;
2817  
2818  	cmd = (struct wmi_pdev_set_bios_sar_table_cmd *)skb->data;
2819  	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD,
2820  						 sizeof(*cmd));
2821  	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
2822  	cmd->sar_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_TABLE_LEN);
2823  	cmd->dbs_backoff_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);
2824  
2825  	buf_ptr = skb->data + sizeof(*cmd);
2826  	tlv = (struct wmi_tlv *)buf_ptr;
2827  	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
2828  					 sar_table_len_aligned);
2829  	buf_ptr += TLV_HDR_SIZE;
2830  	memcpy(buf_ptr, psar_value, ATH12K_ACPI_BIOS_SAR_TABLE_LEN);
2831  
2832  	buf_ptr += sar_table_len_aligned;
2833  	tlv = (struct wmi_tlv *)buf_ptr;
2834  	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
2835  					 sar_dbs_backoff_len_aligned);
2836  	buf_ptr += TLV_HDR_SIZE;
2837  	memcpy(buf_ptr, pdbs_value, ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);
2838  
2839  	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
2840  				  skb,
2841  				  WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID);
2842  	if (ret) {
2843  		ath12k_warn(ab,
2844  			    "failed to send WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID %d\n",
2845  			    ret);
2846  		dev_kfree_skb(skb);
2847  	}
2848  
2849  	return ret;
2850  }
2851  
2852  int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 *pgeo_table)
2853  {
2854  	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
2855  	struct wmi_pdev_set_bios_geo_table_cmd *cmd;
2856  	struct wmi_tlv *tlv;
2857  	struct sk_buff *skb;
2858  	int ret;
2859  	u8 *buf_ptr;
2860  	u32 len, sar_geo_len_aligned;
2861  	const u8 *pgeo_value = pgeo_table + ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET;
2862  
2863  	sar_geo_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN, sizeof(u32));
2864  	len = sizeof(*cmd) + TLV_HDR_SIZE + sar_geo_len_aligned;
2865  
2866  	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
2867  	if (!skb)
2868  		return -ENOMEM;
2869  
2870  	cmd = (struct wmi_pdev_set_bios_geo_table_cmd *)skb->data;
2871  	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD,
2872  						 sizeof(*cmd));
2873  	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
2874  	cmd->geo_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);
2875  
2876  	buf_ptr = skb->data + sizeof(*cmd);
2877  	tlv = (struct wmi_tlv *)buf_ptr;
2878  	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, sar_geo_len_aligned);
2879  	buf_ptr += TLV_HDR_SIZE;
2880  	memcpy(buf_ptr, pgeo_value, ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);
2881  
2882  	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
2883  				  skb,
2884  				  WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID);
2885  	if (ret) {
2886  		ath12k_warn(ab,
2887  			    "failed to send WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID %d\n",
2888  			    ret);
2889  		dev_kfree_skb(skb);
2890  	}
2891  
2892  	return ret;
2893  }
2894  
2895  int ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
2896  			  u32 tid, u32 initiator, u32 reason)
2897  {
2898  	struct ath12k_wmi_pdev *wmi = ar->wmi;
2899  	struct wmi_delba_send_cmd *cmd;
2900  	struct sk_buff *skb;
2901  	int ret;
2902  
2903  	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2904  	if (!skb)
2905  		return -ENOMEM;
2906  
2907  	cmd = (struct wmi_delba_send_cmd *)skb->data;
2908  	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DELBA_SEND_CMD,
2909  						 sizeof(*cmd));
2910  	cmd->vdev_id = cpu_to_le32(vdev_id);
2911  	ether_addr_copy(cmd->peer_macaddr.addr, mac);
2912  	cmd->tid = cpu_to_le32(tid);
2913  	cmd->initiator = cpu_to_le32(initiator);
2914  	cmd->reasoncode = cpu_to_le32(reason);
2915  
2916  	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2917  		   "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
2918  		   vdev_id, mac, tid, initiator, reason);
2919  
2920  	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID);
2921  
2922  	if (ret) {
2923  		ath12k_warn(ar->ab,
2924  			    "failed to send WMI_DELBA_SEND_CMDID cmd\n");
2925  		dev_kfree_skb(skb);
2926  	}
2927  
2928  	return ret;
2929  }
2930  
2931  int ath12k_wmi_addba_set_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac,
2932  			      u32 tid, u32 status)
2933  {
2934  	struct ath12k_wmi_pdev *wmi = ar->wmi;
2935  	struct wmi_addba_setresponse_cmd *cmd;
2936  	struct sk_buff *skb;
2937  	int ret;
2938  
2939  	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2940  	if (!skb)
2941  		return -ENOMEM;
2942  
2943  	cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
2944  	cmd->tlv_header =
2945  		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SETRESPONSE_CMD,
2946  				       sizeof(*cmd));
2947  	cmd->vdev_id = cpu_to_le32(vdev_id);
2948  	ether_addr_copy(cmd->peer_macaddr.addr, mac);
2949  	cmd->tid = cpu_to_le32(tid);
2950  	cmd->statuscode = cpu_to_le32(status);
2951  
2952  	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2953  		   "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
2954  		   vdev_id, mac, tid, status);
2955  
2956  	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID);
2957  
2958  	if (ret) {
2959  		ath12k_warn(ar->ab,
2960  			    "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
2961  		dev_kfree_skb(skb);
2962  	}
2963  
2964  	return ret;
2965  }
2966  
int ath12k_wmi_addba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 buf_size)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_addba_send_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_addba_send_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SEND_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, mac);
	cmd->tid = cpu_to_le32(tid);
	cmd->buffersize = cpu_to_le32(buf_size);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
		   vdev_id, mac, tid, buf_size);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID);

	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_ADDBA_SEND_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_addba_clear_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_addba_clear_resp_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
	cmd->tlv_header =
		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_CLEAR_RESP_CMD,
				       sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, mac);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
		   vdev_id, mac);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID);

	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

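/* The init-country command carries a union: exactly one of an ISO
 * alpha2 string, a numeric country code, or a regdomain ID is sent,
 * selected by arg->flags below.
 */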
int ath12k_wmi_send_init_country_cmd(struct ath12k *ar,
				     struct ath12k_wmi_init_country_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_init_country_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_init_country_cmd *)skb->data;
	cmd->tlv_header =
		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_INIT_COUNTRY_CMD,
				       sizeof(*cmd));

	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);

	switch (arg->flags) {
	case ALPHA_IS_SET:
		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_ALPHA);
		memcpy(&cmd->cc_info.alpha2, arg->cc_info.alpha2, 3);
		break;
	case CC_IS_SET:
		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE);
		cmd->cc_info.country_code =
			cpu_to_le32(arg->cc_info.country_code);
		break;
	case REGDMN_IS_SET:
		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_REGDOMAIN);
		cmd->cc_info.regdom_id = cpu_to_le32(arg->cc_info.regdom_id);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_SET_INIT_COUNTRY_CMDID);

out:
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_SET_INIT_COUNTRY_CMDID: %d\n",
			    ret);
		dev_kfree_skb(skb);
	}

	return ret;
}

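/* TWT (Target Wake Time) enable: all thresholds and timers come from
 * the ATH12K_TWT_DEF_* defaults rather than from the caller, so this
 * is effectively a per-pdev on switch with fixed tuning.
 */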
int
ath12k_wmi_send_twt_enable_cmd(struct ath12k *ar, u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_twt_enable_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_twt_enable_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_ENABLE_CMD,
						 len);
	cmd->pdev_id = cpu_to_le32(pdev_id);
	cmd->sta_cong_timer_ms = cpu_to_le32(ATH12K_TWT_DEF_STA_CONG_TIMER_MS);
	cmd->default_slot_size = cpu_to_le32(ATH12K_TWT_DEF_DEFAULT_SLOT_SIZE);
	cmd->congestion_thresh_setup =
		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_SETUP);
	cmd->congestion_thresh_teardown =
		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_TEARDOWN);
	cmd->congestion_thresh_critical =
		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_CRITICAL);
	cmd->interference_thresh_teardown =
		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN);
	cmd->interference_thresh_setup =
		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_SETUP);
	cmd->min_no_sta_setup = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_SETUP);
	cmd->min_no_sta_teardown = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_TEARDOWN);
	cmd->no_of_bcast_mcast_slots =
		cpu_to_le32(ATH12K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS);
	cmd->min_no_twt_slots = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_TWT_SLOTS);
	cmd->max_no_sta_twt = cpu_to_le32(ATH12K_TWT_DEF_MAX_NO_STA_TWT);
	cmd->mode_check_interval = cpu_to_le32(ATH12K_TWT_DEF_MODE_CHECK_INTERVAL);
	cmd->add_sta_slot_interval = cpu_to_le32(ATH12K_TWT_DEF_ADD_STA_SLOT_INTERVAL);
	cmd->remove_sta_slot_interval =
		cpu_to_le32(ATH12K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL);
	/* TODO add MBSSID support */
	cmd->mbss_support = 0;

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_TWT_ENABLE_CMDID);
	if (ret) {
		ath12k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID\n");
		dev_kfree_skb(skb);
	}
	return ret;
}

int
ath12k_wmi_send_twt_disable_cmd(struct ath12k *ar, u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_twt_disable_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_twt_disable_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_DISABLE_CMD,
						 len);
	cmd->pdev_id = cpu_to_le32(pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_TWT_DISABLE_CMDID);
	if (ret) {
		ath12k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID\n");
		dev_kfree_skb(skb);
	}
	return ret;
}

int
ath12k_wmi_send_obss_spr_cmd(struct ath12k *ar, u32 vdev_id,
			     struct ieee80211_he_obss_pd *he_obss_pd)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_obss_spatial_reuse_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD,
						 len);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->enable = cpu_to_le32(he_obss_pd->enable);
	cmd->obss_min = a_cpu_to_sle32(he_obss_pd->min_offset);
	cmd->obss_max = a_cpu_to_sle32(he_obss_pd->max_offset);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID);
	if (ret) {
		ath12k_warn(ab,
			    "Failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID\n");
		dev_kfree_skb(skb);
	}
	return ret;
}

int ath12k_wmi_obss_color_cfg_cmd(struct ath12k *ar, u32 vdev_id,
				  u8 bss_color, u32 period,
				  bool enable)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_obss_color_collision_cfg_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG,
						 len);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->evt_type = enable ? cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION) :
		cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION_DISABLE);
	cmd->current_bss_color = cpu_to_le32(bss_color);
	cmd->detection_period_ms = cpu_to_le32(period);
	cmd->scan_period_ms = cpu_to_le32(ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS);
	cmd->free_slot_expiry_time_ms = 0;
	cmd->flags = 0;

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi_send_obss_color_collision_cfg id %d type %d bss_color %d detect_period %d scan_period %d\n",
		   vdev_id, le32_to_cpu(cmd->evt_type), bss_color, period,
		   ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID);
	if (ret) {
		ath12k_warn(ab, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID\n");
		dev_kfree_skb(skb);
	}
	return ret;
}

int ath12k_wmi_send_bss_color_change_enable_cmd(struct ath12k *ar, u32 vdev_id,
						bool enable)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_bss_color_change_enable_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BSS_COLOR_CHANGE_ENABLE,
						 len);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->enable = enable ? cpu_to_le32(1) : 0;

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi_send_bss_color_change_enable id %d enable %d\n",
		   vdev_id, enable);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
	if (ret) {
		ath12k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID\n");
		dev_kfree_skb(skb);
	}
	return ret;
}

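/* Template commands (FILS discovery, probe response) append the frame
 * after the fixed parameters as a WMI_TAG_ARRAY_BYTE TLV. The TLV
 * length is padded up to a multiple of 4 while buf_len records the
 * real frame length, so firmware can drop the padding:
 *
 *   | fixed params | TLV hdr | frame bytes ... pad to 4n |
 */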
int ath12k_wmi_fils_discovery_tmpl(struct ath12k *ar, u32 vdev_id,
				   struct sk_buff *tmpl)
{
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	int ret, len;
	size_t aligned_len;
	struct wmi_fils_discovery_tmpl_cmd *cmd;

	aligned_len = roundup(tmpl->len, 4);
	len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %i set FILS discovery template\n", vdev_id);

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FILS_DISCOVERY_TMPL_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->buf_len = cpu_to_le32(tmpl->len);
	ptr = skb->data + sizeof(*cmd);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
	memcpy(tlv->value, tmpl->data, tmpl->len);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "WMI vdev %i failed to send FILS discovery template command\n",
			    vdev_id);
		dev_kfree_skb(skb);
	}
	return ret;
}

int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
			       struct sk_buff *tmpl)
{
	struct wmi_probe_tmpl_cmd *cmd;
	struct ath12k_wmi_bcn_prb_info_params *probe_info;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	int ret, len;
	size_t aligned_len = roundup(tmpl->len, 4);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %i set probe response template\n", vdev_id);

	len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_probe_tmpl_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PRB_TMPL_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->buf_len = cpu_to_le32(tmpl->len);

	ptr = skb->data + sizeof(*cmd);

	probe_info = ptr;
	len = sizeof(*probe_info);
	probe_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
							len);
	probe_info->caps = 0;
	probe_info->erp = 0;

	ptr += sizeof(*probe_info);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
	memcpy(tlv->value, tmpl->data, tmpl->len);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "WMI vdev %i failed to send probe response template command\n",
			    vdev_id);
		dev_kfree_skb(skb);
	}
	return ret;
}

int ath12k_wmi_fils_discovery(struct ath12k *ar, u32 vdev_id, u32 interval,
			      bool unsol_bcast_probe_resp_enabled)
{
	struct sk_buff *skb;
	int ret, len;
	struct wmi_fils_discovery_cmd *cmd;

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %i set %s interval to %u TU\n",
		   vdev_id, unsol_bcast_probe_resp_enabled ?
		   "unsolicited broadcast probe response" : "FILS discovery",
		   interval);

	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_fils_discovery_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ENABLE_FILS_CMD,
						 len);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->interval = cpu_to_le32(interval);
	cmd->config = cpu_to_le32(unsol_bcast_probe_resp_enabled);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "WMI vdev %i failed to send FILS discovery enable/disable command\n",
			    vdev_id);
		dev_kfree_skb(skb);
	}
	return ret;
}

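/* Map each radio (pdev) to the frequency range it serves, derived
 * from the HAL register capabilities reported by firmware for the
 * bands that pdev supports.
 */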
static void
ath12k_fill_band_to_mac_param(struct ath12k_base *soc,
			      struct ath12k_wmi_pdev_band_arg *arg)
{
	u8 i;
	struct ath12k_wmi_hal_reg_capabilities_ext_arg *hal_reg_cap;
	struct ath12k_pdev *pdev;

	for (i = 0; i < soc->num_radios; i++) {
		pdev = &soc->pdevs[i];
		hal_reg_cap = &soc->hal_reg_cap[i];
		arg[i].pdev_id = pdev->pdev_id;

		switch (pdev->cap.supported_bands) {
		case WMI_HOST_WLAN_2G_5G_CAP:
			arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
			arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
			break;
		case WMI_HOST_WLAN_2G_CAP:
			arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
			arg[i].end_freq = hal_reg_cap->high_2ghz_chan;
			break;
		case WMI_HOST_WLAN_5G_CAP:
			arg[i].start_freq = hal_reg_cap->low_5ghz_chan;
			arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
			break;
		default:
			break;
		}
	}
}

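/* Convert the CPU-endian resource config into the little-endian wire
 * format expected by firmware, field by field.
 */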
static void
ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cfg,
				struct ath12k_wmi_resource_config_arg *tg_cfg)
{
	wmi_cfg->num_vdevs = cpu_to_le32(tg_cfg->num_vdevs);
	wmi_cfg->num_peers = cpu_to_le32(tg_cfg->num_peers);
	wmi_cfg->num_offload_peers = cpu_to_le32(tg_cfg->num_offload_peers);
	wmi_cfg->num_offload_reorder_buffs =
		cpu_to_le32(tg_cfg->num_offload_reorder_buffs);
	wmi_cfg->num_peer_keys = cpu_to_le32(tg_cfg->num_peer_keys);
	wmi_cfg->num_tids = cpu_to_le32(tg_cfg->num_tids);
	wmi_cfg->ast_skid_limit = cpu_to_le32(tg_cfg->ast_skid_limit);
	wmi_cfg->tx_chain_mask = cpu_to_le32(tg_cfg->tx_chain_mask);
	wmi_cfg->rx_chain_mask = cpu_to_le32(tg_cfg->rx_chain_mask);
	wmi_cfg->rx_timeout_pri[0] = cpu_to_le32(tg_cfg->rx_timeout_pri[0]);
	wmi_cfg->rx_timeout_pri[1] = cpu_to_le32(tg_cfg->rx_timeout_pri[1]);
	wmi_cfg->rx_timeout_pri[2] = cpu_to_le32(tg_cfg->rx_timeout_pri[2]);
	wmi_cfg->rx_timeout_pri[3] = cpu_to_le32(tg_cfg->rx_timeout_pri[3]);
	wmi_cfg->rx_decap_mode = cpu_to_le32(tg_cfg->rx_decap_mode);
	wmi_cfg->scan_max_pending_req = cpu_to_le32(tg_cfg->scan_max_pending_req);
	wmi_cfg->bmiss_offload_max_vdev = cpu_to_le32(tg_cfg->bmiss_offload_max_vdev);
	wmi_cfg->roam_offload_max_vdev = cpu_to_le32(tg_cfg->roam_offload_max_vdev);
	wmi_cfg->roam_offload_max_ap_profiles =
		cpu_to_le32(tg_cfg->roam_offload_max_ap_profiles);
	wmi_cfg->num_mcast_groups = cpu_to_le32(tg_cfg->num_mcast_groups);
	wmi_cfg->num_mcast_table_elems = cpu_to_le32(tg_cfg->num_mcast_table_elems);
	wmi_cfg->mcast2ucast_mode = cpu_to_le32(tg_cfg->mcast2ucast_mode);
	wmi_cfg->tx_dbg_log_size = cpu_to_le32(tg_cfg->tx_dbg_log_size);
	wmi_cfg->num_wds_entries = cpu_to_le32(tg_cfg->num_wds_entries);
	wmi_cfg->dma_burst_size = cpu_to_le32(tg_cfg->dma_burst_size);
	wmi_cfg->mac_aggr_delim = cpu_to_le32(tg_cfg->mac_aggr_delim);
	wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
		cpu_to_le32(tg_cfg->rx_skip_defrag_timeout_dup_detection_check);
	wmi_cfg->vow_config = cpu_to_le32(tg_cfg->vow_config);
	wmi_cfg->gtk_offload_max_vdev = cpu_to_le32(tg_cfg->gtk_offload_max_vdev);
	wmi_cfg->num_msdu_desc = cpu_to_le32(tg_cfg->num_msdu_desc);
	wmi_cfg->max_frag_entries = cpu_to_le32(tg_cfg->max_frag_entries);
	wmi_cfg->num_tdls_vdevs = cpu_to_le32(tg_cfg->num_tdls_vdevs);
	wmi_cfg->num_tdls_conn_table_entries =
		cpu_to_le32(tg_cfg->num_tdls_conn_table_entries);
	wmi_cfg->beacon_tx_offload_max_vdev =
		cpu_to_le32(tg_cfg->beacon_tx_offload_max_vdev);
	wmi_cfg->num_multicast_filter_entries =
		cpu_to_le32(tg_cfg->num_multicast_filter_entries);
	wmi_cfg->num_wow_filters = cpu_to_le32(tg_cfg->num_wow_filters);
	wmi_cfg->num_keep_alive_pattern = cpu_to_le32(tg_cfg->num_keep_alive_pattern);
	wmi_cfg->keep_alive_pattern_size = cpu_to_le32(tg_cfg->keep_alive_pattern_size);
	wmi_cfg->max_tdls_concurrent_sleep_sta =
		cpu_to_le32(tg_cfg->max_tdls_concurrent_sleep_sta);
	wmi_cfg->max_tdls_concurrent_buffer_sta =
		cpu_to_le32(tg_cfg->max_tdls_concurrent_buffer_sta);
	wmi_cfg->wmi_send_separate = cpu_to_le32(tg_cfg->wmi_send_separate);
	wmi_cfg->num_ocb_vdevs = cpu_to_le32(tg_cfg->num_ocb_vdevs);
	wmi_cfg->num_ocb_channels = cpu_to_le32(tg_cfg->num_ocb_channels);
	wmi_cfg->num_ocb_schedules = cpu_to_le32(tg_cfg->num_ocb_schedules);
	wmi_cfg->bpf_instruction_size = cpu_to_le32(tg_cfg->bpf_instruction_size);
	wmi_cfg->max_bssid_rx_filters = cpu_to_le32(tg_cfg->max_bssid_rx_filters);
	wmi_cfg->use_pdev_id = cpu_to_le32(tg_cfg->use_pdev_id);
	wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config |
				     WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64);
	wmi_cfg->peer_map_unmap_version = cpu_to_le32(tg_cfg->peer_map_unmap_version);
	wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
	wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
	wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
	wmi_cfg->flags2 = le32_encode_bits(tg_cfg->peer_metadata_ver,
					   WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION);
	wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported <<
				WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
	wmi_cfg->ema_max_vap_cnt = cpu_to_le32(tg_cfg->ema_max_vap_cnt);
	wmi_cfg->ema_max_profile_period = cpu_to_le32(tg_cfg->ema_max_profile_period);
	wmi_cfg->flags2 |= cpu_to_le32(WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET);
}

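/* WMI_INIT carries, in order: the fixed init params, the resource
 * config TLV, an array-of-structs TLV with the host memory chunks
 * (length zero when no chunks were requested), and optionally a
 * PDEV_SET_HW_MODE sub-command with its band-to-mac array.
 */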
static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi,
				struct ath12k_wmi_init_cmd_arg *arg)
{
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct sk_buff *skb;
	struct wmi_init_cmd *cmd;
	struct ath12k_wmi_resource_config_params *cfg;
	struct ath12k_wmi_pdev_set_hw_mode_cmd *hw_mode;
	struct ath12k_wmi_pdev_band_to_mac_params *band_to_mac;
	struct ath12k_wmi_host_mem_chunk_params *host_mem_chunks;
	struct wmi_tlv *tlv;
	size_t len;
	void *ptr;
	u32 hw_mode_len = 0;
	u16 idx;
	int ret;

	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX)
		hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
			      (arg->num_band_to_mac * sizeof(*band_to_mac));

	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
	      (arg->num_mem_chunks ? (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_init_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_INIT_CMD,
						 sizeof(*cmd));

	ptr = skb->data + sizeof(*cmd);
	cfg = ptr;

	ath12k_wmi_copy_resource_config(cfg, &arg->res_cfg);

	cfg->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_RESOURCE_CONFIG,
						 sizeof(*cfg));

	ptr += sizeof(*cfg);
	host_mem_chunks = ptr + TLV_HDR_SIZE;
	len = sizeof(struct ath12k_wmi_host_mem_chunk_params);

	for (idx = 0; idx < arg->num_mem_chunks; ++idx) {
		host_mem_chunks[idx].tlv_header =
			ath12k_wmi_tlv_hdr(WMI_TAG_WLAN_HOST_MEMORY_CHUNK,
					   len);

		host_mem_chunks[idx].ptr = cpu_to_le32(arg->mem_chunks[idx].paddr);
		host_mem_chunks[idx].size = cpu_to_le32(arg->mem_chunks[idx].len);
		host_mem_chunks[idx].req_id = cpu_to_le32(arg->mem_chunks[idx].req_id);

		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "WMI host mem chunk req_id %d paddr 0x%llx len %d\n",
			   arg->mem_chunks[idx].req_id,
			   (u64)arg->mem_chunks[idx].paddr,
			   arg->mem_chunks[idx].len);
	}
	cmd->num_host_mem_chunks = cpu_to_le32(arg->num_mem_chunks);
	len = sizeof(struct ath12k_wmi_host_mem_chunk_params) * arg->num_mem_chunks;

	/* Host memory chunks array TLV header; len is zero when no
	 * chunks were requested.
	 */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
	ptr += TLV_HDR_SIZE + len;

	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
		hw_mode = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)ptr;
		hw_mode->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
							     sizeof(*hw_mode));

		hw_mode->hw_mode_index = cpu_to_le32(arg->hw_mode_id);
		hw_mode->num_band_to_mac = cpu_to_le32(arg->num_band_to_mac);

		ptr += sizeof(*hw_mode);

		len = arg->num_band_to_mac * sizeof(*band_to_mac);
		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);

		ptr += TLV_HDR_SIZE;
		len = sizeof(*band_to_mac);

		for (idx = 0; idx < arg->num_band_to_mac; idx++) {
			band_to_mac = (void *)ptr;

			band_to_mac->tlv_header =
				ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BAND_TO_MAC,
						       len);
			band_to_mac->pdev_id = cpu_to_le32(arg->band_to_mac[idx].pdev_id);
			band_to_mac->start_freq =
				cpu_to_le32(arg->band_to_mac[idx].start_freq);
			band_to_mac->end_freq =
				cpu_to_le32(arg->band_to_mac[idx].end_freq);
			ptr += sizeof(*band_to_mac);
		}
	}

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID);
	if (ret) {
		ath12k_warn(ab, "failed to send WMI_INIT_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_pdev_lro_cfg(struct ath12k *ar,
			    int pdev_id)
{
	struct ath12k_wmi_pdev_lro_config_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath12k_wmi_pdev_lro_config_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_LRO_INFO_CMD,
						 sizeof(*cmd));

	get_random_bytes(cmd->th_4, sizeof(cmd->th_4));
	get_random_bytes(cmd->th_6, sizeof(cmd->th_6));

	cmd->pdev_id = cpu_to_le32(pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI lro cfg cmd pdev_id 0x%x\n", pdev_id);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send lro cfg req wmi cmd\n");
		goto err;
	}

	return 0;
err:
	dev_kfree_skb(skb);
	return ret;
}

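/* Firmware readiness is signalled asynchronously by WMI events; these
 * helpers block the boot path on completions that the corresponding
 * event handlers fire.
 */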
int ath12k_wmi_wait_for_service_ready(struct ath12k_base *ab)
{
	unsigned long time_left;

	time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
						WMI_SERVICE_READY_TIMEOUT_HZ);
	if (!time_left)
		return -ETIMEDOUT;

	return 0;
}

int ath12k_wmi_wait_for_unified_ready(struct ath12k_base *ab)
{
	unsigned long time_left;

	time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
						WMI_SERVICE_READY_TIMEOUT_HZ);
	if (!time_left)
		return -ETIMEDOUT;

	return 0;
}

int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
			   enum wmi_host_hw_mode_config_type mode)
{
	struct ath12k_wmi_pdev_set_hw_mode_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
	int len;
	int ret;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
						 sizeof(*cmd));

	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
	cmd->hw_mode_index = cpu_to_le32(mode);

	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID);
	if (ret) {
		ath12k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_cmd_init(struct ath12k_base *ab)
{
	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
	struct ath12k_wmi_init_cmd_arg arg = {};

	if (test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
		     ab->wmi_ab.svc_map))
		arg.res_cfg.is_reg_cc_ext_event_supported = true;

	ab->hw_params->wmi_init(ab, &arg.res_cfg);
	ab->wow.wmi_conf_rx_decap_mode = arg.res_cfg.rx_decap_mode;

	arg.num_mem_chunks = wmi_ab->num_mem_chunks;
	arg.hw_mode_id = wmi_ab->preferred_hw_mode;
	arg.mem_chunks = wmi_ab->mem_chunks;

	if (ab->hw_params->single_pdev_only)
		arg.hw_mode_id = WMI_HOST_HW_MODE_MAX;

	arg.num_band_to_mac = ab->num_radios;
	ath12k_fill_band_to_mac_param(ab, arg.band_to_mac);

	ab->dp.peer_metadata_ver = arg.res_cfg.peer_metadata_ver;

	return ath12k_init_cmd_send(&wmi_ab->wmi[0], &arg);
}

int ath12k_wmi_vdev_spectral_conf(struct ath12k *ar,
				  struct ath12k_wmi_vdev_spectral_conf_arg *arg)
{
	struct ath12k_wmi_vdev_spectral_conf_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath12k_wmi_vdev_spectral_conf_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->scan_count = cpu_to_le32(arg->scan_count);
	cmd->scan_period = cpu_to_le32(arg->scan_period);
	cmd->scan_priority = cpu_to_le32(arg->scan_priority);
	cmd->scan_fft_size = cpu_to_le32(arg->scan_fft_size);
	cmd->scan_gc_ena = cpu_to_le32(arg->scan_gc_ena);
	cmd->scan_restart_ena = cpu_to_le32(arg->scan_restart_ena);
	cmd->scan_noise_floor_ref = cpu_to_le32(arg->scan_noise_floor_ref);
	cmd->scan_init_delay = cpu_to_le32(arg->scan_init_delay);
	cmd->scan_nb_tone_thr = cpu_to_le32(arg->scan_nb_tone_thr);
	cmd->scan_str_bin_thr = cpu_to_le32(arg->scan_str_bin_thr);
	cmd->scan_wb_rpt_mode = cpu_to_le32(arg->scan_wb_rpt_mode);
	cmd->scan_rssi_rpt_mode = cpu_to_le32(arg->scan_rssi_rpt_mode);
	cmd->scan_rssi_thr = cpu_to_le32(arg->scan_rssi_thr);
	cmd->scan_pwr_format = cpu_to_le32(arg->scan_pwr_format);
	cmd->scan_rpt_mode = cpu_to_le32(arg->scan_rpt_mode);
	cmd->scan_bin_scale = cpu_to_le32(arg->scan_bin_scale);
	cmd->scan_dbm_adj = cpu_to_le32(arg->scan_dbm_adj);
	cmd->scan_chn_mask = cpu_to_le32(arg->scan_chn_mask);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI spectral scan config cmd vdev_id 0x%x\n",
		   arg->vdev_id);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
				  WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send spectral scan config wmi cmd\n");
		goto err;
	}

	return 0;
err:
	dev_kfree_skb(skb);
	return ret;
}

int ath12k_wmi_vdev_spectral_enable(struct ath12k *ar, u32 vdev_id,
				    u32 trigger, u32 enable)
{
	struct ath12k_wmi_vdev_spectral_enable_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath12k_wmi_vdev_spectral_enable_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->trigger_cmd = cpu_to_le32(trigger);
	cmd->enable_cmd = cpu_to_le32(enable);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI spectral enable cmd vdev id 0x%x\n",
		   vdev_id);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
				  WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send spectral enable wmi cmd\n");
		goto err;
	}

	return 0;
err:
	dev_kfree_skb(skb);
	return ret;
}

int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar,
				 struct ath12k_wmi_pdev_dma_ring_cfg_arg *arg)
{
	struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DMA_RING_CFG_REQ,
						 sizeof(*cmd));

	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
	cmd->module_id = cpu_to_le32(arg->module_id);
	cmd->base_paddr_lo = cpu_to_le32(arg->base_paddr_lo);
	cmd->base_paddr_hi = cpu_to_le32(arg->base_paddr_hi);
	cmd->head_idx_paddr_lo = cpu_to_le32(arg->head_idx_paddr_lo);
	cmd->head_idx_paddr_hi = cpu_to_le32(arg->head_idx_paddr_hi);
	cmd->tail_idx_paddr_lo = cpu_to_le32(arg->tail_idx_paddr_lo);
	cmd->tail_idx_paddr_hi = cpu_to_le32(arg->tail_idx_paddr_hi);
	cmd->num_elems = cpu_to_le32(arg->num_elems);
	cmd->buf_size = cpu_to_le32(arg->buf_size);
	cmd->num_resp_per_event = cpu_to_le32(arg->num_resp_per_event);
	cmd->event_timeout_ms = cpu_to_le32(arg->event_timeout_ms);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI DMA ring cfg req cmd pdev_id 0x%x\n",
		   arg->pdev_id);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
				  WMI_PDEV_DMA_RING_CFG_REQ_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send dma ring cfg req wmi cmd\n");
		goto err;
	}

	return 0;
err:
	dev_kfree_skb(skb);
	return ret;
}

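/* The DMA buffer release event is parsed with nested
 * ath12k_wmi_tlv_iter() passes: the outer pass dispatches on tag,
 * while the inner passes only count entries against the limits
 * announced in the fixed params, guarding against malformed or
 * oversized arrays from firmware.
 */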
static int ath12k_wmi_dma_buf_entry_parse(struct ath12k_base *soc,
					  u16 tag, u16 len,
					  const void *ptr, void *data)
{
	struct ath12k_wmi_dma_buf_release_arg *arg = data;

	if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY)
		return -EPROTO;

	if (arg->num_buf_entry >= le32_to_cpu(arg->fixed.num_buf_release_entry))
		return -ENOBUFS;

	arg->num_buf_entry++;
	return 0;
}

static int ath12k_wmi_dma_buf_meta_parse(struct ath12k_base *soc,
					 u16 tag, u16 len,
					 const void *ptr, void *data)
{
	struct ath12k_wmi_dma_buf_release_arg *arg = data;

	if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA)
		return -EPROTO;

	if (arg->num_meta >= le32_to_cpu(arg->fixed.num_meta_data_entry))
		return -ENOBUFS;

	arg->num_meta++;

	return 0;
}

static int ath12k_wmi_dma_buf_parse(struct ath12k_base *ab,
				    u16 tag, u16 len,
				    const void *ptr, void *data)
{
	struct ath12k_wmi_dma_buf_release_arg *arg = data;
	const struct ath12k_wmi_dma_buf_release_fixed_params *fixed;
	u32 pdev_id;
	int ret;

	switch (tag) {
	case WMI_TAG_DMA_BUF_RELEASE:
		fixed = ptr;
		arg->fixed = *fixed;
		pdev_id = DP_HW2SW_MACID(le32_to_cpu(fixed->pdev_id));
		arg->fixed.pdev_id = cpu_to_le32(pdev_id);
		break;
	case WMI_TAG_ARRAY_STRUCT:
		if (!arg->buf_entry_done) {
			arg->num_buf_entry = 0;
			arg->buf_entry = ptr;

			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
						  ath12k_wmi_dma_buf_entry_parse,
						  arg);
			if (ret) {
				ath12k_warn(ab, "failed to parse dma buf entry tlv %d\n",
					    ret);
				return ret;
			}

			arg->buf_entry_done = true;
		} else if (!arg->meta_data_done) {
			arg->num_meta = 0;
			arg->meta_data = ptr;

			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
						  ath12k_wmi_dma_buf_meta_parse,
						  arg);
			if (ret) {
				ath12k_warn(ab, "failed to parse dma buf meta tlv %d\n",
					    ret);
				return ret;
			}

			arg->meta_data_done = true;
		}
		break;
	default:
		break;
	}
	return 0;
}

static void ath12k_wmi_pdev_dma_ring_buf_release_event(struct ath12k_base *ab,
						       struct sk_buff *skb)
{
	struct ath12k_wmi_dma_buf_release_arg arg = {};
	struct ath12k_dbring_buf_release_event param;
	int ret;

	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_dma_buf_parse,
				  &arg);
	if (ret) {
		ath12k_warn(ab, "failed to parse dma buf release tlv %d\n", ret);
		return;
	}

	param.fixed = arg.fixed;
	param.buf_entry = arg.buf_entry;
	param.num_buf_entry = arg.num_buf_entry;
	param.meta_data = arg.meta_data;
	param.num_meta = arg.num_meta;

	ret = ath12k_dbring_buffer_release_event(ab, &param);
	if (ret) {
		ath12k_warn(ab, "failed to handle dma buf release event %d\n", ret);
		return;
	}
}

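/* Firmware may advertise several hardware modes (e.g. single PHY vs
 * DBS). The parsers below walk the advertised modes, keep the one
 * ranked best by ath12k_hw_mode_pri_map, and accumulate tot_phy_id so
 * the later MAC/PHY capability TLVs can be bounds-checked.
 */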
static int ath12k_wmi_hw_mode_caps_parse(struct ath12k_base *soc,
					 u16 tag, u16 len,
					 const void *ptr, void *data)
{
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
	u32 phy_map = 0;

	if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
		return -EPROTO;

	if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->arg.num_hw_modes)
		return -ENOBUFS;

	hw_mode_cap = container_of(ptr, struct ath12k_wmi_hw_mode_cap_params,
				   hw_mode_id);
	svc_rdy_ext->n_hw_mode_caps++;

	phy_map = le32_to_cpu(hw_mode_cap->phy_id_map);
	svc_rdy_ext->tot_phy_id += fls(phy_map);

	return 0;
}

static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc,
				   u16 len, const void *ptr, void *data)
{
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
	enum wmi_host_hw_mode_config_type mode, pref;
	u32 i;
	int ret;

	svc_rdy_ext->n_hw_mode_caps = 0;
	svc_rdy_ext->hw_mode_caps = ptr;

	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
				  ath12k_wmi_hw_mode_caps_parse,
				  svc_rdy_ext);
	if (ret) {
		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
		return ret;
	}

	for (i = 0; i < svc_rdy_ext->n_hw_mode_caps; i++) {
		hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
		mode = le32_to_cpu(hw_mode_caps->hw_mode_id);

		if (mode >= WMI_HOST_HW_MODE_MAX)
			continue;

		pref = soc->wmi_ab.preferred_hw_mode;

		if (ath12k_hw_mode_pri_map[mode] < ath12k_hw_mode_pri_map[pref]) {
			svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
			soc->wmi_ab.preferred_hw_mode = mode;
		}
	}

	ath12k_dbg(soc, ATH12K_DBG_WMI, "preferred_hw_mode:%d\n",
		   soc->wmi_ab.preferred_hw_mode);
	if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
		return -EINVAL;

	return 0;
}

static int ath12k_wmi_mac_phy_caps_parse(struct ath12k_base *soc,
					 u16 tag, u16 len,
					 const void *ptr, void *data)
{
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;

	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
		return -EPROTO;

	if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
		return -ENOBUFS;

	len = min_t(u16, len, sizeof(struct ath12k_wmi_mac_phy_caps_params));
	if (!svc_rdy_ext->n_mac_phy_caps) {
		svc_rdy_ext->mac_phy_caps = kzalloc(svc_rdy_ext->tot_phy_id * len,
						    GFP_ATOMIC);
		if (!svc_rdy_ext->mac_phy_caps)
			return -ENOMEM;
	}

	memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
	svc_rdy_ext->n_mac_phy_caps++;
	return 0;
}

static int ath12k_wmi_ext_hal_reg_caps_parse(struct ath12k_base *soc,
					     u16 tag, u16 len,
					     const void *ptr, void *data)
{
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;

	if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
		return -EPROTO;

	if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->arg.num_phy)
		return -ENOBUFS;

	svc_rdy_ext->n_ext_hal_reg_caps++;
	return 0;
}

static int ath12k_wmi_ext_hal_reg_caps(struct ath12k_base *soc,
				       u16 len, const void *ptr, void *data)
{
	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	struct ath12k_wmi_hal_reg_capabilities_ext_arg reg_cap;
	int ret;
	u32 i;

	svc_rdy_ext->n_ext_hal_reg_caps = 0;
	svc_rdy_ext->ext_hal_reg_caps = ptr;
	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
				  ath12k_wmi_ext_hal_reg_caps_parse,
				  svc_rdy_ext);
	if (ret) {
		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
		return ret;
	}

	for (i = 0; i < svc_rdy_ext->arg.num_phy; i++) {
		ret = ath12k_pull_reg_cap_svc_rdy_ext(wmi_handle,
						      svc_rdy_ext->soc_hal_reg_caps,
						      svc_rdy_ext->ext_hal_reg_caps, i,
						      &reg_cap);
		if (ret) {
			ath12k_warn(soc, "failed to extract reg cap %d\n", i);
			return ret;
		}

		if (reg_cap.phy_id >= MAX_RADIOS) {
			ath12k_warn(soc, "unexpected phy id %u\n", reg_cap.phy_id);
			return -EINVAL;
		}

		soc->hal_reg_cap[reg_cap.phy_id] = reg_cap;
	}
	return 0;
}

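/* Walk the PHYs of the preferred hardware mode: each set bit in
 * phy_id_map corresponds to one PHY whose MAC/PHY capabilities are
 * pulled into a pdev slot. single_pdev_only targets collapse all
 * PHYs into pdev 0.
 */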
static int ath12k_wmi_ext_soc_hal_reg_caps_parse(struct ath12k_base *soc,
						 u16 len, const void *ptr,
						 void *data)
{
	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	u8 hw_mode_id = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.hw_mode_id);
	u32 phy_id_map;
	int pdev_index = 0;
	int ret;

	svc_rdy_ext->soc_hal_reg_caps = ptr;
	svc_rdy_ext->arg.num_phy = le32_to_cpu(svc_rdy_ext->soc_hal_reg_caps->num_phy);

	soc->num_radios = 0;
	phy_id_map = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.phy_id_map);
	soc->fw_pdev_count = 0;

	while (phy_id_map && soc->num_radios < MAX_RADIOS) {
		ret = ath12k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
							    svc_rdy_ext,
							    hw_mode_id, soc->num_radios,
							    &soc->pdevs[pdev_index]);
		if (ret) {
			ath12k_warn(soc, "failed to extract mac caps, idx :%d\n",
				    soc->num_radios);
			return ret;
		}

		soc->num_radios++;

		/* For single_pdev_only targets,
		 * save mac_phy capability in the same pdev
		 */
		if (soc->hw_params->single_pdev_only)
			pdev_index = 0;
		else
			pdev_index = soc->num_radios;

		/* TODO: mac_phy_cap prints */
		phy_id_map >>= 1;
	}

	if (soc->hw_params->single_pdev_only) {
		soc->num_radios = 1;
		soc->pdevs[0].pdev_id = 0;
	}

	return 0;
}

static int ath12k_wmi_dma_ring_caps_parse(struct ath12k_base *soc,
					  u16 tag, u16 len,
					  const void *ptr, void *data)
{
	struct ath12k_wmi_dma_ring_caps_parse *parse = data;

	if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
		return -EPROTO;

	parse->n_dma_ring_caps++;
	return 0;
}

static int ath12k_wmi_alloc_dbring_caps(struct ath12k_base *ab,
					u32 num_cap)
{
	size_t sz;
	void *ptr;

	sz = num_cap * sizeof(struct ath12k_dbring_cap);
	ptr = kzalloc(sz, GFP_ATOMIC);
	if (!ptr)
		return -ENOMEM;

	ab->db_caps = ptr;
	ab->num_db_cap = num_cap;

	return 0;
}

static void ath12k_wmi_free_dbring_caps(struct ath12k_base *ab)
{
	kfree(ab->db_caps);
	ab->db_caps = NULL;
	ab->num_db_cap = 0;
}

static int ath12k_wmi_dma_ring_caps(struct ath12k_base *ab,
				    u16 len, const void *ptr, void *data)
{
	struct ath12k_wmi_dma_ring_caps_parse *dma_caps_parse = data;
	struct ath12k_wmi_dma_ring_caps_params *dma_caps;
	struct ath12k_dbring_cap *dir_buff_caps;
	int ret;
	u32 i;

	dma_caps_parse->n_dma_ring_caps = 0;
	dma_caps = (struct ath12k_wmi_dma_ring_caps_params *)ptr;
	ret = ath12k_wmi_tlv_iter(ab, ptr, len,
				  ath12k_wmi_dma_ring_caps_parse,
				  dma_caps_parse);
	if (ret) {
		ath12k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret);
		return ret;
	}

	if (!dma_caps_parse->n_dma_ring_caps)
		return 0;

	if (ab->num_db_cap) {
		ath12k_warn(ab, "Already processed, so ignoring dma ring caps\n");
		return 0;
	}

	ret = ath12k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps);
	if (ret)
		return ret;

	dir_buff_caps = ab->db_caps;
	for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
		if (le32_to_cpu(dma_caps[i].module_id) >= WMI_DIRECT_BUF_MAX) {
			ath12k_warn(ab, "Invalid module id %d\n",
				    le32_to_cpu(dma_caps[i].module_id));
			ret = -EINVAL;
			goto free_dir_buff;
		}

		dir_buff_caps[i].id = le32_to_cpu(dma_caps[i].module_id);
		dir_buff_caps[i].pdev_id =
			DP_HW2SW_MACID(le32_to_cpu(dma_caps[i].pdev_id));
		dir_buff_caps[i].min_elem = le32_to_cpu(dma_caps[i].min_elem);
		dir_buff_caps[i].min_buf_sz = le32_to_cpu(dma_caps[i].min_buf_sz);
		dir_buff_caps[i].min_buf_align = le32_to_cpu(dma_caps[i].min_buf_align);
	}

	return 0;

free_dir_buff:
	ath12k_wmi_free_dbring_caps(ab);
	return ret;
}

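/* SERVICE_READY_EXT interleaves several WMI_TAG_ARRAY_STRUCT TLVs
 * whose meaning depends only on their position in the event, so the
 * parser tracks per-array done-flags and consumes the arrays strictly
 * in the order firmware emits them.
 */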
static int ath12k_wmi_svc_rdy_ext_parse(struct ath12k_base *ab,
					u16 tag, u16 len,
					const void *ptr, void *data)
{
	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	int ret;

	switch (tag) {
	case WMI_TAG_SERVICE_READY_EXT_EVENT:
		ret = ath12k_pull_svc_ready_ext(wmi_handle, ptr,
						&svc_rdy_ext->arg);
		if (ret) {
			ath12k_warn(ab, "unable to extract ext params\n");
			return ret;
		}
		break;

	case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
		svc_rdy_ext->hw_caps = ptr;
		svc_rdy_ext->arg.num_hw_modes =
			le32_to_cpu(svc_rdy_ext->hw_caps->num_hw_modes);
		break;

	case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
		ret = ath12k_wmi_ext_soc_hal_reg_caps_parse(ab, len, ptr,
							    svc_rdy_ext);
		if (ret)
			return ret;
		break;

	case WMI_TAG_ARRAY_STRUCT:
		if (!svc_rdy_ext->hw_mode_done) {
			ret = ath12k_wmi_hw_mode_caps(ab, len, ptr, svc_rdy_ext);
			if (ret)
				return ret;

			svc_rdy_ext->hw_mode_done = true;
		} else if (!svc_rdy_ext->mac_phy_done) {
			svc_rdy_ext->n_mac_phy_caps = 0;
			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
						  ath12k_wmi_mac_phy_caps_parse,
						  svc_rdy_ext);
			if (ret) {
				ath12k_warn(ab, "failed to parse tlv %d\n", ret);
				return ret;
			}

			svc_rdy_ext->mac_phy_done = true;
		} else if (!svc_rdy_ext->ext_hal_reg_done) {
			ret = ath12k_wmi_ext_hal_reg_caps(ab, len, ptr, svc_rdy_ext);
			if (ret)
				return ret;

			svc_rdy_ext->ext_hal_reg_done = true;
		} else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
			svc_rdy_ext->mac_phy_chainmask_combo_done = true;
		} else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
			svc_rdy_ext->mac_phy_chainmask_cap_done = true;
		} else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
			svc_rdy_ext->oem_dma_ring_cap_done = true;
		} else if (!svc_rdy_ext->dma_ring_cap_done) {
			ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
						       &svc_rdy_ext->dma_caps_parse);
			if (ret)
				return ret;

			svc_rdy_ext->dma_ring_cap_done = true;
		}
		break;

	default:
		break;
	}
	return 0;
}

static int ath12k_service_ready_ext_event(struct ath12k_base *ab,
					  struct sk_buff *skb)
{
	struct ath12k_wmi_svc_rdy_ext_parse svc_rdy_ext = { };
	int ret;

	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_svc_rdy_ext_parse,
				  &svc_rdy_ext);
	if (ret) {
		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
		goto err;
	}

	if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map))
		complete(&ab->wmi_ab.service_ready);

	kfree(svc_rdy_ext.mac_phy_caps);
	return 0;

err:
	ath12k_wmi_free_dbring_caps(ab);
	return ret;
}

static int ath12k_pull_svc_ready_ext2(struct ath12k_wmi_pdev *wmi_handle,
				      const void *ptr,
				      struct ath12k_wmi_svc_rdy_ext2_arg *arg)
{
	const struct wmi_service_ready_ext2_event *ev = ptr;

	if (!ev)
		return -EINVAL;

	arg->reg_db_version = le32_to_cpu(ev->reg_db_version);
	arg->hw_min_max_tx_power_2ghz = le32_to_cpu(ev->hw_min_max_tx_power_2ghz);
	arg->hw_min_max_tx_power_5ghz = le32_to_cpu(ev->hw_min_max_tx_power_5ghz);
	arg->chwidth_num_peer_caps = le32_to_cpu(ev->chwidth_num_peer_caps);
	arg->preamble_puncture_bw = le32_to_cpu(ev->preamble_puncture_bw);
	arg->max_user_per_ppdu_ofdma = le32_to_cpu(ev->max_user_per_ppdu_ofdma);
	arg->max_user_per_ppdu_mumimo = le32_to_cpu(ev->max_user_per_ppdu_mumimo);
	arg->target_cap_flags = le32_to_cpu(ev->target_cap_flags);
	return 0;
}

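/* EHT capability copy-out. For the 6 GHz band the 320 MHz bit may
 * already have been set from the extended MAC/PHY TLV, so it is saved
 * up front and OR-ed back after the PHY info words are overwritten.
 */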
static void ath12k_wmi_eht_caps_parse(struct ath12k_pdev *pdev, u32 band,
				      const __le32 cap_mac_info[],
				      const __le32 cap_phy_info[],
				      const __le32 supp_mcs[],
				      const struct ath12k_wmi_ppe_threshold_params *ppet,
				      __le32 cap_info_internal)
{
	struct ath12k_band_cap *cap_band = &pdev->cap.band[band];
	u32 support_320mhz;
	u8 i;

	if (band == NL80211_BAND_6GHZ)
		support_320mhz = cap_band->eht_cap_phy_info[0] &
					IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;

	for (i = 0; i < WMI_MAX_EHTCAP_MAC_SIZE; i++)
		cap_band->eht_cap_mac_info[i] = le32_to_cpu(cap_mac_info[i]);

	for (i = 0; i < WMI_MAX_EHTCAP_PHY_SIZE; i++)
		cap_band->eht_cap_phy_info[i] = le32_to_cpu(cap_phy_info[i]);

	if (band == NL80211_BAND_6GHZ)
		cap_band->eht_cap_phy_info[0] |= support_320mhz;

	cap_band->eht_mcs_20_only = le32_to_cpu(supp_mcs[0]);
	cap_band->eht_mcs_80 = le32_to_cpu(supp_mcs[1]);
	if (band != NL80211_BAND_2GHZ) {
		cap_band->eht_mcs_160 = le32_to_cpu(supp_mcs[2]);
		cap_band->eht_mcs_320 = le32_to_cpu(supp_mcs[3]);
	}

	cap_band->eht_ppet.numss_m1 = le32_to_cpu(ppet->numss_m1);
	cap_band->eht_ppet.ru_bit_mask = le32_to_cpu(ppet->ru_info);
	for (i = 0; i < WMI_MAX_NUM_SS; i++)
		cap_band->eht_ppet.ppet16_ppet8_ru3_ru0[i] =
			le32_to_cpu(ppet->ppet16_ppet8_ru3_ru0[i]);

	cap_band->eht_cap_info_internal = le32_to_cpu(cap_info_internal);
}

4439  static int
ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base * ab,const struct ath12k_wmi_caps_ext_params * caps,struct ath12k_pdev * pdev)4440  ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
4441  				      const struct ath12k_wmi_caps_ext_params *caps,
4442  				      struct ath12k_pdev *pdev)
4443  {
4444  	struct ath12k_band_cap *cap_band;
4445  	u32 bands, support_320mhz;
4446  	int i;
4447  
4448  	if (ab->hw_params->single_pdev_only) {
4449  		if (caps->hw_mode_id == WMI_HOST_HW_MODE_SINGLE) {
4450  			support_320mhz = le32_to_cpu(caps->eht_cap_phy_info_5ghz[0]) &
4451  				IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
4452  			cap_band = &pdev->cap.band[NL80211_BAND_6GHZ];
4453  			cap_band->eht_cap_phy_info[0] |= support_320mhz;
4454  			return 0;
4455  		}
4456  
4457  		for (i = 0; i < ab->fw_pdev_count; i++) {
4458  			struct ath12k_fw_pdev *fw_pdev = &ab->fw_pdev[i];
4459  
4460  			if (fw_pdev->pdev_id == ath12k_wmi_caps_ext_get_pdev_id(caps) &&
4461  			    fw_pdev->phy_id == le32_to_cpu(caps->phy_id)) {
4462  				bands = fw_pdev->supported_bands;
4463  				break;
4464  			}
4465  		}
4466  
4467  		if (i == ab->fw_pdev_count)
4468  			return -EINVAL;
4469  	} else {
4470  		bands = pdev->cap.supported_bands;
4471  	}
4472  
4473  	if (bands & WMI_HOST_WLAN_2G_CAP) {
4474  		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_2GHZ,
4475  					  caps->eht_cap_mac_info_2ghz,
4476  					  caps->eht_cap_phy_info_2ghz,
4477  					  caps->eht_supp_mcs_ext_2ghz,
4478  					  &caps->eht_ppet_2ghz,
4479  					  caps->eht_cap_info_internal);
4480  	}
4481  
4482  	if (bands & WMI_HOST_WLAN_5G_CAP) {
4483  		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_5GHZ,
4484  					  caps->eht_cap_mac_info_5ghz,
4485  					  caps->eht_cap_phy_info_5ghz,
4486  					  caps->eht_supp_mcs_ext_5ghz,
4487  					  &caps->eht_ppet_5ghz,
4488  					  caps->eht_cap_info_internal);
4489  
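      		/* Firmware reports the 6 GHz EHT caps through the 5 GHz
      		 * cap fields, so the same fields are reused for the
      		 * 6 GHz band below.
      		 */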
4490  		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_6GHZ,
4491  					  caps->eht_cap_mac_info_5ghz,
4492  					  caps->eht_cap_phy_info_5ghz,
4493  					  caps->eht_supp_mcs_ext_5ghz,
4494  					  &caps->eht_ppet_5ghz,
4495  					  caps->eht_cap_info_internal);
4496  	}
4497  
4498  	return 0;
4499  }
4500  
4501  static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag,
4502  					   u16 len, const void *ptr,
4503  					   void *data)
4504  {
4505  	const struct ath12k_wmi_caps_ext_params *caps = ptr;
4506  	int i = 0, ret;
4507  
4508  	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES_EXT)
4509  		return -EPROTO;
4510  
4511  	if (ab->hw_params->single_pdev_only) {
4512  		if (ab->wmi_ab.preferred_hw_mode != le32_to_cpu(caps->hw_mode_id) &&
4513  		    caps->hw_mode_id != WMI_HOST_HW_MODE_SINGLE)
4514  			return 0;
4515  	} else {
4516  		for (i = 0; i < ab->num_radios; i++) {
4517  			if (ab->pdevs[i].pdev_id ==
4518  			    ath12k_wmi_caps_ext_get_pdev_id(caps))
4519  				break;
4520  		}
4521  
4522  		if (i == ab->num_radios)
4523  			return -EINVAL;
4524  	}
4525  
4526  	ret = ath12k_wmi_tlv_mac_phy_caps_ext_parse(ab, caps, &ab->pdevs[i]);
4527  	if (ret) {
4528  		ath12k_warn(ab,
4529  			    "failed to parse extended MAC PHY capabilities for pdev %d: %d\n",
4530  			    ab->pdevs[i].pdev_id, ret);
4531  		return ret;
4532  	}
4533  
4534  	return 0;
4535  }
4536  
4537  static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab,
4538  					 u16 tag, u16 len,
4539  					 const void *ptr, void *data)
4540  {
4541  	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
4542  	struct ath12k_wmi_svc_rdy_ext2_parse *parse = data;
4543  	int ret;
4544  
4545  	switch (tag) {
4546  	case WMI_TAG_SERVICE_READY_EXT2_EVENT:
4547  		ret = ath12k_pull_svc_ready_ext2(wmi_handle, ptr,
4548  						 &parse->arg);
4549  		if (ret) {
4550  			ath12k_warn(ab,
4551  				    "failed to extract wmi service ready ext2 parameters: %d\n",
4552  				    ret);
4553  			return ret;
4554  		}
4555  		break;
4556  
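      	/* Several WMI_TAG_ARRAY_STRUCT TLVs follow in a fixed order
      	 * (DMA ring caps, spectral bin scaling, extended MAC PHY caps);
      	 * the *_done flags track which one is being parsed.
      	 */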
4557  	case WMI_TAG_ARRAY_STRUCT:
4558  		if (!parse->dma_ring_cap_done) {
4559  			ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
4560  						       &parse->dma_caps_parse);
4561  			if (ret)
4562  				return ret;
4563  
4564  			parse->dma_ring_cap_done = true;
4565  		} else if (!parse->spectral_bin_scaling_done) {
4566  			/* TODO: This is a placeholder, as the WMI tag for
4567  			 * spectral bin scaling arrives before
4568  			 * WMI_TAG_MAC_PHY_CAPABILITIES_EXT and is not handled yet
4569  			 */
4570  			parse->spectral_bin_scaling_done = true;
4571  		} else if (!parse->mac_phy_caps_ext_done) {
4572  			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4573  						  ath12k_wmi_tlv_mac_phy_caps_ext,
4574  						  parse);
4575  			if (ret) {
4576  				ath12k_warn(ab, "failed to parse extended MAC PHY capabilities WMI TLV: %d\n",
4577  					    ret);
4578  				return ret;
4579  			}
4580  
4581  			parse->mac_phy_caps_ext_done = true;
4582  		}
4583  		break;
4584  	default:
4585  		break;
4586  	}
4587  
4588  	return 0;
4589  }
4590  
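      /* Parse the WMI_SERVICE_READY_EXT2 event; on success the
       * service_ready completion is signalled.
       */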
4591  static int ath12k_service_ready_ext2_event(struct ath12k_base *ab,
4592  					   struct sk_buff *skb)
4593  {
4594  	struct ath12k_wmi_svc_rdy_ext2_parse svc_rdy_ext2 = { };
4595  	int ret;
4596  
4597  	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4598  				  ath12k_wmi_svc_rdy_ext2_parse,
4599  				  &svc_rdy_ext2);
4600  	if (ret) {
4601  		ath12k_warn(ab, "failed to parse ext2 event tlv %d\n", ret);
4602  		goto err;
4603  	}
4604  
4605  	complete(&ab->wmi_ab.service_ready);
4606  
4607  	return 0;
4608  
4609  err:
4610  	ath12k_wmi_free_dbring_caps(ab);
4611  	return ret;
4612  }
4613  
4614  static int ath12k_pull_vdev_start_resp_tlv(struct ath12k_base *ab, struct sk_buff *skb,
4615  					   struct wmi_vdev_start_resp_event *vdev_rsp)
4616  {
4617  	const void **tb;
4618  	const struct wmi_vdev_start_resp_event *ev;
4619  	int ret;
4620  
4621  	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
4622  	if (IS_ERR(tb)) {
4623  		ret = PTR_ERR(tb);
4624  		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4625  		return ret;
4626  	}
4627  
4628  	ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
4629  	if (!ev) {
4630  		ath12k_warn(ab, "failed to fetch vdev start resp ev");
4631  		kfree(tb);
4632  		return -EPROTO;
4633  	}
4634  
4635  	*vdev_rsp = *ev;
4636  
4637  	kfree(tb);
4638  	return 0;
4639  }
4640  
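      /* Unpack the firmware's bitfield-packed extended reg rules into the
       * host's struct ath12k_reg_rule representation.
       */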
4641  static struct ath12k_reg_rule
4642  *create_ext_reg_rules_from_wmi(u32 num_reg_rules,
4643  			       struct ath12k_wmi_reg_rule_ext_params *wmi_reg_rule)
4644  {
4645  	struct ath12k_reg_rule *reg_rule_ptr;
4646  	u32 count;
4647  
4648  	reg_rule_ptr = kcalloc(num_reg_rules, sizeof(*reg_rule_ptr),
4649  			       GFP_ATOMIC);
4650  
4651  	if (!reg_rule_ptr)
4652  		return NULL;
4653  
4654  	for (count = 0; count < num_reg_rules; count++) {
4655  		reg_rule_ptr[count].start_freq =
4656  			le32_get_bits(wmi_reg_rule[count].freq_info,
4657  				      REG_RULE_START_FREQ);
4658  		reg_rule_ptr[count].end_freq =
4659  			le32_get_bits(wmi_reg_rule[count].freq_info,
4660  				      REG_RULE_END_FREQ);
4661  		reg_rule_ptr[count].max_bw =
4662  			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
4663  				      REG_RULE_MAX_BW);
4664  		reg_rule_ptr[count].reg_power =
4665  			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
4666  				      REG_RULE_REG_PWR);
4667  		reg_rule_ptr[count].ant_gain =
4668  			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
4669  				      REG_RULE_ANT_GAIN);
4670  		reg_rule_ptr[count].flags =
4671  			le32_get_bits(wmi_reg_rule[count].flag_info,
4672  				      REG_RULE_FLAGS);
4673  		reg_rule_ptr[count].psd_flag =
4674  			le32_get_bits(wmi_reg_rule[count].psd_power_info,
4675  				      REG_RULE_PSD_INFO);
4676  		reg_rule_ptr[count].psd_eirp =
4677  			le32_get_bits(wmi_reg_rule[count].psd_power_info,
4678  				      REG_RULE_PSD_EIRP);
4679  	}
4680  
4681  	return reg_rule_ptr;
4682  }
4683  
4684  static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
4685  						   struct sk_buff *skb,
4686  						   struct ath12k_reg_info *reg_info)
4687  {
4688  	const void **tb;
4689  	const struct wmi_reg_chan_list_cc_ext_event *ev;
4690  	struct ath12k_wmi_reg_rule_ext_params *ext_wmi_reg_rule;
4691  	u32 num_2g_reg_rules, num_5g_reg_rules;
4692  	u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
4693  	u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
4694  	u32 total_reg_rules = 0;
4695  	int ret, i, j;
4696  
4697  	ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n");
4698  
4699  	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
4700  	if (IS_ERR(tb)) {
4701  		ret = PTR_ERR(tb);
4702  		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4703  		return ret;
4704  	}
4705  
4706  	ev = tb[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT];
4707  	if (!ev) {
4708  		ath12k_warn(ab, "failed to fetch reg chan list ext update ev\n");
4709  		kfree(tb);
4710  		return -EPROTO;
4711  	}
4712  
4713  	reg_info->num_2g_reg_rules = le32_to_cpu(ev->num_2g_reg_rules);
4714  	reg_info->num_5g_reg_rules = le32_to_cpu(ev->num_5g_reg_rules);
4715  	reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] =
4716  		le32_to_cpu(ev->num_6g_reg_rules_ap_lpi);
4717  	reg_info->num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP] =
4718  		le32_to_cpu(ev->num_6g_reg_rules_ap_sp);
4719  	reg_info->num_6g_reg_rules_ap[WMI_REG_VLP_AP] =
4720  		le32_to_cpu(ev->num_6g_reg_rules_ap_vlp);
4721  
4722  	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
4723  		reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
4724  			le32_to_cpu(ev->num_6g_reg_rules_cl_lpi[i]);
4725  		reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
4726  			le32_to_cpu(ev->num_6g_reg_rules_cl_sp[i]);
4727  		reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
4728  			le32_to_cpu(ev->num_6g_reg_rules_cl_vlp[i]);
4729  	}
4730  
4731  	num_2g_reg_rules = reg_info->num_2g_reg_rules;
4732  	total_reg_rules += num_2g_reg_rules;
4733  	num_5g_reg_rules = reg_info->num_5g_reg_rules;
4734  	total_reg_rules += num_5g_reg_rules;
4735  
4736  	if (num_2g_reg_rules > MAX_REG_RULES || num_5g_reg_rules > MAX_REG_RULES) {
4737  		ath12k_warn(ab, "Num reg rules for 2G/5G exceeds max limit (num_2g_reg_rules: %d num_5g_reg_rules: %d max_rules: %d)\n",
4738  			    num_2g_reg_rules, num_5g_reg_rules, MAX_REG_RULES);
4739  		kfree(tb);
4740  		return -EINVAL;
4741  	}
4742  
4743  	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
4744  		num_6g_reg_rules_ap[i] = reg_info->num_6g_reg_rules_ap[i];
4745  
4746  		if (num_6g_reg_rules_ap[i] > MAX_6G_REG_RULES) {
4747  			ath12k_warn(ab, "Num 6G reg rules for AP mode(%d) exceeds max limit (num_6g_reg_rules_ap: %d, max_rules: %d)\n",
4748  				    i, num_6g_reg_rules_ap[i], MAX_6G_REG_RULES);
4749  			kfree(tb);
4750  			return -EINVAL;
4751  		}
4752  
4753  		total_reg_rules += num_6g_reg_rules_ap[i];
4754  	}
4755  
4756  	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
4757  		num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
4758  				reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
4759  		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
4760  
4761  		num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
4762  				reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
4763  		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
4764  
4765  		num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
4766  				reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
4767  		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
4768  
4769  		if (num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] > MAX_6G_REG_RULES ||
4770  		    num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] > MAX_6G_REG_RULES ||
4771  		    num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] >  MAX_6G_REG_RULES) {
4772  			ath12k_warn(ab, "Num 6g client reg rules exceeds max limit for client type %d\n",
4773  				    i);
4774  			kfree(tb);
4775  			return -EINVAL;
4776  		}
4777  	}
4778  
4779  	if (!total_reg_rules) {
4780  		ath12k_warn(ab, "No reg rules available\n");
4781  		kfree(tb);
4782  		return -EINVAL;
4783  	}
4784  
4785  	memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);
4786  
4787  	/* FIXME: Currently FW includes the 6 GHz reg rules in the 5 GHz rule
4788  	 * list for country US.
4789  	 * Having the same 6 GHz reg rule in both the 5 GHz and 6 GHz rule
4790  	 * lists causes the intersect check to pass, so the same rules are
4791  	 * shown multiple times in the iw output. Hence the hack below, which
4792  	 * avoids parsing 6 GHz rules from the 5 GHz reg rule list; it can be
4793  	 * removed once FW is updated to drop the 6 GHz reg rules from the
4794  	 * 5 GHz rule list.
4795  	 */
4796  	if (memcmp(reg_info->alpha2, "US", 2) == 0) {
4797  		reg_info->num_5g_reg_rules = REG_US_5G_NUM_REG_RULES;
4798  		num_5g_reg_rules = reg_info->num_5g_reg_rules;
4799  	}
4800  
4801  	reg_info->dfs_region = le32_to_cpu(ev->dfs_region);
4802  	reg_info->phybitmap = le32_to_cpu(ev->phybitmap);
4803  	reg_info->num_phy = le32_to_cpu(ev->num_phy);
4804  	reg_info->phy_id = le32_to_cpu(ev->phy_id);
4805  	reg_info->ctry_code = le32_to_cpu(ev->country_id);
4806  	reg_info->reg_dmn_pair = le32_to_cpu(ev->domain_code);
4807  
4808  	switch (le32_to_cpu(ev->status_code)) {
4809  	case WMI_REG_SET_CC_STATUS_PASS:
4810  		reg_info->status_code = REG_SET_CC_STATUS_PASS;
4811  		break;
4812  	case WMI_REG_CURRENT_ALPHA2_NOT_FOUND:
4813  		reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND;
4814  		break;
4815  	case WMI_REG_INIT_ALPHA2_NOT_FOUND:
4816  		reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND;
4817  		break;
4818  	case WMI_REG_SET_CC_CHANGE_NOT_ALLOWED:
4819  		reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED;
4820  		break;
4821  	case WMI_REG_SET_CC_STATUS_NO_MEMORY:
4822  		reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY;
4823  		break;
4824  	case WMI_REG_SET_CC_STATUS_FAIL:
4825  		reg_info->status_code = REG_SET_CC_STATUS_FAIL;
4826  		break;
4827  	}
4828  
4829  	reg_info->is_ext_reg_event = true;
4830  
4831  	reg_info->min_bw_2g = le32_to_cpu(ev->min_bw_2g);
4832  	reg_info->max_bw_2g = le32_to_cpu(ev->max_bw_2g);
4833  	reg_info->min_bw_5g = le32_to_cpu(ev->min_bw_5g);
4834  	reg_info->max_bw_5g = le32_to_cpu(ev->max_bw_5g);
4835  	reg_info->min_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->min_bw_6g_ap_lpi);
4836  	reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->max_bw_6g_ap_lpi);
4837  	reg_info->min_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->min_bw_6g_ap_sp);
4838  	reg_info->max_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->max_bw_6g_ap_sp);
4839  	reg_info->min_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->min_bw_6g_ap_vlp);
4840  	reg_info->max_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->max_bw_6g_ap_vlp);
4841  
4842  	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
4843  		reg_info->min_bw_6g_client[WMI_REG_INDOOR_AP][i] =
4844  			le32_to_cpu(ev->min_bw_6g_client_lpi[i]);
4845  		reg_info->max_bw_6g_client[WMI_REG_INDOOR_AP][i] =
4846  			le32_to_cpu(ev->max_bw_6g_client_lpi[i]);
4847  		reg_info->min_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
4848  			le32_to_cpu(ev->min_bw_6g_client_sp[i]);
4849  		reg_info->max_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
4850  			le32_to_cpu(ev->max_bw_6g_client_sp[i]);
4851  		reg_info->min_bw_6g_client[WMI_REG_VLP_AP][i] =
4852  			le32_to_cpu(ev->min_bw_6g_client_vlp[i]);
4853  		reg_info->max_bw_6g_client[WMI_REG_VLP_AP][i] =
4854  			le32_to_cpu(ev->max_bw_6g_client_vlp[i]);
4855  	}
4856  
4857  	ath12k_dbg(ab, ATH12K_DBG_WMI,
4858  		   "%s:cc_ext %s dfs %d BW: min_2g %d max_2g %d min_5g %d max_5g %d phy_bitmap 0x%x",
4859  		   __func__, reg_info->alpha2, reg_info->dfs_region,
4860  		   reg_info->min_bw_2g, reg_info->max_bw_2g,
4861  		   reg_info->min_bw_5g, reg_info->max_bw_5g,
4862  		   reg_info->phybitmap);
4863  
4864  	ath12k_dbg(ab, ATH12K_DBG_WMI,
4865  		   "num_2g_reg_rules %d num_5g_reg_rules %d",
4866  		   num_2g_reg_rules, num_5g_reg_rules);
4867  
4868  	ath12k_dbg(ab, ATH12K_DBG_WMI,
4869  		   "num_6g_reg_rules_ap_lpi: %d num_6g_reg_rules_ap_sp: %d num_6g_reg_rules_ap_vlp: %d",
4870  		   num_6g_reg_rules_ap[WMI_REG_INDOOR_AP],
4871  		   num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP],
4872  		   num_6g_reg_rules_ap[WMI_REG_VLP_AP]);
4873  
4874  	ath12k_dbg(ab, ATH12K_DBG_WMI,
4875  		   "6g Regular client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
4876  		   num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_DEFAULT_CLIENT],
4877  		   num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_DEFAULT_CLIENT],
4878  		   num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_DEFAULT_CLIENT]);
4879  
4880  	ath12k_dbg(ab, ATH12K_DBG_WMI,
4881  		   "6g Subordinate client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
4882  		   num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_SUBORDINATE_CLIENT],
4883  		   num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_SUBORDINATE_CLIENT],
4884  		   num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_SUBORDINATE_CLIENT]);
4885  
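      	/* The reg rule TLVs immediately follow the fixed event structure
      	 * and its TLV header: 2 GHz rules first, then 5 GHz, then the
      	 * 6 GHz AP rules per AP type, then the 6 GHz client rules per
      	 * AP type and client type.
      	 */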
4886  	ext_wmi_reg_rule =
4887  		(struct ath12k_wmi_reg_rule_ext_params *)((u8 *)ev
4888  			+ sizeof(*ev)
4889  			+ sizeof(struct wmi_tlv));
4890  
4891  	if (num_2g_reg_rules) {
4892  		reg_info->reg_rules_2g_ptr =
4893  			create_ext_reg_rules_from_wmi(num_2g_reg_rules,
4894  						      ext_wmi_reg_rule);
4895  
4896  		if (!reg_info->reg_rules_2g_ptr) {
4897  			kfree(tb);
4898  			ath12k_warn(ab, "unable to allocate memory for 2g rules\n");
4899  			return -ENOMEM;
4900  		}
4901  	}
4902  
4903  	if (num_5g_reg_rules) {
4904  		ext_wmi_reg_rule += num_2g_reg_rules;
4905  		reg_info->reg_rules_5g_ptr =
4906  			create_ext_reg_rules_from_wmi(num_5g_reg_rules,
4907  						      ext_wmi_reg_rule);
4908  
4909  		if (!reg_info->reg_rules_5g_ptr) {
4910  			kfree(tb);
4911  			ath12k_warn(ab, "unable to allocate memory for 5g rules\n");
4912  			return -ENOMEM;
4913  		}
4914  	}
4915  
4916  	ext_wmi_reg_rule += num_5g_reg_rules;
4917  
4918  	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
4919  		reg_info->reg_rules_6g_ap_ptr[i] =
4920  			create_ext_reg_rules_from_wmi(num_6g_reg_rules_ap[i],
4921  						      ext_wmi_reg_rule);
4922  
4923  		if (!reg_info->reg_rules_6g_ap_ptr[i]) {
4924  			kfree(tb);
4925  			ath12k_warn(ab, "unable to allocate memory for 6g ap rules\n");
4926  			return -ENOMEM;
4927  		}
4928  
4929  		ext_wmi_reg_rule += num_6g_reg_rules_ap[i];
4930  	}
4931  
4932  	for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) {
4933  		for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
4934  			reg_info->reg_rules_6g_client_ptr[j][i] =
4935  				create_ext_reg_rules_from_wmi(num_6g_reg_rules_cl[j][i],
4936  							      ext_wmi_reg_rule);
4937  
4938  			if (!reg_info->reg_rules_6g_client_ptr[j][i]) {
4939  				kfree(tb);
4940  				ath12k_warn(ab, "unable to allocate memory for 6g client rules\n");
4941  				return -ENOMEM;
4942  			}
4943  
4944  			ext_wmi_reg_rule += num_6g_reg_rules_cl[j][i];
4945  		}
4946  	}
4947  
4948  	reg_info->client_type = le32_to_cpu(ev->client_type);
4949  	reg_info->rnr_tpe_usable = ev->rnr_tpe_usable;
4950  	reg_info->unspecified_ap_usable = ev->unspecified_ap_usable;
4951  	reg_info->domain_code_6g_ap[WMI_REG_INDOOR_AP] =
4952  		le32_to_cpu(ev->domain_code_6g_ap_lpi);
4953  	reg_info->domain_code_6g_ap[WMI_REG_STD_POWER_AP] =
4954  		le32_to_cpu(ev->domain_code_6g_ap_sp);
4955  	reg_info->domain_code_6g_ap[WMI_REG_VLP_AP] =
4956  		le32_to_cpu(ev->domain_code_6g_ap_vlp);
4957  
4958  	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
4959  		reg_info->domain_code_6g_client[WMI_REG_INDOOR_AP][i] =
4960  			le32_to_cpu(ev->domain_code_6g_client_lpi[i]);
4961  		reg_info->domain_code_6g_client[WMI_REG_STD_POWER_AP][i] =
4962  			le32_to_cpu(ev->domain_code_6g_client_sp[i]);
4963  		reg_info->domain_code_6g_client[WMI_REG_VLP_AP][i] =
4964  			le32_to_cpu(ev->domain_code_6g_client_vlp[i]);
4965  	}
4966  
4967  	reg_info->domain_code_6g_super_id = le32_to_cpu(ev->domain_code_6g_super_id);
4968  
4969  	ath12k_dbg(ab, ATH12K_DBG_WMI, "6g client_type: %d domain_code_6g_super_id: %d",
4970  		   reg_info->client_type, reg_info->domain_code_6g_super_id);
4971  
4972  	ath12k_dbg(ab, ATH12K_DBG_WMI, "processed regulatory ext channel list\n");
4973  
4974  	kfree(tb);
4975  	return 0;
4976  }
4977  
4978  static int ath12k_pull_peer_del_resp_ev(struct ath12k_base *ab, struct sk_buff *skb,
4979  					struct wmi_peer_delete_resp_event *peer_del_resp)
4980  {
4981  	const void **tb;
4982  	const struct wmi_peer_delete_resp_event *ev;
4983  	int ret;
4984  
4985  	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
4986  	if (IS_ERR(tb)) {
4987  		ret = PTR_ERR(tb);
4988  		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4989  		return ret;
4990  	}
4991  
4992  	ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
4993  	if (!ev) {
4994  		ath12k_warn(ab, "failed to fetch peer delete resp ev");
4995  		kfree(tb);
4996  		return -EPROTO;
4997  	}
4998  
4999  	memset(peer_del_resp, 0, sizeof(*peer_del_resp));
5000  
5001  	peer_del_resp->vdev_id = ev->vdev_id;
5002  	ether_addr_copy(peer_del_resp->peer_macaddr.addr,
5003  			ev->peer_macaddr.addr);
5004  
5005  	kfree(tb);
5006  	return 0;
5007  }
5008  
5009  static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab,
5010  					struct sk_buff *skb,
5011  					u32 *vdev_id)
5012  {
5013  	const void **tb;
5014  	const struct wmi_vdev_delete_resp_event *ev;
5015  	int ret;
5016  
5017  	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5018  	if (IS_ERR(tb)) {
5019  		ret = PTR_ERR(tb);
5020  		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5021  		return ret;
5022  	}
5023  
5024  	ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT];
5025  	if (!ev) {
5026  		ath12k_warn(ab, "failed to fetch vdev delete resp ev");
5027  		kfree(tb);
5028  		return -EPROTO;
5029  	}
5030  
5031  	*vdev_id = le32_to_cpu(ev->vdev_id);
5032  
5033  	kfree(tb);
5034  	return 0;
5035  }
5036  
5037  static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab,
5038  					struct sk_buff *skb,
5039  					u32 *vdev_id, u32 *tx_status)
5040  {
5041  	const void **tb;
5042  	const struct wmi_bcn_tx_status_event *ev;
5043  	int ret;
5044  
5045  	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5046  	if (IS_ERR(tb)) {
5047  		ret = PTR_ERR(tb);
5048  		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5049  		return ret;
5050  	}
5051  
5052  	ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT];
5053  	if (!ev) {
5054  		ath12k_warn(ab, "failed to fetch bcn tx status ev");
5055  		kfree(tb);
5056  		return -EPROTO;
5057  	}
5058  
5059  	*vdev_id = le32_to_cpu(ev->vdev_id);
5060  	*tx_status = le32_to_cpu(ev->tx_status);
5061  
5062  	kfree(tb);
5063  	return 0;
5064  }
5065  
5066  static int ath12k_pull_vdev_stopped_param_tlv(struct ath12k_base *ab, struct sk_buff *skb,
5067  					      u32 *vdev_id)
5068  {
5069  	const void **tb;
5070  	const struct wmi_vdev_stopped_event *ev;
5071  	int ret;
5072  
5073  	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5074  	if (IS_ERR(tb)) {
5075  		ret = PTR_ERR(tb);
5076  		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5077  		return ret;
5078  	}
5079  
5080  	ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
5081  	if (!ev) {
5082  		ath12k_warn(ab, "failed to fetch vdev stop ev");
5083  		kfree(tb);
5084  		return -EPROTO;
5085  	}
5086  
5087  	*vdev_id = le32_to_cpu(ev->vdev_id);
5088  
5089  	kfree(tb);
5090  	return 0;
5091  }
5092  
5093  static int ath12k_wmi_tlv_mgmt_rx_parse(struct ath12k_base *ab,
5094  					u16 tag, u16 len,
5095  					const void *ptr, void *data)
5096  {
5097  	struct wmi_tlv_mgmt_rx_parse *parse = data;
5098  
5099  	switch (tag) {
5100  	case WMI_TAG_MGMT_RX_HDR:
5101  		parse->fixed = ptr;
5102  		break;
5103  	case WMI_TAG_ARRAY_BYTE:
5104  		if (!parse->frame_buf_done) {
5105  			parse->frame_buf = ptr;
5106  			parse->frame_buf_done = true;
5107  		}
5108  		break;
5109  	}
5110  	return 0;
5111  }
5112  
5113  static int ath12k_pull_mgmt_rx_params_tlv(struct ath12k_base *ab,
5114  					  struct sk_buff *skb,
5115  					  struct ath12k_wmi_mgmt_rx_arg *hdr)
5116  {
5117  	struct wmi_tlv_mgmt_rx_parse parse = { };
5118  	const struct ath12k_wmi_mgmt_rx_params *ev;
5119  	const u8 *frame;
5120  	int i, ret;
5121  
5122  	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
5123  				  ath12k_wmi_tlv_mgmt_rx_parse,
5124  				  &parse);
5125  	if (ret) {
5126  		ath12k_warn(ab, "failed to parse mgmt rx tlv %d\n", ret);
5127  		return ret;
5128  	}
5129  
5130  	ev = parse.fixed;
5131  	frame = parse.frame_buf;
5132  
5133  	if (!ev || !frame) {
5134  		ath12k_warn(ab, "failed to fetch mgmt rx hdr");
5135  		return -EPROTO;
5136  	}
5137  
5138  	hdr->pdev_id = le32_to_cpu(ev->pdev_id);
5139  	hdr->chan_freq = le32_to_cpu(ev->chan_freq);
5140  	hdr->channel = le32_to_cpu(ev->channel);
5141  	hdr->snr = le32_to_cpu(ev->snr);
5142  	hdr->rate = le32_to_cpu(ev->rate);
5143  	hdr->phy_mode = le32_to_cpu(ev->phy_mode);
5144  	hdr->buf_len = le32_to_cpu(ev->buf_len);
5145  	hdr->status = le32_to_cpu(ev->status);
5146  	hdr->flags = le32_to_cpu(ev->flags);
5147  	hdr->rssi = a_sle32_to_cpu(ev->rssi);
5148  	hdr->tsf_delta = le32_to_cpu(ev->tsf_delta);
5149  
5150  	for (i = 0; i < ATH_MAX_ANTENNA; i++)
5151  		hdr->rssi_ctl[i] = le32_to_cpu(ev->rssi_ctl[i]);
5152  
5153  	if (skb->len < (frame - skb->data) + hdr->buf_len) {
5154  		ath12k_warn(ab, "invalid length in mgmt rx hdr ev");
5155  		return -EPROTO;
5156  	}
5157  
5158  	/* shift the sk_buff so its data starts at 'frame' and spans buf_len bytes */
5159  	skb_trim(skb, 0);
5160  	skb_put(skb, frame - skb->data);
5161  	skb_pull(skb, frame - skb->data);
5162  	skb_put(skb, hdr->buf_len);
5163  
5164  	return 0;
5165  }
5166  
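      /* Complete a mgmt tx descriptor: look up the queued msdu by its IDR
       * id, unmap it, report tx status to mac80211, and wake any waiter
       * once no mgmt frames remain pending.
       */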
5167  static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id,
5168  				    u32 status)
5169  {
5170  	struct sk_buff *msdu;
5171  	struct ieee80211_tx_info *info;
5172  	struct ath12k_skb_cb *skb_cb;
5173  	int num_mgmt;
5174  
5175  	spin_lock_bh(&ar->txmgmt_idr_lock);
5176  	msdu = idr_find(&ar->txmgmt_idr, desc_id);
5177  
5178  	if (!msdu) {
5179  		ath12k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n",
5180  			    desc_id);
5181  		spin_unlock_bh(&ar->txmgmt_idr_lock);
5182  		return -ENOENT;
5183  	}
5184  
5185  	idr_remove(&ar->txmgmt_idr, desc_id);
5186  	spin_unlock_bh(&ar->txmgmt_idr_lock);
5187  
5188  	skb_cb = ATH12K_SKB_CB(msdu);
5189  	dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
5190  
5191  	info = IEEE80211_SKB_CB(msdu);
5192  	if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
5193  		info->flags |= IEEE80211_TX_STAT_ACK;
5194  
5195  	ieee80211_tx_status_irqsafe(ath12k_ar_to_hw(ar), msdu);
5196  
5197  	num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
5198  
5199  	/* WARN when we received this event without doing any mgmt tx */
5200  	if (num_mgmt < 0)
5201  		WARN_ON_ONCE(1);
5202  
5203  	if (!num_mgmt)
5204  		wake_up(&ar->txmgmt_empty_waitq);
5205  
5206  	return 0;
5207  }
5208  
5209  static int ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base *ab,
5210  					       struct sk_buff *skb,
5211  					       struct wmi_mgmt_tx_compl_event *param)
5212  {
5213  	const void **tb;
5214  	const struct wmi_mgmt_tx_compl_event *ev;
5215  	int ret;
5216  
5217  	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5218  	if (IS_ERR(tb)) {
5219  		ret = PTR_ERR(tb);
5220  		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5221  		return ret;
5222  	}
5223  
5224  	ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
5225  	if (!ev) {
5226  		ath12k_warn(ab, "failed to fetch mgmt tx compl ev");
5227  		kfree(tb);
5228  		return -EPROTO;
5229  	}
5230  
5231  	param->pdev_id = ev->pdev_id;
5232  	param->desc_id = ev->desc_id;
5233  	param->status = ev->status;
5234  
5235  	kfree(tb);
5236  	return 0;
5237  }
5238  
5239  static void ath12k_wmi_event_scan_started(struct ath12k *ar)
5240  {
5241  	lockdep_assert_held(&ar->data_lock);
5242  
5243  	switch (ar->scan.state) {
5244  	case ATH12K_SCAN_IDLE:
5245  	case ATH12K_SCAN_RUNNING:
5246  	case ATH12K_SCAN_ABORTING:
5247  		ath12k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n",
5248  			    ath12k_scan_state_str(ar->scan.state),
5249  			    ar->scan.state);
5250  		break;
5251  	case ATH12K_SCAN_STARTING:
5252  		ar->scan.state = ATH12K_SCAN_RUNNING;
5253  
5254  		if (ar->scan.is_roc)
5255  			ieee80211_ready_on_channel(ath12k_ar_to_hw(ar));
5256  
5257  		complete(&ar->scan.started);
5258  		break;
5259  	}
5260  }
5261  
5262  static void ath12k_wmi_event_scan_start_failed(struct ath12k *ar)
5263  {
5264  	lockdep_assert_held(&ar->data_lock);
5265  
5266  	switch (ar->scan.state) {
5267  	case ATH12K_SCAN_IDLE:
5268  	case ATH12K_SCAN_RUNNING:
5269  	case ATH12K_SCAN_ABORTING:
5270  		ath12k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n",
5271  			    ath12k_scan_state_str(ar->scan.state),
5272  			    ar->scan.state);
5273  		break;
5274  	case ATH12K_SCAN_STARTING:
5275  		complete(&ar->scan.started);
5276  		__ath12k_mac_scan_finish(ar);
5277  		break;
5278  	}
5279  }
5280  
5281  static void ath12k_wmi_event_scan_completed(struct ath12k *ar)
5282  {
5283  	lockdep_assert_held(&ar->data_lock);
5284  
5285  	switch (ar->scan.state) {
5286  	case ATH12K_SCAN_IDLE:
5287  	case ATH12K_SCAN_STARTING:
5288  		/* One suspected reason scan can be completed while starting is
5289  		 * if firmware fails to deliver all scan events to the host,
5290  		 * e.g. when transport pipe is full. This has been observed
5291  		 * with spectral scan phyerr events starving wmi transport
5292  		 * pipe. In such case the "scan completed" event should be (and
5293  		 * is) ignored by the host as it may be just firmware's scan
5294  		 * state machine recovering.
5295  		 */
5296  		ath12k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n",
5297  			    ath12k_scan_state_str(ar->scan.state),
5298  			    ar->scan.state);
5299  		break;
5300  	case ATH12K_SCAN_RUNNING:
5301  	case ATH12K_SCAN_ABORTING:
5302  		__ath12k_mac_scan_finish(ar);
5303  		break;
5304  	}
5305  }
5306  
5307  static void ath12k_wmi_event_scan_bss_chan(struct ath12k *ar)
5308  {
5309  	lockdep_assert_held(&ar->data_lock);
5310  
5311  	switch (ar->scan.state) {
5312  	case ATH12K_SCAN_IDLE:
5313  	case ATH12K_SCAN_STARTING:
5314  		ath12k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n",
5315  			    ath12k_scan_state_str(ar->scan.state),
5316  			    ar->scan.state);
5317  		break;
5318  	case ATH12K_SCAN_RUNNING:
5319  	case ATH12K_SCAN_ABORTING:
5320  		ar->scan_channel = NULL;
5321  		break;
5322  	}
5323  }
5324  
5325  static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq)
5326  {
5327  	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
5328  
5329  	lockdep_assert_held(&ar->data_lock);
5330  
5331  	switch (ar->scan.state) {
5332  	case ATH12K_SCAN_IDLE:
5333  	case ATH12K_SCAN_STARTING:
5334  		ath12k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
5335  			    ath12k_scan_state_str(ar->scan.state),
5336  			    ar->scan.state);
5337  		break;
5338  	case ATH12K_SCAN_RUNNING:
5339  	case ATH12K_SCAN_ABORTING:
5340  		ar->scan_channel = ieee80211_get_channel(hw->wiphy, freq);
5341  
5342  		if (ar->scan.is_roc && ar->scan.roc_freq == freq)
5343  			complete(&ar->scan.on_channel);
5344  
5345  		break;
5346  	}
5347  }
5348  
5349  static const char *
5350  ath12k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
5351  			       enum wmi_scan_completion_reason reason)
5352  {
5353  	switch (type) {
5354  	case WMI_SCAN_EVENT_STARTED:
5355  		return "started";
5356  	case WMI_SCAN_EVENT_COMPLETED:
5357  		switch (reason) {
5358  		case WMI_SCAN_REASON_COMPLETED:
5359  			return "completed";
5360  		case WMI_SCAN_REASON_CANCELLED:
5361  			return "completed [cancelled]";
5362  		case WMI_SCAN_REASON_PREEMPTED:
5363  			return "completed [preempted]";
5364  		case WMI_SCAN_REASON_TIMEDOUT:
5365  			return "completed [timedout]";
5366  		case WMI_SCAN_REASON_INTERNAL_FAILURE:
5367  			return "completed [internal err]";
5368  		case WMI_SCAN_REASON_MAX:
5369  			break;
5370  		}
5371  		return "completed [unknown]";
5372  	case WMI_SCAN_EVENT_BSS_CHANNEL:
5373  		return "bss channel";
5374  	case WMI_SCAN_EVENT_FOREIGN_CHAN:
5375  		return "foreign channel";
5376  	case WMI_SCAN_EVENT_DEQUEUED:
5377  		return "dequeued";
5378  	case WMI_SCAN_EVENT_PREEMPTED:
5379  		return "preempted";
5380  	case WMI_SCAN_EVENT_START_FAILED:
5381  		return "start failed";
5382  	case WMI_SCAN_EVENT_RESTARTED:
5383  		return "restarted";
5384  	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
5385  		return "foreign channel exit";
5386  	default:
5387  		return "unknown";
5388  	}
5389  }
5390  
5391  static int ath12k_pull_scan_ev(struct ath12k_base *ab, struct sk_buff *skb,
5392  			       struct wmi_scan_event *scan_evt_param)
5393  {
5394  	const void **tb;
5395  	const struct wmi_scan_event *ev;
5396  	int ret;
5397  
5398  	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5399  	if (IS_ERR(tb)) {
5400  		ret = PTR_ERR(tb);
5401  		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5402  		return ret;
5403  	}
5404  
5405  	ev = tb[WMI_TAG_SCAN_EVENT];
5406  	if (!ev) {
5407  		ath12k_warn(ab, "failed to fetch scan ev");
5408  		kfree(tb);
5409  		return -EPROTO;
5410  	}
5411  
5412  	scan_evt_param->event_type = ev->event_type;
5413  	scan_evt_param->reason = ev->reason;
5414  	scan_evt_param->channel_freq = ev->channel_freq;
5415  	scan_evt_param->scan_req_id = ev->scan_req_id;
5416  	scan_evt_param->scan_id = ev->scan_id;
5417  	scan_evt_param->vdev_id = ev->vdev_id;
5418  	scan_evt_param->tsf_timestamp = ev->tsf_timestamp;
5419  
5420  	kfree(tb);
5421  	return 0;
5422  }
5423  
5424  static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buff *skb,
5425  					   struct wmi_peer_sta_kickout_arg *arg)
5426  {
5427  	const void **tb;
5428  	const struct wmi_peer_sta_kickout_event *ev;
5429  	int ret;
5430  
5431  	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5432  	if (IS_ERR(tb)) {
5433  		ret = PTR_ERR(tb);
5434  		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5435  		return ret;
5436  	}
5437  
5438  	ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT];
5439  	if (!ev) {
5440  		ath12k_warn(ab, "failed to fetch peer sta kickout ev");
5441  		kfree(tb);
5442  		return -EPROTO;
5443  	}
5444  
5445  	arg->mac_addr = ev->peer_macaddr.addr;
5446  
5447  	kfree(tb);
5448  	return 0;
5449  }
5450  
5451  static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb,
5452  			       struct wmi_roam_event *roam_ev)
5453  {
5454  	const void **tb;
5455  	const struct wmi_roam_event *ev;
5456  	int ret;
5457  
5458  	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5459  	if (IS_ERR(tb)) {
5460  		ret = PTR_ERR(tb);
5461  		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5462  		return ret;
5463  	}
5464  
5465  	ev = tb[WMI_TAG_ROAM_EVENT];
5466  	if (!ev) {
5467  		ath12k_warn(ab, "failed to fetch roam ev");
5468  		kfree(tb);
5469  		return -EPROTO;
5470  	}
5471  
5472  	roam_ev->vdev_id = ev->vdev_id;
5473  	roam_ev->reason = ev->reason;
5474  	roam_ev->rssi = ev->rssi;
5475  
5476  	kfree(tb);
5477  	return 0;
5478  }
5479  
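      /* Map a center frequency to its flat channel index across all bands
       * advertised to mac80211; if the frequency is not found, the total
       * channel count is returned.
       */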
5480  static int freq_to_idx(struct ath12k *ar, int freq)
5481  {
5482  	struct ieee80211_supported_band *sband;
5483  	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
5484  	int band, ch, idx = 0;
5485  
5486  	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
5487  		if (!ar->mac.sbands[band].channels)
5488  			continue;
5489  
5490  		sband = hw->wiphy->bands[band];
5491  		if (!sband)
5492  			continue;
5493  
5494  		for (ch = 0; ch < sband->n_channels; ch++, idx++)
5495  			if (sband->channels[ch].center_freq == freq)
5496  				goto exit;
5497  	}
5498  
5499  exit:
5500  	return idx;
5501  }
5502  
5503  static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
5504  				    struct wmi_chan_info_event *ch_info_ev)
5505  {
5506  	const void **tb;
5507  	const struct wmi_chan_info_event *ev;
5508  	int ret;
5509  
5510  	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5511  	if (IS_ERR(tb)) {
5512  		ret = PTR_ERR(tb);
5513  		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5514  		return ret;
5515  	}
5516  
5517  	ev = tb[WMI_TAG_CHAN_INFO_EVENT];
5518  	if (!ev) {
5519  		ath12k_warn(ab, "failed to fetch chan info ev");
5520  		kfree(tb);
5521  		return -EPROTO;
5522  	}
5523  
5524  	ch_info_ev->err_code = ev->err_code;
5525  	ch_info_ev->freq = ev->freq;
5526  	ch_info_ev->cmd_flags = ev->cmd_flags;
5527  	ch_info_ev->noise_floor = ev->noise_floor;
5528  	ch_info_ev->rx_clear_count = ev->rx_clear_count;
5529  	ch_info_ev->cycle_count = ev->cycle_count;
5530  	ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
5531  	ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
5532  	ch_info_ev->rx_frame_count = ev->rx_frame_count;
5533  	ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
5534  	ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
5535  	ch_info_ev->vdev_id = ev->vdev_id;
5536  
5537  	kfree(tb);
5538  	return 0;
5539  }
5540  
5541  static int
5542  ath12k_pull_pdev_bss_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
5543  				  struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev)
5544  {
5545  	const void **tb;
5546  	const struct wmi_pdev_bss_chan_info_event *ev;
5547  	int ret;
5548  
5549  	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5550  	if (IS_ERR(tb)) {
5551  		ret = PTR_ERR(tb);
5552  		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5553  		return ret;
5554  	}
5555  
5556  	ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT];
5557  	if (!ev) {
5558  		ath12k_warn(ab, "failed to fetch pdev bss chan info ev");
5559  		kfree(tb);
5560  		return -EPROTO;
5561  	}
5562  
5563  	bss_ch_info_ev->pdev_id = ev->pdev_id;
5564  	bss_ch_info_ev->freq = ev->freq;
5565  	bss_ch_info_ev->noise_floor = ev->noise_floor;
5566  	bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low;
5567  	bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high;
5568  	bss_ch_info_ev->cycle_count_low = ev->cycle_count_low;
5569  	bss_ch_info_ev->cycle_count_high = ev->cycle_count_high;
5570  	bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low;
5571  	bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high;
5572  	bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low;
5573  	bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high;
5574  	bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low;
5575  	bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high;
5576  
5577  	kfree(tb);
5578  	return 0;
5579  }
5580  
5581  static int
5582  ath12k_pull_vdev_install_key_compl_ev(struct ath12k_base *ab, struct sk_buff *skb,
5583  				      struct wmi_vdev_install_key_complete_arg *arg)
5584  {
5585  	const void **tb;
5586  	const struct wmi_vdev_install_key_compl_event *ev;
5587  	int ret;
5588  
5589  	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5590  	if (IS_ERR(tb)) {
5591  		ret = PTR_ERR(tb);
5592  		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5593  		return ret;
5594  	}
5595  
5596  	ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
5597  	if (!ev) {
5598  		ath12k_warn(ab, "failed to fetch vdev install key compl ev");
5599  		kfree(tb);
5600  		return -EPROTO;
5601  	}
5602  
5603  	arg->vdev_id = le32_to_cpu(ev->vdev_id);
5604  	arg->macaddr = ev->peer_macaddr.addr;
5605  	arg->key_idx = le32_to_cpu(ev->key_idx);
5606  	arg->key_flags = le32_to_cpu(ev->key_flags);
5607  	arg->status = le32_to_cpu(ev->status);
5608  
5609  	kfree(tb);
5610  	return 0;
5611  }
5612  
5613  static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff *skb,
5614  					  struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
5615  {
5616  	const void **tb;
5617  	const struct wmi_peer_assoc_conf_event *ev;
5618  	int ret;
5619  
5620  	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5621  	if (IS_ERR(tb)) {
5622  		ret = PTR_ERR(tb);
5623  		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5624  		return ret;
5625  	}
5626  
5627  	ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
5628  	if (!ev) {
5629  		ath12k_warn(ab, "failed to fetch peer assoc conf ev");
5630  		kfree(tb);
5631  		return -EPROTO;
5632  	}
5633  
5634  	peer_assoc_conf->vdev_id = le32_to_cpu(ev->vdev_id);
5635  	peer_assoc_conf->macaddr = ev->peer_macaddr.addr;
5636  
5637  	kfree(tb);
5638  	return 0;
5639  }
5640  
5641  static int
5642  ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, struct sk_buff *skb,
5643  			 const struct wmi_pdev_temperature_event *ev)
5644  {
5645  	const void **tb;
5646  	int ret;
5647  
5648  	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5649  	if (IS_ERR(tb)) {
5650  		ret = PTR_ERR(tb);
5651  		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5652  		return ret;
5653  	}
5654  
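      	/* Note: the caller-supplied 'ev' is overwritten here; this helper
      	 * only validates that the temperature TLV is present.
      	 */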
5655  	ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
5656  	if (!ev) {
5657  		ath12k_warn(ab, "failed to fetch pdev temp ev");
5658  		kfree(tb);
5659  		return -EPROTO;
5660  	}
5661  
5662  	kfree(tb);
5663  	return 0;
5664  }
5665  
5666  static void ath12k_wmi_op_ep_tx_credits(struct ath12k_base *ab)
5667  {
5668  	/* Try to send pending beacons first; they take priority */
5669  	wake_up(&ab->wmi_ab.tx_credits_wq);
5670  }
5671  
5672  static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab,
5673  				       struct sk_buff *skb)
5674  {
5675  	dev_kfree_skb(skb);
5676  }
5677  
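      /* "00" and "na" are both treated as world-roaming (default) alpha2
       * values.
       */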
5678  static bool ath12k_reg_is_world_alpha(char *alpha)
5679  {
5680  	if (alpha[0] == '0' && alpha[1] == '0')
5681  		return true;
5682  
5683  	if (alpha[0] == 'n' && alpha[1] == 'a')
5684  		return true;
5685  
5686  	return false;
5687  }
5688  
5689  static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb)
5690  {
5691  	struct ath12k_reg_info *reg_info = NULL;
5692  	struct ieee80211_regdomain *regd = NULL;
5693  	bool intersect = false;
5694  	int ret = 0, pdev_idx, i, j;
5695  	struct ath12k *ar;
5696  
5697  	reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
5698  	if (!reg_info) {
5699  		ret = -ENOMEM;
5700  		goto fallback;
5701  	}
5702  
5703  	ret = ath12k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
5704  
5705  	if (ret) {
5706  		ath12k_warn(ab, "failed to extract regulatory info from received event\n");
5707  		goto fallback;
5708  	}
5709  
5710  	if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
5711  		/* In case of failure to set the requested country,
5712  		 * fw retains the current regd. Log a warning and
5713  		 * return from here.
5714  		 */
5715  		ath12k_warn(ab, "Failed to set the requested Country regulatory setting\n");
5716  		goto mem_free;
5717  	}
5718  
5719  	pdev_idx = reg_info->phy_id;
5720  
5721  	if (pdev_idx >= ab->num_radios) {
5722  		/* Process the event for phy0 only if single_pdev_only
5723  		 * is true. If pdev_idx is valid but not 0, discard the
5724  		 * event. Otherwise, it goes to fallback.
5725  		 */
5726  		if (ab->hw_params->single_pdev_only &&
5727  		    pdev_idx < ab->hw_params->num_rxdma_per_pdev)
5728  			goto mem_free;
5729  		else
5730  			goto fallback;
5731  	}
5732  
5733  	/* Avoid multiple overwrites to default regd, during core
5734  	 * stop-start after mac registration.
5735  	 */
5736  	if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
5737  	    !memcmp(ab->default_regd[pdev_idx]->alpha2,
5738  		    reg_info->alpha2, 2))
5739  		goto mem_free;
5740  
5741  	/* Intersect new rules with default regd if a new country setting was
5742  	 * requested, i.e. a default regd was already set during initialization
5743  	 * and the regd coming from this event has a valid country info.
5744  	 */
5745  	if (ab->default_regd[pdev_idx] &&
5746  	    !ath12k_reg_is_world_alpha((char *)
5747  		ab->default_regd[pdev_idx]->alpha2) &&
5748  	    !ath12k_reg_is_world_alpha((char *)reg_info->alpha2))
5749  		intersect = true;
5750  
5751  	regd = ath12k_reg_build_regd(ab, reg_info, intersect);
5752  	if (!regd) {
5753  		ath12k_warn(ab, "failed to build regd from reg_info\n");
5754  		goto fallback;
5755  	}
5756  
5757  	spin_lock(&ab->base_lock);
5758  	if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
5759  		/* Once mac is registered, ar is valid and all CC events from
5760  		 * fw are currently assumed to result from user
5761  		 * requests.
5762  		 * Free the previously built regd before assigning the newly
5763  		 * generated regd to ar. NULL pointer handling is
5764  		 * taken care of by kfree itself.
5765  		 */
5766  		ar = ab->pdevs[pdev_idx].ar;
5767  		kfree(ab->new_regd[pdev_idx]);
5768  		ab->new_regd[pdev_idx] = regd;
5769  		queue_work(ab->workqueue, &ar->regd_update_work);
5770  	} else {
5771  		/* Multiple events for the same *ar are not expected. But we
5772  		 * can still clear any previously stored default_regd if we
5773  		 * are receiving this event for the same radio by mistake.
5774  		 * NULL pointer handling is taken care of by kfree itself.
5775  		 */
5776  		kfree(ab->default_regd[pdev_idx]);
5777  		/* This regd would be applied during mac registration */
5778  		ab->default_regd[pdev_idx] = regd;
5779  	}
5780  	ab->dfs_region = reg_info->dfs_region;
5781  	spin_unlock(&ab->base_lock);
5782  
5783  	goto mem_free;
5784  
5785  fallback:
5786  	/* Fall back to the older regd (by sending the previous country
5787  	 * setting again) if fw has succeeded and we failed to process here.
5788  	 * The regdomain should be uniform across driver and fw. Since the
5789  	 * FW has processed the command and sent a success status, we expect
5790  	 * this function to succeed as well. If it doesn't, CTRY needs to be
5791  	 * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
5792  	 */
5793  	/* TODO: This is rare, but still should also be handled */
5794  	WARN_ON(1);
5795  mem_free:
5796  	if (reg_info) {
5797  		kfree(reg_info->reg_rules_2g_ptr);
5798  		kfree(reg_info->reg_rules_5g_ptr);
5799  		if (reg_info->is_ext_reg_event) {
5800  			for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++)
5801  				kfree(reg_info->reg_rules_6g_ap_ptr[i]);
5802  
5803  			for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++)
5804  				for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++)
5805  					kfree(reg_info->reg_rules_6g_client_ptr[j][i]);
5806  		}
5807  		kfree(reg_info);
5808  	}
5809  	return ret;
5810  }
5811  
5812  static int ath12k_wmi_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
5813  				const void *ptr, void *data)
5814  {
5815  	struct ath12k_wmi_rdy_parse *rdy_parse = data;
5816  	struct wmi_ready_event fixed_param;
5817  	struct ath12k_wmi_mac_addr_params *addr_list;
5818  	struct ath12k_pdev *pdev;
5819  	u32 num_mac_addr;
5820  	int i;
5821  
5822  	switch (tag) {
5823  	case WMI_TAG_READY_EVENT:
5824  		memset(&fixed_param, 0, sizeof(fixed_param));
5825  		memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
5826  		       min_t(u16, sizeof(fixed_param), len));
5827  		ab->wlan_init_status = le32_to_cpu(fixed_param.ready_event_min.status);
5828  		rdy_parse->num_extra_mac_addr =
5829  			le32_to_cpu(fixed_param.ready_event_min.num_extra_mac_addr);
5830  
5831  		ether_addr_copy(ab->mac_addr,
5832  				fixed_param.ready_event_min.mac_addr.addr);
5833  		ab->pktlog_defs_checksum = le32_to_cpu(fixed_param.pktlog_defs_checksum);
5834  		ab->wmi_ready = true;
5835  		break;
5836  	case WMI_TAG_ARRAY_FIXED_STRUCT:
5837  		addr_list = (struct ath12k_wmi_mac_addr_params *)ptr;
5838  		num_mac_addr = rdy_parse->num_extra_mac_addr;
5839  
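      		/* Extra MAC addresses only apply to multi-radio devices,
      		 * and only when firmware supplied one address per radio.
      		 */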
5840  		if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios))
5841  			break;
5842  
5843  		for (i = 0; i < ab->num_radios; i++) {
5844  			pdev = &ab->pdevs[i];
5845  			ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
5846  		}
5847  		ab->pdevs_macaddr_valid = true;
5848  		break;
5849  	default:
5850  		break;
5851  	}
5852  
5853  	return 0;
5854  }
5855  
5856  static int ath12k_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
5857  {
5858  	struct ath12k_wmi_rdy_parse rdy_parse = { };
5859  	int ret;
5860  
5861  	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
5862  				  ath12k_wmi_rdy_parse, &rdy_parse);
5863  	if (ret) {
5864  		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
5865  		return ret;
5866  	}
5867  
5868  	complete(&ab->wmi_ab.unified_ready);
5869  	return 0;
5870  }
5871  
5872  static void ath12k_peer_delete_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
5873  {
5874  	struct wmi_peer_delete_resp_event peer_del_resp;
5875  	struct ath12k *ar;
5876  
5877  	if (ath12k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) {
5878  		ath12k_warn(ab, "failed to extract peer delete resp");
5879  		return;
5880  	}
5881  
5882  	rcu_read_lock();
5883  	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(peer_del_resp.vdev_id));
5884  	if (!ar) {
5885  		ath12k_warn(ab, "invalid vdev id in peer delete resp ev %d",
5886  			    peer_del_resp.vdev_id);
5887  		rcu_read_unlock();
5888  		return;
5889  	}
5890  
5891  	complete(&ar->peer_delete_done);
5892  	rcu_read_unlock();
5893  	ath12k_dbg(ab, ATH12K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n",
5894  		   peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr);
5895  }
5896  
5897  static void ath12k_vdev_delete_resp_event(struct ath12k_base *ab,
5898  					  struct sk_buff *skb)
5899  {
5900  	struct ath12k *ar;
5901  	u32 vdev_id = 0;
5902  
5903  	if (ath12k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) {
5904  		ath12k_warn(ab, "failed to extract vdev delete resp");
5905  		return;
5906  	}
5907  
5908  	rcu_read_lock();
5909  	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
5910  	if (!ar) {
5911  		ath12k_warn(ab, "invalid vdev id in vdev delete resp ev %d",
5912  			    vdev_id);
5913  		rcu_read_unlock();
5914  		return;
5915  	}
5916  
5917  	complete(&ar->vdev_delete_done);
5918  
5919  	rcu_read_unlock();
5920  
5921  	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev delete resp for vdev id %d\n",
5922  		   vdev_id);
5923  }
5924  
5925  static const char *ath12k_wmi_vdev_resp_print(u32 vdev_resp_status)
5926  {
5927  	switch (vdev_resp_status) {
5928  	case WMI_VDEV_START_RESPONSE_INVALID_VDEVID:
5929  		return "invalid vdev id";
5930  	case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED:
5931  		return "not supported";
5932  	case WMI_VDEV_START_RESPONSE_DFS_VIOLATION:
5933  		return "dfs violation";
5934  	case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
5935  		return "invalid regdomain";
5936  	default:
5937  		return "unknown";
5938  	}
5939  }
5940  
5941  static void ath12k_vdev_start_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
5942  {
5943  	struct wmi_vdev_start_resp_event vdev_start_resp;
5944  	struct ath12k *ar;
5945  	u32 status;
5946  
5947  	if (ath12k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) {
5948  		ath12k_warn(ab, "failed to extract vdev start resp");
5949  		return;
5950  	}
5951  
5952  	rcu_read_lock();
5953  	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(vdev_start_resp.vdev_id));
5954  	if (!ar) {
5955  		ath12k_warn(ab, "invalid vdev id in vdev start resp ev %d",
5956  			    vdev_start_resp.vdev_id);
5957  		rcu_read_unlock();
5958  		return;
5959  	}
5960  
5961  	ar->last_wmi_vdev_start_status = 0;
5962  
5963  	status = le32_to_cpu(vdev_start_resp.status);
5964  
5965  	if (WARN_ON_ONCE(status)) {
5966  		ath12k_warn(ab, "vdev start resp error status %d (%s)\n",
5967  			    status, ath12k_wmi_vdev_resp_print(status));
5968  		ar->last_wmi_vdev_start_status = status;
5969  	}
5970  
5971  	complete(&ar->vdev_setup_done);
5972  
5973  	rcu_read_unlock();
5974  
5975  	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev start resp for vdev id %d",
5976  		   vdev_start_resp.vdev_id);
5977  }
5978  
5979  static void ath12k_bcn_tx_status_event(struct ath12k_base *ab, struct sk_buff *skb)
5980  {
5981  	u32 vdev_id, tx_status;
5982  
5983  	if (ath12k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) {
5984  		ath12k_warn(ab, "failed to extract bcn tx status");
5985  		return;
5986  	}
5987  }
5988  
5989  static void ath12k_vdev_stopped_event(struct ath12k_base *ab, struct sk_buff *skb)
5990  {
5991  	struct ath12k *ar;
5992  	u32 vdev_id = 0;
5993  
5994  	if (ath12k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) {
5995  		ath12k_warn(ab, "failed to extract vdev stopped event");
5996  		return;
5997  	}
5998  
5999  	rcu_read_lock();
6000  	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
6001  	if (!ar) {
6002  		ath12k_warn(ab, "invalid vdev id in vdev stopped ev %d",
6003  			    vdev_id);
6004  		rcu_read_unlock();
6005  		return;
6006  	}
6007  
6008  	complete(&ar->vdev_setup_done);
6009  
6010  	rcu_read_unlock();
6011  
6012  	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id);
6013  }
6014  
6015  static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
6016  {
6017  	struct ath12k_wmi_mgmt_rx_arg rx_ev = {0};
6018  	struct ath12k *ar;
6019  	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
6020  	struct ieee80211_hdr *hdr;
6021  	u16 fc;
6022  	struct ieee80211_supported_band *sband;
6023  
6024  	if (ath12k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) {
6025  		ath12k_warn(ab, "failed to extract mgmt rx event");
6026  		dev_kfree_skb(skb);
6027  		return;
6028  	}
6029  
6030  	memset(status, 0, sizeof(*status));
6031  
6032  	ath12k_dbg(ab, ATH12K_DBG_MGMT, "mgmt rx event status %08x\n",
6033  		   rx_ev.status);
6034  
6035  	rcu_read_lock();
6036  	ar = ath12k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id);
6037  
6038  	if (!ar) {
6039  		ath12k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n",
6040  			    rx_ev.pdev_id);
6041  		dev_kfree_skb(skb);
6042  		goto exit;
6043  	}
6044  
6045  	if ((test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) ||
6046  	    (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
6047  			     WMI_RX_STATUS_ERR_KEY_CACHE_MISS |
6048  			     WMI_RX_STATUS_ERR_CRC))) {
6049  		dev_kfree_skb(skb);
6050  		goto exit;
6051  	}
6052  
6053  	if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
6054  		status->flag |= RX_FLAG_MMIC_ERROR;
6055  
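	/* Derive the band: 6 GHz frames are identified by frequency, since
	 * 6 GHz channel numbers overlap the 2/5 GHz numbering, while 2 GHz
	 * and 5 GHz frames are identified by channel number.
	 */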
6056  	if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ &&
6057  	    rx_ev.chan_freq <= ATH12K_MAX_6G_FREQ) {
6058  		status->band = NL80211_BAND_6GHZ;
6059  		status->freq = rx_ev.chan_freq;
6060  	} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
6061  		status->band = NL80211_BAND_2GHZ;
6062  	} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5G_CHAN) {
6063  		status->band = NL80211_BAND_5GHZ;
6064  	} else {
6065  		/* Shouldn't happen unless list of advertised channels to
6066  		 * mac80211 has been changed.
6067  		 */
6068  		WARN_ON_ONCE(1);
6069  		dev_kfree_skb(skb);
6070  		goto exit;
6071  	}
6072  
6073  	if (rx_ev.phy_mode == MODE_11B &&
6074  	    (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ))
6075  		ath12k_dbg(ab, ATH12K_DBG_WMI,
6076  			   "wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band);
6077  
6078  	sband = &ar->mac.sbands[status->band];
6079  
6080  	if (status->band != NL80211_BAND_6GHZ)
6081  		status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
6082  							      status->band);
6083  
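	/* Firmware reports SNR; adding the default noise floor yields an
	 * absolute signal level. The divide by 100 converts the reported
	 * rate to mac80211's 100 kbps bitrate units for the index lookup.
	 */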
6084  	status->signal = rx_ev.snr + ATH12K_DEFAULT_NOISE_FLOOR;
6085  	status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
6086  
6087  	hdr = (struct ieee80211_hdr *)skb->data;
6088  	fc = le16_to_cpu(hdr->frame_control);
6089  
6090  	/* Firmware is guaranteed to report all essential management frames via
6091  	 * WMI while it can deliver some extra via HTT. Since there can be
6092  	 * duplicates, split the reporting w.r.t. monitor/sniffing.
6093  	 */
6094  	status->flag |= RX_FLAG_SKIP_MONITOR;
6095  
6096  	/* In case of PMF, FW delivers decrypted frames with Protected Bit set
6097  	 * including group privacy action frames.
6098  	 */
6099  	if (ieee80211_has_protected(hdr->frame_control)) {
6100  		status->flag |= RX_FLAG_DECRYPTED;
6101  
6102  		if (!ieee80211_is_robust_mgmt_frame(skb)) {
6103  			status->flag |= RX_FLAG_IV_STRIPPED |
6104  					RX_FLAG_MMIC_STRIPPED;
6105  			hdr->frame_control = __cpu_to_le16(fc &
6106  					     ~IEEE80211_FCTL_PROTECTED);
6107  		}
6108  	}
6109  
6110  	if (ieee80211_is_beacon(hdr->frame_control))
6111  		ath12k_mac_handle_beacon(ar, skb);
6112  
6113  	ath12k_dbg(ab, ATH12K_DBG_MGMT,
6114  		   "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
6115  		   skb, skb->len,
6116  		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
6117  
6118  	ath12k_dbg(ab, ATH12K_DBG_MGMT,
6119  		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
6120  		   status->freq, status->band, status->signal,
6121  		   status->rate_idx);
6122  
6123  	ieee80211_rx_ni(ath12k_ar_to_hw(ar), skb);
6124  
6125  exit:
6126  	rcu_read_unlock();
6127  }
6128  
6129  static void ath12k_mgmt_tx_compl_event(struct ath12k_base *ab, struct sk_buff *skb)
6130  {
6131  	struct wmi_mgmt_tx_compl_event tx_compl_param = {0};
6132  	struct ath12k *ar;
6133  
6134  	if (ath12k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) {
6135  		ath12k_warn(ab, "failed to extract mgmt tx compl event");
6136  		return;
6137  	}
6138  
6139  	rcu_read_lock();
6140  	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(tx_compl_param.pdev_id));
6141  	if (!ar) {
6142  		ath12k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n",
6143  			    tx_compl_param.pdev_id);
6144  		goto exit;
6145  	}
6146  
6147  	wmi_process_mgmt_tx_comp(ar, le32_to_cpu(tx_compl_param.desc_id),
6148  				 le32_to_cpu(tx_compl_param.status));
6149  
6150  	ath12k_dbg(ab, ATH12K_DBG_MGMT,
6151  		   "mgmt tx compl ev pdev_id %d, desc_id %d, status %d",
6152  		   tx_compl_param.pdev_id, tx_compl_param.desc_id,
6153  		   tx_compl_param.status);
6154  
6155  exit:
6156  	rcu_read_unlock();
6157  }
6158  
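/* Find the radio whose scan state machine is in @state for @vdev_id.
 * Called under rcu_read_lock(); ar->data_lock is taken so that the scan
 * state and vdev id are sampled consistently.
 */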
6159  static struct ath12k *ath12k_get_ar_on_scan_state(struct ath12k_base *ab,
6160  						  u32 vdev_id,
6161  						  enum ath12k_scan_state state)
6162  {
6163  	int i;
6164  	struct ath12k_pdev *pdev;
6165  	struct ath12k *ar;
6166  
6167  	for (i = 0; i < ab->num_radios; i++) {
6168  		pdev = rcu_dereference(ab->pdevs_active[i]);
6169  		if (pdev && pdev->ar) {
6170  			ar = pdev->ar;
6171  
6172  			spin_lock_bh(&ar->data_lock);
6173  			if (ar->scan.state == state &&
6174  			    ar->scan.vdev_id == vdev_id) {
6175  				spin_unlock_bh(&ar->data_lock);
6176  				return ar;
6177  			}
6178  			spin_unlock_bh(&ar->data_lock);
6179  		}
6180  	}
6181  	return NULL;
6182  }
6183  
6184  static void ath12k_scan_event(struct ath12k_base *ab, struct sk_buff *skb)
6185  {
6186  	struct ath12k *ar;
6187  	struct wmi_scan_event scan_ev = {0};
6188  
6189  	if (ath12k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
6190  		ath12k_warn(ab, "failed to extract scan event");
6191  		return;
6192  	}
6193  
6194  	rcu_read_lock();
6195  
6196  	/* If the scan was cancelled, e.g. during interface teardown,
6197  	 * the interface will not be found among the active interfaces.
6198  	 * In such scenarios, iterate over the active pdevs instead and
6199  	 * look for the 'ar' whose scan state is ABORTING and whose
6200  	 * aborting scan's vdev id matches this event.
6201  	 */
6202  	if (le32_to_cpu(scan_ev.event_type) == WMI_SCAN_EVENT_COMPLETED &&
6203  	    le32_to_cpu(scan_ev.reason) == WMI_SCAN_REASON_CANCELLED) {
6204  		ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id),
6205  						 ATH12K_SCAN_ABORTING);
6206  		if (!ar)
6207  			ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id),
6208  							 ATH12K_SCAN_RUNNING);
6209  	} else {
6210  		ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(scan_ev.vdev_id));
6211  	}
6212  
6213  	if (!ar) {
6214  		ath12k_warn(ab, "Received scan event for unknown vdev");
6215  		rcu_read_unlock();
6216  		return;
6217  	}
6218  
6219  	spin_lock_bh(&ar->data_lock);
6220  
6221  	ath12k_dbg(ab, ATH12K_DBG_WMI,
6222  		   "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
6223  		   ath12k_wmi_event_scan_type_str(le32_to_cpu(scan_ev.event_type),
6224  						  le32_to_cpu(scan_ev.reason)),
6225  		   le32_to_cpu(scan_ev.event_type),
6226  		   le32_to_cpu(scan_ev.reason),
6227  		   le32_to_cpu(scan_ev.channel_freq),
6228  		   le32_to_cpu(scan_ev.scan_req_id),
6229  		   le32_to_cpu(scan_ev.scan_id),
6230  		   le32_to_cpu(scan_ev.vdev_id),
6231  		   ath12k_scan_state_str(ar->scan.state), ar->scan.state);
6232  
6233  	switch (le32_to_cpu(scan_ev.event_type)) {
6234  	case WMI_SCAN_EVENT_STARTED:
6235  		ath12k_wmi_event_scan_started(ar);
6236  		break;
6237  	case WMI_SCAN_EVENT_COMPLETED:
6238  		ath12k_wmi_event_scan_completed(ar);
6239  		break;
6240  	case WMI_SCAN_EVENT_BSS_CHANNEL:
6241  		ath12k_wmi_event_scan_bss_chan(ar);
6242  		break;
6243  	case WMI_SCAN_EVENT_FOREIGN_CHAN:
6244  		ath12k_wmi_event_scan_foreign_chan(ar, le32_to_cpu(scan_ev.channel_freq));
6245  		break;
6246  	case WMI_SCAN_EVENT_START_FAILED:
6247  		ath12k_warn(ab, "received scan start failure event\n");
6248  		ath12k_wmi_event_scan_start_failed(ar);
6249  		break;
6250  	case WMI_SCAN_EVENT_DEQUEUED:
6251  		__ath12k_mac_scan_finish(ar);
6252  		break;
6253  	case WMI_SCAN_EVENT_PREEMPTED:
6254  	case WMI_SCAN_EVENT_RESTARTED:
6255  	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
6256  	default:
6257  		break;
6258  	}
6259  
6260  	spin_unlock_bh(&ar->data_lock);
6261  
6262  	rcu_read_unlock();
6263  }
6264  
6265  static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff *skb)
6266  {
6267  	struct wmi_peer_sta_kickout_arg arg = {};
6268  	struct ieee80211_sta *sta;
6269  	struct ath12k_peer *peer;
6270  	struct ath12k *ar;
6271  
6272  	if (ath12k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
6273  		ath12k_warn(ab, "failed to extract peer sta kickout event");
6274  		return;
6275  	}
6276  
6277  	rcu_read_lock();
6278  
6279  	spin_lock_bh(&ab->base_lock);
6280  
6281  	peer = ath12k_peer_find_by_addr(ab, arg.mac_addr);
6282  
6283  	if (!peer) {
6284  		ath12k_warn(ab, "peer not found %pM\n",
6285  			    arg.mac_addr);
6286  		goto exit;
6287  	}
6288  
6289  	ar = ath12k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
6290  	if (!ar) {
6291  		ath12k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
6292  			    peer->vdev_id);
6293  		goto exit;
6294  	}
6295  
6296  	sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
6297  					   arg.mac_addr, NULL);
6298  	if (!sta) {
6299  		ath12k_warn(ab, "Spurious quick kickout for STA %pM\n",
6300  			    arg.mac_addr);
6301  		goto exit;
6302  	}
6303  
6304  	ath12k_dbg(ab, ATH12K_DBG_WMI, "peer sta kickout event %pM",
6305  		   arg.mac_addr);
6306  
6307  	ieee80211_report_low_ack(sta, 10);
6308  
6309  exit:
6310  	spin_unlock_bh(&ab->base_lock);
6311  	rcu_read_unlock();
6312  }
6313  
6314  static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
6315  {
6316  	struct wmi_roam_event roam_ev = {};
6317  	struct ath12k *ar;
6318  	u32 vdev_id;
6319  	u8 roam_reason;
6320  
6321  	if (ath12k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
6322  		ath12k_warn(ab, "failed to extract roam event");
6323  		return;
6324  	}
6325  
6326  	vdev_id = le32_to_cpu(roam_ev.vdev_id);
6327  	roam_reason = u32_get_bits(le32_to_cpu(roam_ev.reason),
6328  				   WMI_ROAM_REASON_MASK);
6329  
6330  	ath12k_dbg(ab, ATH12K_DBG_WMI,
6331  		   "wmi roam event vdev %u reason %d rssi %d\n",
6332  		   vdev_id, roam_reason, roam_ev.rssi);
6333  
6334  	rcu_read_lock();
6335  	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
6336  	if (!ar) {
6337  		ath12k_warn(ab, "invalid vdev id in roam ev %d", vdev_id);
6338  		rcu_read_unlock();
6339  		return;
6340  	}
6341  
6342  	if (roam_reason >= WMI_ROAM_REASON_MAX)
6343  		ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
6344  			    roam_reason, vdev_id);
6345  
6346  	switch (roam_reason) {
6347  	case WMI_ROAM_REASON_BEACON_MISS:
6348  		ath12k_mac_handle_beacon_miss(ar, vdev_id);
6349  		break;
6350  	case WMI_ROAM_REASON_BETTER_AP:
6351  	case WMI_ROAM_REASON_LOW_RSSI:
6352  	case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
6353  	case WMI_ROAM_REASON_HO_FAILED:
6354  		ath12k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
6355  			    roam_reason, vdev_id);
6356  		break;
6357  	}
6358  
6359  	rcu_read_unlock();
6360  }
6361  
6362  static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
6363  {
6364  	struct wmi_chan_info_event ch_info_ev = {0};
6365  	struct ath12k *ar;
6366  	struct survey_info *survey;
6367  	int idx;
6368  	/* HW channel counters frequency value in hertz */
6369  	u32 cc_freq_hz = ab->cc_freq_hz;
6370  
6371  	if (ath12k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) {
6372  		ath12k_warn(ab, "failed to extract chan info event");
6373  		return;
6374  	}
6375  
6376  	ath12k_dbg(ab, ATH12K_DBG_WMI,
6377  		   "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n",
6378  		   ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
6379  		   ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
6380  		   ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
6381  		   ch_info_ev.mac_clk_mhz);
6382  
6383  	if (le32_to_cpu(ch_info_ev.cmd_flags) == WMI_CHAN_INFO_END_RESP) {
6384  		ath12k_dbg(ab, ATH12K_DBG_WMI, "chan info report completed\n");
6385  		return;
6386  	}
6387  
6388  	rcu_read_lock();
6389  	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(ch_info_ev.vdev_id));
6390  	if (!ar) {
6391  		ath12k_warn(ab, "invalid vdev id in chan info ev %d",
6392  			    ch_info_ev.vdev_id);
6393  		rcu_read_unlock();
6394  		return;
6395  	}
6396  	spin_lock_bh(&ar->data_lock);
6397  
6398  	switch (ar->scan.state) {
6399  	case ATH12K_SCAN_IDLE:
6400  	case ATH12K_SCAN_STARTING:
6401  		ath12k_warn(ab, "received chan info event without a scan request, ignoring\n");
6402  		goto exit;
6403  	case ATH12K_SCAN_RUNNING:
6404  	case ATH12K_SCAN_ABORTING:
6405  		break;
6406  	}
6407  
6408  	idx = freq_to_idx(ar, le32_to_cpu(ch_info_ev.freq));
6409  	if (idx >= ARRAY_SIZE(ar->survey)) {
6410  		ath12k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n",
6411  			    ch_info_ev.freq, idx);
6412  		goto exit;
6413  	}
6414  
6415  	/* If FW provides the MAC clock frequency in MHz, override the
6416  	 * initialized HW channel counters frequency value.
6417  	 */
6418  	if (ch_info_ev.mac_clk_mhz)
6419  		cc_freq_hz = (le32_to_cpu(ch_info_ev.mac_clk_mhz) * 1000);
6420  
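	/* The start response opens a fresh survey record; the cycle counters
	 * are divided by the counter frequency to convert them to time.
	 */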
6421  	if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) {
6422  		survey = &ar->survey[idx];
6423  		memset(survey, 0, sizeof(*survey));
6424  		survey->noise = le32_to_cpu(ch_info_ev.noise_floor);
6425  		survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
6426  				 SURVEY_INFO_TIME_BUSY;
6427  		survey->time = div_u64(le32_to_cpu(ch_info_ev.cycle_count), cc_freq_hz);
6428  		survey->time_busy = div_u64(le32_to_cpu(ch_info_ev.rx_clear_count),
6429  					    cc_freq_hz);
6430  	}
6431  exit:
6432  	spin_unlock_bh(&ar->data_lock);
6433  	rcu_read_unlock();
6434  }
6435  
6436  static void
6437  ath12k_pdev_bss_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
6438  {
6439  	struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {};
6440  	struct survey_info *survey;
6441  	struct ath12k *ar;
6442  	u32 cc_freq_hz = ab->cc_freq_hz;
6443  	u64 busy, total, tx, rx, rx_bss;
6444  	int idx;
6445  
6446  	if (ath12k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) {
6447  		ath12k_warn(ab, "failed to extract pdev bss chan info event");
6448  		return;
6449  	}
6450  
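	/* Each 64-bit cycle counter arrives split into two 32-bit halves;
	 * reassemble them before converting to survey times below.
	 */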
6451  	busy = (u64)(le32_to_cpu(bss_ch_info_ev.rx_clear_count_high)) << 32 |
6452  		le32_to_cpu(bss_ch_info_ev.rx_clear_count_low);
6453  
6454  	total = (u64)(le32_to_cpu(bss_ch_info_ev.cycle_count_high)) << 32 |
6455  		le32_to_cpu(bss_ch_info_ev.cycle_count_low);
6456  
6457  	tx = (u64)(le32_to_cpu(bss_ch_info_ev.tx_cycle_count_high)) << 32 |
6458  		le32_to_cpu(bss_ch_info_ev.tx_cycle_count_low);
6459  
6460  	rx = (u64)(le32_to_cpu(bss_ch_info_ev.rx_cycle_count_high)) << 32 |
6461  		le32_to_cpu(bss_ch_info_ev.rx_cycle_count_low);
6462  
6463  	rx_bss = (u64)(le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_high)) << 32 |
6464  		le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_low);
6465  
6466  	ath12k_dbg(ab, ATH12K_DBG_WMI,
6467  		   "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
6468  		   bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq,
6469  		   bss_ch_info_ev.noise_floor, busy, total,
6470  		   tx, rx, rx_bss);
6471  
6472  	rcu_read_lock();
6473  	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(bss_ch_info_ev.pdev_id));
6474  
6475  	if (!ar) {
6476  		ath12k_warn(ab, "invalid pdev id %d in bss_chan_info event\n",
6477  			    bss_ch_info_ev.pdev_id);
6478  		rcu_read_unlock();
6479  		return;
6480  	}
6481  
6482  	spin_lock_bh(&ar->data_lock);
6483  	idx = freq_to_idx(ar, le32_to_cpu(bss_ch_info_ev.freq));
6484  	if (idx >= ARRAY_SIZE(ar->survey)) {
6485  		ath12k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
6486  			    bss_ch_info_ev.freq, idx);
6487  		goto exit;
6488  	}
6489  
6490  	survey = &ar->survey[idx];
6491  
6492  	survey->noise     = le32_to_cpu(bss_ch_info_ev.noise_floor);
6493  	survey->time      = div_u64(total, cc_freq_hz);
6494  	survey->time_busy = div_u64(busy, cc_freq_hz);
6495  	survey->time_rx   = div_u64(rx_bss, cc_freq_hz);
6496  	survey->time_tx   = div_u64(tx, cc_freq_hz);
6497  	survey->filled   |= (SURVEY_INFO_NOISE_DBM |
6498  			     SURVEY_INFO_TIME |
6499  			     SURVEY_INFO_TIME_BUSY |
6500  			     SURVEY_INFO_TIME_RX |
6501  			     SURVEY_INFO_TIME_TX);
6502  exit:
6503  	spin_unlock_bh(&ar->data_lock);
6504  	complete(&ar->bss_survey_done);
6505  
6506  	rcu_read_unlock();
6507  }
6508  
6509  static void ath12k_vdev_install_key_compl_event(struct ath12k_base *ab,
6510  						struct sk_buff *skb)
6511  {
6512  	struct wmi_vdev_install_key_complete_arg install_key_compl = {0};
6513  	struct ath12k *ar;
6514  
6515  	if (ath12k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) {
6516  		ath12k_warn(ab, "failed to extract install key compl event");
6517  		return;
6518  	}
6519  
6520  	ath12k_dbg(ab, ATH12K_DBG_WMI,
6521  		   "vdev install key ev idx %d flags %08x macaddr %pM status %d\n",
6522  		   install_key_compl.key_idx, install_key_compl.key_flags,
6523  		   install_key_compl.macaddr, install_key_compl.status);
6524  
6525  	rcu_read_lock();
6526  	ar = ath12k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id);
6527  	if (!ar) {
6528  		ath12k_warn(ab, "invalid vdev id in install key compl ev %d",
6529  			    install_key_compl.vdev_id);
6530  		rcu_read_unlock();
6531  		return;
6532  	}
6533  
6534  	ar->install_key_status = 0;
6535  
6536  	if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) {
6537  		ath12k_warn(ab, "install key failed for %pM status %d\n",
6538  			    install_key_compl.macaddr, install_key_compl.status);
6539  		ar->install_key_status = install_key_compl.status;
6540  	}
6541  
6542  	complete(&ar->install_key_done);
6543  	rcu_read_unlock();
6544  }
6545  
6546  static int ath12k_wmi_tlv_services_parser(struct ath12k_base *ab,
6547  					  u16 tag, u16 len,
6548  					  const void *ptr,
6549  					  void *data)
6550  {
6551  	const struct wmi_service_available_event *ev;
6552  	u32 *wmi_ext2_service_bitmap;
6553  	int i, j;
6554  	u16 expected_len;
6555  
6556  	expected_len = WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(u32);
6557  	if (len < expected_len) {
6558  		ath12k_warn(ab, "invalid length %d for the WMI services available tag 0x%x\n",
6559  			    len, tag);
6560  		return -EINVAL;
6561  	}
6562  
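	/* Each 32-bit word carries WMI_AVAIL_SERVICE_BITS_IN_SIZE32 service
	 * flags; bit (j % 32) of word i maps onto global service id j, with
	 * the id space continuing from WMI_MAX_SERVICE (ext bitmap) or
	 * WMI_MAX_EXT_SERVICE (ext2 bitmap).
	 */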
6563  	switch (tag) {
6564  	case WMI_TAG_SERVICE_AVAILABLE_EVENT:
6565  		ev = (struct wmi_service_available_event *)ptr;
6566  		for (i = 0, j = WMI_MAX_SERVICE;
6567  		     i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
6568  		     i++) {
6569  			do {
6570  				if (le32_to_cpu(ev->wmi_service_segment_bitmap[i]) &
6571  				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
6572  					set_bit(j, ab->wmi_ab.svc_map);
6573  			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
6574  		}
6575  
6576  		ath12k_dbg(ab, ATH12K_DBG_WMI,
6577  			   "wmi_ext_service_bitmap 0x%x 0x%x 0x%x 0x%x",
6578  			   ev->wmi_service_segment_bitmap[0],
6579  			   ev->wmi_service_segment_bitmap[1],
6580  			   ev->wmi_service_segment_bitmap[2],
6581  			   ev->wmi_service_segment_bitmap[3]);
6582  		break;
6583  	case WMI_TAG_ARRAY_UINT32:
6584  		wmi_ext2_service_bitmap = (u32 *)ptr;
6585  		for (i = 0, j = WMI_MAX_EXT_SERVICE;
6586  		     i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT2_SERVICE;
6587  		     i++) {
6588  			do {
6589  				if (wmi_ext2_service_bitmap[i] &
6590  				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
6591  					set_bit(j, ab->wmi_ab.svc_map);
6592  			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
6593  		}
6594  
6595  		ath12k_dbg(ab, ATH12K_DBG_WMI,
6596  			   "wmi_ext2_service_bitmap 0x%04x 0x%04x 0x%04x 0x%04x",
6597  			   wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1],
6598  			   wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]);
6599  		break;
6600  	}
6601  	return 0;
6602  }
6603  
6604  static int ath12k_service_available_event(struct ath12k_base *ab, struct sk_buff *skb)
6605  {
6606  	int ret;
6607  
6608  	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
6609  				  ath12k_wmi_tlv_services_parser,
6610  				  NULL);
6611  	return ret;
6612  }
6613  
6614  static void ath12k_peer_assoc_conf_event(struct ath12k_base *ab, struct sk_buff *skb)
6615  {
6616  	struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
6617  	struct ath12k *ar;
6618  
6619  	if (ath12k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) {
6620  		ath12k_warn(ab, "failed to extract peer assoc conf event");
6621  		return;
6622  	}
6623  
6624  	ath12k_dbg(ab, ATH12K_DBG_WMI,
6625  		   "peer assoc conf ev vdev id %d macaddr %pM\n",
6626  		   peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr);
6627  
6628  	rcu_read_lock();
6629  	ar = ath12k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id);
6630  
6631  	if (!ar) {
6632  		ath12k_warn(ab, "invalid vdev id in peer assoc conf ev %d",
6633  			    peer_assoc_conf.vdev_id);
6634  		rcu_read_unlock();
6635  		return;
6636  	}
6637  
6638  	complete(&ar->peer_assoc_done);
6639  	rcu_read_unlock();
6640  }
6641  
6642  static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb)
6643  {
6644  }
6645  
6646  /* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
6647   * is not part of BDF CTL(Conformance test limits) table entries.
6648   */
6649  static void ath12k_pdev_ctl_failsafe_check_event(struct ath12k_base *ab,
6650  						 struct sk_buff *skb)
6651  {
6652  	const void **tb;
6653  	const struct wmi_pdev_ctl_failsafe_chk_event *ev;
6654  	int ret;
6655  
6656  	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6657  	if (IS_ERR(tb)) {
6658  		ret = PTR_ERR(tb);
6659  		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6660  		return;
6661  	}
6662  
6663  	ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT];
6664  	if (!ev) {
6665  		ath12k_warn(ab, "failed to fetch pdev ctl failsafe check ev");
6666  		kfree(tb);
6667  		return;
6668  	}
6669  
6670  	ath12k_dbg(ab, ATH12K_DBG_WMI,
6671  		   "pdev ctl failsafe check ev status %d\n",
6672  		   ev->ctl_failsafe_status);
6673  
6674  	/* If ctl_failsafe_status is set to 1, FW caps the transmit power
6675  	 * at 10 dBm; otherwise the CTL power entry in the BDF is used.
6676  	 */
6677  	if (ev->ctl_failsafe_status != 0)
6678  		ath12k_warn(ab, "pdev ctl failsafe failure status %d",
6679  			    ev->ctl_failsafe_status);
6680  
6681  	kfree(tb);
6682  }
6683  
6684  static void
6685  ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
6686  					  const struct ath12k_wmi_pdev_csa_event *ev,
6687  					  const u32 *vdev_ids)
6688  {
6689  	int i;
6690  	struct ath12k_vif *arvif;
6691  
6692  	/* Finish CSA once the switch count reaches zero */
6693  	if (ev->current_switch_count)
6694  		return;
6695  
6696  	rcu_read_lock();
6697  	for (i = 0; i < le32_to_cpu(ev->num_vdevs); i++) {
6698  		arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);
6699  
6700  		if (!arvif) {
6701  			ath12k_warn(ab, "Recvd csa status for unknown vdev %d",
6702  				    vdev_ids[i]);
6703  			continue;
6704  		}
6705  
6706  		if (arvif->is_up && arvif->vif->bss_conf.csa_active)
6707  			ieee80211_csa_finish(arvif->vif, 0);
6708  	}
6709  	rcu_read_unlock();
6710  }
6711  
6712  static void
6713  ath12k_wmi_pdev_csa_switch_count_status_event(struct ath12k_base *ab,
6714  					      struct sk_buff *skb)
6715  {
6716  	const void **tb;
6717  	const struct ath12k_wmi_pdev_csa_event *ev;
6718  	const u32 *vdev_ids;
6719  	int ret;
6720  
6721  	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6722  	if (IS_ERR(tb)) {
6723  		ret = PTR_ERR(tb);
6724  		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6725  		return;
6726  	}
6727  
6728  	ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT];
6729  	vdev_ids = tb[WMI_TAG_ARRAY_UINT32];
6730  
6731  	if (!ev || !vdev_ids) {
6732  		ath12k_warn(ab, "failed to fetch pdev csa switch count ev");
6733  		kfree(tb);
6734  		return;
6735  	}
6736  
6737  	ath12k_dbg(ab, ATH12K_DBG_WMI,
6738  		   "pdev csa switch count %d for pdev %d, num_vdevs %d",
6739  		   ev->current_switch_count, ev->pdev_id,
6740  		   ev->num_vdevs);
6741  
6742  	ath12k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids);
6743  
6744  	kfree(tb);
6745  }
6746  
6747  static void
6748  ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff *skb)
6749  {
6750  	const void **tb;
6751  	const struct ath12k_wmi_pdev_radar_event *ev;
6752  	struct ath12k *ar;
6753  	int ret;
6754  
6755  	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6756  	if (IS_ERR(tb)) {
6757  		ret = PTR_ERR(tb);
6758  		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6759  		return;
6760  	}
6761  
6762  	ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT];
6763  
6764  	if (!ev) {
6765  		ath12k_warn(ab, "failed to fetch pdev dfs radar detected ev");
6766  		kfree(tb);
6767  		return;
6768  	}
6769  
6770  	ath12k_dbg(ab, ATH12K_DBG_WMI,
6771  		   "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
6772  		   ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width,
6773  		   ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
6774  		   ev->freq_offset, ev->sidx);
6775  
6776  	rcu_read_lock();
6777  
6778  	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id));
6779  
6780  	if (!ar) {
6781  		ath12k_warn(ab, "radar detected in invalid pdev %d\n",
6782  			    ev->pdev_id);
6783  		goto exit;
6784  	}
6785  
6786  	ath12k_dbg(ar->ab, ATH12K_DBG_REG, "DFS Radar Detected in pdev %d\n",
6787  		   ev->pdev_id);
6788  
6789  	if (ar->dfs_block_radar_events)
6790  		ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
6791  	else
6792  		ieee80211_radar_detected(ath12k_ar_to_hw(ar), NULL);
6793  
6794  exit:
6795  	rcu_read_unlock();
6796  
6797  	kfree(tb);
6798  }
6799  
6800  static void
6801  ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
6802  				  struct sk_buff *skb)
6803  {
6804  	struct ath12k *ar;
6805  	struct wmi_pdev_temperature_event ev = {0};
6806  
6807  	if (ath12k_pull_pdev_temp_ev(ab, skb, &ev) != 0) {
6808  		ath12k_warn(ab, "failed to extract pdev temperature event");
6809  		return;
6810  	}
6811  
6812  	ath12k_dbg(ab, ATH12K_DBG_WMI,
6813  		   "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id);
6814  
6815  	rcu_read_lock();
6816  
6817  	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev.pdev_id));
6818  	if (!ar) {
6819  		ath12k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
6820  		goto exit;
6821  	}
6822  
6823  exit:
6824  	rcu_read_unlock();
6825  }
6826  
6827  static void ath12k_fils_discovery_event(struct ath12k_base *ab,
6828  					struct sk_buff *skb)
6829  {
6830  	const void **tb;
6831  	const struct wmi_fils_discovery_event *ev;
6832  	int ret;
6833  
6834  	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6835  	if (IS_ERR(tb)) {
6836  		ret = PTR_ERR(tb);
6837  		ath12k_warn(ab,
6838  			    "failed to parse FILS discovery event tlv %d\n",
6839  			    ret);
6840  		return;
6841  	}
6842  
6843  	ev = tb[WMI_TAG_HOST_SWFDA_EVENT];
6844  	if (!ev) {
6845  		ath12k_warn(ab, "failed to fetch FILS discovery event\n");
6846  		kfree(tb);
6847  		return;
6848  	}
6849  
6850  	ath12k_warn(ab,
6851  		    "FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n",
6852  		    ev->vdev_id, ev->fils_tt, ev->tbtt);
6853  
6854  	kfree(tb);
6855  }
6856  
6857  static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab,
6858  					      struct sk_buff *skb)
6859  {
6860  	const void **tb;
6861  	const struct wmi_probe_resp_tx_status_event *ev;
6862  	int ret;
6863  
6864  	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6865  	if (IS_ERR(tb)) {
6866  		ret = PTR_ERR(tb);
6867  		ath12k_warn(ab,
6868  			    "failed to parse probe response transmission status event tlv: %d\n",
6869  			    ret);
6870  		return;
6871  	}
6872  
6873  	ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT];
6874  	if (!ev) {
6875  		ath12k_warn(ab,
6876  			    "failed to fetch probe response transmission status event");
6877  		kfree(tb);
6878  		return;
6879  	}
6880  
6881  	if (ev->tx_status)
6882  		ath12k_warn(ab,
6883  			    "Probe response transmission failed for vdev_id %u, status %u\n",
6884  			    ev->vdev_id, ev->tx_status);
6885  
6886  	kfree(tb);
6887  }
6888  
6889  static int ath12k_wmi_p2p_noa_event(struct ath12k_base *ab,
6890  				    struct sk_buff *skb)
6891  {
6892  	const void **tb;
6893  	const struct wmi_p2p_noa_event *ev;
6894  	const struct ath12k_wmi_p2p_noa_info *noa;
6895  	struct ath12k *ar;
6896  	int ret, vdev_id;
6897  
6898  	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6899  	if (IS_ERR(tb)) {
6900  		ret = PTR_ERR(tb);
6901  		ath12k_warn(ab, "failed to parse P2P NoA TLV: %d\n", ret);
6902  		return ret;
6903  	}
6904  
6905  	ev = tb[WMI_TAG_P2P_NOA_EVENT];
6906  	noa = tb[WMI_TAG_P2P_NOA_INFO];
6907  
6908  	if (!ev || !noa) {
6909  		ret = -EPROTO;
6910  		goto out;
6911  	}
6912  
6913  	vdev_id = __le32_to_cpu(ev->vdev_id);
6914  
6915  	ath12k_dbg(ab, ATH12K_DBG_WMI,
6916  		   "wmi tlv p2p noa vdev_id %i descriptors %u\n",
6917  		   vdev_id, le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM));
6918  
6919  	rcu_read_lock();
6920  	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
6921  	if (!ar) {
6922  		ath12k_warn(ab, "invalid vdev id %d in P2P NoA event\n",
6923  			    vdev_id);
6924  		ret = -EINVAL;
6925  		goto unlock;
6926  	}
6927  
6928  	ath12k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
6929  
6930  	ret = 0;
6931  
6932  unlock:
6933  	rcu_read_unlock();
6934  out:
6935  	kfree(tb);
6936  	return ret;
6937  }
6938  
6939  static void ath12k_rfkill_state_change_event(struct ath12k_base *ab,
6940  					     struct sk_buff *skb)
6941  {
6942  	const struct wmi_rfkill_state_change_event *ev;
6943  	const void **tb;
6944  	int ret;
6945  
6946  	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6947  	if (IS_ERR(tb)) {
6948  		ret = PTR_ERR(tb);
6949  		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6950  		return;
6951  	}
6952  
6953  	ev = tb[WMI_TAG_RFKILL_EVENT];
6954  	if (!ev) {
6955  		kfree(tb);
6956  		return;
6957  	}
6958  
6959  	ath12k_dbg(ab, ATH12K_DBG_MAC,
6960  		   "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
6961  		   le32_to_cpu(ev->gpio_pin_num),
6962  		   le32_to_cpu(ev->int_type),
6963  		   le32_to_cpu(ev->radio_state));
6964  
6965  	spin_lock_bh(&ab->base_lock);
6966  	ab->rfkill_radio_on = (ev->radio_state == cpu_to_le32(WMI_RFKILL_RADIO_STATE_ON));
6967  	spin_unlock_bh(&ab->base_lock);
6968  
6969  	queue_work(ab->workqueue, &ab->rfkill_work);
6970  	kfree(tb);
6971  }
6972  
6973  static void
6974  ath12k_wmi_diag_event(struct ath12k_base *ab, struct sk_buff *skb)
6975  {
6976  	trace_ath12k_wmi_diag(ab, skb->data, skb->len);
6977  }
6978  
6979  static void ath12k_wmi_twt_enable_event(struct ath12k_base *ab,
6980  					struct sk_buff *skb)
6981  {
6982  	const void **tb;
6983  	const struct wmi_twt_enable_event *ev;
6984  	int ret;
6985  
6986  	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6987  	if (IS_ERR(tb)) {
6988  		ret = PTR_ERR(tb);
6989  		ath12k_warn(ab, "failed to parse wmi twt enable status event tlv: %d\n",
6990  			    ret);
6991  		return;
6992  	}
6993  
6994  	ev = tb[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT];
6995  	if (!ev) {
6996  		ath12k_warn(ab, "failed to fetch twt enable wmi event\n");
6997  		goto exit;
6998  	}
6999  
7000  	ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt enable event pdev id %u status %u\n",
7001  		   le32_to_cpu(ev->pdev_id),
7002  		   le32_to_cpu(ev->status));
7003  
7004  exit:
7005  	kfree(tb);
7006  }
7007  
7008  static void ath12k_wmi_twt_disable_event(struct ath12k_base *ab,
7009  					 struct sk_buff *skb)
7010  {
7011  	const void **tb;
7012  	const struct wmi_twt_disable_event *ev;
7013  	int ret;
7014  
7015  	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7016  	if (IS_ERR(tb)) {
7017  		ret = PTR_ERR(tb);
7018  		ath12k_warn(ab, "failed to parse wmi twt disable status event tlv: %d\n",
7019  			    ret);
7020  		return;
7021  	}
7022  
7023  	ev = tb[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT];
7024  	if (!ev) {
7025  		ath12k_warn(ab, "failed to fetch twt disable wmi event\n");
7026  		goto exit;
7027  	}
7028  
7029  	ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt disable event pdev id %d status %u\n",
7030  		   le32_to_cpu(ev->pdev_id),
7031  		   le32_to_cpu(ev->status));
7032  
7033  exit:
7034  	kfree(tb);
7035  }
7036  
7037  static int ath12k_wmi_wow_wakeup_host_parse(struct ath12k_base *ab,
7038  					    u16 tag, u16 len,
7039  					    const void *ptr, void *data)
7040  {
7041  	const struct wmi_wow_ev_pg_fault_param *pf_param;
7042  	const struct wmi_wow_ev_param *param;
7043  	struct wmi_wow_ev_arg *arg = data;
7044  	int pf_len;
7045  
7046  	switch (tag) {
7047  	case WMI_TAG_WOW_EVENT_INFO:
7048  		param = ptr;
7049  		arg->wake_reason = le32_to_cpu(param->wake_reason);
7050  		ath12k_dbg(ab, ATH12K_DBG_WMI, "wow wakeup host reason %d %s\n",
7051  			   arg->wake_reason, wow_reason(arg->wake_reason));
7052  		break;
7053  
7054  	case WMI_TAG_ARRAY_BYTE:
7055  		if (arg && arg->wake_reason == WOW_REASON_PAGE_FAULT) {
7056  			pf_param = ptr;
7057  			pf_len = le32_to_cpu(pf_param->len);
7058  			if (pf_len > len - sizeof(pf_len) ||
7059  			    pf_len < 0) {
7060  				ath12k_warn(ab, "invalid wow reason page fault buffer len %d\n",
7061  					    pf_len);
7062  				return -EINVAL;
7063  			}
7064  			ath12k_dbg(ab, ATH12K_DBG_WMI, "wow_reason_page_fault len %d\n",
7065  				   pf_len);
7066  			ath12k_dbg_dump(ab, ATH12K_DBG_WMI,
7067  					"wow_reason_page_fault packet present",
7068  					"wow_pg_fault ",
7069  					pf_param->data,
7070  					pf_len);
7071  		}
7072  		break;
7073  	default:
7074  		break;
7075  	}
7076  
7077  	return 0;
7078  }
7079  
7080  static void ath12k_wmi_event_wow_wakeup_host(struct ath12k_base *ab, struct sk_buff *skb)
7081  {
7082  	struct wmi_wow_ev_arg arg = { };
7083  	int ret;
7084  
7085  	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
7086  				  ath12k_wmi_wow_wakeup_host_parse,
7087  				  &arg);
7088  	if (ret) {
7089  		ath12k_warn(ab, "failed to parse wmi wow wakeup host event tlv: %d\n",
7090  			    ret);
7091  		return;
7092  	}
7093  
7094  	complete(&ab->wow.wakeup_completed);
7095  }
7096  
7097  static void ath12k_wmi_gtk_offload_status_event(struct ath12k_base *ab,
7098  						struct sk_buff *skb)
7099  {
7100  	const struct wmi_gtk_offload_status_event *ev;
7101  	struct ath12k_vif *arvif;
7102  	__be64 replay_ctr_be;
7103  	u64 replay_ctr;
7104  	const void **tb;
7105  	int ret;
7106  
7107  	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
7108  	if (IS_ERR(tb)) {
7109  		ret = PTR_ERR(tb);
7110  		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
7111  		return;
7112  	}
7113  
7114  	ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT];
7115  	if (!ev) {
7116  		ath12k_warn(ab, "failed to fetch gtk offload status ev");
7117  		kfree(tb);
7118  		return;
7119  	}
7120  
7121  	rcu_read_lock();
7122  	arvif = ath12k_mac_get_arvif_by_vdev_id(ab, le32_to_cpu(ev->vdev_id));
7123  	if (!arvif) {
7124  		rcu_read_unlock();
7125  		ath12k_warn(ab, "failed to get arvif for vdev_id:%d\n",
7126  			    le32_to_cpu(ev->vdev_id));
7127  		kfree(tb);
7128  		return;
7129  	}
7130  
7131  	replay_ctr = le64_to_cpu(ev->replay_ctr);
7132  	arvif->rekey_data.replay_ctr = replay_ctr;
7133  	ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi gtk offload event refresh_cnt %d replay_ctr %llu\n",
7134  		   le32_to_cpu(ev->refresh_cnt), replay_ctr);
7135  
7136  	/* supplicant expects big-endian replay counter */
7137  	replay_ctr_be = cpu_to_be64(replay_ctr);
7138  
7139  	ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid,
7140  				   (void *)&replay_ctr_be, GFP_ATOMIC);
7141  
7142  	rcu_read_unlock();
7143  
7144  	kfree(tb);
7145  }
7146  
7147  static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
7148  {
7149  	struct wmi_cmd_hdr *cmd_hdr;
7150  	enum wmi_tlv_event_id id;
7151  
7152  	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
7153  	id = le32_get_bits(cmd_hdr->cmd_id, WMI_CMD_HDR_CMD_ID);
7154  
7155  	if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
7156  		goto out;
7157  
7158  	switch (id) {
7159  		/* Process all the WMI events here */
7160  	case WMI_SERVICE_READY_EVENTID:
7161  		ath12k_service_ready_event(ab, skb);
7162  		break;
7163  	case WMI_SERVICE_READY_EXT_EVENTID:
7164  		ath12k_service_ready_ext_event(ab, skb);
7165  		break;
7166  	case WMI_SERVICE_READY_EXT2_EVENTID:
7167  		ath12k_service_ready_ext2_event(ab, skb);
7168  		break;
7169  	case WMI_REG_CHAN_LIST_CC_EXT_EVENTID:
7170  		ath12k_reg_chan_list_event(ab, skb);
7171  		break;
7172  	case WMI_READY_EVENTID:
7173  		ath12k_ready_event(ab, skb);
7174  		break;
7175  	case WMI_PEER_DELETE_RESP_EVENTID:
7176  		ath12k_peer_delete_resp_event(ab, skb);
7177  		break;
7178  	case WMI_VDEV_START_RESP_EVENTID:
7179  		ath12k_vdev_start_resp_event(ab, skb);
7180  		break;
7181  	case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
7182  		ath12k_bcn_tx_status_event(ab, skb);
7183  		break;
7184  	case WMI_VDEV_STOPPED_EVENTID:
7185  		ath12k_vdev_stopped_event(ab, skb);
7186  		break;
7187  	case WMI_MGMT_RX_EVENTID:
7188  		ath12k_mgmt_rx_event(ab, skb);
7189  		/* mgmt_rx_event() owns the skb now! */
7190  		return;
7191  	case WMI_MGMT_TX_COMPLETION_EVENTID:
7192  		ath12k_mgmt_tx_compl_event(ab, skb);
7193  		break;
7194  	case WMI_SCAN_EVENTID:
7195  		ath12k_scan_event(ab, skb);
7196  		break;
7197  	case WMI_PEER_STA_KICKOUT_EVENTID:
7198  		ath12k_peer_sta_kickout_event(ab, skb);
7199  		break;
7200  	case WMI_ROAM_EVENTID:
7201  		ath12k_roam_event(ab, skb);
7202  		break;
7203  	case WMI_CHAN_INFO_EVENTID:
7204  		ath12k_chan_info_event(ab, skb);
7205  		break;
7206  	case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
7207  		ath12k_pdev_bss_chan_info_event(ab, skb);
7208  		break;
7209  	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
7210  		ath12k_vdev_install_key_compl_event(ab, skb);
7211  		break;
7212  	case WMI_SERVICE_AVAILABLE_EVENTID:
7213  		ath12k_service_available_event(ab, skb);
7214  		break;
7215  	case WMI_PEER_ASSOC_CONF_EVENTID:
7216  		ath12k_peer_assoc_conf_event(ab, skb);
7217  		break;
7218  	case WMI_UPDATE_STATS_EVENTID:
7219  		ath12k_update_stats_event(ab, skb);
7220  		break;
7221  	case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
7222  		ath12k_pdev_ctl_failsafe_check_event(ab, skb);
7223  		break;
7224  	case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
7225  		ath12k_wmi_pdev_csa_switch_count_status_event(ab, skb);
7226  		break;
7227  	case WMI_PDEV_TEMPERATURE_EVENTID:
7228  		ath12k_wmi_pdev_temperature_event(ab, skb);
7229  		break;
7230  	case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
7231  		ath12k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
7232  		break;
7233  	case WMI_HOST_FILS_DISCOVERY_EVENTID:
7234  		ath12k_fils_discovery_event(ab, skb);
7235  		break;
7236  	case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
7237  		ath12k_probe_resp_tx_status_event(ab, skb);
7238  		break;
7239  	case WMI_RFKILL_STATE_CHANGE_EVENTID:
7240  		ath12k_rfkill_state_change_event(ab, skb);
7241  		break;
7242  	case WMI_TWT_ENABLE_EVENTID:
7243  		ath12k_wmi_twt_enable_event(ab, skb);
7244  		break;
7245  	case WMI_TWT_DISABLE_EVENTID:
7246  		ath12k_wmi_twt_disable_event(ab, skb);
7247  		break;
7248  	case WMI_P2P_NOA_EVENTID:
7249  		ath12k_wmi_p2p_noa_event(ab, skb);
7250  		break;
7251  	/* add Unsupported events here */
7252  	case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
7253  	case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
7254  	case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
7255  		ath12k_dbg(ab, ATH12K_DBG_WMI,
7256  			   "ignoring unsupported event 0x%x\n", id);
7257  		break;
7258  	case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
7259  		ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb);
7260  		break;
7261  	case WMI_VDEV_DELETE_RESP_EVENTID:
7262  		ath12k_vdev_delete_resp_event(ab, skb);
7263  		break;
7264  	case WMI_DIAG_EVENTID:
7265  		ath12k_wmi_diag_event(ab, skb);
7266  		break;
7267  	case WMI_WOW_WAKEUP_HOST_EVENTID:
7268  		ath12k_wmi_event_wow_wakeup_host(ab, skb);
7269  		break;
7270  	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
7271  		ath12k_wmi_gtk_offload_status_event(ab, skb);
7272  		break;
7273  	/* TODO: Add remaining events */
7274  	default:
7275  		ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
7276  		break;
7277  	}
7278  
7279  out:
7280  	dev_kfree_skb(skb);
7281  }
7282  
7283  static int ath12k_connect_pdev_htc_service(struct ath12k_base *ab,
7284  					   u32 pdev_idx)
7285  {
7286  	int status;
7287  	u32 svc_id[] = { ATH12K_HTC_SVC_ID_WMI_CONTROL,
7288  			 ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1,
7289  			 ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2 };
7290  	struct ath12k_htc_svc_conn_req conn_req = {};
7291  	struct ath12k_htc_svc_conn_resp conn_resp = {};
7292  
7293  	/* these fields are the same for all service endpoints */
7294  	conn_req.ep_ops.ep_tx_complete = ath12k_wmi_htc_tx_complete;
7295  	conn_req.ep_ops.ep_rx_complete = ath12k_wmi_op_rx;
7296  	conn_req.ep_ops.ep_tx_credits = ath12k_wmi_op_ep_tx_credits;
7297  
7298  	/* connect to control service */
7299  	conn_req.service_id = svc_id[pdev_idx];
7300  
7301  	status = ath12k_htc_connect_service(&ab->htc, &conn_req, &conn_resp);
7302  	if (status) {
7303  		ath12k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n",
7304  			    status);
7305  		return status;
7306  	}
7307  
7308  	ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
7309  	ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
7310  	ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
7311  
7312  	return 0;
7313  }
7314  
7315  static int
7316  ath12k_wmi_send_unit_test_cmd(struct ath12k *ar,
7317  			      struct wmi_unit_test_cmd ut_cmd,
7318  			      u32 *test_args)
7319  {
7320  	struct ath12k_wmi_pdev *wmi = ar->wmi;
7321  	struct wmi_unit_test_cmd *cmd;
7322  	struct sk_buff *skb;
7323  	struct wmi_tlv *tlv;
7324  	void *ptr;
7325  	u32 *ut_cmd_args;
7326  	int buf_len, arg_len;
7327  	int ret;
7328  	int i;
7329  
7330  	arg_len = sizeof(u32) * le32_to_cpu(ut_cmd.num_args);
7331  	buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE;
7332  
7333  	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
7334  	if (!skb)
7335  		return -ENOMEM;
7336  
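	/* Buffer layout: the fixed unit test command TLV first, then a TLV
	 * header followed by the u32 argument array.
	 */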
7337  	cmd = (struct wmi_unit_test_cmd *)skb->data;
7338  	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_UNIT_TEST_CMD,
7339  						 sizeof(ut_cmd));
7340  
7341  	cmd->vdev_id = ut_cmd.vdev_id;
7342  	cmd->module_id = ut_cmd.module_id;
7343  	cmd->num_args = ut_cmd.num_args;
7344  	cmd->diag_token = ut_cmd.diag_token;
7345  
7346  	ptr = skb->data + sizeof(ut_cmd);
7347  
7348  	tlv = ptr;
7349  	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);
7350  
7351  	ptr += TLV_HDR_SIZE;
7352  
7353  	ut_cmd_args = ptr;
7354  	for (i = 0; i < le32_to_cpu(ut_cmd.num_args); i++)
7355  		ut_cmd_args[i] = test_args[i];
7356  
7357  	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
7358  		   "WMI unit test : module %d vdev %d n_args %d token %d\n",
7359  		   cmd->module_id, cmd->vdev_id, cmd->num_args,
7360  		   cmd->diag_token);
7361  
7362  	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID);
7363  
7364  	if (ret) {
7365  		ath12k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD :%d\n",
7366  			    ret);
7367  		dev_kfree_skb(skb);
7368  	}
7369  
7370  	return ret;
7371  }
7372  
7373  int ath12k_wmi_simulate_radar(struct ath12k *ar)
7374  {
7375  	struct ath12k_vif *arvif;
7376  	u32 dfs_args[DFS_MAX_TEST_ARGS];
7377  	struct wmi_unit_test_cmd wmi_ut;
7378  	bool arvif_found = false;
7379  
7380  	list_for_each_entry(arvif, &ar->arvifs, list) {
7381  		if (arvif->is_started && arvif->vdev_type == WMI_VDEV_TYPE_AP) {
7382  			arvif_found = true;
7383  			break;
7384  		}
7385  	}
7386  
7387  	if (!arvif_found)
7388  		return -EINVAL;
7389  
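	/* Arguments for the firmware DFS unit test module: test command id,
	 * target pdev and an encoded radar parameter (zeroed here).
	 */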
7390  	dfs_args[DFS_TEST_CMDID] = 0;
7391  	dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id;
7392  	/* Currently we could pass segment_id (b0 - b1), chirp (b2) and
7393  	 * freq offset (b3 - b10) to the unit test. For simulation
7394  	 * purposes this can be set to 0, which is valid.
7395  	 */
7396  	dfs_args[DFS_TEST_RADAR_PARAM] = 0;
7397  
7398  	wmi_ut.vdev_id = cpu_to_le32(arvif->vdev_id);
7399  	wmi_ut.module_id = cpu_to_le32(DFS_UNIT_TEST_MODULE);
7400  	wmi_ut.num_args = cpu_to_le32(DFS_MAX_TEST_ARGS);
7401  	wmi_ut.diag_token = cpu_to_le32(DFS_UNIT_TEST_TOKEN);
7402  
7403  	ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Triggering Radar Simulation\n");
7404  
7405  	return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
7406  }
7407  
7408  int ath12k_wmi_connect(struct ath12k_base *ab)
7409  {
7410  	u32 i;
7411  	u8 wmi_ep_count;
7412  
7413  	wmi_ep_count = ab->htc.wmi_ep_count;
7414  	if (wmi_ep_count > ab->hw_params->max_radios)
7415  		return -1;
7416  
7417  	for (i = 0; i < wmi_ep_count; i++)
7418  		ath12k_connect_pdev_htc_service(ab, i);
7419  
7420  	return 0;
7421  }
7422  
7423  static void ath12k_wmi_pdev_detach(struct ath12k_base *ab, u8 pdev_id)
7424  {
7425  	if (WARN_ON(pdev_id >= MAX_RADIOS))
7426  		return;
7427  
7428  	/* TODO: Deinit any pdev specific wmi resource */
7429  }
7430  
7431  int ath12k_wmi_pdev_attach(struct ath12k_base *ab,
7432  			   u8 pdev_id)
7433  {
7434  	struct ath12k_wmi_pdev *wmi_handle;
7435  
7436  	if (pdev_id >= ab->hw_params->max_radios)
7437  		return -EINVAL;
7438  
7439  	wmi_handle = &ab->wmi_ab.wmi[pdev_id];
7440  
7441  	wmi_handle->wmi_ab = &ab->wmi_ab;
7442  
7443  	ab->wmi_ab.ab = ab;
7444  	/* TODO: Init remaining resource specific to pdev */
7445  
7446  	return 0;
7447  }
7448  
7449  int ath12k_wmi_attach(struct ath12k_base *ab)
7450  {
7451  	int ret;
7452  
7453  	ret = ath12k_wmi_pdev_attach(ab, 0);
7454  	if (ret)
7455  		return ret;
7456  
7457  	ab->wmi_ab.ab = ab;
7458  	ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
7459  
7460  	/* It's overwritten when service_ext_ready is handled */
7461  	if (ab->hw_params->single_pdev_only)
7462  		ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
7463  
7464  	/* TODO: Init remaining wmi soc resources required */
7465  	init_completion(&ab->wmi_ab.service_ready);
7466  	init_completion(&ab->wmi_ab.unified_ready);
7467  
7468  	return 0;
7469  }
7470  
7471  void ath12k_wmi_detach(struct ath12k_base *ab)
7472  {
7473  	int i;
7474  
7475  	/* TODO: Deinit wmi resource specific to SOC as required */
7476  
7477  	for (i = 0; i < ab->htc.wmi_ep_count; i++)
7478  		ath12k_wmi_pdev_detach(ab, i);
7479  
7480  	ath12k_wmi_free_dbring_caps(ab);
7481  }
7482  
7483  int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar, struct wmi_hw_data_filter_arg *arg)
7484  {
7485  	struct wmi_hw_data_filter_cmd *cmd;
7486  	struct sk_buff *skb;
7487  	int len;
7488  
7489  	len = sizeof(*cmd);
7490  	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
7491  
7492  	if (!skb)
7493  		return -ENOMEM;
7494  
7495  	cmd = (struct wmi_hw_data_filter_cmd *)skb->data;
7496  	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HW_DATA_FILTER_CMD,
7497  						 sizeof(*cmd));
7498  	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
7499  	cmd->enable = cpu_to_le32(arg->enable ? 1 : 0);
7500  
7501  	/* Set all modes in case of disable */
7502  	if (arg->enable)
7503  		cmd->hw_filter_bitmap = cpu_to_le32(arg->hw_filter_bitmap);
7504  	else
7505  		cmd->hw_filter_bitmap = cpu_to_le32((u32)~0U);
7506  
7507  	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
7508  		   "wmi hw data filter enable %d filter_bitmap 0x%x\n",
7509  		   arg->enable, arg->hw_filter_bitmap);
7510  
7511  	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID);
7512  }
7513  
7514  int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar)
7515  {
7516  	struct wmi_wow_host_wakeup_cmd *cmd;
7517  	struct sk_buff *skb;
7518  	size_t len;
7519  
7520  	len = sizeof(*cmd);
7521  	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
7522  	if (!skb)
7523  		return -ENOMEM;
7524  
7525  	cmd = (struct wmi_wow_host_wakeup_cmd *)skb->data;
7526  	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
7527  						 sizeof(*cmd));
7528  
7529  	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
7530  
7531  	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID);
7532  }
7533  
7534  int ath12k_wmi_wow_enable(struct ath12k *ar)
7535  {
7536  	struct wmi_wow_enable_cmd *cmd;
7537  	struct sk_buff *skb;
7538  	int len;
7539  
7540  	len = sizeof(*cmd);
7541  	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
7542  	if (!skb)
7543  		return -ENOMEM;
7544  
7545  	cmd = (struct wmi_wow_enable_cmd *)skb->data;
7546  	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ENABLE_CMD,
7547  						 sizeof(*cmd));
7548  
7549  	cmd->enable = cpu_to_le32(1);
7550  	cmd->pause_iface_config = cpu_to_le32(WOW_IFACE_PAUSE_ENABLED);
7551  	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow enable\n");
7552  
7553  	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID);
7554  }
7555  
7556  int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id,
7557  				    enum wmi_wow_wakeup_event event,
7558  				    u32 enable)
7559  {
7560  	struct wmi_wow_add_del_event_cmd *cmd;
7561  	struct sk_buff *skb;
7562  	size_t len;
7563  
7564  	len = sizeof(*cmd);
7565  	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
7566  	if (!skb)
7567  		return -ENOMEM;
7568  
7569  	cmd = (struct wmi_wow_add_del_event_cmd *)skb->data;
7570  	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_DEL_EVT_CMD,
7571  						 sizeof(*cmd));
7572  	cmd->vdev_id = cpu_to_le32(vdev_id);
7573  	cmd->is_add = cpu_to_le32(enable);
7574  	cmd->event_bitmap = cpu_to_le32((1 << event));
7575  
7576  	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
7577  		   wow_wakeup_event(event), enable, vdev_id);
7578  
7579  	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
7580  }
7581  
7582  int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id,
7583  			       const u8 *pattern, const u8 *mask,
7584  			       int pattern_len, int pattern_offset)
7585  {
7586  	struct wmi_wow_add_pattern_cmd *cmd;
7587  	struct wmi_wow_bitmap_pattern_params *bitmap;
7588  	struct wmi_tlv *tlv;
7589  	struct sk_buff *skb;
7590  	void *ptr;
7591  	size_t len;
7592  
7593  	len = sizeof(*cmd) +
7594  	      sizeof(*tlv) +			/* array struct */
7595  	      sizeof(*bitmap) +			/* bitmap */
7596  	      sizeof(*tlv) +			/* empty ipv4 sync */
7597  	      sizeof(*tlv) +			/* empty ipv6 sync */
7598  	      sizeof(*tlv) +			/* empty magic */
7599  	      sizeof(*tlv) +			/* empty info timeout */
7600  	      sizeof(*tlv) + sizeof(u32);	/* ratelimit interval */
7601  
7602  	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
7603  	if (!skb)
7604  		return -ENOMEM;
7605  
7606  	/* cmd */
7607  	ptr = skb->data;
7608  	cmd = ptr;
7609  	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_PATTERN_CMD,
7610  						 sizeof(*cmd));
7611  	cmd->vdev_id = cpu_to_le32(vdev_id);
7612  	cmd->pattern_id = cpu_to_le32(pattern_id);
7613  	cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);
7614  
7615  	ptr += sizeof(*cmd);
7616  
7617  	/* bitmap */
7618  	tlv = ptr;
7619  	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*bitmap));
7620  
7621  	ptr += sizeof(*tlv);
7622  
7623  	bitmap = ptr;
7624  	bitmap->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_BITMAP_PATTERN_T,
7625  						    sizeof(*bitmap));
7626  	memcpy(bitmap->patternbuf, pattern, pattern_len);
7627  	memcpy(bitmap->bitmaskbuf, mask, pattern_len);
7628  	bitmap->pattern_offset = cpu_to_le32(pattern_offset);
7629  	bitmap->pattern_len = cpu_to_le32(pattern_len);
7630  	bitmap->bitmask_len = cpu_to_le32(pattern_len);
7631  	bitmap->pattern_id = cpu_to_le32(pattern_id);
7632  
7633  	ptr += sizeof(*bitmap);
7634  
7635  	/* ipv4 sync */
7636  	tlv = ptr;
7637  	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
7638  
7639  	ptr += sizeof(*tlv);
7640  
7641  	/* ipv6 sync */
7642  	tlv = ptr;
7643  	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
7644  
7645  	ptr += sizeof(*tlv);
7646  
7647  	/* magic */
7648  	tlv = ptr;
7649  	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
7650  
7651  	ptr += sizeof(*tlv);
7652  
7653  	/* pattern info timeout */
7654  	tlv = ptr;
7655  	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
7656  
7657  	ptr += sizeof(*tlv);
7658  
7659  	/* ratelimit interval */
7660  	tlv = ptr;
7661  	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32));
7662  
7663  	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d pattern_len %d\n",
7664  		   vdev_id, pattern_id, pattern_offset, pattern_len);
7665  
7666  	ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow pattern: ",
7667  			bitmap->patternbuf, pattern_len);
7668  	ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow bitmask: ",
7669  			bitmap->bitmaskbuf, pattern_len);
7670  
7671  	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
7672  }
7673  
7674  int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id)
7675  {
7676  	struct wmi_wow_del_pattern_cmd *cmd;
7677  	struct sk_buff *skb;
7678  	size_t len;
7679  
7680  	len = sizeof(*cmd);
7681  	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
7682  	if (!skb)
7683  		return -ENOMEM;
7684  
7685  	cmd = (struct wmi_wow_del_pattern_cmd *)skb->data;
7686  	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_DEL_PATTERN_CMD,
7687  						 sizeof(*cmd));
7688  	cmd->vdev_id = cpu_to_le32(vdev_id);
7689  	cmd->pattern_id = cpu_to_le32(pattern_id);
7690  	cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);
7691  
7692  	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
7693  		   vdev_id, pattern_id);
7694  
7695  	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
7696  }
7697  
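/*
 * Build the WMI_NETWORK_LIST_OFFLOAD_CONFIG command that starts PNO
 * (preferred network offload) scanning. The command is laid out as the
 * fixed wmi_wow_nlo_config_cmd, followed by a TLV array of
 * nlo_configured_params (one entry per configured SSID) and a TLV array
 * of u32 channels taken from the first network entry.
 */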
static struct sk_buff *
ath12k_wmi_op_gen_config_pno_start(struct ath12k *ar, u32 vdev_id,
				   struct wmi_pno_scan_req_arg *pno)
{
	struct nlo_configured_params *nlo_list;
	size_t len, nlo_list_len, channel_list_len;
	struct wmi_wow_nlo_config_cmd *cmd;
	__le32 *channel_list;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	u32 i;

	len = sizeof(*cmd) +
	      sizeof(*tlv) +
	      /* TLV place holder for array of structures
	       * nlo_configured_params(nlo_list)
	       */
	      sizeof(*tlv);
	      /* TLV place holder for array of uint32 channel_list */

	channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count;
	len += channel_list_len;

	nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count;
	len += nlo_list_len;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = skb->data;
	cmd = ptr;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(pno->vdev_id);
	cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);

	/* current FW does not support min-max range for dwell time */
	cmd->active_dwell_time = cpu_to_le32(pno->active_max_time);
	cmd->passive_dwell_time = cpu_to_le32(pno->passive_max_time);

	if (pno->do_passive_scan)
		cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);

	cmd->fast_scan_period = cpu_to_le32(pno->fast_scan_period);
	cmd->slow_scan_period = cpu_to_le32(pno->slow_scan_period);
	cmd->fast_scan_max_cycles = cpu_to_le32(pno->fast_scan_max_cycles);
	cmd->delay_start_time = cpu_to_le32(pno->delay_start_time);

	if (pno->enable_pno_scan_randomization) {
		cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
					  WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
		ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
		ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
	}

	ptr += sizeof(*cmd);

	/* nlo_configured_params(nlo_list) */
	cmd->no_of_ssids = cpu_to_le32(pno->uc_networks_count);
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, nlo_list_len);

	ptr += sizeof(*tlv);
	nlo_list = ptr;
	for (i = 0; i < pno->uc_networks_count; i++) {
		tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
		tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE,
						     sizeof(*nlo_list));

		nlo_list[i].ssid.valid = cpu_to_le32(1);
		nlo_list[i].ssid.ssid.ssid_len =
			cpu_to_le32(pno->a_networks[i].ssid.ssid_len);
		memcpy(nlo_list[i].ssid.ssid.ssid,
		       pno->a_networks[i].ssid.ssid,
		       le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));

		if (pno->a_networks[i].rssi_threshold &&
		    pno->a_networks[i].rssi_threshold > -300) {
			nlo_list[i].rssi_cond.valid = cpu_to_le32(1);
			nlo_list[i].rssi_cond.rssi =
					cpu_to_le32(pno->a_networks[i].rssi_threshold);
		}

		nlo_list[i].bcast_nw_type.valid = cpu_to_le32(1);
		nlo_list[i].bcast_nw_type.bcast_nw_type =
					cpu_to_le32(pno->a_networks[i].bcast_nw_type);
	}

	ptr += nlo_list_len;
	cmd->num_of_channels = cpu_to_le32(pno->a_networks[0].channel_count);
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, channel_list_len);
	ptr += sizeof(*tlv);
	channel_list = ptr;

	for (i = 0; i < pno->a_networks[0].channel_count; i++)
		channel_list[i] = cpu_to_le32(pno->a_networks[0].channels[i]);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
		   vdev_id);

	return skb;
}

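/*
 * Build the WMI_NETWORK_LIST_OFFLOAD_CONFIG command that stops PNO
 * scanning on the given vdev; only the fixed command structure with
 * WMI_NLO_CONFIG_STOP set is needed.
 */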
static struct sk_buff *ath12k_wmi_op_gen_config_pno_stop(struct ath12k *ar,
							 u32 vdev_id)
{
	struct wmi_wow_nlo_config_cmd *cmd;
	struct sk_buff *skb;
	size_t len;

	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	cmd = (struct wmi_wow_nlo_config_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, len);

	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_STOP);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi tlv stop pno config vdev_id %d\n", vdev_id);
	return skb;
}

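/**
 * ath12k_wmi_wow_config_pno() - enable or disable PNO scan offload
 * @ar: ath12k device
 * @vdev_id: vdev to configure
 * @pno_scan: PNO parameters; @pno_scan->enable selects start vs. stop
 *
 * Return: 0 on success, a negative error code on failure.
 */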
int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id,
			      struct wmi_pno_scan_req_arg *pno_scan)
{
	struct sk_buff *skb;

	if (pno_scan->enable)
		skb = ath12k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan);
	else
		skb = ath12k_wmi_op_gen_config_pno_stop(ar, vdev_id);

	if (IS_ERR_OR_NULL(skb))
		return -ENOMEM;

	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
}

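/*
 * Append one TLV array of NS (neighbor solicitation) offload tuples at
 * *ptr and advance *ptr past it. The first call (ext == false) always
 * emits WMI_MAX_NS_OFFLOADS tuples, marking only the first
 * offload->ipv6_count of them valid; a second call with ext == true
 * emits the remaining tuples when more addresses were configured.
 */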
static void ath12k_wmi_fill_ns_offload(struct ath12k *ar,
				       struct wmi_arp_ns_offload_arg *offload,
				       void **ptr,
				       bool enable,
				       bool ext)
{
	struct wmi_ns_offload_params *ns;
	struct wmi_tlv *tlv;
	void *buf_ptr = *ptr;
	u32 ns_cnt, ns_ext_tuples;
	int i, max_offloads;

	ns_cnt = offload->ipv6_count;

	tlv = buf_ptr;

	if (ext) {
		ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
						 ns_ext_tuples * sizeof(*ns));
		i = WMI_MAX_NS_OFFLOADS;
		max_offloads = offload->ipv6_count;
	} else {
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
						 WMI_MAX_NS_OFFLOADS * sizeof(*ns));
		i = 0;
		max_offloads = WMI_MAX_NS_OFFLOADS;
	}

	buf_ptr += sizeof(*tlv);

	for (; i < max_offloads; i++) {
		ns = buf_ptr;
		ns->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NS_OFFLOAD_TUPLE,
							sizeof(*ns));

		if (enable) {
			if (i < ns_cnt)
				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_VALID);

			memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16);
			memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16);

			if (offload->ipv6_type[i])
				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_IS_IPV6_ANYCAST);

			memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN);

			if (!is_zero_ether_addr(ns->target_mac.addr))
				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_MAC_VALID);

			ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
				   "wmi index %d ns_solicited %pI6 target %pI6",
				   i, ns->solicitation_ipaddr,
				   ns->target_ipaddr[0]);
		}

		buf_ptr += sizeof(*ns);
	}

	*ptr = buf_ptr;
}

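/*
 * Append the TLV array of WMI_MAX_ARP_OFFLOADS ARP offload tuples at
 * *ptr and advance *ptr past it; entries beyond offload->ipv4_count are
 * left invalid (flags clear).
 */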
static void ath12k_wmi_fill_arp_offload(struct ath12k *ar,
					struct wmi_arp_ns_offload_arg *offload,
					void **ptr,
					bool enable)
{
	struct wmi_arp_offload_params *arp;
	struct wmi_tlv *tlv;
	void *buf_ptr = *ptr;
	int i;

	/* fill arp tuple */
	tlv = buf_ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
					 WMI_MAX_ARP_OFFLOADS * sizeof(*arp));
	buf_ptr += sizeof(*tlv);

	for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) {
		arp = buf_ptr;
		arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARP_OFFLOAD_TUPLE,
							 sizeof(*arp));

		if (enable && i < offload->ipv4_count) {
			/* Copy the target ip addr and flags */
			arp->flags = cpu_to_le32(WMI_ARPOL_FLAGS_VALID);
			memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4);

			ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi arp offload address %pI4",
				   arp->target_ipaddr);
		}

		buf_ptr += sizeof(*arp);
	}

	*ptr = buf_ptr;
}

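/**
 * ath12k_wmi_arp_ns_offload() - program ARP/NS offload in firmware
 * @ar: ath12k device
 * @arvif: vif the offload applies to
 * @offload: ARP/NS addresses to offload
 * @enable: install the addresses when true, clear them when false
 *
 * The command carries the fixed structure, a TLV array of NS tuples, a
 * TLV array of ARP tuples and, when more than WMI_MAX_NS_OFFLOADS IPv6
 * addresses are configured, an extra TLV array with the remaining NS
 * tuples.
 *
 * Return: 0 on success, a negative error code on failure.
 */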
int ath12k_wmi_arp_ns_offload(struct ath12k *ar,
			      struct ath12k_vif *arvif,
			      struct wmi_arp_ns_offload_arg *offload,
			      bool enable)
{
	struct wmi_set_arp_ns_offload_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *buf_ptr;
	size_t len;
	u8 ns_cnt, ns_ext_tuples = 0;

	ns_cnt = offload->ipv6_count;

	len = sizeof(*cmd) +
	      sizeof(*tlv) +
	      WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_params) +
	      sizeof(*tlv) +
	      WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_params);

	if (ns_cnt > WMI_MAX_NS_OFFLOADS) {
		ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS;
		len += sizeof(*tlv) +
		       ns_ext_tuples * sizeof(struct wmi_ns_offload_params);
	}

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	buf_ptr = skb->data;
	cmd = buf_ptr;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_ARP_NS_OFFLOAD_CMD,
						 sizeof(*cmd));
	cmd->flags = cpu_to_le32(0);
	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
	cmd->num_ns_ext_tuples = cpu_to_le32(ns_ext_tuples);

	buf_ptr += sizeof(*cmd);

	ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, false);
	ath12k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable);

	if (ns_ext_tuples)
		ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, true);

	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
}

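/**
 * ath12k_wmi_gtk_rekey_offload() - enable or disable GTK rekey offload
 * @ar: ath12k device
 * @arvif: vif whose rekey material (KCK, KEK, replay counter) is used
 * @enable: enable the offload when true, disable it when false
 *
 * Return: 0 on success, a negative error code on failure.
 */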
int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar,
				 struct ath12k_vif *arvif, bool enable)
{
	struct ath12k_rekey_data *rekey_data = &arvif->rekey_data;
	struct wmi_gtk_rekey_offload_cmd *cmd;
	struct sk_buff *skb;
	__le64 replay_ctr;
	int len;

	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);

	if (enable) {
		cmd->flags = cpu_to_le32(GTK_OFFLOAD_ENABLE_OPCODE);

		/* the length in rekey_data and cmd is equal */
		memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck));
		memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek));

		replay_ctr = cpu_to_le64(rekey_data->replay_ctr);
		memcpy(cmd->replay_ctr, &replay_ctr,
		       sizeof(replay_ctr));
	} else {
		cmd->flags = cpu_to_le32(GTK_OFFLOAD_DISABLE_OPCODE);
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "offload gtk rekey vdev: %d %d\n",
		   arvif->vdev_id, enable);
	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
}

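/**
 * ath12k_wmi_gtk_rekey_getinfo() - request GTK rekey offload status
 * @ar: ath12k device
 * @arvif: vif to query
 *
 * Only issues the status request; firmware is expected to report the
 * current state (including the replay counter) in a later event.
 *
 * Return: 0 on success, a negative error code on failure.
 */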
int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar,
				 struct ath12k_vif *arvif)
{
	struct wmi_gtk_rekey_offload_cmd *cmd;
	struct sk_buff *skb;
	int len;

	len = sizeof(*cmd);
	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
	cmd->flags = cpu_to_le32(GTK_OFFLOAD_REQUEST_STATUS_OPCODE);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "get gtk rekey vdev_id: %d\n",
		   arvif->vdev_id);
	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
}

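/**
 * ath12k_wmi_sta_keepalive() - configure station keepalive in firmware
 * @ar: ath12k device
 * @arg: keepalive parameters
 *
 * The command consists of the fixed wmi_sta_keepalive_cmd followed by a
 * wmi_sta_keepalive_arp_resp_params TLV; the ARP response fields are
 * only filled for the ARP-based keepalive methods.
 *
 * A minimal usage sketch (field values are illustrative only):
 *
 *	struct wmi_sta_keepalive_arg arg = {
 *		.vdev_id = arvif->vdev_id,
 *		.enabled = 1,
 *		.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
 *		.interval = WMI_STA_KEEPALIVE_INTERVAL_DEFAULT,
 *	};
 *
 *	ret = ath12k_wmi_sta_keepalive(ar, &arg);
 *
 * Return: 0 on success, a negative error code on failure.
 */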
int ath12k_wmi_sta_keepalive(struct ath12k *ar,
			     const struct wmi_sta_keepalive_arg *arg)
{
	struct wmi_sta_keepalive_arp_resp_params *arp;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_sta_keepalive_cmd *cmd;
	struct sk_buff *skb;
	size_t len;

	len = sizeof(*cmd) + sizeof(*arp);
	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_sta_keepalive_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALIVE_CMD, sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->enabled = cpu_to_le32(arg->enabled);
	cmd->interval = cpu_to_le32(arg->interval);
	cmd->method = cpu_to_le32(arg->method);

	arp = (struct wmi_sta_keepalive_arp_resp_params *)(cmd + 1);
	arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALVE_ARP_RESPONSE,
						 sizeof(*arp));
	if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE ||
	    arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) {
		arp->src_ip4_addr = cpu_to_le32(arg->src_ip4_addr);
		arp->dest_ip4_addr = cpu_to_le32(arg->dest_ip4_addr);
		ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi sta keepalive vdev %d enabled %d method %d interval %d\n",
		   arg->vdev_id, arg->enabled, arg->method, arg->interval);

	return ath12k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
}