xref: /wlan-dirver/qcacld-3.0/core/wma/src/wma_main.c (revision 75795d61a6758742814b135a806ae32b342c28f5)
1 /*
2  * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  *  DOC:  wma_main.c
21  *
22  *  This file contains wma initialization and FW exchange
23  *  related functions.
24  */
25 
26 /* Header files */
27 
28 #include "wma.h"
29 #include "wma_api.h"
30 #include "cds_api.h"
31 #include "wmi_unified_api.h"
32 #include "wlan_qct_sys.h"
33 #include "wni_api.h"
34 #include "ani_global.h"
35 #include "wmi_unified.h"
36 #include "wni_cfg.h"
37 #include "cfg_api.h"
38 #if defined(CONFIG_HL_SUPPORT)
39 #include "wlan_tgt_def_config_hl.h"
40 #else
41 #include "wlan_tgt_def_config.h"
42 #endif
43 #include "qdf_nbuf.h"
44 #include "qdf_types.h"
45 #include "qdf_mem.h"
46 #include "wma_types.h"
47 #include "lim_api.h"
48 #include "lim_session_utils.h"
49 
50 #include "cds_utils.h"
51 
52 #if !defined(REMOVE_PKT_LOG)
53 #include "pktlog_ac.h"
54 #endif /* REMOVE_PKT_LOG */
55 
56 #include "dbglog_host.h"
57 #include "csr_api.h"
58 #include "ol_fw.h"
59 
60 #include "wma_internal.h"
61 
62 #include "wma_ocb.h"
63 #include "wlan_policy_mgr_api.h"
64 #include "cdp_txrx_cfg.h"
65 #include "cdp_txrx_flow_ctrl_legacy.h"
66 #include "cdp_txrx_flow_ctrl_v2.h"
67 #include "cdp_txrx_ipa.h"
68 #include "cdp_txrx_misc.h"
69 #include "wma_fips_api.h"
70 #include "wma_nan_datapath.h"
71 #include "wlan_lmac_if_def.h"
72 #include "wlan_lmac_if_api.h"
73 #include "target_if.h"
74 #include "wlan_global_lmac_if_api.h"
75 #include "target_if_pmo.h"
76 #include "wma_he.h"
77 #include "wlan_pmo_obj_mgmt_api.h"
78 
79 #include "wlan_reg_tgt_api.h"
80 #include "wlan_reg_services_api.h"
81 #include <cdp_txrx_handle.h>
82 #include <wlan_pmo_ucfg_api.h>
83 #include "wifi_pos_api.h"
84 #include "hif_main.h"
85 #include <target_if_spectral.h>
86 #include <wlan_spectral_utils_api.h>
87 #include "init_event_handler.h"
88 #include "init_deinit_lmac.h"
89 #include "target_if_green_ap.h"
90 #include "service_ready_param.h"
91 #include "wlan_cp_stats_mc_ucfg_api.h"
92 
93 #define WMA_LOG_COMPLETION_TIMER 3000 /* 3 seconds */
94 #define WMI_TLV_HEADROOM 128
95 
96 #define WMA_FW_TIME_SYNC_TIMER 60000 /* 1 min */
97 
98 uint8_t *mac_trace_get_wma_msg_string(uint16_t wmaMsg);
99 static uint32_t g_fw_wlan_feat_caps;
100 /**
101  * wma_get_fw_wlan_feat_caps() - get fw feature capablity
102  * @feature: feature enum value
103  *
104  * Return: true/false
105  */
106 bool wma_get_fw_wlan_feat_caps(enum cap_bitmap feature)
107 {
108 	return (g_fw_wlan_feat_caps & (1 << feature)) ? true : false;
109 }
110 
111 /**
112  * wma_set_fw_wlan_feat_caps() - set fw feature capablity
113  * @feature: feature enum value
114  *
115  * Return: None
116  */
117 void wma_set_fw_wlan_feat_caps(enum cap_bitmap feature)
118 {
119 	g_fw_wlan_feat_caps |= (1 << feature);
120 }
121 
122 /**
123  * wma_service_ready_ext_evt_timeout() - Service ready extended event timeout
124  * @data: Timeout handler data
125  *
126  * This function is called when the FW fails to send WMI_SERVICE_READY_EXT_EVENT
127  * message
128  *
129  * Return: None
130  */
131 static void wma_service_ready_ext_evt_timeout(void *data)
132 {
133 	tp_wma_handle wma_handle;
134 
135 	WMA_LOGA("%s: Timeout waiting for WMI_SERVICE_READY_EXT_EVENT",
136 			__func__);
137 
138 	wma_handle = (tp_wma_handle) data;
139 
140 	if (!wma_handle) {
141 		WMA_LOGE("%s: Invalid WMA handle", __func__);
142 		goto end;
143 	}
144 
145 end:
146 	/* Assert here. Panic is being called in insmod thread */
147 	QDF_ASSERT(0);
148 }
149 
150 /**
151  * wma_get_ini_handle() - API to get WMA ini info handle
152  * @wma: WMA Handle
153  *
154  * Returns the pointer to WMA ini structure.
155  * Return: struct wma_ini_config
156  */
157 struct wma_ini_config *wma_get_ini_handle(tp_wma_handle wma)
158 {
159 	if (!wma) {
160 		WMA_LOGE("%s: Invalid WMA context\n", __func__);
161 		return NULL;
162 	}
163 
164 	return &wma->ini_config;
165 }
166 
167 #define MAX_SUPPORTED_PEERS_REV1_1 14
168 #define MAX_SUPPORTED_PEERS_REV1_3 32
169 #define MIN_NO_OF_PEERS 1
170 
171 /**
172  * wma_get_number_of_peers_supported - API to query for number of peers
173  * supported
174  * @wma: WMA Handle
175  *
176  * Return: Max Number of Peers Supported
177  */
178 static uint8_t wma_get_number_of_peers_supported(tp_wma_handle wma)
179 {
180 	struct hif_target_info *tgt_info;
181 	struct wma_ini_config *cfg = wma_get_ini_handle(wma);
182 	uint8_t max_no_of_peers = cfg ? cfg->max_no_of_peers : MIN_NO_OF_PEERS;
183 	struct hif_opaque_softc *scn = cds_get_context(QDF_MODULE_ID_HIF);
184 
185 	if (!scn) {
186 		WMA_LOGE("%s: Invalid wma handle", __func__);
187 		return 0;
188 	}
189 
190 	tgt_info = hif_get_target_info_handle(scn);
191 
192 	switch (tgt_info->target_version) {
193 	case AR6320_REV1_1_VERSION:
194 		if (max_no_of_peers > MAX_SUPPORTED_PEERS_REV1_1)
195 			max_no_of_peers = MAX_SUPPORTED_PEERS_REV1_1;
196 		break;
197 	default:
198 		if (max_no_of_peers > MAX_SUPPORTED_PEERS_REV1_3)
199 			max_no_of_peers = MAX_SUPPORTED_PEERS_REV1_3;
200 		break;
201 	}
202 
203 	return max_no_of_peers;
204 }
205 
206 /**
207  * wma_get_number_of_tids_supported - API to query for number of tids supported
208  * @no_of_peers_supported: Number of peer supported
209  *
210  * Return: Max number of tids supported
211  */
212 #if defined(CONFIG_HL_SUPPORT)
213 static uint32_t wma_get_number_of_tids_supported(uint8_t no_of_peers_supported)
214 {
215 	return 4 * no_of_peers_supported;
216 }
217 #else
218 static uint32_t wma_get_number_of_tids_supported(uint8_t no_of_peers_supported)
219 {
220 	return 2 * (no_of_peers_supported + CFG_TGT_NUM_VDEV + 2);
221 }
222 #endif
223 
224 #ifdef PERE_IP_HDR_ALIGNMENT_WAR
225 static void wma_reset_rx_decap_mode(target_resource_config *tgt_cfg)
226 {
227 	/*
228 	 * To make the IP header begins at dword aligned address,
229 	 * we make the decapsulation mode as Native Wifi.
230 	 */
231 	tgt_cfg->rx_decap_mode = CFG_TGT_RX_DECAP_MODE_NWIFI;
232 }
233 #else
234 static void wma_reset_rx_decap_mode(target_resource_config *tgt_cfg)
235 {
236 }
237 
238 #endif
239 /**
240  * wma_set_default_tgt_config() - set default tgt config
241  * @wma_handle: wma handle
242  * @tgt_cfg: Resource config given to target
243  *
244  * Return: none
245  */
246 static void wma_set_default_tgt_config(tp_wma_handle wma_handle,
247 				       target_resource_config *tgt_cfg)
248 {
249 	uint8_t no_of_peers_supported;
250 
251 	qdf_mem_zero(tgt_cfg, sizeof(target_resource_config));
252 	tgt_cfg->num_vdevs = CFG_TGT_NUM_VDEV;
253 	tgt_cfg->num_peers = CFG_TGT_NUM_PEERS + CFG_TGT_NUM_VDEV + 2;
254 	tgt_cfg->num_offload_peers = CFG_TGT_NUM_OFFLOAD_PEERS;
255 	tgt_cfg->num_offload_reorder_buffs = CFG_TGT_NUM_OFFLOAD_REORDER_BUFFS;
256 	tgt_cfg->num_peer_keys = CFG_TGT_NUM_PEER_KEYS;
257 	tgt_cfg->num_tids = CFG_TGT_NUM_TIDS;
258 	tgt_cfg->ast_skid_limit = CFG_TGT_AST_SKID_LIMIT;
259 	tgt_cfg->tx_chain_mask = CFG_TGT_DEFAULT_TX_CHAIN_MASK;
260 	tgt_cfg->rx_chain_mask = CFG_TGT_DEFAULT_RX_CHAIN_MASK;
261 	tgt_cfg->rx_timeout_pri[0] = CFG_TGT_RX_TIMEOUT_LO_PRI;
262 	tgt_cfg->rx_timeout_pri[1] = CFG_TGT_RX_TIMEOUT_LO_PRI;
263 	tgt_cfg->rx_timeout_pri[2] = CFG_TGT_RX_TIMEOUT_LO_PRI;
264 	tgt_cfg->rx_timeout_pri[3] = CFG_TGT_RX_TIMEOUT_HI_PRI;
265 	tgt_cfg->rx_decap_mode = CFG_TGT_RX_DECAP_MODE;
266 	tgt_cfg->scan_max_pending_req = CFG_TGT_DEFAULT_SCAN_MAX_REQS;
267 	tgt_cfg->bmiss_offload_max_vdev =
268 			CFG_TGT_DEFAULT_BMISS_OFFLOAD_MAX_VDEV;
269 	tgt_cfg->roam_offload_max_vdev = CFG_TGT_DEFAULT_ROAM_OFFLOAD_MAX_VDEV;
270 	tgt_cfg->roam_offload_max_ap_profiles =
271 		CFG_TGT_DEFAULT_ROAM_OFFLOAD_MAX_PROFILES;
272 	tgt_cfg->num_mcast_groups = CFG_TGT_DEFAULT_NUM_MCAST_GROUPS;
273 	tgt_cfg->num_mcast_table_elems = CFG_TGT_DEFAULT_NUM_MCAST_TABLE_ELEMS;
274 	tgt_cfg->mcast2ucast_mode = CFG_TGT_DEFAULT_MCAST2UCAST_MODE;
275 	tgt_cfg->tx_dbg_log_size = CFG_TGT_DEFAULT_TX_DBG_LOG_SIZE;
276 	tgt_cfg->num_wds_entries = CFG_TGT_WDS_ENTRIES;
277 	tgt_cfg->dma_burst_size = CFG_TGT_DEFAULT_DMA_BURST_SIZE;
278 	tgt_cfg->mac_aggr_delim = CFG_TGT_DEFAULT_MAC_AGGR_DELIM;
279 	tgt_cfg->rx_skip_defrag_timeout_dup_detection_check =
280 		CFG_TGT_DEFAULT_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK,
281 	tgt_cfg->vow_config = CFG_TGT_DEFAULT_VOW_CONFIG;
282 	tgt_cfg->gtk_offload_max_vdev = CFG_TGT_DEFAULT_GTK_OFFLOAD_MAX_VDEV;
283 	tgt_cfg->num_msdu_desc = CFG_TGT_NUM_MSDU_DESC;
284 	tgt_cfg->max_frag_entries = CFG_TGT_MAX_FRAG_TABLE_ENTRIES;
285 	tgt_cfg->num_tdls_vdevs = CFG_TGT_NUM_TDLS_VDEVS;
286 	tgt_cfg->num_tdls_conn_table_entries =
287 		CFG_TGT_NUM_TDLS_CONN_TABLE_ENTRIES;
288 	tgt_cfg->beacon_tx_offload_max_vdev =
289 		CFG_TGT_DEFAULT_BEACON_TX_OFFLOAD_MAX_VDEV;
290 	tgt_cfg->num_multicast_filter_entries =
291 		CFG_TGT_MAX_MULTICAST_FILTER_ENTRIES;
292 	tgt_cfg->num_wow_filters = 0;
293 	tgt_cfg->num_keep_alive_pattern = 0;
294 	tgt_cfg->keep_alive_pattern_size = 0;
295 	tgt_cfg->max_tdls_concurrent_sleep_sta =
296 		CFG_TGT_NUM_TDLS_CONC_SLEEP_STAS;
297 	tgt_cfg->max_tdls_concurrent_buffer_sta =
298 		CFG_TGT_NUM_TDLS_CONC_BUFFER_STAS;
299 	tgt_cfg->wmi_send_separate = 0;
300 	tgt_cfg->num_ocb_vdevs = CFG_TGT_NUM_OCB_VDEVS;
301 	tgt_cfg->num_ocb_channels = CFG_TGT_NUM_OCB_CHANNELS;
302 	tgt_cfg->num_ocb_schedules = CFG_TGT_NUM_OCB_SCHEDULES;
303 
304 	no_of_peers_supported = wma_get_number_of_peers_supported(wma_handle);
305 	tgt_cfg->num_peers = no_of_peers_supported + CFG_TGT_NUM_VDEV + 2;
306 	tgt_cfg->num_tids = wma_get_number_of_tids_supported(
307 						no_of_peers_supported);
308 	tgt_cfg->scan_max_pending_req = wma_handle->max_scan;
309 
310 	tgt_cfg->mgmt_comp_evt_bundle_support = true;
311 	tgt_cfg->tx_msdu_new_partition_id_support = true;
312 
313 	/* reduce the peer/vdev if CFG_TGT_NUM_MSDU_DESC exceeds 1000 */
314 	wma_reset_rx_decap_mode(tgt_cfg);
315 
316 	if (cds_get_conparam() == QDF_GLOBAL_MONITOR_MODE)
317 		tgt_cfg->rx_decap_mode = CFG_TGT_RX_DECAP_MODE_RAW;
318 }
319 
320 /**
321  * wma_cli_get_command() - WMA "get" command processor
322  * @vdev_id: virtual device for the command
323  * @param_id: parameter id
324  * @vpdev: parameter category
325  *
326  * Return: parameter value on success, -EINVAL on failure
327  */
328 int wma_cli_get_command(int vdev_id, int param_id, int vpdev)
329 {
330 	int ret = 0;
331 	tp_wma_handle wma;
332 	struct wma_txrx_node *intr = NULL;
333 
334 	wma = cds_get_context(QDF_MODULE_ID_WMA);
335 
336 	if (NULL == wma) {
337 		WMA_LOGE("%s: Invalid wma handle", __func__);
338 		return -EINVAL;
339 	}
340 
341 	intr = wma->interfaces;
342 
343 	if (VDEV_CMD == vpdev) {
344 		switch (param_id) {
345 		case WMI_VDEV_PARAM_NSS:
346 			ret = intr[vdev_id].config.nss;
347 			break;
348 #ifdef QCA_SUPPORT_GTX
349 		case WMI_VDEV_PARAM_GTX_HT_MCS:
350 			ret = intr[vdev_id].config.gtx_info.gtxRTMask[0];
351 			break;
352 		case WMI_VDEV_PARAM_GTX_VHT_MCS:
353 			ret = intr[vdev_id].config.gtx_info.gtxRTMask[1];
354 			break;
355 		case WMI_VDEV_PARAM_GTX_USR_CFG:
356 			ret = intr[vdev_id].config.gtx_info.gtxUsrcfg;
357 			break;
358 		case WMI_VDEV_PARAM_GTX_THRE:
359 			ret = intr[vdev_id].config.gtx_info.gtxPERThreshold;
360 			break;
361 		case WMI_VDEV_PARAM_GTX_MARGIN:
362 			ret = intr[vdev_id].config.gtx_info.gtxPERMargin;
363 			break;
364 		case WMI_VDEV_PARAM_GTX_STEP:
365 			ret = intr[vdev_id].config.gtx_info.gtxTPCstep;
366 			break;
367 		case WMI_VDEV_PARAM_GTX_MINTPC:
368 			ret = intr[vdev_id].config.gtx_info.gtxTPCMin;
369 			break;
370 		case WMI_VDEV_PARAM_GTX_BW_MASK:
371 			ret = intr[vdev_id].config.gtx_info.gtxBWMask;
372 			break;
373 #endif /* QCA_SUPPORT_GTX */
374 		case WMI_VDEV_PARAM_LDPC:
375 			ret = intr[vdev_id].config.ldpc;
376 			break;
377 		case WMI_VDEV_PARAM_TX_STBC:
378 			ret = intr[vdev_id].config.tx_stbc;
379 			break;
380 		case WMI_VDEV_PARAM_RX_STBC:
381 			ret = intr[vdev_id].config.rx_stbc;
382 			break;
383 		case WMI_VDEV_PARAM_SGI:
384 			ret = intr[vdev_id].config.shortgi;
385 			break;
386 		case WMI_VDEV_PARAM_ENABLE_RTSCTS:
387 			ret = intr[vdev_id].config.rtscts_en;
388 			break;
389 		case WMI_VDEV_PARAM_CHWIDTH:
390 			ret = intr[vdev_id].config.chwidth;
391 			break;
392 		case WMI_VDEV_PARAM_FIXED_RATE:
393 			ret = intr[vdev_id].config.tx_rate;
394 			break;
395 		case WMI_VDEV_PARAM_HE_DCM:
396 		case WMI_VDEV_PARAM_HE_RANGE_EXT:
397 			ret = wma_get_he_vdev_param(&intr[vdev_id], param_id);
398 			break;
399 		default:
400 			WMA_LOGE("Invalid cli_get vdev command/Not yet implemented 0x%x",
401 				 param_id);
402 			return -EINVAL;
403 		}
404 	} else if (PDEV_CMD == vpdev) {
405 		switch (param_id) {
406 		case WMI_PDEV_PARAM_ANI_ENABLE:
407 			ret = wma->pdevconfig.ani_enable;
408 			break;
409 		case WMI_PDEV_PARAM_ANI_POLL_PERIOD:
410 			ret = wma->pdevconfig.ani_poll_len;
411 			break;
412 		case WMI_PDEV_PARAM_ANI_LISTEN_PERIOD:
413 			ret = wma->pdevconfig.ani_listen_len;
414 			break;
415 		case WMI_PDEV_PARAM_ANI_OFDM_LEVEL:
416 			ret = wma->pdevconfig.ani_ofdm_level;
417 			break;
418 		case WMI_PDEV_PARAM_ANI_CCK_LEVEL:
419 			ret = wma->pdevconfig.ani_cck_level;
420 			break;
421 		case WMI_PDEV_PARAM_DYNAMIC_BW:
422 			ret = wma->pdevconfig.cwmenable;
423 			break;
424 		case WMI_PDEV_PARAM_CTS_CBW:
425 			ret = wma->pdevconfig.cts_cbw;
426 			break;
427 		case WMI_PDEV_PARAM_TX_CHAIN_MASK:
428 			ret = wma->pdevconfig.txchainmask;
429 			break;
430 		case WMI_PDEV_PARAM_RX_CHAIN_MASK:
431 			ret = wma->pdevconfig.rxchainmask;
432 			break;
433 		case WMI_PDEV_PARAM_TXPOWER_LIMIT2G:
434 			ret = wma->pdevconfig.txpow2g;
435 			break;
436 		case WMI_PDEV_PARAM_TXPOWER_LIMIT5G:
437 			ret = wma->pdevconfig.txpow5g;
438 			break;
439 		default:
440 			WMA_LOGE("Invalid cli_get pdev command/Not yet implemented 0x%x",
441 				 param_id);
442 			return -EINVAL;
443 		}
444 	} else if (GEN_CMD == vpdev) {
445 		switch (param_id) {
446 		case GEN_VDEV_PARAM_AMPDU:
447 			ret = intr[vdev_id].config.ampdu;
448 			break;
449 		case GEN_VDEV_PARAM_AMSDU:
450 			ret = intr[vdev_id].config.amsdu;
451 			break;
452 		case GEN_VDEV_ROAM_SYNCH_DELAY:
453 			ret = intr[vdev_id].roam_synch_delay;
454 			break;
455 		default:
456 			WMA_LOGE("Invalid generic vdev command/Not yet implemented 0x%x",
457 				 param_id);
458 			return -EINVAL;
459 		}
460 	} else if (PPS_CMD == vpdev) {
461 		switch (param_id) {
462 		case WMI_VDEV_PPS_PAID_MATCH:
463 			ret = intr[vdev_id].config.pps_params.paid_match_enable;
464 			break;
465 		case WMI_VDEV_PPS_GID_MATCH:
466 			ret = intr[vdev_id].config.pps_params.gid_match_enable;
467 			break;
468 		case WMI_VDEV_PPS_EARLY_TIM_CLEAR:
469 			ret = intr[vdev_id].config.pps_params.tim_clear;
470 			break;
471 		case WMI_VDEV_PPS_EARLY_DTIM_CLEAR:
472 			ret = intr[vdev_id].config.pps_params.dtim_clear;
473 			break;
474 		case WMI_VDEV_PPS_EOF_PAD_DELIM:
475 			ret = intr[vdev_id].config.pps_params.eof_delim;
476 			break;
477 		case WMI_VDEV_PPS_MACADDR_MISMATCH:
478 			ret = intr[vdev_id].config.pps_params.mac_match;
479 			break;
480 		case WMI_VDEV_PPS_DELIM_CRC_FAIL:
481 			ret = intr[vdev_id].config.pps_params.delim_fail;
482 			break;
483 		case WMI_VDEV_PPS_GID_NSTS_ZERO:
484 			ret = intr[vdev_id].config.pps_params.nsts_zero;
485 			break;
486 		case WMI_VDEV_PPS_RSSI_CHECK:
487 			ret = intr[vdev_id].config.pps_params.rssi_chk;
488 			break;
489 		default:
490 			WMA_LOGE("Invalid pps vdev command/Not yet implemented 0x%x",
491 				 param_id);
492 			return -EINVAL;
493 		}
494 	} else if (QPOWER_CMD == vpdev) {
495 		switch (param_id) {
496 		case WMI_STA_PS_PARAM_QPOWER_PSPOLL_COUNT:
497 			ret = intr[vdev_id].config.qpower_params.
498 			      max_ps_poll_cnt;
499 			break;
500 		case WMI_STA_PS_PARAM_QPOWER_MAX_TX_BEFORE_WAKE:
501 			ret = intr[vdev_id].config.qpower_params.
502 			      max_tx_before_wake;
503 			break;
504 		case WMI_STA_PS_PARAM_QPOWER_SPEC_PSPOLL_WAKE_INTERVAL:
505 			ret = intr[vdev_id].config.qpower_params.
506 			      spec_ps_poll_wake_interval;
507 			break;
508 		case WMI_STA_PS_PARAM_QPOWER_SPEC_MAX_SPEC_NODATA_PSPOLL:
509 			ret = intr[vdev_id].config.qpower_params.
510 			      max_spec_nodata_ps_poll;
511 			break;
512 		default:
513 			WMA_LOGE("Invalid generic vdev command/Not yet implemented 0x%x",
514 				 param_id);
515 			return -EINVAL;
516 		}
517 	} else if (GTX_CMD == vpdev) {
518 		switch (param_id) {
519 		case WMI_VDEV_PARAM_GTX_HT_MCS:
520 			ret = intr[vdev_id].config.gtx_info.gtxRTMask[0];
521 			break;
522 		case WMI_VDEV_PARAM_GTX_VHT_MCS:
523 			ret = intr[vdev_id].config.gtx_info.gtxRTMask[1];
524 			break;
525 		case WMI_VDEV_PARAM_GTX_USR_CFG:
526 			ret = intr[vdev_id].config.gtx_info.gtxUsrcfg;
527 			break;
528 		case WMI_VDEV_PARAM_GTX_THRE:
529 			ret = intr[vdev_id].config.gtx_info.gtxPERThreshold;
530 			break;
531 		case WMI_VDEV_PARAM_GTX_MARGIN:
532 			ret = intr[vdev_id].config.gtx_info.gtxPERMargin;
533 			break;
534 		case WMI_VDEV_PARAM_GTX_STEP:
535 			ret = intr[vdev_id].config.gtx_info.gtxTPCstep;
536 			break;
537 		case WMI_VDEV_PARAM_GTX_MINTPC:
538 			ret = intr[vdev_id].config.gtx_info.gtxTPCMin;
539 			break;
540 		case WMI_VDEV_PARAM_GTX_BW_MASK:
541 			ret = intr[vdev_id].config.gtx_info.gtxBWMask;
542 			break;
543 		default:
544 			WMA_LOGE("Invalid generic vdev command/Not yet implemented 0x%x",
545 				 param_id);
546 			return -EINVAL;
547 		}
548 	}
549 	return ret;
550 }
551 
552 /**
553  * wma_cli_set2_command() - WMA "set 2 params" command processor
554  * @vdev_id: virtual device for the command
555  * @param_id: parameter id
556  * @sval1: first parameter value
557  * @sval2: second parameter value
558  * @vpdev: parameter category
559  *
560  * Command handler for set operations which require 2 parameters
561  *
562  * Return: 0 on success, errno on failure
563  */
564 int wma_cli_set2_command(int vdev_id, int param_id, int sval1,
565 			 int sval2, int vpdev)
566 {
567 	struct scheduler_msg msg = { 0 };
568 	wma_cli_set_cmd_t *iwcmd;
569 
570 	iwcmd = qdf_mem_malloc(sizeof(*iwcmd));
571 	if (!iwcmd) {
572 		WMA_LOGE("%s: Failed alloc memory for iwcmd", __func__);
573 		return -ENOMEM;
574 	}
575 
576 	qdf_mem_zero(iwcmd, sizeof(*iwcmd));
577 	iwcmd->param_value = sval1;
578 	iwcmd->param_sec_value = sval2;
579 	iwcmd->param_vdev_id = vdev_id;
580 	iwcmd->param_id = param_id;
581 	iwcmd->param_vp_dev = vpdev;
582 	msg.type = WMA_CLI_SET_CMD;
583 	msg.reserved = 0;
584 	msg.bodyptr = iwcmd;
585 
586 	if (QDF_STATUS_SUCCESS !=
587 	    scheduler_post_msg(QDF_MODULE_ID_WMA, &msg)) {
588 		WMA_LOGE("%s: Failed to post WMA_CLI_SET_CMD msg",
589 			  __func__);
590 		qdf_mem_free(iwcmd);
591 		return -EIO;
592 	}
593 	return 0;
594 }
595 
/**
 * wma_cli_set_command() - WMA "set" command processor
 * @vdev_id: virtual device for the command
 * @param_id: parameter id
 * @sval: parameter value
 * @vpdev: parameter category
 *
 * Convenience wrapper around wma_cli_set2_command() for set operations
 * that take a single value; the second value is forced to zero.
 *
 * Return: 0 on success, errno on failure
 */
int wma_cli_set_command(int vdev_id, int param_id, int sval, int vpdev)
{
	return wma_cli_set2_command(vdev_id, param_id, sval, 0, vpdev);
}
612 
613 QDF_STATUS wma_form_unit_test_cmd_and_send(uint32_t vdev_id,
614 			uint32_t module_id, uint32_t arg_count, uint32_t *arg)
615 {
616 	struct wmi_unit_test_cmd *unit_test_args;
617 	tp_wma_handle wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
618 	uint32_t i;
619 	QDF_STATUS status;
620 
621 	WMA_LOGD(FL("enter"));
622 	if (arg_count >= WMA_MAX_NUM_ARGS) {
623 		WMA_LOGE(FL("arg_count is crossed the boundary"));
624 		return QDF_STATUS_E_FAILURE;
625 	}
626 	if (!wma_handle || !wma_handle->wmi_handle) {
627 		WMA_LOGE(FL("Invalid WMA/WMI handle"));
628 		return QDF_STATUS_E_FAILURE;
629 	}
630 	unit_test_args = qdf_mem_malloc(sizeof(*unit_test_args));
631 	if (NULL == unit_test_args) {
632 		WMA_LOGE(FL("qdf_mem_malloc failed for unit_test_args"));
633 		return QDF_STATUS_E_NOMEM;
634 	}
635 	unit_test_args->vdev_id = vdev_id;
636 	unit_test_args->module_id = module_id;
637 	unit_test_args->num_args = arg_count;
638 	for (i = 0; i < arg_count; i++)
639 		unit_test_args->args[i] = arg[i];
640 
641 	status = wmi_unified_unit_test_cmd(wma_handle->wmi_handle,
642 					   unit_test_args);
643 	qdf_mem_free(unit_test_args);
644 	WMA_LOGD(FL("exit"));
645 
646 	return status;
647 }
648 
649 static void wma_process_send_addba_req(tp_wma_handle wma_handle,
650 		struct send_add_ba_req *send_addba)
651 {
652 	QDF_STATUS status;
653 
654 	if (!wma_handle || !wma_handle->wmi_handle) {
655 		WMA_LOGE(FL("Invalid WMA/WMI handle"));
656 		qdf_mem_free(send_addba);
657 		return;
658 	}
659 
660 	status = wmi_unified_addba_send_cmd_send(wma_handle->wmi_handle,
661 					   send_addba->mac_addr,
662 					   &send_addba->param);
663 	if (QDF_STATUS_SUCCESS != status) {
664 		WMA_LOGE(FL("Failed to process WMA_SEND_ADDBA_REQ"));
665 	}
666 	WMA_LOGD(FL("sent ADDBA req to" MAC_ADDRESS_STR "tid %d buff_size %d"),
667 			MAC_ADDR_ARRAY(send_addba->mac_addr),
668 			send_addba->param.tidno,
669 			send_addba->param.buffersize);
670 
671 	qdf_mem_free(send_addba);
672 }
673 
/**
 * wma_ipa_get_stat() - get IPA data path stats from FW
 *
 * Return: 0 on success, errno on failure
 */
#ifdef IPA_OFFLOAD
static int wma_ipa_get_stat(void)
{
	struct cdp_pdev *pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (!pdev) {
		WMA_LOGE("pdev NULL for uc stat");
		return -EINVAL;
	}

	cdp_ipa_get_stat(cds_get_context(QDF_MODULE_ID_SOC), pdev);
	return 0;
}
#else
/* IPA offload not compiled in: nothing to query, report success */
static int wma_ipa_get_stat(void)
{
	return 0;
}
#endif
699 
700 /**
701  * wma_ipa_uc_get_share_stats() - get Tx/Rx byte stats from FW
702  * @privcmd: private command
703  *
704  * Return: 0 on success, errno on failure
705  */
706 #if defined(IPA_OFFLOAD) && defined(FEATURE_METERING)
707 static int wma_ipa_uc_get_share_stats(wma_cli_set_cmd_t *privcmd)
708 {
709 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
710 	struct cdp_pdev *pdev;
711 	uint8_t reset_stats = privcmd->param_value;
712 
713 	WMA_LOGD("%s: reset_stats=%d",
714 			"WMA_VDEV_TXRX_GET_IPA_UC_SHARING_STATS_CMDID",
715 			reset_stats);
716 	pdev = cds_get_context(QDF_MODULE_ID_TXRX);
717 	if (!pdev) {
718 		WMA_LOGE("pdev NULL for uc get share stats");
719 		return -EINVAL;
720 	}
721 	cdp_ipa_uc_get_share_stats(soc, pdev, reset_stats);
722 
723 	return 0;
724 }
725 #else
726 static int wma_ipa_uc_get_share_stats(wma_cli_set_cmd_t *privcmd)
727 {
728 	return 0;
729 }
730 #endif
731 
732 /**
733  * wma_ipa_uc_set_quota() - set quota limit to FW
734  * @privcmd: private command
735  *
736  * Return: 0 on success, errno on failure
737  */
738 #if defined(IPA_OFFLOAD) && defined(FEATURE_METERING)
739 static int wma_ipa_uc_set_quota(wma_cli_set_cmd_t *privcmd)
740 {
741 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
742 	struct cdp_pdev *pdev;
743 	uint64_t quota_bytes = privcmd->param_sec_value;
744 
745 	quota_bytes <<= 32;
746 	quota_bytes |= privcmd->param_value;
747 
748 	WMA_LOGD("%s: quota_bytes=%llu",
749 			"WMA_VDEV_TXRX_SET_IPA_UC_QUOTA_CMDID",
750 			quota_bytes);
751 	pdev = cds_get_context(QDF_MODULE_ID_TXRX);
752 	if (!pdev) {
753 		WMA_LOGE("pdev NULL for uc set quota");
754 		return -EINVAL;
755 	}
756 	cdp_ipa_uc_set_quota(soc, pdev, quota_bytes);
757 
758 	return 0;
759 }
760 #else
761 static int wma_ipa_uc_set_quota(wma_cli_set_cmd_t *privcmd)
762 {
763 	return 0;
764 }
765 #endif
766 
767 /**
768  * wma_set_priv_cfg() - set private config parameters
769  * @wma_handle: wma handle
770  * @privcmd: private command
771  *
772  * Return: 0 for success or error code
773  */
774 static int32_t wma_set_priv_cfg(tp_wma_handle wma_handle,
775 				wma_cli_set_cmd_t *privcmd)
776 {
777 	int32_t ret = 0;
778 
779 	switch (privcmd->param_id) {
780 	case WMA_VDEV_TXRX_FWSTATS_ENABLE_CMDID:
781 		ret = wma_set_txrx_fw_stats_level(wma_handle,
782 						  privcmd->param_vdev_id,
783 						  privcmd->param_value);
784 		break;
785 	case WMA_VDEV_TXRX_FWSTATS_RESET_CMDID:
786 		ret = wma_txrx_fw_stats_reset(wma_handle,
787 					      privcmd->param_vdev_id,
788 					      privcmd->param_value);
789 		break;
790 	case WMI_STA_SMPS_FORCE_MODE_CMDID:
791 		ret = wma_set_mimops(wma_handle,
792 				     privcmd->param_vdev_id,
793 				     privcmd->param_value);
794 		break;
795 	case WMI_STA_SMPS_PARAM_CMDID:
796 		wma_set_smps_params(wma_handle, privcmd->param_vdev_id,
797 				    privcmd->param_value);
798 		break;
799 	case WMA_VDEV_MCC_SET_TIME_LATENCY:
800 	{
801 		/* Extract first MCC adapter/vdev channel number and latency */
802 		uint8_t mcc_channel = privcmd->param_value & 0x000000FF;
803 		uint8_t mcc_channel_latency =
804 			(privcmd->param_value & 0x0000FF00) >> 8;
805 		int ret = -1;
806 
807 		WMA_LOGD("%s: Parsed input: Channel #1:%d, latency:%dms",
808 			__func__, mcc_channel, mcc_channel_latency);
809 		ret = wma_set_mcc_channel_time_latency(wma_handle,
810 						       mcc_channel,
811 						       mcc_channel_latency);
812 	}
813 		break;
814 	case WMA_VDEV_MCC_SET_TIME_QUOTA:
815 	{
816 		/* Extract the MCC 2 adapters/vdevs channel numbers and time
817 		 * quota value for the first adapter only (which is specified
818 		 * in iwpriv command.
819 		 */
820 		uint8_t adapter_2_chan_number =
821 			privcmd->param_value & 0x000000FF;
822 		uint8_t adapter_1_chan_number =
823 			(privcmd->param_value & 0x0000FF00) >> 8;
824 		uint8_t adapter_1_quota =
825 			(privcmd->param_value & 0x00FF0000) >> 16;
826 		int ret = -1;
827 
828 		WMA_LOGD("%s: Parsed input: Channel #1:%d, Channel #2:%d, quota 1:%dms",
829 			  __func__, adapter_1_chan_number,
830 			 adapter_2_chan_number, adapter_1_quota);
831 
832 		ret = wma_set_mcc_channel_time_quota(wma_handle,
833 						     adapter_1_chan_number,
834 						     adapter_1_quota,
835 						     adapter_2_chan_number);
836 	}
837 		break;
838 	case WMA_VDEV_IBSS_SET_ATIM_WINDOW_SIZE:
839 	{
840 		wma_handle->wma_ibss_power_save_params.atimWindowLength =
841 							privcmd->param_value;
842 		WMA_LOGD("%s: IBSS power save ATIM Window = %d",
843 			 __func__, wma_handle->wma_ibss_power_save_params.
844 			 atimWindowLength);
845 	}
846 		break;
847 	case WMA_VDEV_IBSS_SET_POWER_SAVE_ALLOWED:
848 	{
849 		wma_handle->wma_ibss_power_save_params.isPowerSaveAllowed =
850 							privcmd->param_value;
851 		WMA_LOGD("%s: IBSS is Power Save Allowed = %d",
852 			 __func__, wma_handle->wma_ibss_power_save_params.
853 			 isPowerSaveAllowed);
854 	}
855 		break;
856 	case WMA_VDEV_IBSS_SET_POWER_COLLAPSE_ALLOWED:
857 	{
858 		wma_handle->wma_ibss_power_save_params.	isPowerCollapseAllowed =
859 							 privcmd->param_value;
860 		WMA_LOGD("%s: IBSS is Power Collapse Allowed = %d",
861 			 __func__, wma_handle->wma_ibss_power_save_params.
862 			 isPowerCollapseAllowed);
863 	}
864 		break;
865 	case WMA_VDEV_IBSS_SET_AWAKE_ON_TX_RX:
866 	{
867 		wma_handle->wma_ibss_power_save_params.isAwakeonTxRxEnabled =
868 							 privcmd->param_value;
869 		WMA_LOGD("%s: IBSS Power Save Awake on Tx/Rx Enabled = %d",
870 			__func__, wma_handle->wma_ibss_power_save_params.
871 			isAwakeonTxRxEnabled);
872 	}
873 		break;
874 	case WMA_VDEV_IBSS_SET_INACTIVITY_TIME:
875 	{
876 		wma_handle->wma_ibss_power_save_params.inactivityCount =
877 							privcmd->param_value;
878 		WMA_LOGD("%s: IBSS Power Save Data Inactivity Count = %d",
879 			__func__, wma_handle->wma_ibss_power_save_params.
880 			inactivityCount);
881 	}
882 		break;
883 	case WMA_VDEV_IBSS_SET_TXSP_END_INACTIVITY_TIME:
884 	{
885 		wma_handle->wma_ibss_power_save_params.txSPEndInactivityTime =
886 							 privcmd->param_value;
887 		WMA_LOGD("%s: IBSS Power Save Transmit EOSP inactivity time out = %d",
888 			__func__, wma_handle->wma_ibss_power_save_params.
889 			txSPEndInactivityTime);
890 	}
891 		break;
892 	case WMA_VDEV_IBSS_PS_SET_WARMUP_TIME_SECS:
893 	{
894 		wma_handle->wma_ibss_power_save_params.ibssPsWarmupTime =
895 							privcmd->param_value;
896 		WMA_LOGD("%s: IBSS Power Save Warm Up Time in Seconds = %d",
897 			__func__, wma_handle->wma_ibss_power_save_params.
898 			ibssPsWarmupTime);
899 	}
900 		break;
901 	case WMA_VDEV_IBSS_PS_SET_1RX_CHAIN_IN_ATIM_WINDOW:
902 	{
903 		wma_handle->wma_ibss_power_save_params.ibssPs1RxChainInAtimEnable
904 							 = privcmd->param_value;
905 		WMA_LOGD("%s: IBSS Power Save single RX Chain Enable In ATIM  = %d",
906 			__func__, wma_handle->wma_ibss_power_save_params.
907 			ibssPs1RxChainInAtimEnable);
908 	}
909 		break;
910 
911 	case WMA_VDEV_TXRX_GET_IPA_UC_FW_STATS_CMDID:
912 	{
913 		wma_ipa_get_stat();
914 	}
915 		break;
916 
917 	case WMA_VDEV_TXRX_GET_IPA_UC_SHARING_STATS_CMDID:
918 	{
919 		wma_ipa_uc_get_share_stats(privcmd);
920 	}
921 		break;
922 
923 	case WMA_VDEV_TXRX_SET_IPA_UC_QUOTA_CMDID:
924 	{
925 		wma_ipa_uc_set_quota(privcmd);
926 
927 	}
928 		break;
929 
930 	default:
931 		WMA_LOGE("Invalid wma config command id:%d", privcmd->param_id);
932 		ret = -EINVAL;
933 	}
934 	return ret;
935 }
936 
937 /**
938  * wma_set_dtim_period() - set dtim period to FW
939  * @wma: wma handle
940  * @dtim_params: dtim params
941  *
942  * Return: none
943  */
944 static void wma_set_dtim_period(tp_wma_handle wma,
945 				struct set_dtim_params *dtim_params)
946 {
947 	struct wma_txrx_node *iface =
948 		&wma->interfaces[dtim_params->session_id];
949 	if (!wma_is_vdev_valid(dtim_params->session_id)) {
950 		WMA_LOGE("%s: invalid VDEV", __func__);
951 		return;
952 	}
953 	WMA_LOGD("%s: set dtim_period %d", __func__,
954 			dtim_params->dtim_period);
955 	iface->dtimPeriod = dtim_params->dtim_period;
956 
957 }
958 /**
959  * wma_set_modulated_dtim() - function to configure modulated dtim
960  * @wma: wma handle
961  * @privcmd: structure containing parameters
962  *
963  * This function configures the modulated dtim in firmware
964  *
965  * Return: none
966  */
967 static void wma_set_modulated_dtim(tp_wma_handle wma,
968 				   wma_cli_set_cmd_t *privcmd)
969 {
970 	uint8_t vdev_id = privcmd->param_vdev_id;
971 	struct wma_txrx_node *iface =
972 		&wma->interfaces[vdev_id];
973 	bool prev_dtim_enabled;
974 	uint32_t listen_interval;
975 	QDF_STATUS ret;
976 
977 	iface->alt_modulated_dtim = privcmd->param_value;
978 
979 	prev_dtim_enabled = iface->alt_modulated_dtim_enabled;
980 
981 	if (1 != privcmd->param_value)
982 		iface->alt_modulated_dtim_enabled = true;
983 	else
984 		iface->alt_modulated_dtim_enabled = false;
985 
986 	if ((true == iface->alt_modulated_dtim_enabled) ||
987 	    (true == prev_dtim_enabled)) {
988 
989 		listen_interval = iface->alt_modulated_dtim
990 			* iface->dtimPeriod;
991 
992 		ret = wma_vdev_set_param(wma->wmi_handle,
993 						privcmd->param_vdev_id,
994 						WMI_VDEV_PARAM_LISTEN_INTERVAL,
995 						listen_interval);
996 		if (QDF_IS_STATUS_ERROR(ret))
997 			/* Even if it fails, continue */
998 			WMA_LOGW("Failed to set listen interval %d",
999 				 listen_interval);
1000 
1001 		ret = wma_vdev_set_param(wma->wmi_handle,
1002 						privcmd->param_vdev_id,
1003 						WMI_VDEV_PARAM_DTIM_POLICY,
1004 						NORMAL_DTIM);
1005 		if (QDF_IS_STATUS_ERROR(ret))
1006 			WMA_LOGE("Failed to Set to Normal DTIM policy");
1007 	}
1008 }
1009 
1010 /**
1011  * wma_override_listen_interval() - function to override static/ini based LI
1012  * @wma: wma handle
1013  * @privcmd: structure containing parameters
1014  *
1015  * This function override static/ini based LI in firmware
1016  *
1017  * Return: none
1018  */
1019 static void wma_override_listen_interval(tp_wma_handle wma,
1020 				   wma_cli_set_cmd_t *privcmd)
1021 {
1022 	uint8_t vdev_id = privcmd->param_vdev_id;
1023 	struct wma_txrx_node *iface =
1024 		&wma->interfaces[vdev_id];
1025 	u32 old_override_li, new_override_li, listen_interval;
1026 	struct sAniSirGlobal *mac;
1027 	QDF_STATUS ret;
1028 
1029 	mac = cds_get_context(QDF_MODULE_ID_PE);
1030 	if (!mac) {
1031 		WMA_LOGE(FL("Failed to get mac context"));
1032 		return;
1033 	}
1034 
1035 	old_override_li = iface->override_li;
1036 	new_override_li = privcmd->param_value;
1037 	iface->override_li = new_override_li;
1038 
1039 	if (new_override_li &&
1040 	    (new_override_li != old_override_li)) {
1041 		listen_interval = new_override_li;
1042 	} else if (!new_override_li &&
1043 		   (new_override_li != old_override_li)) {
1044 		/* Configure default LI as we do on resume */
1045 		if ((wlan_cfg_get_int(mac, WNI_CFG_LISTEN_INTERVAL,
1046 				      &listen_interval) != eSIR_SUCCESS)) {
1047 			QDF_TRACE(QDF_MODULE_ID_WMA, QDF_TRACE_LEVEL_ERROR,
1048 				  "Failed to get value for listen interval");
1049 			listen_interval = POWERSAVE_DEFAULT_LISTEN_INTERVAL;
1050 		}
1051 	} else {
1052 		return;
1053 	}
1054 
1055 	ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
1056 			WMI_VDEV_PARAM_LISTEN_INTERVAL,
1057 			listen_interval);
1058 	if (QDF_IS_STATUS_ERROR(ret)) {
1059 		/* Even it fails continue Fw will take default LI */
1060 		WMA_LOGE("Failed to Set Listen Interval vdevId %d",
1061 			 vdev_id);
1062 	}
1063 	WMA_LOGD("%s: Set Listen Interval vdevId %d Listen Intv %d",
1064 			__func__, vdev_id, listen_interval);
1065 	ret = wma_vdev_set_param(wma->wmi_handle,
1066 			privcmd->param_vdev_id,
1067 			WMI_VDEV_PARAM_DTIM_POLICY,
1068 			NORMAL_DTIM);
1069 	if (QDF_IS_STATUS_ERROR(ret))
1070 		WMA_LOGE("Failed to Set to Normal DTIM policy");
1071 
1072 }
1073 
1074 
1075 /**
1076  * wma_process_cli_set_cmd() - set parameters to fw
1077  * @wma: wma handle
1078  * @privcmd: command
1079  *
1080  * Return: none
1081  */
static void wma_process_cli_set_cmd(tp_wma_handle wma,
				    wma_cli_set_cmd_t *privcmd)
{
	/*
	 * Dispatcher for wma_cli_set commands.  Routes by
	 * privcmd->param_vp_dev (VDEV/PDEV/GEN/DBG/PPS/QPOWER/GTX) and
	 * then, after the switch, mirrors selected vdev/pdev parameters
	 * into the local config cache for param_vp_dev values 1, 2 and 5.
	 */
	int vid = privcmd->param_vdev_id, pps_val = 0;
	QDF_STATUS ret;
	struct wma_txrx_node *intr = wma->interfaces;
	tpAniSirGlobal pMac = cds_get_context(QDF_MODULE_ID_PE);
	struct qpower_params *qparams = &intr[vid].config.qpower_params;
	struct pdev_params pdev_param;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
	struct target_psoc_info *tgt_hdl;
	struct sir_set_tx_rx_aggregation_size aggr;

	WMA_LOGD("wmihandle %pK", wma->wmi_handle);
	qdf_mem_zero(&aggr, sizeof(aggr));

	if (NULL == pMac) {
		WMA_LOGE("%s: Failed to get pMac", __func__);
		return;
	}

	tgt_hdl = wlan_psoc_get_tgt_if_handle(wma->psoc);
	if (!tgt_hdl) {
		WMA_LOGE("%s: target psoc info is NULL", __func__);
		return;
	}

	if (privcmd->param_id >= WMI_CMDID_MAX) {
		/*
		 * This configuration setting is not done using any wmi
		 * command, call appropriate handler.
		 */
		if (wma_set_priv_cfg(wma, privcmd))
			WMA_LOGE("Failed to set wma priv congiuration");
		return;
	}

	switch (privcmd->param_vp_dev) {
	case VDEV_CMD:
		/* Per-vdev WMI parameter: validate the vdev, then send */
		if (!wma->interfaces[privcmd->param_vdev_id].is_vdev_valid) {
			WMA_LOGE("%s Vdev id is not valid", __func__);
			return;
		}

		WMA_LOGD("vdev id %d pid %d pval %d", privcmd->param_vdev_id,
			 privcmd->param_id, privcmd->param_value);
		ret = wma_vdev_set_param(wma->wmi_handle,
						      privcmd->param_vdev_id,
						      privcmd->param_id,
						      privcmd->param_value);
		if (QDF_IS_STATUS_ERROR(ret)) {
			WMA_LOGE("wma_vdev_set_param failed ret %d",
				  ret);
			return;
		}
		break;
	case PDEV_CMD:
		/* Pdev-wide WMI parameter sent with the wildcard pdev id */
		WMA_LOGD("pdev pid %d pval %d", privcmd->param_id,
			 privcmd->param_value);
		if ((privcmd->param_id == WMI_PDEV_PARAM_RX_CHAIN_MASK) ||
		    (privcmd->param_id == WMI_PDEV_PARAM_TX_CHAIN_MASK)) {
			/* Chainmask must fit the number of RF chains */
			if (QDF_STATUS_SUCCESS !=
					wma_check_txrx_chainmask(
					target_if_get_num_rf_chains(tgt_hdl),
					privcmd->param_value)) {
				WMA_LOGD("Chainmask value is invalid");
				return;
			}
		}
		pdev_param.param_id = privcmd->param_id;
		pdev_param.param_value = privcmd->param_value;
		ret = wmi_unified_pdev_param_send(wma->wmi_handle,
						 &pdev_param,
						 WMA_WILDCARD_PDEV_ID);
		if (QDF_IS_STATUS_ERROR(ret)) {
			/* NOTE(review): log text names wma_vdev_set_param,
			 * but this is the pdev param path.
			 */
			WMA_LOGE("wma_vdev_set_param failed ret %d",
				 ret);
			return;
		}
		break;
	case GEN_CMD:
	{
		struct cdp_vdev *vdev = NULL;
		/* NOTE(review): this 'intr' shadows the outer declaration;
		 * both alias wma->interfaces, so behavior is unchanged.
		 */
		struct wma_txrx_node *intr = wma->interfaces;

		vdev = wma_find_vdev_by_id(wma, privcmd->param_vdev_id);
		if (!vdev) {
			WMA_LOGE("%s:Invalid vdev handle", __func__);
			return;
		}

		WMA_LOGD("gen pid %d pval %d", privcmd->param_id,
			 privcmd->param_value);

		switch (privcmd->param_id) {
		case GEN_VDEV_PARAM_AMSDU:
		case GEN_VDEV_PARAM_AMPDU:
			if (!soc) {
				WMA_LOGE("%s:SOC context is NULL", __func__);
				return;
			}

			if (privcmd->param_id == GEN_VDEV_PARAM_AMPDU) {
				/* AMPDU also updates the datapath config
				 * before sending the aggregation size.
				 */
				ret = cdp_aggr_cfg(soc, vdev,
						privcmd->param_value, 0);
				if (ret)
					WMA_LOGE("cdp_aggr_cfg set ampdu failed ret %d",
						ret);
				else
					intr[privcmd->param_vdev_id].config.
						ampdu = privcmd->param_value;

				aggr.aggr_type =
					WMI_VDEV_CUSTOM_AGGR_TYPE_AMPDU;
			} else {
				aggr.aggr_type =
					WMI_VDEV_CUSTOM_AGGR_TYPE_AMSDU;
			}

			/* Same value applies to both TX and RX directions */
			aggr.vdev_id = vid;
			aggr.tx_aggregation_size = privcmd->param_value;
			aggr.rx_aggregation_size = privcmd->param_value;

			ret = wma_set_tx_rx_aggregation_size(&aggr);
			if (QDF_IS_STATUS_ERROR(ret)) {
				WMA_LOGE("set_aggr_size failed ret %d", ret);
				return;
			}
			break;
		case GEN_PARAM_CRASH_INJECT:
			/* Deliberate FW crash for debug; blocked in FTM */
			if (QDF_GLOBAL_FTM_MODE  == cds_get_conparam())
				WMA_LOGE("Crash inject not allowed in FTM mode");
			else
				ret = wma_crash_inject(wma,
						privcmd->param_value,
						privcmd->param_sec_value);
			break;
		case GEN_PARAM_CAPTURE_TSF:
			ret = wma_capture_tsf(wma, privcmd->param_value);
			break;
		case GEN_PARAM_RESET_TSF_GPIO:
			ret = wma_reset_tsf_gpio(wma, privcmd->param_value);
			break;
		case GEN_PARAM_MODULATED_DTIM:
			wma_set_modulated_dtim(wma, privcmd);
			break;
		case GEN_PARAM_LISTEN_INTERVAL:
			wma_override_listen_interval(wma, privcmd);
			break;
		default:
			WMA_LOGE("Invalid param id 0x%x",
				 privcmd->param_id);
			break;
		}
		break;
	}
	case DBG_CMD:
		/* Firmware debug-log and profiling controls */
		WMA_LOGD("dbg pid %d pval %d", privcmd->param_id,
			 privcmd->param_value);
		switch (privcmd->param_id) {
		case WMI_DBGLOG_LOG_LEVEL:
			ret = dbglog_set_log_lvl(wma->wmi_handle,
						   privcmd->param_value);
			if (ret)
				WMA_LOGE("dbglog_set_log_lvl failed ret %d",
					 ret);
			break;
		case WMI_DBGLOG_VAP_ENABLE:
			ret = dbglog_vap_log_enable(wma->wmi_handle,
						    privcmd->param_value, true);
			if (ret)
				WMA_LOGE("dbglog_vap_log_enable failed ret %d",
					 ret);
			break;
		case WMI_DBGLOG_VAP_DISABLE:
			ret = dbglog_vap_log_enable(wma->wmi_handle,
						privcmd->param_value, false);
			if (ret)
				WMA_LOGE("dbglog_vap_log_enable failed ret %d",
					 ret);
			break;
		case WMI_DBGLOG_MODULE_ENABLE:
			ret = dbglog_module_log_enable(wma->wmi_handle,
						privcmd->param_value, true);
			if (ret)
				WMA_LOGE("dbglog_module_log_enable failed ret %d",
					 ret);
			break;
		case WMI_DBGLOG_MODULE_DISABLE:
			ret = dbglog_module_log_enable(wma->wmi_handle,
						privcmd->param_value, false);
			if (ret)
				WMA_LOGE("dbglog_module_log_enable failed ret %d",
					 ret);
			break;
		case WMI_DBGLOG_MOD_LOG_LEVEL:
			ret = dbglog_set_mod_log_lvl(wma->wmi_handle,
						       privcmd->param_value);
			if (ret)
				/* NOTE(review): message names the wrong
				 * helper; this call is set_mod_log_lvl.
				 */
				WMA_LOGE("dbglog_module_log_enable failed ret %d",
					 ret);
			break;
		case WMI_DBGLOG_TYPE:
			ret = dbglog_parser_type_init(wma->wmi_handle,
							privcmd->param_value);
			if (ret)
				WMA_LOGE("dbglog_parser_type_init failed ret %d",
					 ret);
			break;
		case WMI_DBGLOG_REPORT_ENABLE:
			ret = dbglog_report_enable(wma->wmi_handle,
						     privcmd->param_value);
			if (ret)
				WMA_LOGE("dbglog_report_enable failed ret %d",
					 ret);
			break;
		case WMI_WLAN_PROFILE_TRIGGER_CMDID:
			ret = wma_unified_fw_profiling_cmd(wma->wmi_handle,
					 WMI_WLAN_PROFILE_TRIGGER_CMDID,
					 privcmd->param_value, 0);
			if (ret)
				WMA_LOGE("Profile cmd failed for %d ret %d",
					WMI_WLAN_PROFILE_TRIGGER_CMDID, ret);
			break;
		case WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID:
			ret = wma_unified_fw_profiling_cmd(wma->wmi_handle,
				  WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
				  privcmd->param_value,
				  privcmd->param_sec_value);
			if (ret)
				WMA_LOGE("Profile cmd failed for %d ret %d",
				   WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
				   ret);
			break;
		case WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID:
			ret = wma_unified_fw_profiling_cmd(wma->wmi_handle,
					 WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
					 privcmd->param_value,
					 privcmd->param_sec_value);
			if (ret)
				WMA_LOGE("Profile cmd failed for %d ret %d",
					WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
					ret);
			break;
		case WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID:
			ret = wma_unified_fw_profiling_cmd(wma->wmi_handle,
					 WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
					 0, 0);
			if (ret)
				WMA_LOGE("Profile cmd failed for %d ret %d",
					WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
					ret);
			break;
		case WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID:
			ret = wma_unified_fw_profiling_cmd(wma->wmi_handle,
					WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
					0, 0);
			if (ret)
				WMA_LOGE("Profile cmd failed for %d ret %d",
				   WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
				   ret);
			break;
		case WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID:
			/* Set the Green AP */
			ret = wmi_unified_green_ap_ps_send
					(wma->wmi_handle, privcmd->param_value,
					 WMA_WILDCARD_PDEV_ID);
			if (ret) {
				WMA_LOGE("Set GreenAP Failed val %d",
					 privcmd->param_value);
			}
			break;

		default:
			WMA_LOGE("Invalid param id 0x%x", privcmd->param_id);
			break;
		}
		break;
	case PPS_CMD:
		/*
		 * Packet power save: each case packs the enable bit into the
		 * upper half of pps_val and the PPS feature id into the lower
		 * half, caches the setting, and relies on the
		 * param_vp_dev == 5 block after this switch to send pps_val
		 * as WMI_VDEV_PARAM_PACKET_POWERSAVE.
		 */
		WMA_LOGD("dbg pid %d pval %d", privcmd->param_id,
			 privcmd->param_value);
		switch (privcmd->param_id) {

		case WMI_VDEV_PPS_PAID_MATCH:
			pps_val = ((privcmd->param_value << 31) & 0xffff0000) |
				  (PKT_PWR_SAVE_PAID_MATCH & 0xffff);
			intr[vid].config.pps_params.paid_match_enable =
				privcmd->param_value;
			break;
		case WMI_VDEV_PPS_GID_MATCH:
			pps_val = ((privcmd->param_value << 31) & 0xffff0000) |
				  (PKT_PWR_SAVE_GID_MATCH & 0xffff);
			intr[vid].config.pps_params.gid_match_enable =
				privcmd->param_value;
			break;
		case WMI_VDEV_PPS_EARLY_TIM_CLEAR:
			pps_val = ((privcmd->param_value << 31) & 0xffff0000) |
				  (PKT_PWR_SAVE_EARLY_TIM_CLEAR & 0xffff);
			intr[vid].config.pps_params.tim_clear =
				privcmd->param_value;
			break;
		case WMI_VDEV_PPS_EARLY_DTIM_CLEAR:
			pps_val = ((privcmd->param_value << 31) & 0xffff0000) |
				  (PKT_PWR_SAVE_EARLY_DTIM_CLEAR & 0xffff);
			intr[vid].config.pps_params.dtim_clear =
				privcmd->param_value;
			break;
		case WMI_VDEV_PPS_EOF_PAD_DELIM:
			pps_val = ((privcmd->param_value << 31) & 0xffff0000) |
				  (PKT_PWR_SAVE_EOF_PAD_DELIM & 0xffff);
			intr[vid].config.pps_params.eof_delim =
				privcmd->param_value;
			break;
		case WMI_VDEV_PPS_MACADDR_MISMATCH:
			pps_val = ((privcmd->param_value << 31) & 0xffff0000) |
				  (PKT_PWR_SAVE_MACADDR_MISMATCH & 0xffff);
			intr[vid].config.pps_params.mac_match =
				privcmd->param_value;
			break;
		case WMI_VDEV_PPS_DELIM_CRC_FAIL:
			pps_val = ((privcmd->param_value << 31) & 0xffff0000) |
				  (PKT_PWR_SAVE_DELIM_CRC_FAIL & 0xffff);
			intr[vid].config.pps_params.delim_fail =
				privcmd->param_value;
			break;
		case WMI_VDEV_PPS_GID_NSTS_ZERO:
			pps_val = ((privcmd->param_value << 31) & 0xffff0000) |
				  (PKT_PWR_SAVE_GID_NSTS_ZERO & 0xffff);
			intr[vid].config.pps_params.nsts_zero =
				privcmd->param_value;
			break;
		case WMI_VDEV_PPS_RSSI_CHECK:
			pps_val = ((privcmd->param_value << 31) & 0xffff0000) |
				  (PKT_PWR_SAVE_RSSI_CHECK & 0xffff);
			intr[vid].config.pps_params.rssi_chk =
				privcmd->param_value;
			break;
		case WMI_VDEV_PPS_5G_EBT:
			pps_val = ((privcmd->param_value << 31) & 0xffff0000) |
				  (PKT_PWR_SAVE_5G_EBT & 0xffff);
			intr[vid].config.pps_params.ebt_5g =
				privcmd->param_value;
			break;
		default:
			WMA_LOGE("Invalid param id 0x%x", privcmd->param_id);
			break;
		}
		break;

	case QPOWER_CMD:
		/* QPower STA powersave tuning; cache only on success */
		WMA_LOGD("QPOWER CLI CMD pid %d pval %d", privcmd->param_id,
			 privcmd->param_value);
		switch (privcmd->param_id) {
		case WMI_STA_PS_PARAM_QPOWER_PSPOLL_COUNT:
			WMA_LOGD("QPOWER CLI CMD:Ps Poll Cnt val %d",
				 privcmd->param_value);
			/* Set the QPower Ps Poll Count */
			ret = wma_unified_set_sta_ps_param(wma->wmi_handle,
				vid, WMI_STA_PS_PARAM_QPOWER_PSPOLL_COUNT,
				privcmd->param_value);
			if (ret) {
				WMA_LOGE("Set Q-PsPollCnt Failed vdevId %d val %d",
					vid, privcmd->param_value);
			} else {
				qparams->max_ps_poll_cnt = privcmd->param_value;
			}
			break;
		case WMI_STA_PS_PARAM_QPOWER_MAX_TX_BEFORE_WAKE:
			WMA_LOGD("QPOWER CLI CMD:Max Tx Before wake val %d",
				 privcmd->param_value);
			/* Set the QPower Max Tx Before Wake */
			ret = wma_unified_set_sta_ps_param(wma->wmi_handle,
				vid, WMI_STA_PS_PARAM_QPOWER_MAX_TX_BEFORE_WAKE,
				privcmd->param_value);
			if (ret) {
				WMA_LOGE("Set Q-MaxTxBefWake Failed vId %d val %d",
					vid, privcmd->param_value);
			} else {
				qparams->max_tx_before_wake =
						privcmd->param_value;
			}
			break;
		case WMI_STA_PS_PARAM_QPOWER_SPEC_PSPOLL_WAKE_INTERVAL:
			WMA_LOGD("QPOWER CLI CMD:Ps Poll Wake Inv val %d",
				 privcmd->param_value);
			/* Set the QPower Spec Ps Poll Wake Inv */
			ret = wma_unified_set_sta_ps_param(wma->wmi_handle, vid,
				WMI_STA_PS_PARAM_QPOWER_SPEC_PSPOLL_WAKE_INTERVAL,
				privcmd->param_value);
			if (ret) {
				WMA_LOGE("Set Q-PsPoll WakeIntv Failed vId %d val %d",
					vid, privcmd->param_value);
			} else {
				qparams->spec_ps_poll_wake_interval =
					privcmd->param_value;
			}
			break;
		case WMI_STA_PS_PARAM_QPOWER_SPEC_MAX_SPEC_NODATA_PSPOLL:
			WMA_LOGD("QPOWER CLI CMD:Spec NoData Ps Poll val %d",
				 privcmd->param_value);
			/* Set the QPower Spec NoData PsPoll */
			ret = wma_unified_set_sta_ps_param(wma->wmi_handle, vid,
				WMI_STA_PS_PARAM_QPOWER_SPEC_MAX_SPEC_NODATA_PSPOLL,
				privcmd->param_value);
			if (ret) {
				WMA_LOGE("Set Q-SpecNoDataPsPoll Failed vId %d val %d",
					vid, privcmd->param_value);
			} else {
				qparams->max_spec_nodata_ps_poll =
					privcmd->param_value;
			}
			break;

		default:
			WMA_LOGE("Invalid param id 0x%x", privcmd->param_id);
			break;
		}
		break;
	case GTX_CMD:
		/*
		 * Green TX: each case updates one field of the cached
		 * gtx_info and re-sends the whole config to FW.  Only the
		 * BW_MASK case checks the send result.
		 */
		WMA_LOGD("vdev id %d pid %d pval %d", privcmd->param_vdev_id,
			 privcmd->param_id, privcmd->param_value);
		switch (privcmd->param_id) {
		case WMI_VDEV_PARAM_GTX_HT_MCS:
			intr[vid].config.gtx_info.gtxRTMask[0] =
				privcmd->param_value;
			ret = wmi_unified_vdev_set_gtx_cfg_send(wma->wmi_handle,
					privcmd->param_vdev_id,
					&intr[vid].config.gtx_info);
			break;
		case WMI_VDEV_PARAM_GTX_VHT_MCS:
			intr[vid].config.gtx_info.gtxRTMask[1] =
				privcmd->param_value;
			ret = wmi_unified_vdev_set_gtx_cfg_send(wma->wmi_handle,
					privcmd->param_vdev_id,
					&intr[vid].config.gtx_info);
			break;

		case WMI_VDEV_PARAM_GTX_USR_CFG:
			intr[vid].config.gtx_info.gtxUsrcfg =
				privcmd->param_value;
			ret = wmi_unified_vdev_set_gtx_cfg_send(wma->wmi_handle,
					privcmd->param_vdev_id,
					&intr[vid].config.gtx_info);
			break;

		case WMI_VDEV_PARAM_GTX_THRE:
			intr[vid].config.gtx_info.gtxPERThreshold =
				privcmd->param_value;
			ret = wmi_unified_vdev_set_gtx_cfg_send(wma->wmi_handle,
					privcmd->param_vdev_id,
					&intr[vid].config.gtx_info);
			break;

		case WMI_VDEV_PARAM_GTX_MARGIN:
			intr[vid].config.gtx_info.gtxPERMargin =
				privcmd->param_value;
			ret = wmi_unified_vdev_set_gtx_cfg_send(wma->wmi_handle,
					privcmd->param_vdev_id,
					&intr[vid].config.gtx_info);
			break;

		case WMI_VDEV_PARAM_GTX_STEP:
			intr[vid].config.gtx_info.gtxTPCstep =
				privcmd->param_value;
			ret = wmi_unified_vdev_set_gtx_cfg_send(wma->wmi_handle,
					privcmd->param_vdev_id,
					&intr[vid].config.gtx_info);
			break;

		case WMI_VDEV_PARAM_GTX_MINTPC:
			intr[vid].config.gtx_info.gtxTPCMin =
				privcmd->param_value;
			ret = wmi_unified_vdev_set_gtx_cfg_send(wma->wmi_handle,
					privcmd->param_vdev_id,
					&intr[vid].config.gtx_info);
			break;

		case WMI_VDEV_PARAM_GTX_BW_MASK:
			intr[vid].config.gtx_info.gtxBWMask =
				privcmd->param_value;
			ret = wmi_unified_vdev_set_gtx_cfg_send(wma->wmi_handle,
					privcmd->param_vdev_id,
					&intr[vid].config.gtx_info);
			if (ret) {
				WMA_LOGE("wma_vdev_set_param failed ret %d",
					 ret);
				return;
			}
			break;
		default:
			break;
		}
		break;

	default:
		WMA_LOGE("Invalid vpdev command id");
	}
	/* Post-processing: mirror the value into local caches.
	 * param_vp_dev 1 == VDEV_CMD, 2 == PDEV_CMD, 5 == PPS_CMD
	 * (the PPS path is where pps_val is finally sent to FW).
	 */
	if (1 == privcmd->param_vp_dev) {
		switch (privcmd->param_id) {
		case WMI_VDEV_PARAM_NSS:
			intr[vid].config.nss = privcmd->param_value;
			break;
		case WMI_VDEV_PARAM_LDPC:
			intr[vid].config.ldpc = privcmd->param_value;
			break;
		case WMI_VDEV_PARAM_TX_STBC:
			intr[vid].config.tx_stbc = privcmd->param_value;
			break;
		case WMI_VDEV_PARAM_RX_STBC:
			intr[vid].config.rx_stbc = privcmd->param_value;
			break;
		case WMI_VDEV_PARAM_SGI:
			intr[vid].config.shortgi = privcmd->param_value;
			break;
		case WMI_VDEV_PARAM_ENABLE_RTSCTS:
			intr[vid].config.rtscts_en = privcmd->param_value;
			break;
		case WMI_VDEV_PARAM_CHWIDTH:
			intr[vid].config.chwidth = privcmd->param_value;
			break;
		case WMI_VDEV_PARAM_FIXED_RATE:
			intr[vid].config.tx_rate = privcmd->param_value;
			break;
		case WMI_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE:
			intr[vid].config.erx_adjust = privcmd->param_value;
			break;
		case WMI_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM:
			intr[vid].config.erx_bmiss_num = privcmd->param_value;
			break;
		case WMI_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE:
			intr[vid].config.erx_bmiss_cycle = privcmd->param_value;
			break;
		case WMI_VDEV_PARAM_EARLY_RX_SLOP_STEP:
			intr[vid].config.erx_slop_step = privcmd->param_value;
			break;
		case WMI_VDEV_PARAM_EARLY_RX_INIT_SLOP:
			intr[vid].config.erx_init_slop = privcmd->param_value;
			break;
		case WMI_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE:
			intr[vid].config.erx_adj_pause = privcmd->param_value;
			break;
		case WMI_VDEV_PARAM_EARLY_RX_DRIFT_SAMPLE:
			intr[vid].config.erx_dri_sample = privcmd->param_value;
			break;
		case WMI_VDEV_PARAM_HE_DCM:
		case WMI_VDEV_PARAM_HE_RANGE_EXT:
			wma_set_he_vdev_param(&intr[vid], privcmd->param_id,
					      privcmd->param_value);
			break;
		default:
			WMA_LOGE("Invalid wma_cli_set vdev command/Not yet implemented 0x%x",
				 privcmd->param_id);
			break;
		}
	} else if (2 == privcmd->param_vp_dev) {
		switch (privcmd->param_id) {
		case WMI_PDEV_PARAM_ANI_ENABLE:
			wma->pdevconfig.ani_enable = privcmd->param_value;
			break;
		case WMI_PDEV_PARAM_ANI_POLL_PERIOD:
			wma->pdevconfig.ani_poll_len = privcmd->param_value;
			break;
		case WMI_PDEV_PARAM_ANI_LISTEN_PERIOD:
			wma->pdevconfig.ani_listen_len = privcmd->param_value;
			break;
		case WMI_PDEV_PARAM_ANI_OFDM_LEVEL:
			wma->pdevconfig.ani_ofdm_level = privcmd->param_value;
			break;
		case WMI_PDEV_PARAM_ANI_CCK_LEVEL:
			wma->pdevconfig.ani_cck_level = privcmd->param_value;
			break;
		case WMI_PDEV_PARAM_DYNAMIC_BW:
			wma->pdevconfig.cwmenable = privcmd->param_value;
			break;
		case WMI_PDEV_PARAM_CTS_CBW:
			wma->pdevconfig.cts_cbw = privcmd->param_value;
			break;
		case WMI_PDEV_PARAM_TX_CHAIN_MASK:
			wma->pdevconfig.txchainmask = privcmd->param_value;
			break;
		case WMI_PDEV_PARAM_RX_CHAIN_MASK:
			wma->pdevconfig.rxchainmask = privcmd->param_value;
			break;
		case WMI_PDEV_PARAM_TXPOWER_LIMIT2G:
			/* Also push tx power into CFG when 2.4 GHz is
			 * allowed by the band capability.
			 */
			wma->pdevconfig.txpow2g = privcmd->param_value;
			if ((pMac->roam.configParam.bandCapability ==
			     BAND_ALL) ||
			    (pMac->roam.configParam.bandCapability ==
			     BAND_2G)) {
				if (cfg_set_int(pMac,
						WNI_CFG_CURRENT_TX_POWER_LEVEL,
						privcmd->param_value) !=
								eSIR_SUCCESS)
					WMA_LOGE("could not set WNI_CFG_CURRENT_TX_POWER_LEVEL");

			} else {
				WMA_LOGE("Current band is not 2G");
			}
			break;
		case WMI_PDEV_PARAM_TXPOWER_LIMIT5G:
			/* Same as above for the 5 GHz band */
			wma->pdevconfig.txpow5g = privcmd->param_value;
			if ((pMac->roam.configParam.bandCapability ==
			     BAND_ALL) ||
			    (pMac->roam.configParam.bandCapability ==
			     BAND_5G)) {
				if (cfg_set_int(pMac,
						WNI_CFG_CURRENT_TX_POWER_LEVEL,
						privcmd->param_value) !=
							    eSIR_SUCCESS)
					WMA_LOGE("could not set WNI_CFG_CURRENT_TX_POWER_LEVEL");

			} else {
				WMA_LOGE("Current band is not 5G");
			}
			break;
		default:
			WMA_LOGD("Invalid wma_cli_set pdev command/Not yet implemented 0x%x",
				 privcmd->param_id);
			break;
		}
	} else if (5 == privcmd->param_vp_dev) {
		/* PPS path: send the packed pps_val computed above */
		ret = wma_vdev_set_param(wma->wmi_handle,
					privcmd->param_vdev_id,
					WMI_VDEV_PARAM_PACKET_POWERSAVE,
					pps_val);
		if (ret)
			WMA_LOGE("Failed to send wmi packet power save cmd");
		else
			WMA_LOGD("Sent packet power save cmd %d value %x to target",
				privcmd->param_id, pps_val);
	}
}
1714 
1715 uint32_t wma_critical_events_in_flight(void)
1716 {
1717 	t_wma_handle *wma;
1718 
1719 	wma = cds_get_context(QDF_MODULE_ID_WMA);
1720 	if (!wma)
1721 		return 0;
1722 
1723 	return qdf_atomic_read(&wma->critical_events_in_flight);
1724 }
1725 
1726 static bool wma_event_is_critical(uint32_t event_id)
1727 {
1728 	switch (event_id) {
1729 	case WMI_ROAM_SYNCH_EVENTID:
1730 		return true;
1731 	default:
1732 		return false;
1733 	}
1734 }
1735 
1736 /**
1737  * wma_process_fw_event() - process any fw event
1738  * @wma: wma handle
1739  * @buf: fw event buffer
1740  *
1741  * This function process any fw event to serialize it through mc thread.
1742  *
1743  * Return: none
1744  */
1745 static int wma_process_fw_event(tp_wma_handle wma,
1746 				wma_process_fw_event_params *buf)
1747 {
1748 	struct wmi_unified *wmi_handle = (struct wmi_unified *)buf->wmi_handle;
1749 	uint32_t event_id = WMI_GET_FIELD(qdf_nbuf_data(buf->evt_buf),
1750 					  WMI_CMD_HDR, COMMANDID);
1751 
1752 	wmi_process_fw_event(wmi_handle, buf->evt_buf);
1753 
1754 	if (wma_event_is_critical(event_id))
1755 		qdf_atomic_dec(&wma->critical_events_in_flight);
1756 
1757 	return 0;
1758 }
1759 
1760 /**
1761  * wmi_process_fw_event_tasklet_ctx() - process in tasklet context
1762  * @ctx: handle to wmi
1763  * @ev: wmi event buffer
1764  *
1765  * Event process by below function will be in tasket context,
1766  * need to use this method only for time sensitive functions.
1767  *
1768  * Return: none
1769  */
static int wma_process_fw_event_tasklet_ctx(void *ctx, void *ev)
{
	/* Hand the event straight to WMI; no serialization via MC thread */
	wmi_process_fw_event(ctx, ev);

	return 0;
}
1776 
1777 /**
1778  * wma_process_hal_pwr_dbg_cmd() - send hal pwr dbg cmd to fw.
1779  * @handle: wma handle
1780  * @sir_pwr_dbg_params: unit test command
1781  *
1782  * This function send unit test command to fw.
1783  *
1784  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_** on error
1785  */
1786 QDF_STATUS wma_process_hal_pwr_dbg_cmd(WMA_HANDLE handle,
1787 				       struct sir_mac_pwr_dbg_cmd *
1788 				       sir_pwr_dbg_params)
1789 {
1790 	tp_wma_handle wma_handle = (tp_wma_handle)handle;
1791 	int i;
1792 	struct wmi_power_dbg_params wmi_pwr_dbg_params;
1793 	QDF_STATUS status;
1794 
1795 	if (!sir_pwr_dbg_params) {
1796 		WMA_LOGE("%s: sir_pwr_dbg_params is null", __func__);
1797 		return QDF_STATUS_E_INVAL;
1798 	}
1799 	wmi_pwr_dbg_params.module_id = sir_pwr_dbg_params->module_id;
1800 	wmi_pwr_dbg_params.pdev_id = sir_pwr_dbg_params->pdev_id;
1801 	wmi_pwr_dbg_params.num_args = sir_pwr_dbg_params->num_args;
1802 
1803 	for (i = 0; i < wmi_pwr_dbg_params.num_args; i++)
1804 		wmi_pwr_dbg_params.args[i] = sir_pwr_dbg_params->args[i];
1805 
1806 	status = wmi_unified_send_power_dbg_cmd(wma_handle->wmi_handle,
1807 						&wmi_pwr_dbg_params);
1808 
1809 	return status;
1810 }
1811 
1812 static void wma_discard_fw_event(struct scheduler_msg *msg)
1813 {
1814 	if (!msg->bodyptr)
1815 		return;
1816 
1817 	switch (msg->type) {
1818 	case WMA_PROCESS_FW_EVENT:
1819 		qdf_nbuf_free(((wma_process_fw_event_params *)msg->bodyptr)
1820 				->evt_buf);
1821 		break;
1822 	case WMA_SET_LINK_STATE:
1823 		qdf_mem_free(((tpLinkStateParams) msg->bodyptr)->callbackArg);
1824 		break;
1825 	}
1826 
1827 	qdf_mem_free(msg->bodyptr);
1828 	msg->bodyptr = NULL;
1829 	msg->bodyval = 0;
1830 	msg->type = 0;
1831 }
1832 
1833 /**
1834  * wma_process_fw_event_handler() - common event handler to serialize
1835  *                                  event processing through mc_thread
1836  * @ctx: wmi context
1837  * @ev: event buffer
1838  * @rx_ctx: rx execution context
1839  *
1840  * Return: 0 on success, errno on failure
1841  */
1842 static int wma_process_fw_event_mc_thread_ctx(void *ctx, void *ev)
1843 {
1844 	wma_process_fw_event_params *params_buf;
1845 	struct scheduler_msg cds_msg = { 0 };
1846 	tp_wma_handle wma;
1847 	uint32_t event_id;
1848 
1849 	params_buf = qdf_mem_malloc(sizeof(wma_process_fw_event_params));
1850 	if (!params_buf) {
1851 		WMA_LOGE("%s: Failed alloc memory for params_buf", __func__);
1852 		qdf_nbuf_free(ev);
1853 		return -ENOMEM;
1854 	}
1855 
1856 	params_buf->wmi_handle = (struct wmi_unified *)ctx;
1857 	params_buf->evt_buf = (wmi_buf_t *)ev;
1858 
1859 	wma = cds_get_context(QDF_MODULE_ID_WMA);
1860 	event_id = WMI_GET_FIELD(qdf_nbuf_data(params_buf->evt_buf),
1861 				 WMI_CMD_HDR, COMMANDID);
1862 	if (wma && wma_event_is_critical(event_id))
1863 		qdf_atomic_inc(&wma->critical_events_in_flight);
1864 
1865 	cds_msg.type = WMA_PROCESS_FW_EVENT;
1866 	cds_msg.bodyptr = params_buf;
1867 	cds_msg.bodyval = 0;
1868 	cds_msg.flush_callback = wma_discard_fw_event;
1869 
1870 	if (QDF_STATUS_SUCCESS !=
1871 		scheduler_post_msg(QDF_MODULE_ID_WMA, &cds_msg)) {
1872 		WMA_LOGE("%s: Failed to post WMA_PROCESS_FW_EVENT msg",
1873 			 __func__);
1874 		qdf_nbuf_free(ev);
1875 		qdf_mem_free(params_buf);
1876 		return -EFAULT;
1877 	}
1878 	return 0;
1879 
1880 }
1881 
1882 /**
1883  * wma_process_fw_event_handler() - common event handler to serialize
1884  *                                  event processing through mc_thread
1885  * @ctx: wmi context
1886  * @ev: event buffer
1887  * @rx_ctx: rx execution context
1888  *
1889  * Return: 0 on success, errno on failure
1890  */
1891 int wma_process_fw_event_handler(void *ctx, void *ev, uint8_t rx_ctx)
1892 {
1893 	int err = 0;
1894 
1895 	if (rx_ctx == WMA_RX_SERIALIZER_CTX) {
1896 		err = wma_process_fw_event_mc_thread_ctx(ctx, ev);
1897 	} else if (rx_ctx == WMA_RX_TASKLET_CTX) {
1898 		wma_process_fw_event_tasklet_ctx(ctx, ev);
1899 	} else {
1900 		WMA_LOGE("%s: invalid wmi event execution context", __func__);
1901 		qdf_nbuf_free(ev);
1902 	}
1903 
1904 	return err;
1905 }
1906 
#ifdef WLAN_FEATURE_NAN
/**
 * wma_set_nan_enable() - set nan enable flag in WMA handle
 * @wma_handle: Pointer to wma handle
 * @cds_cfg: Pointer to CDS Configuration
 *
 * Return: none
 */
static void wma_set_nan_enable(tp_wma_handle wma_handle,
				struct cds_config_info *cds_cfg)
{
	/* Mirror the CDS-level NAN setting into the WMA handle */
	wma_handle->is_nan_enabled = cds_cfg->is_nan_enabled;
}
#else
/* No-op stub when NAN support is compiled out */
static void wma_set_nan_enable(tp_wma_handle wma_handle,
				struct cds_config_info *cds_cfg)
{
}
#endif
1926 
1927 /**
1928  * wma_init_max_no_of_peers - API to initialize wma configuration params
1929  * @wma_handle: WMA Handle
1930  * @max_peers: Max Peers supported
1931  *
1932  * Return: void
1933  */
1934 static void wma_init_max_no_of_peers(tp_wma_handle wma_handle,
1935 				     uint16_t max_peers)
1936 {
1937 	struct wma_ini_config *cfg = wma_get_ini_handle(wma_handle);
1938 
1939 	if (cfg == NULL) {
1940 		WMA_LOGE("%s: NULL WMA ini handle", __func__);
1941 		return;
1942 	}
1943 
1944 	cfg->max_no_of_peers = max_peers;
1945 }
1946 
1947 /**
1948  * wma_cleanup_vdev_resp_queue() - cleanup vdev response queue
1949  * @wma: wma handle
1950  *
1951  * Return: none
1952  */
static void wma_cleanup_vdev_resp_queue(tp_wma_handle wma)
{
	struct wma_target_req *req_msg = NULL;
	qdf_list_node_t *node1 = NULL;

	qdf_spin_lock_bh(&wma->vdev_respq_lock);
	if (!qdf_list_size(&wma->vdev_resp_queue)) {
		qdf_spin_unlock_bh(&wma->vdev_respq_lock);
		WMA_LOGD(FL("request queue maybe empty"));
		return;
	}

	WMA_LOGD(FL("Cleaning up vdev resp queue"));

	/* peek front, and then cleanup it in wma_vdev_resp_timer */
	while (qdf_list_peek_front(&wma->vdev_resp_queue, &node1) ==
				   QDF_STATUS_SUCCESS) {
		req_msg = qdf_container_of(node1, struct wma_target_req, node);
		/* The lock is dropped before calling the timer handler;
		 * wma_vdev_resp_timer() is expected to remove the entry
		 * from the queue, otherwise this loop would spin on the
		 * same front node.
		 */
		qdf_spin_unlock_bh(&wma->vdev_respq_lock);
		qdf_mc_timer_stop(&req_msg->event_timeout);
		wma_vdev_resp_timer(req_msg);
		qdf_spin_lock_bh(&wma->vdev_respq_lock);
	}
	qdf_spin_unlock_bh(&wma->vdev_respq_lock);
}
1978 
1979 /**
1980  * wma_cleanup_hold_req() - cleanup hold request queue
1981  * @wma: wma handle
1982  *
1983  * Return: none
1984  */
1985 static void wma_cleanup_hold_req(tp_wma_handle wma)
1986 {
1987 	struct wma_target_req *req_msg = NULL;
1988 	qdf_list_node_t *node1 = NULL;
1989 
1990 	qdf_spin_lock_bh(&wma->wma_hold_req_q_lock);
1991 	if (!qdf_list_size(&wma->wma_hold_req_queue)) {
1992 		qdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
1993 		WMA_LOGD(FL("request queue is empty"));
1994 		return;
1995 	}
1996 
1997 	/* peek front, and then cleanup it in wma_hold_req_timer */
1998 	while (QDF_STATUS_SUCCESS ==
1999 		qdf_list_peek_front(&wma->wma_hold_req_queue, &node1)) {
2000 		req_msg = qdf_container_of(node1, struct wma_target_req, node);
2001 		qdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
2002 		/* Cleanup timeout handler */
2003 		qdf_mc_timer_stop(&req_msg->event_timeout);
2004 		wma_hold_req_timer(req_msg);
2005 		qdf_spin_lock_bh(&wma->wma_hold_req_q_lock);
2006 	}
2007 	qdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
2008 }
2009 
2010 /**
2011  * wma_cleanup_vdev_resp_and_hold_req() - cleaunup the vdev resp and hold req
2012  * queue
2013  * @msg :scheduler msg
2014  *
2015  * Return: QDF_STATUS
2016  */
2017 static QDF_STATUS
2018 wma_cleanup_vdev_resp_and_hold_req(struct scheduler_msg *msg)
2019 {
2020 	if (!msg || !msg->bodyptr) {
2021 		WMA_LOGE(FL("msg or body pointer is NULL"));
2022 		return QDF_STATUS_E_INVAL;
2023 	}
2024 
2025 	wma_cleanup_vdev_resp_queue(msg->bodyptr);
2026 	wma_cleanup_hold_req(msg->bodyptr);
2027 
2028 	return QDF_STATUS_SUCCESS;
2029 }
2030 
2031 /**
2032  * wma_shutdown_notifier_cb - Shutdown notifer call back
2033  * @priv : WMA handle
2034  *
2035  * During recovery, WMA may wait for resume to complete if the crash happens
2036  * while in suspend. This may cause delays in completing the recovery. This call
2037  * back would be called during recovery and the event is completed so that if
2038  * the resume is waiting on FW to respond then it can get out of the wait so
2039  * that recovery thread can start bringing down all the modules.
2040  *
2041  * Return: None
2042  */
2043 static void wma_shutdown_notifier_cb(void *priv)
2044 {
2045 	tp_wma_handle wma_handle = priv;
2046 	struct scheduler_msg msg = { 0 };
2047 	QDF_STATUS status;
2048 
2049 	qdf_event_set(&wma_handle->wma_resume_event);
2050 	pmo_ucfg_psoc_wakeup_host_event_received(wma_handle->psoc);
2051 
2052 	msg.bodyptr = priv;
2053 	msg.callback = wma_cleanup_vdev_resp_and_hold_req;
2054 	status = scheduler_post_msg(QDF_MODULE_ID_TARGET_IF, &msg);
2055 	if (QDF_IS_STATUS_ERROR(status))
2056 		WMA_LOGE(FL("Failed to post SYS_MSG_ID_CLEAN_VDEV_RSP_QUEUE"));
2057 }
2058 
/* Global storage for the WMI version information */
struct wma_version_info g_wmi_version_info;
2060 
2061 #ifdef WLAN_FEATURE_MEMDUMP_ENABLE
2062 /**
2063  * wma_state_info_dump() - prints state information of wma layer
2064  * @buf: buffer pointer
2065  * @size: size of buffer to be filled
2066  *
2067  * This function is used to dump state information of wma layer
2068  *
2069  * Return: None
2070  */
2071 #ifdef QCA_SUPPORT_CP_STATS
/* CP-stats variant: wake-lock stats come from the cp_stats component,
 * which requires taking a vdev object reference for the query.
 * On return, *buf_ptr is advanced past the written text and *size is
 * reduced by the same amount.
 */
static void wma_state_info_dump(char **buf_ptr, uint16_t *size)
{
	uint8_t vdev_id;
	uint16_t len = 0;	/* bytes written into the buffer so far */
	t_wma_handle *wma;
	char *buf = *buf_ptr;
	struct wma_txrx_node *iface;
	struct wake_lock_stats stats;
	struct wlan_objmgr_vdev *vdev;

	wma = cds_get_context(QDF_MODULE_ID_WMA);
	if (!wma) {
		WMA_LOGE("%s: WMA context is invald!", __func__);
		return;
	}

	WMA_LOGE("%s: size of buffer: %d", __func__, *size);

	/* One record per active vdev; slots without a handle are skipped */
	for (vdev_id = 0; vdev_id < wma->max_bssid; vdev_id++) {
		iface = &wma->interfaces[vdev_id];
		if (!iface->handle)
			continue;

		/* Hold a reference while reading the wake-lock stats;
		 * released after the record is formatted
		 */
		vdev = wlan_objmgr_get_vdev_by_id_from_psoc(wma->psoc,
						vdev_id, WLAN_LEGACY_WMA_ID);
		if (vdev == NULL)
			continue;
		ucfg_mc_cp_stats_get_vdev_wake_lock_stats(vdev, &stats);
		len += qdf_scnprintf(buf + len, *size - len,
			"\n"
			"vdev_id %d\n"
			"WoW Stats\n"
			"\tpno_match %u\n"
			"\tpno_complete %u\n"
			"\tgscan %u\n"
			"\tlow_rssi %u\n"
			"\trssi_breach %u\n"
			"\tucast %u\n"
			"\tbcast %u\n"
			"\ticmpv4 %u\n"
			"\ticmpv6 %u\n"
			"\tipv4_mcast %u\n"
			"\tipv6_mcast %u\n"
			"\tipv6_mcast_ra %u\n"
			"\tipv6_mcast_ns %u\n"
			"\tipv6_mcast_na %u\n"
			"\toem_response %u\n"
			"conn_state %d\n"
			"dtimPeriod %d\n"
			"chanmode %d\n"
			"vht_capable %d\n"
			"ht_capable %d\n"
			"chan_width %d\n"
			"vdev_active %d\n"
			"vdev_up %d\n"
			"aid %d\n"
			"rate_flags %d\n"
			"nss %d\n"
			"tx_power %d\n"
			"max_tx_power %d\n"
			"nwType %d\n"
			"tx_streams %d\n"
			"rx_streams %d\n"
			"chain_mask %d\n"
			"nss_2g %d\n"
			"nss_5g %d",
			vdev_id,
			stats.pno_match_wake_up_count,
			stats.pno_complete_wake_up_count,
			stats.gscan_wake_up_count,
			stats.low_rssi_wake_up_count,
			stats.rssi_breach_wake_up_count,
			stats.ucast_wake_up_count,
			stats.bcast_wake_up_count,
			stats.icmpv4_count,
			stats.icmpv6_count,
			stats.ipv4_mcast_wake_up_count,
			stats.ipv6_mcast_wake_up_count,
			stats.ipv6_mcast_ra_stats,
			stats.ipv6_mcast_ns_stats,
			stats.ipv6_mcast_na_stats,
			stats.oem_response_wake_up_count,
			iface->conn_state,
			iface->dtimPeriod,
			iface->chanmode,
			iface->vht_capable,
			iface->ht_capable,
			iface->chan_width,
			iface->vdev_active,
			wma_is_vdev_up(vdev_id),
			iface->aid,
			iface->rate_flags,
			iface->nss,
			iface->tx_power,
			iface->max_tx_power,
			iface->nwType,
			iface->tx_streams,
			iface->rx_streams,
			iface->chain_mask,
			iface->nss_2g,
			iface->nss_5g);
		wlan_objmgr_vdev_release_ref(vdev, WLAN_LEGACY_WMA_ID);
	}

	/* Report the consumed space back to the caller */
	*size -= len;
	*buf_ptr += len;
}
2179 #else /* QCA_SUPPORT_CP_STATS */
/* Legacy variant: wake-lock stats are read directly from the per-vdev
 * iface->wow_stats, so no object reference is needed. On return,
 * *buf_ptr is advanced past the written text and *size is reduced by
 * the same amount.
 */
static void wma_state_info_dump(char **buf_ptr, uint16_t *size)
{
	t_wma_handle *wma;
	struct sir_vdev_wow_stats *stats;
	uint16_t len = 0;	/* bytes written into the buffer so far */
	char *buf = *buf_ptr;
	struct wma_txrx_node *iface;
	uint8_t vdev_id;

	wma = cds_get_context(QDF_MODULE_ID_WMA);
	if (!wma) {
		WMA_LOGE("%s: WMA context is invald!", __func__);
		return;
	}

	WMA_LOGE("%s: size of buffer: %d", __func__, *size);

	/* One record per active vdev; slots without a handle are skipped */
	for (vdev_id = 0; vdev_id < wma->max_bssid; vdev_id++) {
		iface = &wma->interfaces[vdev_id];
		if (!iface->handle)
			continue;

		stats = &iface->wow_stats;
		len += qdf_scnprintf(buf + len, *size - len,
			"\n"
			"vdev_id %d\n"
			"WoW Stats\n"
			"\tpno_match %u\n"
			"\tpno_complete %u\n"
			"\tgscan %u\n"
			"\tlow_rssi %u\n"
			"\trssi_breach %u\n"
			"\tucast %u\n"
			"\tbcast %u\n"
			"\ticmpv4 %u\n"
			"\ticmpv6 %u\n"
			"\tipv4_mcast %u\n"
			"\tipv6_mcast %u\n"
			"\tipv6_mcast_ra %u\n"
			"\tipv6_mcast_ns %u\n"
			"\tipv6_mcast_na %u\n"
			"\toem_response %u\n"
			"conn_state %d\n"
			"dtimPeriod %d\n"
			"chanmode %d\n"
			"vht_capable %d\n"
			"ht_capable %d\n"
			"chan_width %d\n"
			"vdev_active %d\n"
			"vdev_up %d\n"
			"aid %d\n"
			"rate_flags %d\n"
			"nss %d\n"
			"tx_power %d\n"
			"max_tx_power %d\n"
			"nwType %d\n"
			"tx_streams %d\n"
			"rx_streams %d\n"
			"chain_mask %d\n"
			"nss_2g %d\n"
			"nss_5g %d",
			vdev_id,
			stats->pno_match,
			stats->pno_complete,
			stats->gscan,
			stats->low_rssi,
			stats->rssi_breach,
			stats->ucast,
			stats->bcast,
			stats->icmpv4,
			stats->icmpv6,
			stats->ipv4_mcast,
			stats->ipv6_mcast,
			stats->ipv6_mcast_ra,
			stats->ipv6_mcast_ns,
			stats->ipv6_mcast_na,
			stats->oem_response,
			iface->conn_state,
			iface->dtimPeriod,
			iface->chanmode,
			iface->vht_capable,
			iface->ht_capable,
			iface->chan_width,
			iface->vdev_active,
			wma_is_vdev_up(vdev_id),
			iface->aid,
			iface->rate_flags,
			iface->nss,
			iface->tx_power,
			iface->max_tx_power,
			iface->nwType,
			iface->tx_streams,
			iface->rx_streams,
			iface->chain_mask,
			iface->nss_2g,
			iface->nss_5g);
	}

	/* Report the consumed space back to the caller */
	*size -= len;
	*buf_ptr += len;
}
2281 #endif /* QCA_SUPPORT_CP_STATS */
2282 
2283 /**
2284  * wma_register_debug_callback() - registration function for wma layer
2285  * to print wma state information
2286  */
2287 static void wma_register_debug_callback(void)
2288 {
2289 	qdf_register_debug_callback(QDF_MODULE_ID_WMA, &wma_state_info_dump);
2290 }
2291 #else /* WLAN_FEATURE_MEMDUMP_ENABLE */
2292 static void wma_register_debug_callback(void)
2293 {
2294 }
2295 #endif /* WLAN_FEATURE_MEMDUMP_ENABLE */
2296 /**
2297  * wma_register_tx_ops_handler() - register tx_ops of southbound
2298  * @tx_ops:  tx_ops pointer in southbound
2299  *
2300  * Return: 0 on success, errno on failure
2301  */
2302 static QDF_STATUS
2303 wma_register_tx_ops_handler(struct wlan_lmac_if_tx_ops *tx_ops)
2304 {
2305 	/*
2306 	 * Assign tx_ops, it's up to UMAC modules to declare and define these
2307 	 * functions which are used to send wmi command to target.
2308 	 */
2309 
2310 	if (!tx_ops) {
2311 		WMA_LOGE("%s: pointer to lmac if tx ops is NULL", __func__);
2312 		return QDF_STATUS_E_INVAL;
2313 	}
2314 
2315 	/* mgmt_txrx component's tx ops */
2316 	tx_ops->mgmt_txrx_tx_ops.mgmt_tx_send = wma_mgmt_unified_cmd_send;
2317 
2318 	return QDF_STATUS_SUCCESS;
2319 }
2320 
2321 /**
2322  * wma_target_if_open() - Attach UMAC modules' interface with wmi layer
2323  * @wma_handle: wma handle
2324  *
2325  * Separate module defines below functions:
2326  * 1. tgt_wmi_<module>_<action> api sends wmi command, assigned to south bound
2327  *    tx_ops function pointers;
2328  * 2. module's south dispatcher handles information from lower layer, assigned
2329  *    to south bound rx_ops function pointers;
2330  * 3. wmi event handler deals with wmi event, extracts umac needed information,
2331  *    and call rx_ops(module's dispatcher). It executes in tasklet context and
2332  *    is up to dispatcher to decide the context to reside in tasklet or in
2333  *    thread context.
2334  *
2335  * Return: None
2336  */
2337 static void wma_target_if_open(tp_wma_handle wma_handle)
2338 {
2339 	struct wlan_objmgr_psoc *psoc = wma_handle->psoc;
2340 
2341 	if (!psoc)
2342 		return;
2343 
2344 	wlan_global_lmac_if_set_txops_registration_cb(WLAN_DEV_OL,
2345 					target_if_register_tx_ops);
2346 	wlan_lmac_if_set_umac_txops_registration_cb(
2347 		wma_register_tx_ops_handler);
2348 	wlan_global_lmac_if_open(psoc);
2349 
2350 }
2351 
2352 /**
2353  * wma_target_if_close() - Detach UMAC modules' interface with wmi layer
2354  * @wma_handle: wma handle
2355  *
2356  * Return: None
2357  */
2358 static void wma_target_if_close(tp_wma_handle wma_handle)
2359 {
2360 	struct wlan_objmgr_psoc *psoc = wma_handle->psoc;
2361 
2362 	if (!psoc)
2363 		return;
2364 
2365 	wlan_global_lmac_if_close(psoc);
2366 }
2367 
2368 /**
2369  * wma_get_psoc_from_scn_handle() - API to get psoc from scn handle
2370  * @scn_handle: opaque wma handle
2371  *
2372  * API to get psoc from scn handle
2373  *
2374  * Return: None
2375  */
2376 static struct wlan_objmgr_psoc *wma_get_psoc_from_scn_handle(void *scn_handle)
2377 {
2378 	tp_wma_handle wma_handle;
2379 
2380 	if (!scn_handle) {
2381 		WMA_LOGE("invalid scn handle");
2382 		return NULL;
2383 	}
2384 	wma_handle = (tp_wma_handle)scn_handle;
2385 
2386 	return wma_handle->psoc;
2387 }
2388 
2389 /**
2390  * wma_get_pdev_from_scn_handle() - API to get pdev from scn handle
2391  * @scn_handle: opaque wma handle
2392  *
2393  * API to get pdev from scn handle
2394  *
2395  * Return: None
2396  */
2397 static struct wlan_objmgr_pdev *wma_get_pdev_from_scn_handle(void *scn_handle)
2398 {
2399 	tp_wma_handle wma_handle;
2400 
2401 	if (!scn_handle) {
2402 		WMA_LOGE("invalid scn handle");
2403 		return NULL;
2404 	}
2405 	wma_handle = (tp_wma_handle)scn_handle;
2406 
2407 	return wma_handle->pdev;
2408 }
2409 
2410 /**
2411  * wma_legacy_service_ready_event_handler() - legacy (ext)service ready handler
2412  * @event_id: event_id
2413  * @handle: wma handle
2414  * @event_data: event data
2415  * @length: event length
2416  *
2417  * Return: 0 for success, negative error code for failure
2418  */
2419 static int wma_legacy_service_ready_event_handler(uint32_t event_id,
2420 						  void *handle,
2421 						  uint8_t *event_data,
2422 						  uint32_t length)
2423 {
2424 	switch (event_id) {
2425 	case wmi_service_ready_event_id:
2426 		return wma_rx_service_ready_event(handle, event_data, length);
2427 	case wmi_service_ready_ext_event_id:
2428 		return wma_rx_service_ready_ext_event(handle, event_data,
2429 						      length);
2430 	case wmi_ready_event_id:
2431 		return wma_rx_ready_event(handle, event_data, length);
2432 	default:
2433 		WMA_LOGE("Legacy callback invoked with invalid event_id:%d",
2434 			 event_id);
2435 		QDF_BUG(0);
2436 	}
2437 
2438 	return 0;
2439 }
2440 
2441 /**
2442  * wma_flush_complete_evt_handler() - FW log flush complete event handler
2443  * @handle: WMI handle
2444  * @event:  Event recevied from FW
2445  * @len:    Length of the event
2446  *
2447  */
2448 static int wma_flush_complete_evt_handler(void *handle,
2449 		u_int8_t *event,
2450 		u_int32_t len)
2451 {
2452 	QDF_STATUS status;
2453 	tp_wma_handle wma = (tp_wma_handle) handle;
2454 
2455 	WMI_DEBUG_MESG_FLUSH_COMPLETE_EVENTID_param_tlvs *param_buf;
2456 	wmi_debug_mesg_flush_complete_fixed_param *wmi_event;
2457 	wmi_debug_mesg_fw_data_stall_param *data_stall_event;
2458 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
2459 	uint8_t *buf_ptr;
2460 	uint32_t reason_code;
2461 
2462 	param_buf = (WMI_DEBUG_MESG_FLUSH_COMPLETE_EVENTID_param_tlvs *) event;
2463 	if (!param_buf) {
2464 		WMA_LOGE("Invalid log flush complete event buffer");
2465 		return QDF_STATUS_E_FAILURE;
2466 	}
2467 
2468 	wmi_event = param_buf->fixed_param;
2469 	reason_code = wmi_event->reserved0;
2470 	WMA_LOGD("Received reason code %d from FW", reason_code);
2471 
2472 	buf_ptr = (uint8_t *)wmi_event;
2473 	buf_ptr = buf_ptr + sizeof(wmi_debug_mesg_flush_complete_fixed_param) +
2474 		  WMI_TLV_HDR_SIZE;
2475 	data_stall_event = (wmi_debug_mesg_fw_data_stall_param *) buf_ptr;
2476 
2477 	if (((data_stall_event->tlv_header & 0xFFFF0000) >> 16 ==
2478 	      WMITLV_TAG_STRUC_wmi_debug_mesg_fw_data_stall_param)) {
2479 		/**
2480 		 * Log data stall info received from FW:
2481 		 *
2482 		 * Possible data stall recovery types:
2483 		 * WLAN_DBG_DATA_STALL_RECOVERY_CONNECT_DISCONNECT
2484 		 * WLAN_DBG_DATA_STALL_RECOVERY_CONNECT_MAC_PHY_RESET
2485 		 * WLAN_DBG_DATA_STALL_RECOVERY_CONNECT_PDR
2486 		 *
2487 		 * Possible data stall event types:
2488 		 * WLAN_DBG_DATA_STALL_VDEV_PAUSE
2489 		 * WLAN_DBG_DATA_STALL_HWSCHED_CMD_FILTER
2490 		 * WLAN_DBG_DATA_STALL_HWSCHED_CMD_FLUSH
2491 		 * WLAN_DBG_DATA_STALL_RX_REFILL_FAILED
2492 		 * WLAN_DBG_DATA_STALL_RX_FCS_LEN_ERROR
2493 		 *
2494 		 * reason_code1:
2495 		 * The information stored in reason_code1 varies based on the
2496 		 * data stall type values:
2497 		 *
2498 		 * data_stall_type      | reason_code1
2499 		 * -----------------------------------------------------
2500 		 * HWSCHED_CMD_FLUSH    | flush req reason (0-40)
2501 		 * RX_REFILL_FAILED     | ring_id (0-7)
2502 		 * RX_FCS_LEN_ERROR     | exact error type
2503 		 *
2504 		 * reasone_code2:
2505 		 * on which tid/hwq stall happened
2506 		 *
2507 		 */
2508 		QDF_TRACE(QDF_MODULE_ID_WMA, QDF_TRACE_LEVEL_DEBUG,
2509 			  "Data Stall event:");
2510 		QDF_TRACE(QDF_MODULE_ID_WMA, QDF_TRACE_LEVEL_DEBUG,
2511 			  "data_stall_type: %x vdev_id_bitmap: %x reason_code1: %x reason_code2: %x recovery_type: %x ",
2512 			  data_stall_event->data_stall_type,
2513 			  data_stall_event->vdev_id_bitmap,
2514 			  data_stall_event->reason_code1,
2515 			  data_stall_event->reason_code2,
2516 			  data_stall_event->recovery_type);
2517 
2518 		cdp_post_data_stall_event(soc,
2519 					DATA_STALL_LOG_INDICATOR_FIRMWARE,
2520 					data_stall_event->data_stall_type,
2521 					0XFF,
2522 					data_stall_event->vdev_id_bitmap,
2523 					data_stall_event->recovery_type);
2524 	}
2525 
2526 	/*
2527 	 * reason_code = 0; Flush event in response to flush command
2528 	 * reason_code = other value; Asynchronous flush event for fatal events
2529 	 */
2530 	if (!reason_code && (cds_is_log_report_in_progress() == false)) {
2531 		WMA_LOGD("Received WMI flush event without sending CMD");
2532 		return -EINVAL;
2533 	} else if (!reason_code && cds_is_log_report_in_progress() == true) {
2534 		/* Flush event in response to flush command */
2535 		WMA_LOGD("Received WMI flush event in response to flush CMD");
2536 		status = qdf_mc_timer_stop(&wma->log_completion_timer);
2537 		if (status != QDF_STATUS_SUCCESS)
2538 			WMA_LOGE("Failed to stop the log completion timeout");
2539 		cds_logging_set_fw_flush_complete();
2540 		return QDF_STATUS_SUCCESS;
2541 	} else if (reason_code && cds_is_log_report_in_progress() == false) {
2542 		/* Asynchronous flush event for fatal events */
2543 		status = cds_set_log_completion(WLAN_LOG_TYPE_FATAL,
2544 				WLAN_LOG_INDICATOR_FIRMWARE,
2545 				reason_code, false);
2546 		if (QDF_STATUS_SUCCESS != status) {
2547 			WMA_LOGE("%s: Failed to set log trigger params",
2548 					__func__);
2549 			return QDF_STATUS_E_FAILURE;
2550 		}
2551 		cds_logging_set_fw_flush_complete();
2552 		return status;
2553 	} else {
2554 		/* Asynchronous flush event for fatal event,
2555 		 * but, report in progress already
2556 		 */
2557 		WMA_LOGD("%s: Bug report already in progress - dropping! type:%d, indicator=%d reason_code=%d",
2558 				__func__, WLAN_LOG_TYPE_FATAL,
2559 				WLAN_LOG_INDICATOR_FIRMWARE, reason_code);
2560 		return QDF_STATUS_E_FAILURE;
2561 	}
2562 	/* Asynchronous flush event for fatal event,
2563 	 * but, report in progress already
2564 	 */
2565 	WMA_LOGW("%s: Bug report already in progress - dropping! type:%d, indicator=%d reason_code=%d",
2566 			__func__, WLAN_LOG_TYPE_FATAL,
2567 			WLAN_LOG_INDICATOR_FIRMWARE, reason_code);
2568 	return QDF_STATUS_E_FAILURE;
2569 }
2570 
2571 #ifdef WLAN_CONV_SPECTRAL_ENABLE
2572 /**
2573  * wma_extract_single_phyerr_spectral() - extract single phy error from event
2574  * @handle: wma handle
2575  * @param evt_buf: pointer to event buffer
2576  * @param datalen: data length of event buffer
2577  * @param buf_offset: Pointer to hold value of current event buffer offset
2578  * post extraction
2579  * @param phyerr: Pointer to hold phyerr
2580  *
2581  * Return: QDF_STATUS
2582  */
2583 static QDF_STATUS wma_extract_single_phyerr_spectral(void *handle,
2584 		void *evt_buf,
2585 		uint16_t datalen, uint16_t *buf_offset,
2586 		wmi_host_phyerr_t *phyerr)
2587 {
2588 	wmi_single_phyerr_rx_event *ev;
2589 	int n = *buf_offset;
2590 
2591 	ev = (wmi_single_phyerr_rx_event *)((uint8_t *)evt_buf + n);
2592 
2593 	if (n < datalen) {
2594 		/* ensure there's at least space for the header */
2595 		if ((datalen - n) < sizeof(ev->hdr)) {
2596 			WMA_LOGE("%s: not enough space? (datalen=%d, n=%d, hdr=%zu bytes",
2597 					__func__, datalen, n, sizeof(ev->hdr));
2598 			return QDF_STATUS_E_FAILURE;
2599 		}
2600 
2601 		phyerr->bufp = ev->bufp;
2602 		phyerr->buf_len = ev->hdr.buf_len;
2603 
2604 		/*
2605 		 * Sanity check the buffer length of the event against
2606 		 * what we currently have.
2607 		 *
2608 		 * Since buf_len is 32 bits, we check if it overflows
2609 		 * a large 32 bit value.  It's not 0x7fffffff because
2610 		 * we increase n by (buf_len + sizeof(hdr)), which would
2611 		 * in itself cause n to overflow.
2612 		 *
2613 		 * If "int" is 64 bits then this becomes a moot point.
2614 		 */
2615 		if (ev->hdr.buf_len > 0x7f000000) {
2616 			WMA_LOGE("%s: buf_len is garbage? (0x%x)",
2617 				__func__, ev->hdr.buf_len);
2618 			return QDF_STATUS_E_FAILURE;
2619 		}
2620 		if (n + ev->hdr.buf_len > datalen) {
2621 			WMA_LOGE("%s: buf_len exceeds available space n=%d, buf_len=%d, datalen=%d",
2622 				__func__, n, ev->hdr.buf_len, datalen);
2623 			return QDF_STATUS_E_FAILURE;
2624 		}
2625 
2626 		phyerr->phy_err_code = WMI_UNIFIED_PHYERRCODE_GET(&ev->hdr);
2627 		phyerr->tsf_timestamp = ev->hdr.tsf_timestamp;
2628 
2629 #ifdef DEBUG_SPECTRAL_SCAN
2630 		WMA_LOGD("%s: len=%d, tsf=0x%08x, rssi = 0x%x/0x%x/0x%x/0x%x, comb rssi = 0x%x, phycode=%d",
2631 				__func__,
2632 				ev->hdr.buf_len,
2633 				ev->hdr.tsf_timestamp,
2634 				ev->hdr.rssi_chain0,
2635 				ev->hdr.rssi_chain1,
2636 				ev->hdr.rssi_chain2,
2637 				ev->hdr.rssi_chain3,
2638 				WMI_UNIFIED_RSSI_COMB_GET(&ev->hdr),
2639 					  phyerr->phy_err_code);
2640 
2641 		/*
2642 		 * For now, unroll this loop - the chain 'value' field isn't
2643 		 * a variable but glued together into a macro field definition.
2644 		 * Grr. :-)
2645 		 */
2646 		WMA_LOGD("%s: chain 0: raw=0x%08x; pri20=%d sec20=%d sec40=%d sec80=%d",
2647 				__func__,
2648 				ev->hdr.rssi_chain0,
2649 				WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 0, PRI20),
2650 				WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 0, SEC20),
2651 				WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 0, SEC40),
2652 				WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 0, SEC80));
2653 
2654 		WMA_LOGD("%s: chain 1: raw=0x%08x: pri20=%d sec20=%d sec40=%d sec80=%d",
2655 				__func__,
2656 				ev->hdr.rssi_chain1,
2657 				WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 1, PRI20),
2658 				WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 1, SEC20),
2659 				WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 1, SEC40),
2660 				WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 1, SEC80));
2661 
2662 		WMA_LOGD("%s: chain 2: raw=0x%08x: pri20=%d sec20=%d sec40=%d sec80=%d",
2663 				__func__,
2664 				ev->hdr.rssi_chain2,
2665 				WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 2, PRI20),
2666 				WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 2, SEC20),
2667 				WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 2, SEC40),
2668 				WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 2, SEC80));
2669 
2670 		WMA_LOGD("%s: chain 3: raw=0x%08x: pri20=%d sec20=%d sec40=%d sec80=%d",
2671 				__func__,
2672 				ev->hdr.rssi_chain3,
2673 				WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 3, PRI20),
2674 				WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 3, SEC20),
2675 				WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 3, SEC40),
2676 				WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 3, SEC80));
2677 
2678 
2679 		WMA_LOGD("%s: freq_info_1=0x%08x, freq_info_2=0x%08x",
2680 			   __func__, ev->hdr.freq_info_1, ev->hdr.freq_info_2);
2681 
2682 		/*
2683 		 * The NF chain values are signed and are negative - hence
2684 		 * the cast evilness.
2685 		 */
2686 		WMA_LOGD("%s: nfval[1]=0x%08x, nfval[2]=0x%08x, nf=%d/%d/%d/%d, freq1=%d, freq2=%d, cw=%d",
2687 				__func__,
2688 				ev->hdr.nf_list_1,
2689 				ev->hdr.nf_list_2,
2690 				(int) WMI_UNIFIED_NF_CHAIN_GET(&ev->hdr, 0),
2691 				(int) WMI_UNIFIED_NF_CHAIN_GET(&ev->hdr, 1),
2692 				(int) WMI_UNIFIED_NF_CHAIN_GET(&ev->hdr, 2),
2693 				(int) WMI_UNIFIED_NF_CHAIN_GET(&ev->hdr, 3),
2694 				WMI_UNIFIED_FREQ_INFO_GET(&ev->hdr, 1),
2695 				WMI_UNIFIED_FREQ_INFO_GET(&ev->hdr, 2),
2696 				WMI_UNIFIED_CHWIDTH_GET(&ev->hdr));
2697 #endif
2698 
2699 		/*
2700 		 * If required, pass spectral events to the spectral module
2701 		 */
2702 		if (ev->hdr.buf_len > 0) {
2703 
2704 			/* Initialize the NF values to Zero. */
2705 			phyerr->rf_info.noise_floor[0] =
2706 			    WMI_UNIFIED_NF_CHAIN_GET(&ev->hdr, 0);
2707 			phyerr->rf_info.noise_floor[1] =
2708 			    WMI_UNIFIED_NF_CHAIN_GET(&ev->hdr, 1);
2709 			phyerr->rf_info.noise_floor[2] =
2710 			    WMI_UNIFIED_NF_CHAIN_GET(&ev->hdr, 2);
2711 			phyerr->rf_info.noise_floor[3] =
2712 			    WMI_UNIFIED_NF_CHAIN_GET(&ev->hdr, 3);
2713 
2714 			/* populate the rf info */
2715 			phyerr->rf_info.rssi_comb =
2716 			    WMI_UNIFIED_RSSI_COMB_GET(&ev->hdr);
2717 
2718 			/* Need to unroll loop due to macro
2719 			 * constraints chain 0
2720 			 */
2721 			phyerr->rf_info.pc_rssi_info[0].rssi_pri20 =
2722 			WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 0, PRI20);
2723 			phyerr->rf_info.pc_rssi_info[0].rssi_sec20 =
2724 			WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 0, SEC20);
2725 			phyerr->rf_info.pc_rssi_info[0].rssi_sec40 =
2726 			WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 0, SEC40);
2727 			phyerr->rf_info.pc_rssi_info[0].rssi_sec80 =
2728 			WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 0, SEC80);
2729 
2730 			/* chain 1 */
2731 			phyerr->rf_info.pc_rssi_info[1].rssi_pri20 =
2732 			WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 1, PRI20);
2733 			phyerr->rf_info.pc_rssi_info[1].rssi_sec20 =
2734 			WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 1, SEC20);
2735 			phyerr->rf_info.pc_rssi_info[1].rssi_sec40 =
2736 			WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 1, SEC40);
2737 			phyerr->rf_info.pc_rssi_info[1].rssi_sec80 =
2738 			WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 1, SEC80);
2739 
2740 			/* chain 2 */
2741 			phyerr->rf_info.pc_rssi_info[2].rssi_pri20 =
2742 			WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 2, PRI20);
2743 			phyerr->rf_info.pc_rssi_info[2].rssi_sec20 =
2744 			WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 2, SEC20);
2745 			phyerr->rf_info.pc_rssi_info[2].rssi_sec40 =
2746 			WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 2, SEC40);
2747 			phyerr->rf_info.pc_rssi_info[2].rssi_sec80 =
2748 			WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 2, SEC80);
2749 
2750 			/* chain 3 */
2751 			phyerr->rf_info.pc_rssi_info[3].rssi_pri20 =
2752 			WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 3, PRI20);
2753 			phyerr->rf_info.pc_rssi_info[3].rssi_sec20 =
2754 			WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 3, SEC20);
2755 			phyerr->rf_info.pc_rssi_info[3].rssi_sec40 =
2756 			WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 3, SEC40);
2757 			phyerr->rf_info.pc_rssi_info[3].rssi_sec80 =
2758 			WMI_UNIFIED_RSSI_CHAN_GET(&ev->hdr, 3, SEC80);
2759 
2760 			phyerr->chan_info.center_freq1 =
2761 			    WMI_UNIFIED_FREQ_INFO_GET(&ev->hdr, 1);
2762 			phyerr->chan_info.center_freq2 =
2763 			    WMI_UNIFIED_FREQ_INFO_GET(&ev->hdr, 2);
2764 
2765 		}
2766 
2767 		/*
2768 		 * Advance the buffer pointer to the next PHY error.
2769 		 * buflen is the length of this payload, so we need to
2770 		 * advance past the current header _AND_ the payload.
2771 		 */
2772 		 n += sizeof(*ev) + ev->hdr.buf_len;
2773 	}
2774 	*buf_offset += n;
2775 
2776 	return QDF_STATUS_SUCCESS;
2777 }
2778 
2779 /**
2780  * spectral_phyerr_event_handler() - spectral phyerr event handler
2781  * @handle: wma handle
2782  * @data: data buffer
2783  * @datalen: buffer length
2784  *
2785  * Return:  QDF_STATUS
2786  */
2787 static QDF_STATUS spectral_phyerr_event_handler(void *handle,
2788 						uint8_t *data,
2789 						uint32_t datalen)
2790 {
2791 	tp_wma_handle wma = (tp_wma_handle) handle;
2792 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2793 	uint16_t buf_offset, event_buf_len = 0;
2794 	wmi_single_phyerr_rx_event *ev;
2795 	wmi_host_phyerr_t phyerr;
2796 	struct target_if_spectral_rfqual_info rfqual_info;
2797 	struct target_if_spectral_chan_info chan_info;
2798 	struct target_if_spectral_acs_stats acs_stats;
2799 
2800 	if (NULL == wma) {
2801 		WMA_LOGE("%s:wma handle is NULL", __func__);
2802 		return QDF_STATUS_E_FAILURE;
2803 	}
2804 
2805 	memset(&phyerr, 0, sizeof(wmi_host_phyerr_t));
2806 	status = wmi_extract_comb_phyerr(wma->wmi_handle, data, datalen,
2807 					 &buf_offset, &phyerr);
2808 	if (QDF_IS_STATUS_ERROR(status)) {
2809 		WMA_LOGE("%s: extract comb phyerr failed", __func__);
2810 		return QDF_STATUS_E_FAILURE;
2811 	}
2812 
2813 	ev = (wmi_single_phyerr_rx_event *)phyerr.bufp;
2814 	event_buf_len = phyerr.buf_len;
2815 	/* Loop over the bufp, extracting out phyerrors */
2816 	buf_offset = 0;
2817 	while (buf_offset < event_buf_len) {
2818 		if (wma_extract_single_phyerr_spectral(handle, ev,
2819 			event_buf_len, &buf_offset, &phyerr)) {
2820 			WMA_LOGE("%s: extract single phy err failed", __func__);
2821 			return QDF_STATUS_E_FAILURE;
2822 		}
2823 
2824 		if (phyerr.buf_len > 0) {
2825 			if (sizeof(phyerr.rf_info) > sizeof(rfqual_info))
2826 				qdf_mem_copy(&rfqual_info, &phyerr.rf_info,
2827 						sizeof(rfqual_info));
2828 			else
2829 				qdf_mem_copy(&rfqual_info, &phyerr.rf_info,
2830 						sizeof(phyerr.rf_info));
2831 
2832 			if (sizeof(phyerr.chan_info) > sizeof(chan_info))
2833 				qdf_mem_copy(&chan_info, &phyerr.chan_info,
2834 						sizeof(chan_info));
2835 			else
2836 				qdf_mem_copy(&chan_info, &phyerr.chan_info,
2837 						sizeof(phyerr.chan_info));
2838 
2839 			target_if_spectral_process_phyerr(wma->pdev, phyerr.bufp,
2840 							phyerr.buf_len,
2841 							&rfqual_info,
2842 							&chan_info,
2843 							phyerr.tsf64,
2844 							&acs_stats);
2845 		}
2846 	}
2847 
2848 	return status;
2849 }
2850 #else
2851 static QDF_STATUS spectral_phyerr_event_handler(void *handle,
2852 					uint8_t *data, uint32_t datalen)
2853 {
2854 	return QDF_STATUS_SUCCESS;
2855 }
2856 #endif
2857 
2858 /**
2859  * dfs_phyerr_event_handler() - dfs phyerr event handler
2860  * @handle: wma handle
2861  * @data: data buffer
2862  * @datalen: buffer length
2863  * @fulltsf: 64 bit event TSF
2864  *
2865  * Function to process DFS phy errors.
2866  *
2867  * Return: QDF_STATUS
2868  */
2869 static QDF_STATUS dfs_phyerr_event_handler(tp_wma_handle handle,
2870 					   uint8_t *data,
2871 					   uint32_t datalen,
2872 					   uint64_t fulltsf)
2873 {
2874 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2875 	struct wlan_lmac_if_dfs_rx_ops *dfs_rx_ops;
2876 	wmi_host_phyerr_t phyerr;
2877 	int8_t rssi_comb;
2878 	uint16_t buf_offset;
2879 
2880 	if (!handle->psoc) {
2881 		WMA_LOGE("%s: psoc is null", __func__);
2882 		return QDF_STATUS_E_INVAL;
2883 	}
2884 
2885 	dfs_rx_ops = wlan_lmac_if_get_dfs_rx_ops(handle->psoc);
2886 	if (!dfs_rx_ops) {
2887 		WMA_LOGE("%s: dfs_rx_ops is null", __func__);
2888 		return QDF_STATUS_E_INVAL;
2889 	}
2890 
2891 	if (!dfs_rx_ops->dfs_process_phyerr) {
2892 		WMA_LOGE("%s: dfs_process_phyerr handler is null", __func__);
2893 		return QDF_STATUS_E_INVAL;
2894 	}
2895 
2896 	if (!handle->pdev) {
2897 		WMA_LOGE("%s: pdev is null", __func__);
2898 		return -EINVAL;
2899 	}
2900 
2901 	buf_offset = 0;
2902 	while (buf_offset < datalen) {
2903 		status = wmi_extract_single_phyerr(handle->wmi_handle, data, datalen,
2904 						   &buf_offset, &phyerr);
2905 		if (QDF_IS_STATUS_ERROR(status)) {
2906 			/* wmi_extract_single_phyerr has logs */
2907 			return status;
2908 		}
2909 
2910 		rssi_comb = phyerr.rf_info.rssi_comb & 0xFF;
2911 		if (phyerr.buf_len > 0)
2912 			dfs_rx_ops->dfs_process_phyerr(handle->pdev,
2913 						       &phyerr.bufp[0],
2914 						       phyerr.buf_len,
2915 						       rssi_comb,
2916 						       rssi_comb,
2917 						       phyerr.tsf_timestamp,
2918 						       fulltsf);
2919 	}
2920 
2921 	return QDF_STATUS_SUCCESS;
2922 }
2923 
2924 /**
2925  * wma_unified_phyerr_rx_event_handler() - phyerr event handler
2926  * @handle: wma handle
2927  * @data: data buffer
2928  * @datalen: buffer length
2929  *
2930  * WMI Handler for WMI_PHYERR_EVENTID event from firmware.
2931  * This handler is currently handling DFS and spectral scan
2932  * phy errors.
2933  *
2934  * Return: 0 for success, other value for failure
2935  */
2936 static int wma_unified_phyerr_rx_event_handler(void *handle,
2937 					       uint8_t *data,
2938 					       uint32_t datalen)
2939 {
2940 	/* phyerr handling is moved to cmn project
2941 	 * As WIN still uses handler registration in non-cmn code.
2942 	 * need complete testing of non offloaded DFS code before we enable
2943 	 * it in cmn code.
2944 	 **/
2945 	tp_wma_handle wma = (tp_wma_handle) handle;
2946 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2947 	wmi_host_phyerr_t phyerr;
2948 	uint16_t buf_offset = 0;
2949 
2950 	if (!wma) {
2951 		WMA_LOGE("%s: wma handle is null", __func__);
2952 		return -EINVAL;
2953 	}
2954 
2955 	/* sanity check on data length */
2956 	status = wmi_extract_comb_phyerr(wma->wmi_handle, data, datalen,
2957 					 &buf_offset, &phyerr);
2958 	if (QDF_IS_STATUS_ERROR(status)) {
2959 		WMA_LOGE("%s: extract phyerr failed: %d", __func__, status);
2960 		return qdf_status_to_os_return(status);
2961 	}
2962 
2963 	/* handle different PHY Error conditions */
2964 	if (((phyerr.phy_err_mask0 & (WMI_PHY_ERROR_MASK0_RADAR |
2965 	    WMI_PHY_ERROR_MASK0_FALSE_RADAR_EXT |
2966 	    WMI_PHY_ERROR_MASK0_SPECTRAL_SCAN)) == 0)) {
2967 		WMA_LOGD("%s: Unknown phy error event", __func__);
2968 		return -EINVAL;
2969 	}
2970 
2971 	/* Handle Spectral or DFS PHY Error */
2972 	if (phyerr.phy_err_mask0 & (WMI_PHY_ERROR_MASK0_RADAR |
2973 	    WMI_PHY_ERROR_MASK0_FALSE_RADAR_EXT)) {
2974 		if (wma->is_dfs_offloaded) {
2975 			WMA_LOGD("%s: Unexpected phy error, dfs offloaded",
2976 				 __func__);
2977 			return -EINVAL;
2978 		}
2979 		status = dfs_phyerr_event_handler(wma,
2980 						  phyerr.bufp,
2981 						  phyerr.buf_len,
2982 						  phyerr.tsf64);
2983 	} else if (phyerr.phy_err_mask0 & (WMI_PHY_ERROR_MASK0_SPECTRAL_SCAN |
2984 		   WMI_PHY_ERROR_MASK0_FALSE_RADAR_EXT)) {
2985 		status = spectral_phyerr_event_handler(wma, data, datalen);
2986 	}
2987 
2988 	return qdf_status_to_os_return(status);
2989 }
2990 
/**
 * wma_vdev_init() - initialize a per-vdev wma interface entry
 * @vdev: wma txrx node (interface entry) to initialize
 *
 * Creates the vdev start/stop/set-key wakelocks and clears the
 * waiting-for-key flag. Paired with wma_vdev_deinit().
 *
 * Return: None
 */
void wma_vdev_init(struct wma_txrx_node *vdev)
{
	qdf_wake_lock_create(&vdev->vdev_start_wakelock, "vdev_start");
	qdf_wake_lock_create(&vdev->vdev_stop_wakelock, "vdev_stop");
	qdf_wake_lock_create(&vdev->vdev_set_key_wakelock, "vdev_set_key");
	vdev->is_waiting_for_key = false;
}
2998 
/**
 * wma_vdev_deinit() - tear down a per-vdev wma interface entry
 * @vdev: wma txrx node (interface entry) to deinitialize
 *
 * Destroys the wakelocks created by wma_vdev_init() and clears the
 * waiting-for-key flag.
 *
 * Return: None
 */
void wma_vdev_deinit(struct wma_txrx_node *vdev)
{
	qdf_wake_lock_destroy(&vdev->vdev_start_wakelock);
	qdf_wake_lock_destroy(&vdev->vdev_stop_wakelock);
	qdf_wake_lock_destroy(&vdev->vdev_set_key_wakelock);
	vdev->is_waiting_for_key = false;
}
3006 
3007 /**
3008  * wma_wmi_stop() - generic function to block WMI commands
3009  * @return: None
3010  */
3011 void wma_wmi_stop(void)
3012 {
3013 	tp_wma_handle wma_handle;
3014 
3015 	wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
3016 	if (wma_handle == NULL) {
3017 		QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
3018 			  "wma_handle is NULL\n");
3019 		return;
3020 	}
3021 	wmi_stop(wma_handle->wmi_handle);
3022 }
3023 
#ifdef QCA_SUPPORT_CP_STATS
/* No-op: presumably stats events are owned by the converged cp_stats
 * component when QCA_SUPPORT_CP_STATS is enabled — confirm against the
 * cp_stats registration path.
 */
static void wma_register_stats_events(wmi_unified_t wmi_handle) {}
#else
/**
 * wma_register_stats_events() - register legacy FW stats event handler
 * @wmi_handle: WMI handle to register the handler with
 *
 * Return: None
 */
static void wma_register_stats_events(wmi_unified_t wmi_handle)
{
	wmi_unified_register_event_handler(wmi_handle,
					   wmi_update_stats_event_id,
					   wma_stats_event_handler,
					   WMA_RX_SERIALIZER_CTX);
}
#endif
3035 
3036 /**
3037  * wma_open() - Allocate wma context and initialize it.
3038  * @cds_context:  cds context
3039  * @wma_tgt_cfg_cb: tgt config callback fun
3040  * @radar_ind_cb: dfs radar indication callback
3041  * @cds_cfg:  mac parameters
3042  *
3043  * Return: 0 on success, errno on failure
3044  */
3045 QDF_STATUS wma_open(struct wlan_objmgr_psoc *psoc,
3046 		    wma_tgt_cfg_cb tgt_cfg_cb,
3047 		    struct cds_config_info *cds_cfg,
3048 		    uint32_t target_type)
3049 {
3050 	tp_wma_handle wma_handle;
3051 	HTC_HANDLE htc_handle;
3052 	qdf_device_t qdf_dev;
3053 	void *wmi_handle;
3054 	QDF_STATUS qdf_status;
3055 	struct wmi_unified_attach_params *params;
3056 	struct policy_mgr_wma_cbacks wma_cbacks;
3057 	struct target_psoc_info *tgt_psoc_info;
3058 	int i;
3059 	void *cds_context;
3060 	target_resource_config *wlan_res_cfg;
3061 
3062 	WMA_LOGD("%s: Enter", __func__);
3063 
3064 	cds_context = cds_get_global_context();
3065 	if (!cds_context) {
3066 		WMA_LOGE("%s: Invalid CDS context", __func__);
3067 		return QDF_STATUS_E_INVAL;
3068 	}
3069 
3070 	g_wmi_version_info.major = __WMI_VER_MAJOR_;
3071 	g_wmi_version_info.minor = __WMI_VER_MINOR_;
3072 	g_wmi_version_info.revision = __WMI_REVISION_;
3073 
3074 	qdf_dev = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
3075 	htc_handle = cds_get_context(QDF_MODULE_ID_HTC);
3076 
3077 	if (!htc_handle) {
3078 		WMA_LOGE("%s: Invalid HTC handle", __func__);
3079 		return QDF_STATUS_E_INVAL;
3080 	}
3081 
3082 	/* Alloc memory for WMA Context */
3083 	qdf_status = cds_alloc_context(QDF_MODULE_ID_WMA,
3084 				       (void **)&wma_handle,
3085 				       sizeof(*wma_handle));
3086 
3087 	if (qdf_status != QDF_STATUS_SUCCESS) {
3088 		WMA_LOGE("%s: Memory allocation failed for wma_handle",
3089 			 __func__);
3090 		return qdf_status;
3091 	}
3092 
3093 	qdf_mem_zero(wma_handle, sizeof(t_wma_handle));
3094 
3095 	if (target_if_alloc_psoc_tgt_info(psoc)) {
3096 		WMA_LOGE("%s target psoc info allocation failed", __func__);
3097 		qdf_status = QDF_STATUS_E_NOMEM;
3098 		goto err_free_wma_handle;
3099 	}
3100 
3101 	if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE) {
3102 #ifdef FEATURE_WLAN_EXTSCAN
3103 		qdf_wake_lock_create(&wma_handle->extscan_wake_lock,
3104 					"wlan_extscan_wl");
3105 #endif /* FEATURE_WLAN_EXTSCAN */
3106 		qdf_wake_lock_create(&wma_handle->wow_wake_lock,
3107 			"wlan_wow_wl");
3108 		qdf_wake_lock_create(&wma_handle->wow_auth_req_wl,
3109 			"wlan_auth_req_wl");
3110 		qdf_wake_lock_create(&wma_handle->wow_assoc_req_wl,
3111 			"wlan_assoc_req_wl");
3112 		qdf_wake_lock_create(&wma_handle->wow_deauth_rec_wl,
3113 			"wlan_deauth_rec_wl");
3114 		qdf_wake_lock_create(&wma_handle->wow_disassoc_rec_wl,
3115 			"wlan_disassoc_rec_wl");
3116 		qdf_wake_lock_create(&wma_handle->wow_ap_assoc_lost_wl,
3117 			"wlan_ap_assoc_lost_wl");
3118 		qdf_wake_lock_create(&wma_handle->wow_auto_shutdown_wl,
3119 			"wlan_auto_shutdown_wl");
3120 		qdf_wake_lock_create(&wma_handle->roam_ho_wl,
3121 			"wlan_roam_ho_wl");
3122 	}
3123 
3124 	qdf_status = wlan_objmgr_psoc_try_get_ref(psoc, WLAN_LEGACY_WMA_ID);
3125 	if (QDF_IS_STATUS_ERROR(qdf_status)) {
3126 		WMA_LOGE("%s: PSOC get_ref fails", __func__);
3127 		goto err_get_psoc_ref;
3128 	}
3129 	wma_handle->psoc = psoc;
3130 
3131 	/* Open target_if layer and register wma callback */
3132 	wma_target_if_open(wma_handle);
3133 	target_if_open(wma_get_psoc_from_scn_handle);
3134 
3135 	/*
3136 	 * Allocate locally used params with its rx_ops member,
3137 	 * and free it immediately after used.
3138 	 */
3139 	params = qdf_mem_malloc(sizeof(*params) + sizeof(struct wmi_rx_ops));
3140 	if (!params) {
3141 		WMA_LOGE("%s: failed to allocate attach params", __func__);
3142 		qdf_status = QDF_STATUS_E_NOMEM;
3143 		goto err_wma_handle;
3144 	}
3145 
3146 	params->rx_ops = (struct wmi_rx_ops *)(params + 1);
3147 	params->osdev = NULL;
3148 	params->target_type = WMI_TLV_TARGET;
3149 	params->use_cookie = false;
3150 	params->psoc = psoc;
3151 	params->max_commands = WMI_MAX_CMDS;
3152 	/* Attach mc_thread context processing function */
3153 	params->rx_ops->wma_process_fw_event_handler_cbk =
3154 						wma_process_fw_event_handler;
3155 
3156 	/* initialize tlv attach */
3157 	wmi_tlv_init();
3158 
3159 	/* attach the wmi */
3160 	wmi_handle = wmi_unified_attach(wma_handle, params);
3161 	qdf_mem_free(params);
3162 	if (!wmi_handle) {
3163 		WMA_LOGE("%s: failed to attach WMI", __func__);
3164 		qdf_status = QDF_STATUS_E_NOMEM;
3165 		goto err_wma_handle;
3166 	}
3167 
3168 	target_if_register_legacy_service_ready_cb(
3169 					wma_legacy_service_ready_event_handler);
3170 
3171 	WMA_LOGA("WMA --> wmi_unified_attach - success");
3172 
3173 	/* store the wmi handle in tgt_if_handle */
3174 	tgt_psoc_info = wlan_psoc_get_tgt_if_handle(psoc);
3175 
3176 	target_psoc_set_target_type(tgt_psoc_info, target_type);
3177 	/* Save the WMI & HTC handle */
3178 	target_psoc_set_wmi_hdl(tgt_psoc_info, wmi_handle);
3179 	wma_handle->wmi_handle = wmi_handle;
3180 	target_psoc_set_htc_hdl(tgt_psoc_info, htc_handle);
3181 	wma_handle->cds_context = cds_context;
3182 	wma_handle->qdf_dev = qdf_dev;
3183 	wma_handle->max_scan = cds_cfg->max_scan;
3184 
3185 	/* Register Converged Event handlers */
3186 	init_deinit_register_tgt_psoc_ev_handlers(psoc);
3187 
3188 	/* Initialize max_no_of_peers for wma_get_number_of_peers_supported() */
3189 	wma_init_max_no_of_peers(wma_handle, cds_cfg->max_station);
3190 	/* Cap maxStation based on the target version */
3191 	cds_cfg->max_station = wma_get_number_of_peers_supported(wma_handle);
3192 	/* Reinitialize max_no_of_peers based on the capped maxStation value */
3193 	wma_init_max_no_of_peers(wma_handle, cds_cfg->max_station);
3194 
3195 	/* initialize default target config */
3196 	wlan_res_cfg = target_psoc_get_wlan_res_cfg(tgt_psoc_info);
3197 	if (!wlan_res_cfg) {
3198 		WMA_LOGE("%s: wlan_res_cfg is null", __func__);
3199 		qdf_status = QDF_STATUS_E_NOMEM;
3200 		goto err_wma_handle;
3201 	}
3202 
3203 	wma_set_default_tgt_config(wma_handle, wlan_res_cfg);
3204 
3205 	wma_handle->tx_chain_mask_cck = cds_cfg->tx_chain_mask_cck;
3206 	wma_handle->self_gen_frm_pwr = cds_cfg->self_gen_frm_pwr;
3207 	wma_init_max_no_of_peers(wma_handle, cds_cfg->max_station);
3208 	cds_cfg->max_station = wma_get_number_of_peers_supported(wma_handle);
3209 
3210 	cds_cfg->max_bssid = WMA_MAX_SUPPORTED_BSS;
3211 
3212 	wlan_res_cfg->num_keep_alive_pattern = WMA_MAXNUM_PERIODIC_TX_PTRNS;
3213 
3214 	/* The current firmware implementation requires the number of
3215 	 * offload peers should be (number of vdevs + 1).
3216 	 */
3217 	wlan_res_cfg->num_offload_peers =
3218 		cds_cfg->ap_maxoffload_peers + 1;
3219 
3220 	wlan_res_cfg->num_offload_reorder_buffs =
3221 		cds_cfg->ap_maxoffload_reorderbuffs + 1;
3222 
3223 	wma_handle->max_station = cds_cfg->max_station;
3224 	wma_handle->max_bssid = cds_cfg->max_bssid;
3225 	wma_handle->driver_type = cds_cfg->driver_type;
3226 	wma_handle->ssdp = cds_cfg->ssdp;
3227 	wma_handle->enable_mc_list = cds_cfg->enable_mc_list;
3228 	wma_handle->bpf_packet_filter_enable =
3229 		cds_cfg->bpf_packet_filter_enable;
3230 	wma_handle->active_uc_bpf_mode = cds_cfg->active_uc_bpf_mode;
3231 	wma_handle->active_mc_bc_bpf_mode = cds_cfg->active_mc_bc_bpf_mode;
3232 	wma_handle->link_stats_results = NULL;
3233 #ifdef FEATURE_WLAN_RA_FILTERING
3234 	wma_handle->IsRArateLimitEnabled = cds_cfg->is_ra_ratelimit_enabled;
3235 	wma_handle->RArateLimitInterval = cds_cfg->ra_ratelimit_interval;
3236 #endif /* FEATURE_WLAN_RA_FILTERING */
3237 #ifdef WLAN_FEATURE_LPSS
3238 	wma_handle->is_lpass_enabled = cds_cfg->is_lpass_enabled;
3239 #endif
3240 	wma_set_nan_enable(wma_handle, cds_cfg);
3241 	wma_handle->interfaces = qdf_mem_malloc(sizeof(struct wma_txrx_node) *
3242 						wma_handle->max_bssid);
3243 	if (!wma_handle->interfaces) {
3244 		WMA_LOGE("%s: failed to allocate interface table", __func__);
3245 		qdf_status = QDF_STATUS_E_NOMEM;
3246 		goto err_scn_context;
3247 	}
3248 
3249 	for (i = 0; i < wma_handle->max_bssid; ++i) {
3250 		wma_vdev_init(&wma_handle->interfaces[i]);
3251 		wma_handle->interfaces[i].delay_before_vdev_stop =
3252 			cds_cfg->delay_before_vdev_stop;
3253 	}
3254 	/* Register the debug print event handler */
3255 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
3256 					wmi_debug_print_event_id,
3257 					wma_unified_debug_print_event_handler,
3258 					WMA_RX_SERIALIZER_CTX);
3259 	/* Register profiling event Handler */
3260 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
3261 					wmi_wlan_profile_data_event_id,
3262 					wma_profile_data_report_event_handler,
3263 					WMA_RX_SERIALIZER_CTX);
3264 
3265 	wma_handle->tgt_cfg_update_cb = tgt_cfg_cb;
3266 	wma_handle->old_hw_mode_index = WMA_DEFAULT_HW_MODE_INDEX;
3267 	wma_handle->new_hw_mode_index = WMA_DEFAULT_HW_MODE_INDEX;
3268 	wma_handle->saved_chan.num_channels = 0;
3269 	wma_handle->fw_timeout_crash = cds_cfg->fw_timeout_crash;
3270 
3271 	qdf_status = qdf_mc_timer_init(&wma_handle->service_ready_ext_timer,
3272 					QDF_TIMER_TYPE_SW,
3273 					wma_service_ready_ext_evt_timeout,
3274 					wma_handle);
3275 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
3276 		WMA_LOGE("Failed to initialize service ready ext timeout");
3277 		goto err_event_init;
3278 	}
3279 
3280 	qdf_status = qdf_event_create(&wma_handle->target_suspend);
3281 	if (qdf_status != QDF_STATUS_SUCCESS) {
3282 		WMA_LOGE("%s: target suspend event initialization failed",
3283 			 __func__);
3284 		goto err_event_init;
3285 	}
3286 
3287 	/* Init Tx Frame Complete event */
3288 	qdf_status = qdf_event_create(&wma_handle->tx_frm_download_comp_event);
3289 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
3290 		WMA_LOGE("%s: failed to init tx_frm_download_comp_event",
3291 			 __func__);
3292 		goto err_event_init;
3293 	}
3294 
3295 	/* Init tx queue empty check event */
3296 	qdf_status = qdf_event_create(&wma_handle->tx_queue_empty_event);
3297 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
3298 		WMA_LOGE("%s: failed to init tx_queue_empty_event", __func__);
3299 		goto err_event_init;
3300 	}
3301 
3302 	qdf_status = qdf_event_create(&wma_handle->wma_resume_event);
3303 	if (qdf_status != QDF_STATUS_SUCCESS) {
3304 		WMA_LOGE("%s: wma_resume_event initialization failed",
3305 			 __func__);
3306 		goto err_event_init;
3307 	}
3308 
3309 	qdf_status = cds_shutdown_notifier_register(wma_shutdown_notifier_cb,
3310 						    wma_handle);
3311 	if (qdf_status != QDF_STATUS_SUCCESS) {
3312 		WMA_LOGE("%s: Shutdown notifier register failed: %d",
3313 			 __func__, qdf_status);
3314 		goto err_event_init;
3315 	}
3316 
3317 	qdf_status = qdf_event_create(&wma_handle->runtime_suspend);
3318 	if (qdf_status != QDF_STATUS_SUCCESS) {
3319 		WMA_LOGE("%s: runtime_suspend event initialization failed",
3320 			 __func__);
3321 		goto err_event_init;
3322 	}
3323 
3324 	qdf_status = qdf_event_create(&wma_handle->recovery_event);
3325 	if (qdf_status != QDF_STATUS_SUCCESS) {
3326 		WMA_LOGE("%s: recovery event initialization failed", __func__);
3327 		goto err_event_init;
3328 	}
3329 
3330 	qdf_list_create(&wma_handle->vdev_resp_queue,
3331 		      MAX_ENTRY_VDEV_RESP_QUEUE);
3332 	qdf_spinlock_create(&wma_handle->vdev_respq_lock);
3333 	qdf_list_create(&wma_handle->wma_hold_req_queue,
3334 		      MAX_ENTRY_HOLD_REQ_QUEUE);
3335 	qdf_spinlock_create(&wma_handle->wma_hold_req_q_lock);
3336 	qdf_atomic_init(&wma_handle->is_wow_bus_suspended);
3337 	qdf_atomic_init(&wma_handle->scan_id_counter);
3338 
3339 	/* Register vdev start response event handler */
3340 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
3341 					   wmi_vdev_start_resp_event_id,
3342 					   wma_vdev_start_resp_handler,
3343 					   WMA_RX_SERIALIZER_CTX);
3344 
3345 	/* Register vdev stop response event handler */
3346 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
3347 					   wmi_vdev_stopped_event_id,
3348 					   wma_vdev_stop_resp_handler,
3349 					   WMA_RX_SERIALIZER_CTX);
3350 
3351 	/* register for STA kickout function */
3352 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
3353 					   wmi_peer_sta_kickout_event_id,
3354 					   wma_peer_sta_kickout_event_handler,
3355 					   WMA_RX_SERIALIZER_CTX);
3356 
3357 	/* register for stats event */
3358 	wma_register_stats_events(wma_handle->wmi_handle);
3359 
3360 	/* register for stats response event */
3361 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
3362 					   wmi_get_arp_stats_req_id,
3363 					   wma_get_arp_stats_handler,
3364 					   WMA_RX_SERIALIZER_CTX);
3365 
3366 	/* register for peer info response event */
3367 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
3368 					   wmi_peer_stats_info_event_id,
3369 					   wma_peer_info_event_handler,
3370 					   WMA_RX_SERIALIZER_CTX);
3371 
3372 #ifdef WLAN_POWER_DEBUGFS
3373 	/* register for Chip Power stats event */
3374 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
3375 				wmi_pdev_chip_power_stats_event_id,
3376 				wma_unified_power_debug_stats_event_handler,
3377 				WMA_RX_SERIALIZER_CTX);
3378 #endif
3379 
3380 	/* register for linkspeed response event */
3381 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
3382 					   wmi_peer_estimated_linkspeed_event_id,
3383 					   wma_link_speed_event_handler,
3384 					   WMA_RX_SERIALIZER_CTX);
3385 
3386 #ifdef FEATURE_OEM_DATA_SUPPORT
3387 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
3388 					   wmi_oem_response_event_id,
3389 					   wma_oem_data_response_handler,
3390 					   WMA_RX_SERIALIZER_CTX);
3391 #endif /* FEATURE_OEM_DATA_SUPPORT */
3392 
3393 	/* Register peer change event handler */
3394 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
3395 					   wmi_peer_state_event_id,
3396 					   wma_peer_state_change_event_handler,
3397 					   WMA_RX_WORK_CTX);
3398 
3399 	/* Register beacon tx complete event id. The event is required
3400 	 * for sending channel switch announcement frames
3401 	 */
3402 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
3403 					wmi_offload_bcn_tx_status_event_id,
3404 					wma_unified_bcntx_status_event_handler,
3405 					WMA_RX_SERIALIZER_CTX);
3406 
3407 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
3408 					   wmi_update_vdev_rate_stats_event_id,
3409 					   wma_link_status_event_handler,
3410 					   WMA_RX_SERIALIZER_CTX);
3411 #ifdef WLAN_FEATURE_LINK_LAYER_STATS
3412 	/* Register event handler for processing Link Layer Stats
3413 	 * response from the FW
3414 	 */
3415 	wma_register_ll_stats_event_handler(wma_handle);
3416 
3417 #endif /* WLAN_FEATURE_LINK_LAYER_STATS */
3418 
3419 	wmi_set_tgt_assert(wma_handle->wmi_handle,
3420 			   cds_cfg->force_target_assert_enabled);
3421 	/* Firmware debug log */
3422 	qdf_status = dbglog_init(wma_handle->wmi_handle);
3423 	if (qdf_status != QDF_STATUS_SUCCESS) {
3424 		WMA_LOGE("%s: Firmware Dbglog initialization failed", __func__);
3425 		goto err_dbglog_init;
3426 	}
3427 
3428 	/*
3429 	 * Update Powersave mode
3430 	 * 1 - Legacy Powersave + Deepsleep Disabled
3431 	 * 2 - QPower + Deepsleep Disabled
3432 	 * 3 - Legacy Powersave + Deepsleep Enabled
3433 	 * 4 - QPower + Deepsleep Enabled
3434 	 */
3435 	wma_handle->powersave_mode = cds_cfg->powersave_offload_enabled;
3436 	wma_handle->staMaxLIModDtim = cds_cfg->sta_maxlimod_dtim;
3437 	wma_handle->staModDtim = cds_cfg->sta_mod_dtim;
3438 	wma_handle->staDynamicDtim = cds_cfg->sta_dynamic_dtim;
3439 
3440 	/*
3441 	 * Value of cds_cfg->wow_enable can be,
3442 	 * 0 - Disable both magic pattern match and pattern byte match.
3443 	 * 1 - Enable magic pattern match on all interfaces.
3444 	 * 2 - Enable pattern byte match on all interfaces.
3445 	 * 3 - Enable both magic patter and pattern byte match on
3446 	 *     all interfaces.
3447 	 */
3448 	wma_handle->wow.magic_ptrn_enable =
3449 		(cds_cfg->wow_enable & 0x01) ? true : false;
3450 	wma_handle->ptrn_match_enable_all_vdev =
3451 		(cds_cfg->wow_enable & 0x02) ? true : false;
3452 
3453 	/* register for install key completion event */
3454 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
3455 				wmi_vdev_install_key_complete_event_id,
3456 				wma_vdev_install_key_complete_event_handler,
3457 				WMA_RX_SERIALIZER_CTX);
3458 #ifdef WLAN_FEATURE_NAN
3459 	/* register for nan response event */
3460 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
3461 					   wmi_nan_event_id,
3462 					   wma_nan_rsp_event_handler,
3463 					   WMA_RX_SERIALIZER_CTX);
3464 #endif /* WLAN_FEATURE_NAN */
3465 
3466 #ifdef WLAN_FEATURE_STATS_EXT
3467 	/* register for extended stats event */
3468 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
3469 					   wmi_stats_ext_event_id,
3470 					   wma_stats_ext_event_handler,
3471 					   WMA_RX_SERIALIZER_CTX);
3472 #endif /* WLAN_FEATURE_STATS_EXT */
3473 #ifdef FEATURE_WLAN_EXTSCAN
3474 	wma_register_extscan_event_handler(wma_handle);
3475 #endif /* WLAN_FEATURE_STATS_EXT */
3476 
3477 	WMA_LOGD("%s: Exit", __func__);
3478 
3479 #ifdef WLAN_FEATURE_ROAM_OFFLOAD
3480 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
3481 					   wmi_roam_synch_event_id,
3482 					   wma_roam_synch_event_handler,
3483 					   WMA_RX_SERIALIZER_CTX);
3484 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
3485 				   wmi_roam_synch_frame_event_id,
3486 				   wma_roam_synch_frame_event_handler,
3487 				   WMA_RX_SERIALIZER_CTX);
3488 #endif /* WLAN_FEATURE_ROAM_OFFLOAD */
3489 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
3490 				wmi_rssi_breach_event_id,
3491 				wma_rssi_breached_event_handler,
3492 				WMA_RX_SERIALIZER_CTX);
3493 
3494 	qdf_wake_lock_create(&wma_handle->wmi_cmd_rsp_wake_lock,
3495 					"wlan_fw_rsp_wakelock");
3496 	qdf_runtime_lock_init(&wma_handle->wmi_cmd_rsp_runtime_lock);
3497 
3498 	/* Register peer assoc conf event handler */
3499 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
3500 					   wmi_peer_assoc_conf_event_id,
3501 					   wma_peer_assoc_conf_handler,
3502 					   WMA_RX_SERIALIZER_CTX);
3503 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
3504 					   wmi_vdev_delete_resp_event_id,
3505 					   wma_vdev_delete_handler,
3506 					   WMA_RX_SERIALIZER_CTX);
3507 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
3508 					   wmi_peer_delete_response_event_id,
3509 					   wma_peer_delete_handler,
3510 					   WMA_RX_SERIALIZER_CTX);
3511 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
3512 					   wmi_bpf_capability_info_event_id,
3513 					   wma_get_bpf_caps_event_handler,
3514 					   WMA_RX_SERIALIZER_CTX);
3515 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
3516 					   wmi_chan_info_event_id,
3517 					   wma_chan_info_event_handler,
3518 					   WMA_RX_SERIALIZER_CTX);
3519 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
3520 				wmi_dbg_mesg_flush_complete_event_id,
3521 				wma_flush_complete_evt_handler,
3522 				WMA_RX_WORK_CTX);
3523 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
3524 				wmi_report_rx_aggr_failure_event_id,
3525 				wma_rx_aggr_failure_event_handler,
3526 				WMA_RX_SERIALIZER_CTX);
3527 
3528 	wma_handle->ito_repeat_count = cds_cfg->ito_repeat_count;
3529 	wma_handle->bandcapability = cds_cfg->bandcapability;
3530 
3531 	/* Register PWR_SAVE_FAIL event only in case of recovery(1) */
3532 	if (cds_cfg->auto_power_save_fail_mode ==
3533 	    PMO_FW_TO_SEND_WOW_IND_ON_PWR_FAILURE) {
3534 		wmi_unified_register_event_handler(wma_handle->wmi_handle,
3535 			wmi_pdev_chip_pwr_save_failure_detect_event_id,
3536 			wma_chip_power_save_failure_detected_handler,
3537 			WMA_RX_WORK_CTX);
3538 	}
3539 
3540 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
3541 				wmi_pdev_div_rssi_antid_event_id,
3542 				wma_pdev_div_info_evt_handler,
3543 				WMA_RX_WORK_CTX);
3544 
3545 
3546 	wma_register_debug_callback();
3547 	/* Register callback with PMO so PMO can update the vdev pause bitmap*/
3548 	pmo_register_pause_bitmap_notifier(wma_handle->psoc,
3549 		wma_vdev_update_pause_bitmap);
3550 	pmo_register_get_pause_bitmap(wma_handle->psoc,
3551 		wma_vdev_get_pause_bitmap);
3552 	pmo_register_is_device_in_low_pwr_mode(wma_handle->psoc,
3553 		wma_vdev_is_device_in_low_pwr_mode);
3554 	wma_cbacks.wma_get_connection_info = wma_get_connection_info;
3555 	qdf_status = policy_mgr_register_wma_cb(wma_handle->psoc, &wma_cbacks);
3556 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
3557 		WMA_LOGE("Failed to register wma cb with Policy Manager");
3558 	}
3559 
3560 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
3561 			wmi_phyerr_event_id,
3562 			wma_unified_phyerr_rx_event_handler,
3563 			WMA_RX_WORK_CTX);
3564 
3565 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
3566 			wmi_sap_obss_detection_report_event_id,
3567 			wma_vdev_obss_detection_info_handler,
3568 			WMA_RX_SERIALIZER_CTX);
3569 
3570 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
3571 			wmi_obss_color_collision_report_event_id,
3572 			wma_vdev_bss_color_collision_info_handler,
3573 			WMA_RX_WORK_CTX);
3574 
3575 
3576 	return QDF_STATUS_SUCCESS;
3577 
3578 err_dbglog_init:
3579 	qdf_wake_lock_destroy(&wma_handle->wmi_cmd_rsp_wake_lock);
3580 	qdf_runtime_lock_deinit(&wma_handle->wmi_cmd_rsp_runtime_lock);
3581 	qdf_spinlock_destroy(&wma_handle->vdev_respq_lock);
3582 	qdf_spinlock_destroy(&wma_handle->wma_hold_req_q_lock);
3583 err_event_init:
3584 	wmi_unified_unregister_event_handler(wma_handle->wmi_handle,
3585 					     wmi_debug_print_event_id);
3586 
3587 	for (i = 0; i < wma_handle->max_bssid; ++i)
3588 		wma_vdev_deinit(&wma_handle->interfaces[i]);
3589 
3590 	qdf_mem_free(wma_handle->interfaces);
3591 
3592 err_scn_context:
3593 	qdf_mem_free(((struct cds_context *) cds_context)->cfg_ctx);
3594 	OS_FREE(wmi_handle);
3595 
3596 err_wma_handle:
3597 	target_if_close();
3598 	wlan_objmgr_psoc_release_ref(psoc, WLAN_LEGACY_WMA_ID);
3599 err_get_psoc_ref:
3600 	target_if_free_psoc_tgt_info(psoc);
3601 	if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE) {
3602 #ifdef FEATURE_WLAN_EXTSCAN
3603 		qdf_wake_lock_destroy(&wma_handle->extscan_wake_lock);
3604 #endif /* FEATURE_WLAN_EXTSCAN */
3605 		qdf_wake_lock_destroy(&wma_handle->wow_wake_lock);
3606 		qdf_wake_lock_destroy(&wma_handle->wow_auth_req_wl);
3607 		qdf_wake_lock_destroy(&wma_handle->wow_assoc_req_wl);
3608 		qdf_wake_lock_destroy(&wma_handle->wow_deauth_rec_wl);
3609 		qdf_wake_lock_destroy(&wma_handle->wow_disassoc_rec_wl);
3610 		qdf_wake_lock_destroy(&wma_handle->wow_ap_assoc_lost_wl);
3611 		qdf_wake_lock_destroy(&wma_handle->wow_auto_shutdown_wl);
3612 		qdf_wake_lock_destroy(&wma_handle->roam_ho_wl);
3613 	}
3614 err_free_wma_handle:
3615 	cds_free_context(QDF_MODULE_ID_WMA, wma_handle);
3616 
3617 	WMA_LOGD("%s: Exit", __func__);
3618 
3619 	return qdf_status;
3620 }
3621 
3622 /**
3623  * wma_pre_start() - wma pre start
3624  *
3625  * Return: 0 on success, errno on failure
3626  */
3627 QDF_STATUS wma_pre_start(void)
3628 {
3629 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
3630 	tp_wma_handle wma_handle;
3631 	struct scheduler_msg wma_msg = { 0 };
3632 	void *htc_handle;
3633 
3634 	WMA_LOGD("%s: Enter", __func__);
3635 
3636 	wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
3637 
3638 	/* Validate the wma_handle */
3639 	if (NULL == wma_handle) {
3640 		WMA_LOGE("%s: invalid wma handle", __func__);
3641 		qdf_status = QDF_STATUS_E_INVAL;
3642 		goto end;
3643 	}
3644 
3645 	htc_handle = lmac_get_htc_hdl(wma_handle->psoc);
3646 	if (!htc_handle) {
3647 		WMA_LOGE("%s: invalid htc handle", __func__);
3648 		qdf_status = QDF_STATUS_E_INVAL;
3649 		goto end;
3650 	}
3651 
3652 	/* Open endpoint for ctrl path - WMI <--> HTC */
3653 	qdf_status = wmi_unified_connect_htc_service(wma_handle->wmi_handle,
3654 						     htc_handle);
3655 	if (qdf_status != QDF_STATUS_SUCCESS) {
3656 		WMA_LOGE("%s: wmi_unified_connect_htc_service", __func__);
3657 		if (!cds_is_fw_down())
3658 			QDF_BUG(0);
3659 
3660 		qdf_status = QDF_STATUS_E_FAULT;
3661 		goto end;
3662 	}
3663 
3664 	WMA_LOGD("WMA --> wmi_unified_connect_htc_service - success");
3665 
3666 	/* Trigger the CFG DOWNLOAD */
3667 	wma_msg.type = WNI_CFG_DNLD_REQ;
3668 	wma_msg.bodyptr = NULL;
3669 	wma_msg.bodyval = 0;
3670 
3671 	qdf_status = scheduler_post_msg(QDF_MODULE_ID_WMA, &wma_msg);
3672 	if (QDF_STATUS_SUCCESS != qdf_status) {
3673 		WMA_LOGE("%s: Failed to post WNI_CFG_DNLD_REQ msg", __func__);
3674 		QDF_ASSERT(0);
3675 		qdf_status = QDF_STATUS_E_FAILURE;
3676 	}
3677 end:
3678 	WMA_LOGD("%s: Exit", __func__);
3679 	return qdf_status;
3680 }
3681 
3682 void wma_send_msg_by_priority(tp_wma_handle wma_handle, uint16_t msg_type,
3683 		 void *body_ptr, uint32_t body_val, bool is_high_priority)
3684 {
3685 	struct scheduler_msg msg = {0};
3686 	QDF_STATUS status;
3687 
3688 	msg.type = msg_type;
3689 	msg.bodyval = body_val;
3690 	msg.bodyptr = body_ptr;
3691 	msg.flush_callback = wma_discard_fw_event;
3692 
3693 	status = scheduler_post_msg_by_priority(QDF_MODULE_ID_PE,
3694 					       &msg, is_high_priority);
3695 	if (!QDF_IS_STATUS_SUCCESS(status)) {
3696 		WMA_LOGE("Failed to post msg %d to PE", msg_type);
3697 		if (body_ptr)
3698 			qdf_mem_free(body_ptr);
3699 	}
3700 }
3701 
3702 
/**
 * wma_send_msg() - post a normal-priority message to PE
 * @wma_handle: wma handle
 * @msg_type: message type
 * @body_ptr: message body pointer (freed on post failure)
 * @body_val: message body value
 *
 * Thin wrapper around wma_send_msg_by_priority() with
 * is_high_priority = false.
 *
 * Return: None
 */
void wma_send_msg(tp_wma_handle wma_handle, uint16_t msg_type,
			 void *body_ptr, uint32_t body_val)
{
	wma_send_msg_by_priority(wma_handle, msg_type,
				body_ptr, body_val, false);
}
3709 
/**
 * wma_send_msg_high_priority() - post a high-priority message to PE
 * @wma_handle: wma handle
 * @msg_type: message type
 * @body_ptr: message body pointer (freed on post failure)
 * @body_val: message body value
 *
 * Thin wrapper around wma_send_msg_by_priority() with
 * is_high_priority = true.
 *
 * Return: None
 */
void wma_send_msg_high_priority(tp_wma_handle wma_handle, uint16_t msg_type,
			 void *body_ptr, uint32_t body_val)
{
	wma_send_msg_by_priority(wma_handle, msg_type,
				body_ptr, body_val, true);
}
3716 
3717 /**
3718  * wma_set_base_macaddr_indicate() - set base mac address in fw
3719  * @wma_handle: wma handle
3720  * @customAddr: base mac address
3721  *
3722  * Return: 0 for success or error code
3723  */
3724 static int wma_set_base_macaddr_indicate(tp_wma_handle wma_handle,
3725 					 tSirMacAddr *customAddr)
3726 {
3727 	int err;
3728 
3729 	err = wmi_unified_set_base_macaddr_indicate_cmd(wma_handle->wmi_handle,
3730 				     (uint8_t *)customAddr);
3731 	if (err)
3732 		return -EIO;
3733 	WMA_LOGD("Base MAC Addr: " MAC_ADDRESS_STR,
3734 		 MAC_ADDR_ARRAY((*customAddr)));
3735 
3736 	return 0;
3737 }
3738 
3739 /**
3740  * wma_log_supported_evt_handler() - Enable/Disable FW diag/log events
3741  * @handle: WMA handle
3742  * @event:  Event received from FW
3743  * @len:    Length of the event
3744  *
3745  * Enables the low frequency events and disables the high frequency
3746  * events. Bit 17 indicates if the event if low/high frequency.
3747  * 1 - high frequency, 0 - low frequency
3748  *
3749  * Return: 0 on successfully enabling/disabling the events
3750  */
3751 static int wma_log_supported_evt_handler(void *handle,
3752 		uint8_t *event,
3753 		uint32_t len)
3754 {
3755 	tp_wma_handle wma = (tp_wma_handle) handle;
3756 
3757 	if (wmi_unified_log_supported_evt_cmd(wma->wmi_handle,
3758 				event, len))
3759 		return -EINVAL;
3760 
3761 	return 0;
3762 }
3763 
3764 /**
3765  * wma_pdev_set_hw_mode_resp_evt_handler() - Set HW mode resp evt handler
3766  * @handle: WMI handle
3767  * @event:  Event recevied from FW
3768  * @len:    Length of the event
3769  *
3770  * Event handler for WMI_PDEV_SET_HW_MODE_RESP_EVENTID that is sent to host
3771  * driver in response to a WMI_PDEV_SET_HW_MODE_CMDID being sent to WLAN
3772  * firmware
3773  *
3774  * Return: QDF_STATUS
3775  */
3776 static int wma_pdev_set_hw_mode_resp_evt_handler(void *handle,
3777 		uint8_t *event,
3778 		uint32_t len)
3779 {
3780 	WMI_PDEV_SET_HW_MODE_RESP_EVENTID_param_tlvs *param_buf;
3781 	wmi_pdev_set_hw_mode_response_event_fixed_param *wmi_event;
3782 	wmi_pdev_set_hw_mode_response_vdev_mac_entry *vdev_mac_entry;
3783 	uint32_t i;
3784 	struct sir_set_hw_mode_resp *hw_mode_resp;
3785 	tp_wma_handle wma = (tp_wma_handle) handle;
3786 
3787 	if (!wma) {
3788 		WMA_LOGE("%s: Invalid WMA handle", __func__);
3789 		/* Since WMA handle itself is NULL, we cannot send fail
3790 		 * response back to LIM here
3791 		 */
3792 		return QDF_STATUS_E_NULL_VALUE;
3793 	}
3794 
3795 	wma_release_wakelock(&wma->wmi_cmd_rsp_wake_lock);
3796 	wma_remove_req(wma, 0, WMA_PDEV_SET_HW_MODE_RESP);
3797 
3798 	hw_mode_resp = qdf_mem_malloc(sizeof(*hw_mode_resp));
3799 	if (!hw_mode_resp) {
3800 		WMA_LOGE("%s: Memory allocation failed", __func__);
3801 		/* Since this memory allocation itself failed, we cannot
3802 		 * send fail response back to LIM here
3803 		 */
3804 		return QDF_STATUS_E_NULL_VALUE;
3805 	}
3806 
3807 	param_buf = (WMI_PDEV_SET_HW_MODE_RESP_EVENTID_param_tlvs *) event;
3808 	if (!param_buf) {
3809 		WMA_LOGE("Invalid WMI_PDEV_SET_HW_MODE_RESP_EVENTID event");
3810 		/* Need to send response back to upper layer to free
3811 		 * active command list
3812 		 */
3813 		goto fail;
3814 	}
3815 	if (param_buf->fixed_param->num_vdev_mac_entries >=
3816 						MAX_VDEV_SUPPORTED) {
3817 		WMA_LOGE("num_vdev_mac_entries crossed max value");
3818 		goto fail;
3819 	}
3820 
3821 	wmi_event = param_buf->fixed_param;
3822 	if (wmi_event->num_vdev_mac_entries >
3823 	    param_buf->num_wmi_pdev_set_hw_mode_response_vdev_mac_mapping) {
3824 		WMA_LOGE("Invalid num_vdev_mac_entries: %d",
3825 				wmi_event->num_vdev_mac_entries);
3826 		goto fail;
3827 	}
3828 	hw_mode_resp->status = wmi_event->status;
3829 	hw_mode_resp->cfgd_hw_mode_index = wmi_event->cfgd_hw_mode_index;
3830 	hw_mode_resp->num_vdev_mac_entries = wmi_event->num_vdev_mac_entries;
3831 
3832 	WMA_LOGD("%s: status:%d cfgd_hw_mode_index:%d num_vdev_mac_entries:%d",
3833 			__func__, wmi_event->status,
3834 			wmi_event->cfgd_hw_mode_index,
3835 			wmi_event->num_vdev_mac_entries);
3836 	vdev_mac_entry =
3837 		param_buf->wmi_pdev_set_hw_mode_response_vdev_mac_mapping;
3838 
3839 	/* Store the vdev-mac map in WMA and prepare to send to PE  */
3840 	for (i = 0; i < wmi_event->num_vdev_mac_entries; i++) {
3841 		uint32_t vdev_id, mac_id, pdev_id;
3842 
3843 		vdev_id = vdev_mac_entry[i].vdev_id;
3844 		pdev_id = vdev_mac_entry[i].pdev_id;
3845 		if (pdev_id == WMI_PDEV_ID_SOC) {
3846 			WMA_LOGE("%s: soc level id received for mac id)",
3847 				__func__);
3848 			QDF_BUG(0);
3849 			goto fail;
3850 		}
3851 		mac_id = WMA_PDEV_TO_MAC_MAP(vdev_mac_entry[i].pdev_id);
3852 
3853 		WMA_LOGD("%s: vdev_id:%d mac_id:%d",
3854 			__func__, vdev_id, mac_id);
3855 
3856 		hw_mode_resp->vdev_mac_map[i].vdev_id = vdev_id;
3857 		hw_mode_resp->vdev_mac_map[i].mac_id = mac_id;
3858 		wma_update_intf_hw_mode_params(vdev_id, mac_id,
3859 				wmi_event->cfgd_hw_mode_index);
3860 	}
3861 
3862 	if (hw_mode_resp->status == SET_HW_MODE_STATUS_OK) {
3863 		if (WMA_DEFAULT_HW_MODE_INDEX == wma->new_hw_mode_index) {
3864 			wma->new_hw_mode_index = wmi_event->cfgd_hw_mode_index;
3865 		} else {
3866 			wma->old_hw_mode_index = wma->new_hw_mode_index;
3867 			wma->new_hw_mode_index = wmi_event->cfgd_hw_mode_index;
3868 		}
3869 		policy_mgr_update_hw_mode_index(wma->psoc,
3870 		wmi_event->cfgd_hw_mode_index);
3871 	}
3872 
3873 	WMA_LOGD("%s: Updated: old_hw_mode_index:%d new_hw_mode_index:%d",
3874 		__func__, wma->old_hw_mode_index, wma->new_hw_mode_index);
3875 
3876 	wma_send_msg(wma, SIR_HAL_PDEV_SET_HW_MODE_RESP,
3877 		     (void *) hw_mode_resp, 0);
3878 
3879 	return QDF_STATUS_SUCCESS;
3880 
3881 fail:
3882 	WMA_LOGE("%s: Sending fail response to LIM", __func__);
3883 	hw_mode_resp->status = SET_HW_MODE_STATUS_ECANCELED;
3884 	hw_mode_resp->cfgd_hw_mode_index = 0;
3885 	hw_mode_resp->num_vdev_mac_entries = 0;
3886 	wma_send_msg(wma, SIR_HAL_PDEV_SET_HW_MODE_RESP,
3887 			(void *) hw_mode_resp, 0);
3888 
3889 	return QDF_STATUS_E_FAILURE;
3890 }
3891 
3892 /**
3893  * wma_process_pdev_hw_mode_trans_ind() - Process HW mode transition info
3894  *
3895  * @handle: WMA handle
3896  * @fixed_param: Event fixed parameters
3897  * @vdev_mac_entry - vdev mac entry
3898  * @hw_mode_trans_ind - Buffer to store parsed information
3899  *
3900  * Parses fixed_param, vdev_mac_entry and fills in the information into
3901  * hw_mode_trans_ind and wma
3902  *
3903  * Return: None
3904  */
3905 void wma_process_pdev_hw_mode_trans_ind(void *handle,
3906 	wmi_pdev_hw_mode_transition_event_fixed_param *fixed_param,
3907 	wmi_pdev_set_hw_mode_response_vdev_mac_entry *vdev_mac_entry,
3908 	struct sir_hw_mode_trans_ind *hw_mode_trans_ind)
3909 {
3910 	uint32_t i;
3911 	tp_wma_handle wma = (tp_wma_handle) handle;
3912 	if (fixed_param->num_vdev_mac_entries > MAX_VDEV_SUPPORTED) {
3913 		WMA_LOGE("Number of Vdev mac entries %d exceeded"
3914 			 " max vdev supported %d",
3915 			 fixed_param->num_vdev_mac_entries,
3916 			 MAX_VDEV_SUPPORTED);
3917 		return;
3918 	}
3919 	hw_mode_trans_ind->old_hw_mode_index = fixed_param->old_hw_mode_index;
3920 	hw_mode_trans_ind->new_hw_mode_index = fixed_param->new_hw_mode_index;
3921 	hw_mode_trans_ind->num_vdev_mac_entries =
3922 					fixed_param->num_vdev_mac_entries;
3923 	WMA_LOGD("%s: old_hw_mode_index:%d new_hw_mode_index:%d entries=%d",
3924 		__func__, fixed_param->old_hw_mode_index,
3925 		fixed_param->new_hw_mode_index,
3926 		fixed_param->num_vdev_mac_entries);
3927 
3928 	/* Store the vdev-mac map in WMA and send to policy manager */
3929 	for (i = 0; i < fixed_param->num_vdev_mac_entries; i++) {
3930 		uint32_t vdev_id, mac_id, pdev_id;
3931 
3932 		vdev_id = vdev_mac_entry[i].vdev_id;
3933 		pdev_id = vdev_mac_entry[i].pdev_id;
3934 
3935 		if (pdev_id == WMI_PDEV_ID_SOC) {
3936 			WMA_LOGE("%s: soc level id received for mac id)",
3937 					__func__);
3938 			QDF_BUG(0);
3939 			return;
3940 		}
3941 
3942 		mac_id = WMA_PDEV_TO_MAC_MAP(vdev_mac_entry[i].pdev_id);
3943 
3944 		WMA_LOGE("%s: vdev_id:%d mac_id:%d",
3945 				__func__, vdev_id, mac_id);
3946 
3947 		hw_mode_trans_ind->vdev_mac_map[i].vdev_id = vdev_id;
3948 		hw_mode_trans_ind->vdev_mac_map[i].mac_id = mac_id;
3949 		wma_update_intf_hw_mode_params(vdev_id, mac_id,
3950 				fixed_param->new_hw_mode_index);
3951 	}
3952 	wma->old_hw_mode_index = fixed_param->old_hw_mode_index;
3953 	wma->new_hw_mode_index = fixed_param->new_hw_mode_index;
3954 	policy_mgr_update_new_hw_mode_index(wma->psoc,
3955 		fixed_param->new_hw_mode_index);
3956 	policy_mgr_update_old_hw_mode_index(wma->psoc,
3957 		fixed_param->old_hw_mode_index);
3958 
3959 	WMA_LOGD("%s: Updated: old_hw_mode_index:%d new_hw_mode_index:%d",
3960 		__func__, wma->old_hw_mode_index, wma->new_hw_mode_index);
3961 }
3962 
3963 /**
3964  * wma_pdev_hw_mode_transition_evt_handler() - HW mode transition evt handler
3965  * @handle: WMI handle
3966  * @event:  Event recevied from FW
3967  * @len:    Length of the event
3968  *
3969  * Event handler for WMI_PDEV_HW_MODE_TRANSITION_EVENTID that indicates an
3970  * asynchronous hardware mode transition. This event notifies the host driver
3971  * that firmware independently changed the hardware mode for some reason, such
3972  * as Coex, LFR 3.0, etc
3973  *
3974  * Return: Success on receiving valid params from FW
3975  */
3976 static int wma_pdev_hw_mode_transition_evt_handler(void *handle,
3977 		uint8_t *event,
3978 		uint32_t len)
3979 {
3980 	WMI_PDEV_HW_MODE_TRANSITION_EVENTID_param_tlvs *param_buf;
3981 	wmi_pdev_hw_mode_transition_event_fixed_param *wmi_event;
3982 	wmi_pdev_set_hw_mode_response_vdev_mac_entry *vdev_mac_entry;
3983 	struct sir_hw_mode_trans_ind *hw_mode_trans_ind;
3984 	tp_wma_handle wma = (tp_wma_handle) handle;
3985 
3986 	if (!wma) {
3987 		/* This is an async event. So, not sending any event to LIM */
3988 		WMA_LOGE("Invalid WMA handle");
3989 		return QDF_STATUS_E_NULL_VALUE;
3990 	}
3991 
3992 	param_buf = (WMI_PDEV_HW_MODE_TRANSITION_EVENTID_param_tlvs *) event;
3993 	if (!param_buf) {
3994 		/* This is an async event. So, not sending any event to LIM */
3995 		WMA_LOGE("Invalid WMI_PDEV_HW_MODE_TRANSITION_EVENTID event");
3996 		return QDF_STATUS_E_FAILURE;
3997 	}
3998 
3999 	if (param_buf->fixed_param->num_vdev_mac_entries > MAX_VDEV_SUPPORTED) {
4000 		WMA_LOGE("num_vdev_mac_entries: %d crossed max value: %d",
4001 			param_buf->fixed_param->num_vdev_mac_entries,
4002 			MAX_VDEV_SUPPORTED);
4003 		return QDF_STATUS_E_FAILURE;
4004 	}
4005 
4006 	hw_mode_trans_ind = qdf_mem_malloc(sizeof(*hw_mode_trans_ind));
4007 	if (!hw_mode_trans_ind) {
4008 		WMA_LOGE("%s: Memory allocation failed", __func__);
4009 		return QDF_STATUS_E_NOMEM;
4010 	}
4011 
4012 	wmi_event = param_buf->fixed_param;
4013 	vdev_mac_entry =
4014 		param_buf->wmi_pdev_set_hw_mode_response_vdev_mac_mapping;
4015 	if (wmi_event->num_vdev_mac_entries >
4016 	    param_buf->num_wmi_pdev_set_hw_mode_response_vdev_mac_mapping) {
4017 		WMA_LOGE("Invalid num_vdev_mac_entries: %d",
4018 			 wmi_event->num_vdev_mac_entries);
4019 		qdf_mem_free(hw_mode_trans_ind);
4020 		return -EINVAL;
4021 	}
4022 	wma_process_pdev_hw_mode_trans_ind(wma, wmi_event, vdev_mac_entry,
4023 		hw_mode_trans_ind);
4024 	/* Pass the message to PE */
4025 	wma_send_msg(wma, SIR_HAL_PDEV_HW_MODE_TRANS_IND,
4026 		     (void *) hw_mode_trans_ind, 0);
4027 
4028 	return QDF_STATUS_SUCCESS;
4029 }
4030 
4031 /**
4032  * wma_pdev_set_dual_mode_config_resp_evt_handler() - Dual mode evt handler
4033  * @handle: WMI handle
4034  * @event:  Event received from FW
4035  * @len:    Length of the event
4036  *
4037  * Notifies the host driver of the completion or failure of a
4038  * WMI_PDEV_SET_MAC_CONFIG_CMDID command. This event would be returned to
4039  * the host driver once the firmware has completed a reconfiguration of the Scan
4040  * and FW mode configuration. This changes could include entering or leaving a
4041  * dual mac configuration for either scan and/or more permanent firmware mode.
4042  *
4043  * Return: Success on receiving valid params from FW
4044  */
4045 static int wma_pdev_set_dual_mode_config_resp_evt_handler(void *handle,
4046 		uint8_t *event,
4047 		uint32_t len)
4048 {
4049 	WMI_PDEV_SET_MAC_CONFIG_RESP_EVENTID_param_tlvs *param_buf;
4050 	wmi_pdev_set_mac_config_response_event_fixed_param *wmi_event;
4051 	tp_wma_handle wma = (tp_wma_handle) handle;
4052 	struct sir_dual_mac_config_resp *dual_mac_cfg_resp;
4053 
4054 	if (!wma) {
4055 		WMA_LOGE("%s: Invalid WMA handle", __func__);
4056 		/* Since the WMA handle is NULL, we cannot send resp to LIM.
4057 		 * So, returning from here.
4058 		 */
4059 		return QDF_STATUS_E_NULL_VALUE;
4060 	}
4061 	wma_release_wakelock(&wma->wmi_cmd_rsp_wake_lock);
4062 	dual_mac_cfg_resp = qdf_mem_malloc(sizeof(*dual_mac_cfg_resp));
4063 	if (!dual_mac_cfg_resp) {
4064 		WMA_LOGE("%s: Memory allocation failed", __func__);
4065 		/* Since the mem alloc failed, we cannot send resp to LIM.
4066 		 * So, returning from here.
4067 		 */
4068 		return QDF_STATUS_E_NULL_VALUE;
4069 	}
4070 
4071 	param_buf = (WMI_PDEV_SET_MAC_CONFIG_RESP_EVENTID_param_tlvs *)
4072 		event;
4073 	if (!param_buf) {
4074 		WMA_LOGE("%s: Invalid event", __func__);
4075 		goto fail;
4076 	}
4077 
4078 	wmi_event = param_buf->fixed_param;
4079 	WMA_LOGD("%s: status:%d", __func__, wmi_event->status);
4080 	dual_mac_cfg_resp->status = wmi_event->status;
4081 
4082 	if (SET_HW_MODE_STATUS_OK == dual_mac_cfg_resp->status) {
4083 		policy_mgr_update_dbs_scan_config(wma->psoc);
4084 		policy_mgr_update_dbs_fw_config(wma->psoc);
4085 	}
4086 
4087 	/* Pass the message to PE */
4088 	wma_send_msg(wma, SIR_HAL_PDEV_MAC_CFG_RESP,
4089 			(void *) dual_mac_cfg_resp, 0);
4090 
4091 	return QDF_STATUS_SUCCESS;
4092 
4093 fail:
4094 	WMA_LOGE("%s: Sending fail response to LIM", __func__);
4095 	dual_mac_cfg_resp->status = SET_HW_MODE_STATUS_ECANCELED;
4096 	wma_send_msg(wma, SIR_HAL_PDEV_MAC_CFG_RESP,
4097 			(void *) dual_mac_cfg_resp, 0);
4098 
4099 	return QDF_STATUS_E_FAILURE;
4100 
4101 }
4102 
4103 /**
4104  * wma_send_time_stamp_sync_cmd() - timer callback send timestamp to
4105  * firmware to sync with host.
4106  * @wma_handle: wma handle
4107  *
4108  * Return: void
4109  */
4110 static void wma_send_time_stamp_sync_cmd(void *data)
4111 {
4112 	tp_wma_handle wma_handle;
4113 	QDF_STATUS qdf_status;
4114 
4115 	wma_handle = (tp_wma_handle) data;
4116 
4117 	wmi_send_time_stamp_sync_cmd_tlv(wma_handle->wmi_handle);
4118 
4119 	/* Start/Restart the timer */
4120 	qdf_status = qdf_mc_timer_start(&wma_handle->wma_fw_time_sync_timer,
4121 				       WMA_FW_TIME_SYNC_TIMER);
4122 	if (QDF_IS_STATUS_ERROR(qdf_status))
4123 		WMA_LOGE("Failed to start the firmware time sync timer");
4124 }
4125 
4126 #ifdef WLAN_CONV_SPECTRAL_ENABLE
4127 static void wma_register_spectral_cmds(tp_wma_handle wma_handle)
4128 {
4129 	struct wmi_spectral_cmd_ops cmd_ops;
4130 
4131 	cmd_ops.wmi_spectral_configure_cmd_send =
4132 			wmi_unified_vdev_spectral_configure_cmd_send;
4133 	cmd_ops.wmi_spectral_enable_cmd_send =
4134 			wmi_unified_vdev_spectral_enable_cmd_send;
4135 	wlan_register_wmi_spectral_cmd_ops(wma_handle->pdev, &cmd_ops);
4136 }
4137 #else
4138 static void wma_register_spectral_cmds(tp_wma_handle wma_handle)
4139 {
4140 }
4141 #endif
4142 /**
4143  * wma_start() - wma start function.
4144  *               Initialize event handlers and timers.
4145  *
4146  * Return: 0 on success, QDF Error on failure
4147  */
4148 QDF_STATUS wma_start(void)
4149 {
4150 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
4151 	tp_wma_handle wma_handle;
4152 	int status;
4153 	struct wmi_unified *wmi_handle;
4154 
4155 	WMA_LOGD("%s: Enter", __func__);
4156 
4157 	wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
4158 	/* validate the wma_handle */
4159 	if (NULL == wma_handle) {
4160 		WMA_LOGE("%s: Invalid wma handle", __func__);
4161 		qdf_status = QDF_STATUS_E_INVAL;
4162 		goto end;
4163 	}
4164 
4165 	wmi_handle = get_wmi_unified_hdl_from_psoc(wma_handle->psoc);
4166 	if (!wmi_handle) {
4167 		WMA_LOGE("%s: Invalid wmi handle", __func__);
4168 		qdf_status = QDF_STATUS_E_INVAL;
4169 		goto end;
4170 	}
4171 
4172 	status = wmi_unified_register_event_handler(wmi_handle,
4173 						    wmi_roam_event_id,
4174 						    wma_roam_event_callback,
4175 						    WMA_RX_SERIALIZER_CTX);
4176 	if (0 != status) {
4177 		WMA_LOGE("%s: Failed to register Roam callback", __func__);
4178 		qdf_status = QDF_STATUS_E_FAILURE;
4179 		goto end;
4180 	}
4181 
4182 	status = wmi_unified_register_event_handler(wmi_handle,
4183 						    wmi_wow_wakeup_host_event_id,
4184 						    wma_wow_wakeup_host_event,
4185 						    WMA_RX_TASKLET_CTX);
4186 	if (status) {
4187 		WMA_LOGE("%s: Failed to register wow wakeup host event handler",
4188 			 __func__);
4189 		qdf_status = QDF_STATUS_E_FAILURE;
4190 		goto end;
4191 	}
4192 
4193 	if (wma_d0_wow_is_supported()) {
4194 		status = wmi_unified_register_event_handler(
4195 				wmi_handle,
4196 				wmi_d0_wow_disable_ack_event_id,
4197 				wma_d0_wow_disable_ack_event,
4198 				WMA_RX_TASKLET_CTX);
4199 		if (status) {
4200 			WMA_LOGE("%s: Failed to register d0wow disable ack"
4201 				 " event handler", __func__);
4202 			qdf_status = QDF_STATUS_E_FAILURE;
4203 			goto end;
4204 		}
4205 	}
4206 
4207 	status = wmi_unified_register_event_handler(wmi_handle,
4208 				wmi_pdev_resume_event_id,
4209 				wma_pdev_resume_event_handler,
4210 				WMA_RX_TASKLET_CTX);
4211 	if (status) {
4212 		WMA_LOGE("%s: Failed to register PDEV resume event handler",
4213 			 __func__);
4214 		qdf_status = QDF_STATUS_E_FAILURE;
4215 		goto end;
4216 	}
4217 #if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || \
4218 	defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(CONFIG_HL_SUPPORT)
4219 	WMA_LOGD("MCC TX Pause Event Handler register");
4220 	status = wmi_unified_register_event_handler(wmi_handle,
4221 					wmi_tx_pause_event_id,
4222 					wma_mcc_vdev_tx_pause_evt_handler,
4223 					WMA_RX_TASKLET_CTX);
4224 #endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
4225 
4226 #ifdef FEATURE_WLAN_AUTO_SHUTDOWN
4227 	WMA_LOGD("Registering auto shutdown handler");
4228 	status = wmi_unified_register_event_handler(wmi_handle,
4229 						wmi_host_auto_shutdown_event_id,
4230 						wma_auto_shutdown_event_handler,
4231 						WMA_RX_SERIALIZER_CTX);
4232 	if (status) {
4233 		WMA_LOGE("Failed to register WMI Auto shutdown event handler");
4234 		qdf_status = QDF_STATUS_E_FAILURE;
4235 		goto end;
4236 	}
4237 #endif /* FEATURE_WLAN_AUTO_SHUTDOWN */
4238 	status = wmi_unified_register_event_handler(wmi_handle,
4239 						wmi_thermal_mgmt_event_id,
4240 						wma_thermal_mgmt_evt_handler,
4241 						WMA_RX_SERIALIZER_CTX);
4242 	if (status) {
4243 		WMA_LOGE("Failed to register thermal mitigation event cb");
4244 		qdf_status = QDF_STATUS_E_FAILURE;
4245 		goto end;
4246 	}
4247 
4248 	status = wma_ocb_register_callbacks(wma_handle);
4249 	if (!QDF_IS_STATUS_SUCCESS(status)) {
4250 		WMA_LOGE("Failed to register OCB callbacks");
4251 		qdf_status = QDF_STATUS_E_FAILURE;
4252 		goto end;
4253 	}
4254 
4255 	qdf_status = QDF_STATUS_SUCCESS;
4256 
4257 #ifdef QCA_WIFI_FTM
4258 	/*
4259 	 * Tx mgmt attach requires TXRX context which is not created
4260 	 * in FTM mode. So skip the TX mgmt attach.
4261 	 */
4262 	if (cds_get_conparam() == QDF_GLOBAL_FTM_MODE)
4263 		goto end;
4264 #endif /* QCA_WIFI_FTM */
4265 
4266 	if (wmi_service_enabled(wmi_handle, wmi_service_rmc)) {
4267 
4268 		WMA_LOGD("FW supports cesium network, registering event handlers");
4269 
4270 		status = wmi_unified_register_event_handler(
4271 					wmi_handle,
4272 					wmi_peer_info_event_id,
4273 					wma_ibss_peer_info_event_handler,
4274 					WMA_RX_SERIALIZER_CTX);
4275 		if (status) {
4276 			WMA_LOGE("Failed to register ibss peer info event cb");
4277 			qdf_status = QDF_STATUS_E_FAILURE;
4278 			goto end;
4279 		}
4280 		status = wmi_unified_register_event_handler(
4281 					wmi_handle,
4282 					wmi_peer_tx_fail_cnt_thr_event_id,
4283 					wma_fast_tx_fail_event_handler,
4284 					WMA_RX_SERIALIZER_CTX);
4285 		if (status) {
4286 			WMA_LOGE("Failed to register peer fast tx failure event cb");
4287 			qdf_status = QDF_STATUS_E_FAILURE;
4288 			goto end;
4289 		}
4290 	} else {
4291 		WMA_LOGE("Target does not support cesium network");
4292 	}
4293 
4294 	qdf_status = wma_tx_attach(wma_handle);
4295 	if (qdf_status != QDF_STATUS_SUCCESS) {
4296 		WMA_LOGE("%s: Failed to register tx management", __func__);
4297 		goto end;
4298 	}
4299 
4300 	if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE) {
4301 		/* Initialize firmware time stamp sync timer */
4302 		qdf_status =
4303 			qdf_mc_timer_init(&wma_handle->wma_fw_time_sync_timer,
4304 					  QDF_TIMER_TYPE_SW,
4305 					  wma_send_time_stamp_sync_cmd,
4306 					  wma_handle);
4307 		if (QDF_IS_STATUS_ERROR(qdf_status))
4308 			WMA_LOGE(FL("Failed to initialize firmware time stamp sync timer"));
4309 		/* Start firmware time stamp sync timer */
4310 		wma_send_time_stamp_sync_cmd(wma_handle);
4311 	}
4312 
4313 	/* Initialize log completion timeout */
4314 	qdf_status = qdf_mc_timer_init(&wma_handle->log_completion_timer,
4315 			QDF_TIMER_TYPE_SW,
4316 			wma_log_completion_timeout,
4317 			wma_handle);
4318 	if (qdf_status != QDF_STATUS_SUCCESS) {
4319 		WMA_LOGE("Failed to initialize log completion timeout");
4320 		goto end;
4321 	}
4322 
4323 	status = wma_fips_register_event_handlers(wma_handle);
4324 	if (!QDF_IS_STATUS_SUCCESS(status)) {
4325 		WMA_LOGE("Failed to register FIPS event handler");
4326 		qdf_status = QDF_STATUS_E_FAILURE;
4327 		goto end;
4328 	}
4329 
4330 	status = wma_sar_register_event_handlers(wma_handle);
4331 	if (!QDF_IS_STATUS_SUCCESS(status)) {
4332 		WMA_LOGE("Failed to register SAR event handlers");
4333 		qdf_status = QDF_STATUS_E_FAILURE;
4334 		goto end;
4335 	}
4336 
4337 	/* Initialize the get temperature event handler */
4338 	status = wmi_unified_register_event_handler(wmi_handle,
4339 					wmi_pdev_temperature_event_id,
4340 					wma_pdev_temperature_evt_handler,
4341 					WMA_RX_SERIALIZER_CTX);
4342 	if (status != QDF_STATUS_SUCCESS) {
4343 		WMA_LOGE("Failed to register get_temperature event cb");
4344 		qdf_status = QDF_STATUS_E_FAILURE;
4345 		goto end;
4346 	}
4347 
4348 	status = wmi_unified_register_event_handler(wmi_handle,
4349 						wmi_vdev_tsf_report_event_id,
4350 						wma_vdev_tsf_handler,
4351 						WMA_RX_SERIALIZER_CTX);
4352 	if (0 != status) {
4353 		WMA_LOGE("%s: Failed to register tsf callback", __func__);
4354 		qdf_status = QDF_STATUS_E_FAILURE;
4355 		goto end;
4356 	}
4357 
4358 	/* Initialize the wma_pdev_set_hw_mode_resp_evt_handler event handler */
4359 	status = wmi_unified_register_event_handler(wmi_handle,
4360 			wmi_pdev_set_hw_mode_rsp_event_id,
4361 			wma_pdev_set_hw_mode_resp_evt_handler,
4362 			WMA_RX_SERIALIZER_CTX);
4363 	if (status != QDF_STATUS_SUCCESS) {
4364 		WMA_LOGE("Failed to register set hw mode resp event cb");
4365 		qdf_status = QDF_STATUS_E_FAILURE;
4366 		goto end;
4367 	}
4368 
4369 	/* Initialize the WMI_SOC_HW_MODE_TRANSITION_EVENTID event handler */
4370 	status = wmi_unified_register_event_handler(wmi_handle,
4371 			wmi_pdev_hw_mode_transition_event_id,
4372 			wma_pdev_hw_mode_transition_evt_handler,
4373 			WMA_RX_SERIALIZER_CTX);
4374 	if (status != QDF_STATUS_SUCCESS) {
4375 		WMA_LOGE("Failed to register hw mode transition event cb");
4376 		qdf_status = QDF_STATUS_E_FAILURE;
4377 		goto end;
4378 	}
4379 
4380 	/* Initialize the set dual mac configuration event handler */
4381 	status = wmi_unified_register_event_handler(wmi_handle,
4382 			wmi_pdev_set_mac_config_resp_event_id,
4383 			wma_pdev_set_dual_mode_config_resp_evt_handler,
4384 			WMA_RX_SERIALIZER_CTX);
4385 	if (status != QDF_STATUS_SUCCESS) {
4386 		WMA_LOGE("Failed to register hw mode transition event cb");
4387 		qdf_status = QDF_STATUS_E_FAILURE;
4388 		goto end;
4389 	}
4390 
4391 	status = wmi_unified_register_event_handler(wmi_handle,
4392 			wmi_coex_bt_activity_event_id,
4393 			wma_wlan_bt_activity_evt_handler,
4394 			WMA_RX_SERIALIZER_CTX);
4395 	if (!QDF_IS_STATUS_SUCCESS(status)) {
4396 		WMA_LOGE("Failed to register coex bt activity event handler");
4397 		qdf_status = QDF_STATUS_E_FAILURE;
4398 		goto end;
4399 	}
4400 	wma_register_spectral_cmds(wma_handle);
4401 
4402 end:
4403 	WMA_LOGD("%s: Exit", __func__);
4404 	return qdf_status;
4405 }
4406 
4407 /**
4408  * wma_stop() - wma stop function.
4409  *              cleanup timers and suspend target.
4410  * @reason: reason for wma_stop.
4411  *
4412  * Return: 0 on success, QDF Error on failure
4413  */
4414 QDF_STATUS wma_stop(uint8_t reason)
4415 {
4416 	tp_wma_handle wma_handle;
4417 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
4418 	int i;
4419 
4420 	wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
4421 	WMA_LOGD("%s: Enter", __func__);
4422 	/* validate the wma_handle */
4423 	if (NULL == wma_handle) {
4424 		WMA_LOGE("%s: Invalid handle", __func__);
4425 		qdf_status = QDF_STATUS_E_INVAL;
4426 		goto end;
4427 	}
4428 #ifdef QCA_WIFI_FTM
4429 	/*
4430 	 * Tx mgmt detach requires TXRX context which is not created
4431 	 * in FTM mode. So skip the TX mgmt detach.
4432 	 */
4433 	if (cds_get_conparam() == QDF_GLOBAL_FTM_MODE) {
4434 		qdf_status = QDF_STATUS_SUCCESS;
4435 		goto end;
4436 	}
4437 #endif /* QCA_WIFI_FTM */
4438 
4439 	if (wma_handle->ack_work_ctx) {
4440 		cds_flush_work(&wma_handle->ack_work_ctx->ack_cmp_work);
4441 		qdf_mem_free(wma_handle->ack_work_ctx);
4442 		wma_handle->ack_work_ctx = NULL;
4443 	}
4444 
4445 	/* Destroy the timer for log completion */
4446 	qdf_status = qdf_mc_timer_destroy(&wma_handle->log_completion_timer);
4447 	if (qdf_status != QDF_STATUS_SUCCESS)
4448 		WMA_LOGE("Failed to destroy the log completion timer");
4449 	/* clean up ll-queue for all vdev */
4450 	for (i = 0; i < wma_handle->max_bssid; i++) {
4451 		if (wma_handle->interfaces[i].handle &&
4452 				wma_is_vdev_up(i)) {
4453 			cdp_fc_vdev_flush(
4454 				cds_get_context(QDF_MODULE_ID_SOC),
4455 				wma_handle->
4456 				interfaces[i].handle);
4457 		}
4458 	}
4459 
4460 	if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE) {
4461 		/* Destroy firmware time stamp sync timer */
4462 		qdf_status = qdf_mc_timer_destroy(
4463 					&wma_handle->wma_fw_time_sync_timer);
4464 		if (QDF_IS_STATUS_ERROR(qdf_status))
4465 			WMA_LOGE(FL("Failed to destroy the fw time sync timer"));
4466 	}
4467 
4468 	qdf_status = wma_tx_detach(wma_handle);
4469 	if (qdf_status != QDF_STATUS_SUCCESS) {
4470 		WMA_LOGE("%s: Failed to deregister tx management", __func__);
4471 		goto end;
4472 	}
4473 
4474 end:
4475 	WMA_LOGD("%s: Exit", __func__);
4476 	return qdf_status;
4477 }
4478 
4479 /**
4480  * wma_wmi_service_close() - close wma wmi service interface.
4481  *
4482  * Return: 0 on success, QDF Error on failure
4483  */
4484 QDF_STATUS wma_wmi_service_close(void)
4485 {
4486 	void *cds_ctx;
4487 	tp_wma_handle wma_handle;
4488 	struct beacon_info *bcn;
4489 	int i;
4490 
4491 	WMA_LOGD("%s: Enter", __func__);
4492 
4493 	cds_ctx = cds_get_global_context();
4494 	if (!cds_ctx) {
4495 		WMA_LOGE("%s: Invalid CDS context", __func__);
4496 		return QDF_STATUS_E_INVAL;
4497 	}
4498 
4499 	wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
4500 
4501 	/* validate the wma_handle */
4502 	if (NULL == wma_handle) {
4503 		WMA_LOGE("%s: Invalid wma handle", __func__);
4504 		return QDF_STATUS_E_INVAL;
4505 	}
4506 
4507 	/* validate the wmi handle */
4508 	if (NULL == wma_handle->wmi_handle) {
4509 		WMA_LOGE("%s: Invalid wmi handle", __func__);
4510 		return QDF_STATUS_E_INVAL;
4511 	}
4512 
4513 	/* dettach the wmi serice */
4514 	WMA_LOGD("calling wmi_unified_detach");
4515 	wmi_unified_detach(wma_handle->wmi_handle);
4516 	wma_handle->wmi_handle = NULL;
4517 
4518 	for (i = 0; i < wma_handle->max_bssid; i++) {
4519 		bcn = wma_handle->interfaces[i].beacon;
4520 
4521 		if (bcn) {
4522 			if (bcn->dma_mapped)
4523 				qdf_nbuf_unmap_single(wma_handle->qdf_dev,
4524 					bcn->buf, QDF_DMA_TO_DEVICE);
4525 			qdf_nbuf_free(bcn->buf);
4526 			qdf_mem_free(bcn);
4527 			wma_handle->interfaces[i].beacon = NULL;
4528 		}
4529 
4530 		if (wma_handle->interfaces[i].handle) {
4531 			qdf_mem_free(wma_handle->interfaces[i].handle);
4532 			wma_handle->interfaces[i].handle = NULL;
4533 		}
4534 
4535 		if (wma_handle->interfaces[i].addBssStaContext) {
4536 			qdf_mem_free(wma_handle->
4537 				     interfaces[i].addBssStaContext);
4538 			wma_handle->interfaces[i].addBssStaContext = NULL;
4539 		}
4540 
4541 		if (wma_handle->interfaces[i].del_staself_req) {
4542 			qdf_mem_free(wma_handle->interfaces[i].del_staself_req);
4543 			wma_handle->interfaces[i].del_staself_req = NULL;
4544 		}
4545 
4546 		if (wma_handle->interfaces[i].stats_rsp) {
4547 			qdf_mem_free(wma_handle->interfaces[i].stats_rsp);
4548 			wma_handle->interfaces[i].stats_rsp = NULL;
4549 		}
4550 
4551 		if (wma_handle->interfaces[i].psnr_req) {
4552 			qdf_mem_free(wma_handle->
4553 				     interfaces[i].psnr_req);
4554 			wma_handle->interfaces[i].psnr_req = NULL;
4555 		}
4556 
4557 		if (wma_handle->interfaces[i].rcpi_req) {
4558 			qdf_mem_free(wma_handle->
4559 				     interfaces[i].rcpi_req);
4560 			wma_handle->interfaces[i].rcpi_req = NULL;
4561 		}
4562 
4563 		if (wma_handle->interfaces[i].roam_synch_frame_ind.
4564 		    bcn_probe_rsp) {
4565 			qdf_mem_free(wma_handle->interfaces[i].
4566 			      roam_synch_frame_ind.bcn_probe_rsp);
4567 			wma_handle->interfaces[i].roam_synch_frame_ind.
4568 				     bcn_probe_rsp = NULL;
4569 		}
4570 
4571 		if (wma_handle->interfaces[i].roam_synch_frame_ind.
4572 		    reassoc_req) {
4573 			qdf_mem_free(wma_handle->interfaces[i].
4574 				     roam_synch_frame_ind.reassoc_req);
4575 			wma_handle->interfaces[i].roam_synch_frame_ind.
4576 				     reassoc_req = NULL;
4577 		}
4578 
4579 		if (wma_handle->interfaces[i].roam_synch_frame_ind.
4580 		    reassoc_rsp) {
4581 			qdf_mem_free(wma_handle->interfaces[i].
4582 				     roam_synch_frame_ind.reassoc_rsp);
4583 			wma_handle->interfaces[i].roam_synch_frame_ind.
4584 				     reassoc_rsp = NULL;
4585 		}
4586 
4587 		wma_vdev_deinit(&wma_handle->interfaces[i]);
4588 	}
4589 
4590 	qdf_mem_free(wma_handle->interfaces);
4591 
4592 	/* free the wma_handle */
4593 	cds_free_context(QDF_MODULE_ID_WMA, wma_handle);
4594 
4595 	qdf_mem_free(((struct cds_context *) cds_ctx)->cfg_ctx);
4596 	WMA_LOGD("%s: Exit", __func__);
4597 	return QDF_STATUS_SUCCESS;
4598 }
4599 
4600 /**
4601  * wma_wmi_work_close() - close the work queue items associated with WMI
4602  *
4603  * This function closes work queue items associated with WMI, but not fully
4604  * closes WMI service.
4605  *
4606  * Return: QDF_STATUS_SUCCESS if work close is successful. Otherwise
4607  *	proper error codes.
4608  */
4609 QDF_STATUS wma_wmi_work_close(void)
4610 {
4611 	tp_wma_handle wma_handle;
4612 
4613 	WMA_LOGD("%s: Enter", __func__);
4614 
4615 	wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
4616 
4617 	/* validate the wma_handle */
4618 	if (NULL == wma_handle) {
4619 		WMA_LOGE("%s: Invalid wma handle", __func__);
4620 		return QDF_STATUS_E_INVAL;
4621 	}
4622 
4623 	/* validate the wmi handle */
4624 	if (NULL == wma_handle->wmi_handle) {
4625 		WMA_LOGE("%s: Invalid wmi handle", __func__);
4626 		return QDF_STATUS_E_INVAL;
4627 	}
4628 
4629 	/* remove the wmi work */
4630 	WMA_LOGD("calling wmi_unified_remove_work");
4631 	wmi_unified_remove_work(wma_handle->wmi_handle);
4632 
4633 	return QDF_STATUS_SUCCESS;
4634 }
4635 
4636 /**
4637  * wma_close() - wma close function.
4638  *               cleanup resources attached with wma.
4639  *
4640  * Return: 0 on success, QDF Error on failure
4641  */
4642 QDF_STATUS wma_close(void)
4643 {
4644 	tp_wma_handle wma_handle;
4645 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
4646 
4647 	WMA_LOGD("%s: Enter", __func__);
4648 
4649 	wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
4650 
4651 	/* validate the wma_handle */
4652 	if (NULL == wma_handle) {
4653 		WMA_LOGE("%s: Invalid wma handle", __func__);
4654 		return QDF_STATUS_E_INVAL;
4655 	}
4656 
4657 	/* validate the wmi handle */
4658 	if (NULL == wma_handle->wmi_handle) {
4659 		WMA_LOGE("%s: Invalid wmi handle", __func__);
4660 		return QDF_STATUS_E_INVAL;
4661 	}
4662 
4663 	/* Free DBS list */
4664 	if (wma_handle->hw_mode.hw_mode_list) {
4665 		qdf_mem_free(wma_handle->hw_mode.hw_mode_list);
4666 		wma_handle->hw_mode.hw_mode_list = NULL;
4667 		WMA_LOGD("%s: DBS list is freed", __func__);
4668 	}
4669 
4670 	if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE) {
4671 #ifdef FEATURE_WLAN_EXTSCAN
4672 		qdf_wake_lock_destroy(&wma_handle->extscan_wake_lock);
4673 #endif /* FEATURE_WLAN_EXTSCAN */
4674 		qdf_wake_lock_destroy(&wma_handle->wow_wake_lock);
4675 		qdf_wake_lock_destroy(&wma_handle->wow_auth_req_wl);
4676 		qdf_wake_lock_destroy(&wma_handle->wow_assoc_req_wl);
4677 		qdf_wake_lock_destroy(&wma_handle->wow_deauth_rec_wl);
4678 		qdf_wake_lock_destroy(&wma_handle->wow_disassoc_rec_wl);
4679 		qdf_wake_lock_destroy(&wma_handle->wow_ap_assoc_lost_wl);
4680 		qdf_wake_lock_destroy(&wma_handle->wow_auto_shutdown_wl);
4681 		qdf_wake_lock_destroy(&wma_handle->roam_ho_wl);
4682 	}
4683 
4684 	/* unregister Firmware debug log */
4685 	qdf_status = dbglog_deinit(wma_handle->wmi_handle);
4686 	if (qdf_status != QDF_STATUS_SUCCESS)
4687 		WMA_LOGE("%s: dbglog_deinit failed", __func__);
4688 
4689 	qdf_status = qdf_mc_timer_destroy(&wma_handle->service_ready_ext_timer);
4690 	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
4691 		WMA_LOGE("%s: Failed to destroy service ready ext event timer",
4692 			__func__);
4693 
4694 	qdf_event_destroy(&wma_handle->target_suspend);
4695 	qdf_event_destroy(&wma_handle->wma_resume_event);
4696 	qdf_event_destroy(&wma_handle->runtime_suspend);
4697 	qdf_event_destroy(&wma_handle->recovery_event);
4698 	qdf_event_destroy(&wma_handle->tx_frm_download_comp_event);
4699 	qdf_event_destroy(&wma_handle->tx_queue_empty_event);
4700 	wma_cleanup_vdev_resp_queue(wma_handle);
4701 	wma_cleanup_hold_req(wma_handle);
4702 	qdf_wake_lock_destroy(&wma_handle->wmi_cmd_rsp_wake_lock);
4703 	qdf_runtime_lock_deinit(&wma_handle->wmi_cmd_rsp_runtime_lock);
4704 	qdf_spinlock_destroy(&wma_handle->vdev_respq_lock);
4705 	qdf_spinlock_destroy(&wma_handle->wma_hold_req_q_lock);
4706 
4707 	if (NULL != wma_handle->pGetRssiReq) {
4708 		qdf_mem_free(wma_handle->pGetRssiReq);
4709 		wma_handle->pGetRssiReq = NULL;
4710 	}
4711 
4712 	wma_unified_radio_tx_mem_free(wma_handle);
4713 
4714 	if (wma_handle->pdev) {
4715 		wlan_objmgr_pdev_release_ref(wma_handle->pdev,
4716 				WLAN_LEGACY_WMA_ID);
4717 		wma_handle->pdev = NULL;
4718 	}
4719 
4720 	pmo_unregister_pause_bitmap_notifier(wma_handle->psoc,
4721 		wma_vdev_update_pause_bitmap);
4722 	pmo_unregister_get_pause_bitmap(wma_handle->psoc,
4723 		wma_vdev_get_pause_bitmap);
4724 	pmo_unregister_is_device_in_low_pwr_mode(wma_handle->psoc,
4725 		wma_vdev_is_device_in_low_pwr_mode);
4726 
4727 	target_if_free_psoc_tgt_info(wma_handle->psoc);
4728 
4729 	wlan_objmgr_psoc_release_ref(wma_handle->psoc, WLAN_LEGACY_WMA_ID);
4730 	wma_handle->psoc = NULL;
4731 	target_if_close();
4732 	wma_target_if_close(wma_handle);
4733 
4734 	WMA_LOGD("%s: Exit", __func__);
4735 	return QDF_STATUS_SUCCESS;
4736 }
4737 
4738 /**
4739  * wma_update_fw_config() - update fw configuration
4740  * @psoc: psoc to query configuration from
4741  * @tgt_hdl: target capability info
4742  *
4743  * Return: none
4744  */
4745 static void wma_update_fw_config(struct wlan_objmgr_psoc *psoc,
4746 				 struct target_psoc_info *tgt_hdl)
4747 {
4748 	target_resource_config *cfg = &tgt_hdl->info.wlan_res_cfg;
4749 
4750 	/* Override the no. of max fragments as per platform configuration */
4751 	cfg->max_frag_entries =	QDF_MIN(QCA_OL_11AC_TX_MAX_FRAGS,
4752 					target_if_get_max_frag_entry(tgt_hdl));
4753 	target_if_set_max_frag_entry(tgt_hdl, cfg->max_frag_entries);
4754 
4755 	cfg->num_wow_filters = ucfg_pmo_get_num_wow_filters(psoc);
4756 	cfg->bpf_instruction_size = ucfg_pmo_get_apf_instruction_size(psoc);
4757 	cfg->num_packet_filters = ucfg_pmo_get_num_packet_filters(psoc);
4758 }
4759 
4760 /**
4761  * wma_set_tx_partition_base() - set TX MSDU ID partition base for IPA
4762  * @value:  TX MSDU ID partition base
4763  *
4764  * Return: none
4765  */
4766 #ifdef IPA_OFFLOAD
4767 static void wma_set_tx_partition_base(uint32_t value)
4768 {
4769 	cdp_ipa_set_uc_tx_partition_base(
4770 			cds_get_context(QDF_MODULE_ID_SOC),
4771 			(struct cdp_cfg *)cds_get_context(QDF_MODULE_ID_CFG),
4772 			value);
4773 	WMA_LOGD("%s: TX_MSDU_ID_PARTITION=%d", __func__,
4774 			value);
4775 }
4776 #else
4777 static void wma_set_tx_partition_base(uint32_t value)
4778 {
4779 }
4780 #endif
4781 
4782 /**
4783  * wma_update_target_services() - update target services from wma handle
4784  * @wmi_handle: Unified wmi handle
4785  * @cfg: target services
4786  *
4787  * Return: none
4788  */
4789 static inline void wma_update_target_services(struct wmi_unified *wmi_handle,
4790 					      struct wma_tgt_services *cfg)
4791 {
4792 	/* STA power save */
4793 	cfg->sta_power_save = wmi_service_enabled(wmi_handle,
4794 						     wmi_service_sta_pwrsave);
4795 
4796 	/* Enable UAPSD */
4797 	cfg->uapsd = wmi_service_enabled(wmi_handle,
4798 					    wmi_service_ap_uapsd);
4799 
4800 	/* Update AP DFS service */
4801 	cfg->ap_dfs = wmi_service_enabled(wmi_handle,
4802 					     wmi_service_ap_dfs);
4803 
4804 	/* Enable 11AC */
4805 	cfg->en_11ac = wmi_service_enabled(wmi_handle,
4806 					      wmi_service_11ac);
4807 	if (cfg->en_11ac)
4808 		g_fw_wlan_feat_caps |= (1 << DOT11AC);
4809 
4810 	/* Proactive ARP response */
4811 	g_fw_wlan_feat_caps |= (1 << WLAN_PERIODIC_TX_PTRN);
4812 
4813 	/* Enable WOW */
4814 	g_fw_wlan_feat_caps |= (1 << WOW);
4815 
4816 	/* ARP offload */
4817 	cfg->arp_offload = wmi_service_enabled(wmi_handle,
4818 						  wmi_service_arpns_offload);
4819 
4820 	/* Adaptive early-rx */
4821 	cfg->early_rx = wmi_service_enabled(wmi_handle,
4822 					       wmi_service_early_rx);
4823 #ifdef FEATURE_WLAN_SCAN_PNO
4824 	/* PNO offload */
4825 	if (wmi_service_enabled(wmi_handle, wmi_service_nlo)) {
4826 		cfg->pno_offload = true;
4827 		g_fw_wlan_feat_caps |= (1 << PNO);
4828 	}
4829 #endif /* FEATURE_WLAN_SCAN_PNO */
4830 
4831 #ifdef FEATURE_WLAN_EXTSCAN
4832 	if (wmi_service_enabled(wmi_handle, wmi_service_extscan))
4833 		g_fw_wlan_feat_caps |= (1 << EXTENDED_SCAN);
4834 #endif /* FEATURE_WLAN_EXTSCAN */
4835 	cfg->lte_coex_ant_share = wmi_service_enabled(wmi_handle,
4836 					wmi_service_lte_ant_share_support);
4837 #ifdef FEATURE_WLAN_TDLS
4838 	/* Enable TDLS */
4839 	if (wmi_service_enabled(wmi_handle, wmi_service_tdls)) {
4840 		cfg->en_tdls = 1;
4841 		g_fw_wlan_feat_caps |= (1 << TDLS);
4842 	}
4843 	/* Enable advanced TDLS features */
4844 	if (wmi_service_enabled(wmi_handle, wmi_service_tdls_offchan)) {
4845 		cfg->en_tdls_offchan = 1;
4846 		g_fw_wlan_feat_caps |= (1 << TDLS_OFF_CHANNEL);
4847 	}
4848 
4849 	cfg->en_tdls_uapsd_buf_sta =
4850 		wmi_service_enabled(wmi_handle,
4851 				       wmi_service_tdls_uapsd_buffer_sta);
4852 	cfg->en_tdls_uapsd_sleep_sta =
4853 		wmi_service_enabled(wmi_handle,
4854 				       wmi_service_tdls_uapsd_sleep_sta);
4855 #endif /* FEATURE_WLAN_TDLS */
4856 	if (wmi_service_enabled
4857 		    (wmi_handle, wmi_service_beacon_offload))
4858 		cfg->beacon_offload = true;
4859 	if (wmi_service_enabled
4860 		    (wmi_handle, wmi_service_sta_pmf_offload))
4861 		cfg->pmf_offload = true;
4862 #ifdef WLAN_FEATURE_ROAM_OFFLOAD
4863 	/* Enable Roam Offload */
4864 	cfg->en_roam_offload = wmi_service_enabled(wmi_handle,
4865 					      wmi_service_roam_ho_offload);
4866 #endif /* WLAN_FEATURE_ROAM_OFFLOAD */
4867 #ifdef WLAN_FEATURE_NAN
4868 	if (wmi_service_enabled(wmi_handle, wmi_service_nan))
4869 		g_fw_wlan_feat_caps |= (1 << NAN);
4870 #endif /* WLAN_FEATURE_NAN */
4871 
4872 	if (wmi_service_enabled(wmi_handle, wmi_service_rtt))
4873 		g_fw_wlan_feat_caps |= (1 << RTT);
4874 
4875 	if (wmi_service_enabled(wmi_handle,
4876 			wmi_service_tx_msdu_id_new_partition_support)) {
4877 		wma_set_tx_partition_base(HTT_TX_IPA_NEW_MSDU_ID_SPACE_BEGIN);
4878 	} else {
4879 		wma_set_tx_partition_base(HTT_TX_IPA_MSDU_ID_SPACE_BEGIN);
4880 	}
4881 
4882 	wma_he_update_tgt_services(wmi_handle, cfg);
4883 
4884 	cfg->get_peer_info_enabled =
4885 		wmi_service_enabled(wmi_handle,
4886 				       wmi_service_peer_stats_info);
4887 	if (wmi_service_enabled(wmi_handle, wmi_service_fils_support))
4888 		cfg->is_fils_roaming_supported = true;
4889 
4890 	if (wmi_service_enabled(wmi_handle, wmi_service_mawc_support))
4891 		cfg->is_fw_mawc_capable = true;
4892 
4893 	if (wmi_service_enabled(wmi_handle,
4894 				wmi_service_11k_neighbour_report_support))
4895 		cfg->is_11k_offload_supported = true;
4896 }
4897 
4898 /**
4899  * wma_update_target_ht_cap() - update ht capabality from wma handle
4900  * @tgt_hdl: pointer to structure target_psoc_info
4901  * @cfg: ht capability
4902  *
4903  * Return: none
4904  */
4905 static inline void
4906 wma_update_target_ht_cap(struct target_psoc_info *tgt_hdl,
4907 			 struct wma_tgt_ht_cap *cfg)
4908 {
4909 	int ht_cap_info;
4910 
4911 	ht_cap_info = target_if_get_ht_cap_info(tgt_hdl);
4912 	/* RX STBC */
4913 	cfg->ht_rx_stbc = !!(ht_cap_info & WMI_HT_CAP_RX_STBC);
4914 
4915 	/* TX STBC */
4916 	cfg->ht_tx_stbc = !!(ht_cap_info & WMI_HT_CAP_TX_STBC);
4917 
4918 	/* MPDU density */
4919 	cfg->mpdu_density = ht_cap_info & WMI_HT_CAP_MPDU_DENSITY;
4920 
4921 	/* HT RX LDPC */
4922 	cfg->ht_rx_ldpc = !!(ht_cap_info & WMI_HT_CAP_LDPC);
4923 
4924 	/* HT SGI */
4925 	cfg->ht_sgi_20 = !!(ht_cap_info & WMI_HT_CAP_HT20_SGI);
4926 
4927 	cfg->ht_sgi_40 = !!(ht_cap_info & WMI_HT_CAP_HT40_SGI);
4928 
4929 	/* RF chains */
4930 	cfg->num_rf_chains = target_if_get_num_rf_chains(tgt_hdl);
4931 
4932 	WMA_LOGD("%s: ht_cap_info - %x ht_rx_stbc - %d, ht_tx_stbc - %d\n"
4933 		 "mpdu_density - %d ht_rx_ldpc - %d ht_sgi_20 - %d\n"
4934 		 "ht_sgi_40 - %d num_rf_chains - %d", __func__,
4935 		 ht_cap_info,
4936 		 cfg->ht_rx_stbc, cfg->ht_tx_stbc, cfg->mpdu_density,
4937 		 cfg->ht_rx_ldpc, cfg->ht_sgi_20, cfg->ht_sgi_40,
4938 		 cfg->num_rf_chains);
4939 
4940 }
4941 
4942 /**
4943  * wma_update_target_vht_cap() - update vht capabality from wma handle
4944  * @tgt_hdl: pointer to structure target_psoc_info
4945  * @cfg: vht capabality
4946  *
4947  * Return: none
4948  */
4949 static inline void
4950 wma_update_target_vht_cap(struct target_psoc_info *tgt_hdl,
4951 			  struct wma_tgt_vht_cap *cfg)
4952 {
4953 	int vht_cap_info = target_if_get_vht_cap_info(tgt_hdl);
4954 
4955 	if (vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_11454)
4956 		cfg->vht_max_mpdu = WMI_VHT_CAP_MAX_MPDU_LEN_11454;
4957 	else if (vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_7935)
4958 		cfg->vht_max_mpdu = WMI_VHT_CAP_MAX_MPDU_LEN_7935;
4959 	else
4960 		cfg->vht_max_mpdu = 0;
4961 
4962 
4963 	if (vht_cap_info & WMI_VHT_CAP_CH_WIDTH_80P80_160MHZ) {
4964 		cfg->supp_chan_width = 1 << eHT_CHANNEL_WIDTH_80P80MHZ;
4965 		cfg->supp_chan_width |= 1 << eHT_CHANNEL_WIDTH_160MHZ;
4966 	} else if (vht_cap_info & WMI_VHT_CAP_CH_WIDTH_160MHZ) {
4967 		cfg->supp_chan_width = 1 << eHT_CHANNEL_WIDTH_160MHZ;
4968 	} else {
4969 		cfg->supp_chan_width = 1 << eHT_CHANNEL_WIDTH_80MHZ;
4970 	}
4971 
4972 	cfg->vht_rx_ldpc = vht_cap_info & WMI_VHT_CAP_RX_LDPC;
4973 
4974 	cfg->vht_short_gi_80 = vht_cap_info & WMI_VHT_CAP_SGI_80MHZ;
4975 	cfg->vht_short_gi_160 = vht_cap_info & WMI_VHT_CAP_SGI_160MHZ;
4976 
4977 	cfg->vht_tx_stbc = vht_cap_info & WMI_VHT_CAP_TX_STBC;
4978 
4979 	cfg->vht_rx_stbc =
4980 		(vht_cap_info & WMI_VHT_CAP_RX_STBC_1SS) |
4981 		(vht_cap_info & WMI_VHT_CAP_RX_STBC_2SS) |
4982 		(vht_cap_info & WMI_VHT_CAP_RX_STBC_3SS);
4983 
4984 	cfg->vht_max_ampdu_len_exp = (vht_cap_info &
4985 				      WMI_VHT_CAP_MAX_AMPDU_LEN_EXP)
4986 				     >> WMI_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIFT;
4987 
4988 	cfg->vht_su_bformer = vht_cap_info & WMI_VHT_CAP_SU_BFORMER;
4989 
4990 	cfg->vht_su_bformee = vht_cap_info & WMI_VHT_CAP_SU_BFORMEE;
4991 
4992 	cfg->vht_mu_bformer = vht_cap_info & WMI_VHT_CAP_MU_BFORMER;
4993 
4994 	cfg->vht_mu_bformee = vht_cap_info & WMI_VHT_CAP_MU_BFORMEE;
4995 
4996 	cfg->vht_txop_ps = vht_cap_info & WMI_VHT_CAP_TXOP_PS;
4997 
4998 	WMA_LOGD("%s: max_mpdu %d supp_chan_width %x rx_ldpc %x\n"
4999 		 "short_gi_80 %x tx_stbc %x rx_stbc %x txop_ps %x\n"
5000 		 "su_bformee %x mu_bformee %x max_ampdu_len_exp %d", __func__,
5001 		 cfg->vht_max_mpdu, cfg->supp_chan_width, cfg->vht_rx_ldpc,
5002 		 cfg->vht_short_gi_80, cfg->vht_tx_stbc, cfg->vht_rx_stbc,
5003 		 cfg->vht_txop_ps, cfg->vht_su_bformee, cfg->vht_mu_bformee,
5004 		 cfg->vht_max_ampdu_len_exp);
5005 }
5006 
5007 /**
5008  * wma_update_supported_bands() - update supported bands from service ready ext
5009  * @supported_bands: Supported band given by FW through service ready ext params
5010  * @new_supported_bands: New supported band which needs to be updated by
5011  *			 this API which WMA layer understands
5012  *
5013  * This API will convert FW given supported band to enum which WMA layer
5014  * understands
5015  *
5016  * Return: QDF_STATUS
5017  */
5018 static QDF_STATUS wma_update_supported_bands(
5019 			WLAN_BAND_CAPABILITY supported_bands,
5020 			WMI_PHY_CAPABILITY *new_supported_bands)
5021 {
5022 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5023 
5024 	if (!new_supported_bands) {
5025 		WMA_LOGE("%s: NULL new supported band variable", __func__);
5026 		return QDF_STATUS_E_FAILURE;
5027 	}
5028 	switch (supported_bands) {
5029 	case WLAN_2G_CAPABILITY:
5030 		*new_supported_bands |= WMI_11G_CAPABILITY;
5031 		break;
5032 	case WLAN_5G_CAPABILITY:
5033 		*new_supported_bands |= WMI_11A_CAPABILITY;
5034 		break;
5035 	default:
5036 		WMA_LOGE("%s: wrong supported band", __func__);
5037 		status = QDF_STATUS_E_FAILURE;
5038 		break;
5039 	}
5040 	return status;
5041 }
5042 
5043 /**
5044  * wma_derive_ext_ht_cap() - Derive HT caps based on given value
5045  * @ht_cap: given pointer to HT caps which needs to be updated
5046  * @tx_chain: given tx chainmask value
5047  * @rx_chain: given rx chainmask value
5048  * @value: new HT cap info provided in form of bitmask
5049  *
5050  * This function takes the value provided in form of bitmask and decodes
5051  * it. After decoding, what ever value it gets, it takes the union(max) or
5052  * intersection(min) with previously derived values.
5053  *
5054  * Return: none
5055  *
5056  */
5057 static void wma_derive_ext_ht_cap(
5058 			struct wma_tgt_ht_cap *ht_cap, uint32_t value,
5059 			uint32_t tx_chain, uint32_t rx_chain)
5060 {
5061 	struct wma_tgt_ht_cap tmp = {0};
5062 
5063 	if (ht_cap == NULL)
5064 		return;
5065 
5066 	if (!qdf_mem_cmp(ht_cap, &tmp, sizeof(struct wma_tgt_ht_cap))) {
5067 		ht_cap->ht_rx_stbc = (!!(value & WMI_HT_CAP_RX_STBC));
5068 		ht_cap->ht_tx_stbc = (!!(value & WMI_HT_CAP_TX_STBC));
5069 		ht_cap->mpdu_density = (!!(value & WMI_HT_CAP_MPDU_DENSITY));
5070 		ht_cap->ht_rx_ldpc = (!!(value & WMI_HT_CAP_RX_LDPC));
5071 		ht_cap->ht_sgi_20 = (!!(value & WMI_HT_CAP_HT20_SGI));
5072 		ht_cap->ht_sgi_40 = (!!(value & WMI_HT_CAP_HT40_SGI));
5073 		ht_cap->num_rf_chains =
5074 			QDF_MAX(wma_get_num_of_setbits_from_bitmask(tx_chain),
5075 				wma_get_num_of_setbits_from_bitmask(rx_chain));
5076 	} else {
5077 		ht_cap->ht_rx_stbc = QDF_MIN(ht_cap->ht_rx_stbc,
5078 					(!!(value & WMI_HT_CAP_RX_STBC)));
5079 		ht_cap->ht_tx_stbc = QDF_MAX(ht_cap->ht_tx_stbc,
5080 					(!!(value & WMI_HT_CAP_TX_STBC)));
5081 		ht_cap->mpdu_density = QDF_MIN(ht_cap->mpdu_density,
5082 					(!!(value & WMI_HT_CAP_MPDU_DENSITY)));
5083 		ht_cap->ht_rx_ldpc = QDF_MIN(ht_cap->ht_rx_ldpc,
5084 					(!!(value & WMI_HT_CAP_RX_LDPC)));
5085 		ht_cap->ht_sgi_20 = QDF_MIN(ht_cap->ht_sgi_20,
5086 					(!!(value & WMI_HT_CAP_HT20_SGI)));
5087 		ht_cap->ht_sgi_40 = QDF_MIN(ht_cap->ht_sgi_40,
5088 					(!!(value & WMI_HT_CAP_HT40_SGI)));
5089 		ht_cap->num_rf_chains =
5090 			QDF_MAX(ht_cap->num_rf_chains,
5091 				QDF_MAX(wma_get_num_of_setbits_from_bitmask(
5092 								tx_chain),
5093 					wma_get_num_of_setbits_from_bitmask(
5094 								rx_chain)));
5095 	}
5096 }
5097 
5098 /**
5099  * wma_update_target_ext_ht_cap() - Update HT caps with given extended cap
5100  * @tgt_hdl - target psoc information
5101  * @ht_cap: HT cap structure to be filled
5102  *
5103  * This function loop through each hardware mode and for each hardware mode
5104  * again it loop through each MAC/PHY and pull the caps 2G and 5G specific
5105  * HT caps and derives the final cap.
5106  *
5107  * Return: none
5108  *
5109  */
5110 static void wma_update_target_ext_ht_cap(struct target_psoc_info *tgt_hdl,
5111 					 struct wma_tgt_ht_cap *ht_cap)
5112 {
5113 	int i, total_mac_phy_cnt;
5114 	uint32_t ht_2g, ht_5g;
5115 	struct wma_tgt_ht_cap tmp_ht_cap = {0}, tmp_cap = {0};
5116 	struct wlan_psoc_host_mac_phy_caps *mac_phy_cap;
5117 	int num_hw_modes;
5118 
5119 	total_mac_phy_cnt = target_psoc_get_total_mac_phy_cnt(tgt_hdl);
5120 	num_hw_modes = target_psoc_get_num_hw_modes(tgt_hdl);
5121 	mac_phy_cap = target_psoc_get_mac_phy_cap(tgt_hdl);
5122 	/*
5123 	 * for legacy device extended cap might not even come, so in that case
5124 	 * don't overwrite legacy values
5125 	 */
5126 	if (!num_hw_modes) {
5127 		WMA_LOGD("%s: No extended HT cap for current SOC", __func__);
5128 		return;
5129 	}
5130 
5131 	for (i = 0; i < total_mac_phy_cnt; i++) {
5132 		ht_2g = mac_phy_cap[i].ht_cap_info_2G;
5133 		ht_5g = mac_phy_cap[i].ht_cap_info_5G;
5134 		if (ht_2g)
5135 			wma_derive_ext_ht_cap(&tmp_ht_cap,
5136 					ht_2g,
5137 					mac_phy_cap[i].tx_chain_mask_2G,
5138 					mac_phy_cap[i].rx_chain_mask_2G);
5139 		if (ht_5g)
5140 			wma_derive_ext_ht_cap(&tmp_ht_cap,
5141 					ht_5g,
5142 					mac_phy_cap[i].tx_chain_mask_5G,
5143 					mac_phy_cap[i].rx_chain_mask_5G);
5144 	}
5145 
5146 	if (qdf_mem_cmp(&tmp_cap, &tmp_ht_cap,
5147 				sizeof(struct wma_tgt_ht_cap))) {
5148 		qdf_mem_copy(ht_cap, &tmp_ht_cap,
5149 				sizeof(struct wma_tgt_ht_cap));
5150 	}
5151 
5152 	WMA_LOGD("%s: [ext ht cap] ht_rx_stbc - %d, ht_tx_stbc - %d\n"
5153 			"mpdu_density - %d ht_rx_ldpc - %d ht_sgi_20 - %d\n"
5154 			"ht_sgi_40 - %d num_rf_chains - %d", __func__,
5155 			ht_cap->ht_rx_stbc, ht_cap->ht_tx_stbc,
5156 			ht_cap->mpdu_density, ht_cap->ht_rx_ldpc,
5157 			ht_cap->ht_sgi_20, ht_cap->ht_sgi_40,
5158 			ht_cap->num_rf_chains);
5159 }
5160 
5161 /**
5162  * wma_derive_ext_vht_cap() - Derive VHT caps based on given value
5163  * @vht_cap: pointer to given VHT caps to be filled
5164  * @value: new VHT cap info provided in form of bitmask
5165  *
5166  * This function takes the value provided in form of bitmask and decodes
5167  * it. After decoding, what ever value it gets, it takes the union(max) or
5168  * intersection(min) with previously derived values.
5169  *
5170  * Return: none
5171  *
5172  */
5173 static void wma_derive_ext_vht_cap(
5174 			struct wma_tgt_vht_cap *vht_cap, uint32_t value)
5175 {
5176 	struct wma_tgt_vht_cap tmp_cap = {0};
5177 	uint32_t tmp = 0;
5178 
5179 	if (vht_cap == NULL)
5180 		return;
5181 
5182 	if (!qdf_mem_cmp(vht_cap, &tmp_cap,
5183 				sizeof(struct wma_tgt_vht_cap))) {
5184 		if (value & WMI_VHT_CAP_MAX_MPDU_LEN_11454)
5185 			vht_cap->vht_max_mpdu = WMI_VHT_CAP_MAX_MPDU_LEN_11454;
5186 		else if (value & WMI_VHT_CAP_MAX_MPDU_LEN_7935)
5187 			vht_cap->vht_max_mpdu = WMI_VHT_CAP_MAX_MPDU_LEN_7935;
5188 		else
5189 			vht_cap->vht_max_mpdu = 0;
5190 
5191 		if (value & WMI_VHT_CAP_CH_WIDTH_80P80_160MHZ) {
5192 			vht_cap->supp_chan_width =
5193 				1 << eHT_CHANNEL_WIDTH_80P80MHZ;
5194 			vht_cap->supp_chan_width |=
5195 				1 << eHT_CHANNEL_WIDTH_160MHZ;
5196 		} else if (value & WMI_VHT_CAP_CH_WIDTH_160MHZ) {
5197 			vht_cap->supp_chan_width =
5198 				1 << eHT_CHANNEL_WIDTH_160MHZ;
5199 		} else {
5200 			vht_cap->supp_chan_width = 1 << eHT_CHANNEL_WIDTH_80MHZ;
5201 		}
5202 		vht_cap->vht_rx_ldpc = value & WMI_VHT_CAP_RX_LDPC;
5203 		vht_cap->vht_short_gi_80 = value & WMI_VHT_CAP_SGI_80MHZ;
5204 		vht_cap->vht_short_gi_160 = value & WMI_VHT_CAP_SGI_160MHZ;
5205 		vht_cap->vht_tx_stbc = value & WMI_VHT_CAP_TX_STBC;
5206 		vht_cap->vht_rx_stbc =
5207 			(value & WMI_VHT_CAP_RX_STBC_1SS) |
5208 			(value & WMI_VHT_CAP_RX_STBC_2SS) |
5209 			(value & WMI_VHT_CAP_RX_STBC_3SS);
5210 		vht_cap->vht_max_ampdu_len_exp =
5211 			(value & WMI_VHT_CAP_MAX_AMPDU_LEN_EXP) >>
5212 				WMI_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIFT;
5213 		vht_cap->vht_su_bformer = value & WMI_VHT_CAP_SU_BFORMER;
5214 		vht_cap->vht_su_bformee = value & WMI_VHT_CAP_SU_BFORMEE;
5215 		vht_cap->vht_mu_bformer = value & WMI_VHT_CAP_MU_BFORMER;
5216 		vht_cap->vht_mu_bformee = value & WMI_VHT_CAP_MU_BFORMEE;
5217 		vht_cap->vht_txop_ps = value & WMI_VHT_CAP_TXOP_PS;
5218 	} else {
5219 		if (value & WMI_VHT_CAP_MAX_MPDU_LEN_11454)
5220 			tmp = WMI_VHT_CAP_MAX_MPDU_LEN_11454;
5221 		else if (value & WMI_VHT_CAP_MAX_MPDU_LEN_7935)
5222 			tmp = WMI_VHT_CAP_MAX_MPDU_LEN_7935;
5223 		else
5224 			tmp = 0;
5225 		vht_cap->vht_max_mpdu = QDF_MIN(vht_cap->vht_max_mpdu, tmp);
5226 
5227 		if ((value & WMI_VHT_CAP_CH_WIDTH_80P80_160MHZ)) {
5228 			tmp = (1 << eHT_CHANNEL_WIDTH_80P80MHZ) |
5229 				(1 << eHT_CHANNEL_WIDTH_160MHZ);
5230 		} else if (value & WMI_VHT_CAP_CH_WIDTH_160MHZ) {
5231 			tmp = 1 << eHT_CHANNEL_WIDTH_160MHZ;
5232 		} else {
5233 			tmp = 1 << eHT_CHANNEL_WIDTH_80MHZ;
5234 		}
5235 		vht_cap->supp_chan_width =
5236 			QDF_MAX(vht_cap->supp_chan_width, tmp);
5237 		vht_cap->vht_rx_ldpc = QDF_MIN(vht_cap->vht_rx_ldpc,
5238 						value & WMI_VHT_CAP_RX_LDPC);
5239 		vht_cap->vht_short_gi_80 = QDF_MAX(vht_cap->vht_short_gi_80,
5240 						value & WMI_VHT_CAP_SGI_80MHZ);
5241 		vht_cap->vht_short_gi_160 = QDF_MAX(vht_cap->vht_short_gi_160,
5242 						value & WMI_VHT_CAP_SGI_160MHZ);
5243 		vht_cap->vht_tx_stbc = QDF_MAX(vht_cap->vht_tx_stbc,
5244 						value & WMI_VHT_CAP_TX_STBC);
5245 		vht_cap->vht_rx_stbc = QDF_MIN(vht_cap->vht_rx_stbc,
5246 					(value & WMI_VHT_CAP_RX_STBC_1SS) |
5247 					(value & WMI_VHT_CAP_RX_STBC_2SS) |
5248 					(value & WMI_VHT_CAP_RX_STBC_3SS));
5249 		vht_cap->vht_max_ampdu_len_exp =
5250 			QDF_MIN(vht_cap->vht_max_ampdu_len_exp,
5251 				(value & WMI_VHT_CAP_MAX_AMPDU_LEN_EXP) >>
5252 					WMI_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIFT);
5253 		vht_cap->vht_su_bformer = QDF_MAX(vht_cap->vht_su_bformer,
5254 						value & WMI_VHT_CAP_SU_BFORMER);
5255 		vht_cap->vht_su_bformee = QDF_MAX(vht_cap->vht_su_bformee,
5256 						value & WMI_VHT_CAP_SU_BFORMEE);
5257 		vht_cap->vht_mu_bformer = QDF_MAX(vht_cap->vht_mu_bformer,
5258 						value & WMI_VHT_CAP_MU_BFORMER);
5259 		vht_cap->vht_mu_bformee = QDF_MAX(vht_cap->vht_mu_bformee,
5260 						value & WMI_VHT_CAP_MU_BFORMEE);
5261 		vht_cap->vht_txop_ps = QDF_MIN(vht_cap->vht_txop_ps,
5262 						value & WMI_VHT_CAP_TXOP_PS);
5263 	}
5264 }
5265 
5266 /**
5267  * wma_update_target_ext_vht_cap() - Update VHT caps with given extended cap
5268  * @tgt_hdl - target psoc information
5269  * @vht_cap: VHT cap structure to be filled
5270  *
5271  * This function loop through each hardware mode and for each hardware mode
5272  * again it loop through each MAC/PHY and pull the caps 2G and 5G specific
5273  * VHT caps and derives the final cap.
5274  *
5275  * Return: none
5276  *
5277  */
5278 static void wma_update_target_ext_vht_cap(struct target_psoc_info *tgt_hdl,
5279 					  struct wma_tgt_vht_cap *vht_cap)
5280 {
5281 	int i, num_hw_modes, total_mac_phy_cnt;
5282 	uint32_t vht_cap_info_2g, vht_cap_info_5g;
5283 	struct wma_tgt_vht_cap tmp_vht_cap = {0}, tmp_cap = {0};
5284 	struct wlan_psoc_host_mac_phy_caps *mac_phy_cap;
5285 
5286 	total_mac_phy_cnt = target_psoc_get_total_mac_phy_cnt(tgt_hdl);
5287 	num_hw_modes = target_psoc_get_num_hw_modes(tgt_hdl);
5288 	mac_phy_cap = target_psoc_get_mac_phy_cap(tgt_hdl);
5289 
5290 	/*
5291 	 * for legacy device extended cap might not even come, so in that case
5292 	 * don't overwrite legacy values
5293 	 */
5294 	if (!num_hw_modes) {
5295 		WMA_LOGD("%s: No extended VHT cap for current SOC", __func__);
5296 		return;
5297 	}
5298 
5299 	for (i = 0; i < total_mac_phy_cnt; i++) {
5300 		vht_cap_info_2g = mac_phy_cap[i].vht_cap_info_2G;
5301 		vht_cap_info_5g = mac_phy_cap[i].vht_cap_info_5G;
5302 		if (vht_cap_info_2g)
5303 			wma_derive_ext_vht_cap(&tmp_vht_cap,
5304 					vht_cap_info_2g);
5305 		if (vht_cap_info_5g)
5306 			wma_derive_ext_vht_cap(&tmp_vht_cap,
5307 					vht_cap_info_5g);
5308 	}
5309 
5310 	if (qdf_mem_cmp(&tmp_cap, &tmp_vht_cap,
5311 				sizeof(struct wma_tgt_vht_cap))) {
5312 			qdf_mem_copy(vht_cap, &tmp_vht_cap,
5313 					sizeof(struct wma_tgt_vht_cap));
5314 	}
5315 
5316 	WMA_LOGD("%s: [ext vhtcap] max_mpdu %d supp_chan_width %x rx_ldpc %x\n"
5317 		"short_gi_80 %x tx_stbc %x rx_stbc %x txop_ps %x\n"
5318 		"su_bformee %x mu_bformee %x max_ampdu_len_exp %d", __func__,
5319 		vht_cap->vht_max_mpdu, vht_cap->supp_chan_width,
5320 		vht_cap->vht_rx_ldpc, vht_cap->vht_short_gi_80,
5321 		vht_cap->vht_tx_stbc, vht_cap->vht_rx_stbc,
5322 		vht_cap->vht_txop_ps, vht_cap->vht_su_bformee,
5323 		vht_cap->vht_mu_bformee, vht_cap->vht_max_ampdu_len_exp);
5324 }
5325 
5326 /**
5327  * wma_update_ra_rate_limit() - update wma config
5328  * @wma_handle: wma handle
5329  * @cfg: target config
5330  *
5331  * Return: none
5332  */
5333 #ifdef FEATURE_WLAN_RA_FILTERING
5334 static void wma_update_ra_rate_limit(tp_wma_handle wma_handle,
5335 				     struct wma_tgt_cfg *cfg)
5336 {
5337 	cfg->is_ra_rate_limit_enabled = wma_handle->IsRArateLimitEnabled;
5338 }
5339 #else
5340 static void wma_update_ra_rate_limit(tp_wma_handle wma_handle,
5341 				     struct wma_tgt_cfg *cfg)
5342 {
5343 }
5344 #endif
5345 
5346 /**
5347  * wma_update_hdd_band_cap() - update band cap which hdd understands
5348  * @supported_band: supported band which has been given by FW
5349  * @tgt_cfg: target configuration to be updated
5350  *
5351  * Convert WMA given supported band to enum which HDD understands
5352  *
5353  * Return: None
5354  */
5355 static void wma_update_hdd_band_cap(WMI_PHY_CAPABILITY supported_band,
5356 				    struct wma_tgt_cfg *tgt_cfg)
5357 {
5358 	switch (supported_band) {
5359 	case WMI_11G_CAPABILITY:
5360 	case WMI_11NG_CAPABILITY:
5361 		tgt_cfg->band_cap = BAND_2G;
5362 		break;
5363 	case WMI_11A_CAPABILITY:
5364 	case WMI_11NA_CAPABILITY:
5365 	case WMI_11AC_CAPABILITY:
5366 		tgt_cfg->band_cap = BAND_5G;
5367 		break;
5368 	case WMI_11AG_CAPABILITY:
5369 	case WMI_11NAG_CAPABILITY:
5370 	default:
5371 		tgt_cfg->band_cap = BAND_ALL;
5372 	}
5373 }
5374 
5375 /**
5376  * wma_update_obss_detection_support() - update obss detection offload support
5377  * @wh: wma handle
5378  * @tgt_cfg: target configuration to be updated
5379  *
5380  * Update obss detection offload support based on service bit.
5381  *
5382  * Return: None
5383  */
5384 static void wma_update_obss_detection_support(tp_wma_handle wh,
5385 					      struct wma_tgt_cfg *tgt_cfg)
5386 {
5387 	if (wmi_service_enabled(wh->wmi_handle,
5388 				wmi_service_ap_obss_detection_offload))
5389 		tgt_cfg->obss_detection_offloaded = true;
5390 	else
5391 		tgt_cfg->obss_detection_offloaded = false;
5392 }
5393 
5394 /**
5395  * wma_update_obss_color_collision_support() - update obss color collision
5396  *   offload support
5397  * @wh: wma handle
5398  * @tgt_cfg: target configuration to be updated
5399  *
5400  * Update obss color collision offload support based on service bit.
5401  *
5402  * Return: None
5403  */
5404 static void wma_update_obss_color_collision_support(tp_wma_handle wh,
5405 						    struct wma_tgt_cfg *tgt_cfg)
5406 {
5407 	if (wmi_service_enabled(wh->wmi_handle, wmi_service_bss_color_offload))
5408 		tgt_cfg->obss_color_collision_offloaded = true;
5409 	else
5410 		tgt_cfg->obss_color_collision_offloaded = false;
5411 }
5412 
#ifdef WLAN_SUPPORT_GREEN_AP
/**
 * wma_green_ap_register_handlers() - register green-AP EGAP event handler
 * @wma_handle: wma handle
 *
 * Registers the enhanced green AP (EGAP) event handler with target_if
 * when FW advertises WMI_SERVICE_EGAP. No-op when green AP support is
 * compiled out.
 *
 * Return: none
 */
static void wma_green_ap_register_handlers(tp_wma_handle wma_handle)
{
	if (WMI_SERVICE_IS_ENABLED(wma_handle->wmi_service_bitmap,
				   WMI_SERVICE_EGAP))
		target_if_green_ap_register_egap_event_handler(
					wma_handle->pdev);

}
#else
/* Stub: green AP support compiled out */
static void wma_green_ap_register_handlers(tp_wma_handle wma_handle)
{
}
#endif
5427 
5428 /**
5429  * wma_update_hdd_cfg() - update HDD config
5430  * @wma_handle: wma handle
5431  *
5432  * Return: none
5433  */
5434 static void wma_update_hdd_cfg(tp_wma_handle wma_handle)
5435 {
5436 	struct wma_tgt_cfg tgt_cfg;
5437 	void *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
5438 	target_resource_config *wlan_res_cfg;
5439 	struct wlan_psoc_host_service_ext_param *service_ext_param;
5440 	struct target_psoc_info *tgt_hdl;
5441 	struct wmi_unified *wmi_handle;
5442 
5443 	WMA_LOGD("%s: Enter", __func__);
5444 
5445 	tgt_hdl = wlan_psoc_get_tgt_if_handle(wma_handle->psoc);
5446 	if (!tgt_hdl) {
5447 		WMA_LOGE("%s: target psoc info is NULL", __func__);
5448 		return;
5449 	}
5450 
5451 	wlan_res_cfg = target_psoc_get_wlan_res_cfg(tgt_hdl);
5452 	if (!wlan_res_cfg) {
5453 		WMA_LOGE("%s: wlan_res_cfg is null", __func__);
5454 		return;
5455 	}
5456 	service_ext_param =
5457 			target_psoc_get_service_ext_param(tgt_hdl);
5458 	wmi_handle = get_wmi_unified_hdl_from_psoc(wma_handle->psoc);
5459 	if (!wmi_handle) {
5460 		WMA_LOGE("%s: wmi handle is NULL", __func__);
5461 		return;
5462 	}
5463 
5464 	qdf_mem_zero(&tgt_cfg, sizeof(struct wma_tgt_cfg));
5465 
5466 	tgt_cfg.sub_20_support = wma_handle->sub_20_support;
5467 	tgt_cfg.reg_domain = wma_handle->reg_cap.eeprom_rd;
5468 	tgt_cfg.eeprom_rd_ext = wma_handle->reg_cap.eeprom_rd_ext;
5469 
5470 	tgt_cfg.max_intf_count = wlan_res_cfg->num_vdevs;
5471 
5472 	qdf_mem_copy(tgt_cfg.hw_macaddr.bytes, wma_handle->hwaddr,
5473 		     ATH_MAC_LEN);
5474 
5475 	wma_update_target_services(wmi_handle, &tgt_cfg.services);
5476 	wma_update_target_ht_cap(tgt_hdl, &tgt_cfg.ht_cap);
5477 	wma_update_target_vht_cap(tgt_hdl, &tgt_cfg.vht_cap);
5478 	/*
5479 	 * This will overwrite the structure filled by wma_update_target_ht_cap
5480 	 * and wma_update_target_vht_cap APIs.
5481 	 */
5482 	wma_update_target_ext_ht_cap(tgt_hdl, &tgt_cfg.ht_cap);
5483 	wma_update_target_ext_vht_cap(tgt_hdl, &tgt_cfg.vht_cap);
5484 
5485 	wma_update_target_ext_he_cap(tgt_hdl, &tgt_cfg);
5486 
5487 	tgt_cfg.target_fw_version = target_if_get_fw_version(tgt_hdl);
5488 	if (service_ext_param)
5489 		tgt_cfg.target_fw_vers_ext =
5490 				service_ext_param->fw_build_vers_ext;
5491 
5492 	tgt_cfg.hw_bd_id = wma_handle->hw_bd_id;
5493 	tgt_cfg.hw_bd_info.bdf_version = wma_handle->hw_bd_info[BDF_VERSION];
5494 	tgt_cfg.hw_bd_info.ref_design_id =
5495 		wma_handle->hw_bd_info[REF_DESIGN_ID];
5496 	tgt_cfg.hw_bd_info.customer_id = wma_handle->hw_bd_info[CUSTOMER_ID];
5497 	tgt_cfg.hw_bd_info.project_id = wma_handle->hw_bd_info[PROJECT_ID];
5498 	tgt_cfg.hw_bd_info.board_data_rev =
5499 		wma_handle->hw_bd_info[BOARD_DATA_REV];
5500 
5501 #ifdef WLAN_FEATURE_LPSS
5502 	tgt_cfg.lpss_support = wma_handle->lpss_support;
5503 #endif /* WLAN_FEATURE_LPSS */
5504 	tgt_cfg.ap_arpns_support = wma_handle->ap_arpns_support;
5505 	tgt_cfg.bpf_enabled = wma_handle->bpf_enabled;
5506 	tgt_cfg.dfs_cac_offload = wma_handle->is_dfs_offloaded;
5507 	tgt_cfg.rcpi_enabled = wma_handle->rcpi_enabled;
5508 	wma_update_ra_rate_limit(wma_handle, &tgt_cfg);
5509 	wma_update_hdd_band_cap(target_if_get_phy_capability(tgt_hdl),
5510 				&tgt_cfg);
5511 	tgt_cfg.fine_time_measurement_cap =
5512 		target_if_get_wmi_fw_sub_feat_caps(tgt_hdl);
5513 	tgt_cfg.wmi_max_len = wmi_get_max_msg_len(wma_handle->wmi_handle)
5514 			      - WMI_TLV_HEADROOM;
5515 	tgt_cfg.tx_bfee_8ss_enabled = wma_handle->tx_bfee_8ss_enabled;
5516 	wma_update_obss_detection_support(wma_handle, &tgt_cfg);
5517 	wma_update_obss_color_collision_support(wma_handle, &tgt_cfg);
5518 	wma_update_hdd_cfg_ndp(wma_handle, &tgt_cfg);
5519 	wma_handle->tgt_cfg_update_cb(hdd_ctx, &tgt_cfg);
5520 	target_if_store_pdev_target_if_ctx(wma_get_pdev_from_scn_handle);
5521 	target_pdev_set_wmi_handle(wma_handle->pdev->tgt_if_handle,
5522 				   wma_handle->wmi_handle);
5523 	wma_green_ap_register_handlers(wma_handle);
5524 }
5525 
5526 /**
5527  * wma_dump_dbs_hw_mode() - Print the DBS HW modes
5528  * @wma_handle: WMA handle
5529  *
5530  * Prints the DBS HW modes sent by the FW as part
5531  * of WMI ready event
5532  *
5533  * Return: None
5534  */
5535 static void wma_dump_dbs_hw_mode(tp_wma_handle wma_handle)
5536 {
5537 	uint32_t i, param;
5538 
5539 	if (!wma_handle) {
5540 		WMA_LOGE("%s: Invalid WMA handle", __func__);
5541 		return;
5542 	}
5543 
5544 	for (i = 0; i < wma_handle->num_dbs_hw_modes; i++) {
5545 		param = wma_handle->hw_mode.hw_mode_list[i];
5546 		WMA_LOGD("%s:[%d]-MAC0: tx_ss:%d rx_ss:%d bw_idx:%d",
5547 			__func__, i,
5548 			WMA_HW_MODE_MAC0_TX_STREAMS_GET(param),
5549 			WMA_HW_MODE_MAC0_RX_STREAMS_GET(param),
5550 			WMA_HW_MODE_MAC0_BANDWIDTH_GET(param));
5551 		WMA_LOGD("%s:[%d]-MAC1: tx_ss:%d rx_ss:%d bw_idx:%d",
5552 			__func__, i,
5553 			WMA_HW_MODE_MAC1_TX_STREAMS_GET(param),
5554 			WMA_HW_MODE_MAC1_RX_STREAMS_GET(param),
5555 			WMA_HW_MODE_MAC1_BANDWIDTH_GET(param));
5556 		WMA_LOGD("%s:[%d] DBS:%d SBS:%d", __func__, i,
5557 			WMA_HW_MODE_DBS_MODE_GET(param),
5558 			WMA_HW_MODE_SBS_MODE_GET(param));
5559 	}
5560 	policy_mgr_dump_dbs_hw_mode(wma_handle->psoc);
5561 }
5562 
5563 /**
5564  * wma_init_scan_fw_mode_config() - Initialize scan/fw mode config
5565  * @psoc: Object manager psoc
5566  * @scan_config: Scam mode configuration
5567  * @fw_config: FW mode configuration
5568  *
5569  * Enables all the valid bits of concurrent_scan_config_bits and
5570  * fw_mode_config_bits.
5571  *
5572  * Return: None
5573  */
5574 static void wma_init_scan_fw_mode_config(struct wlan_objmgr_psoc *psoc,
5575 					 uint32_t scan_config,
5576 					 uint32_t fw_config)
5577 {
5578 	WMA_LOGD("%s: Enter", __func__);
5579 
5580 	if (!psoc) {
5581 		WMA_LOGE("%s: obj psoc is NULL", __func__);
5582 		return;
5583 	}
5584 
5585 	policy_mgr_init_dbs_config(psoc, scan_config, fw_config);
5586 }
5587 
5588 /**
5589  * wma_update_ra_limit() - update ra limit based on bpf filter
5590  *  enabled or not
5591  * @handle: wma handle
5592  *
5593  * Return: none
5594  */
5595 #ifdef FEATURE_WLAN_RA_FILTERING
5596 static void wma_update_ra_limit(tp_wma_handle wma_handle)
5597 {
5598 	if (wma_handle->bpf_enabled)
5599 		wma_handle->IsRArateLimitEnabled = false;
5600 }
5601 #else
5602 static void wma_update_ra__limit(tp_wma_handle handle)
5603 {
5604 }
5605 #endif
5606 
5607 static void wma_set_pmo_caps(struct wlan_objmgr_psoc *psoc)
5608 {
5609 	QDF_STATUS status;
5610 	struct pmo_device_caps caps;
5611 
5612 	caps.arp_ns_offload =
5613 		wma_is_service_enabled(wmi_service_arpns_offload);
5614 	caps.apf =
5615 		wma_is_service_enabled(wmi_service_bpf_offload);
5616 	caps.packet_filter =
5617 		wma_is_service_enabled(wmi_service_packet_filter_offload);
5618 	caps.unified_wow =
5619 		wma_is_service_enabled(wmi_service_unified_wow_capability);
5620 
5621 	status = ucfg_pmo_psoc_set_caps(psoc, &caps);
5622 	if (QDF_IS_STATUS_ERROR(status))
5623 		WMA_LOGE("Failed to set PMO capabilities; status:%d", status);
5624 }
5625 
/**
 * wma_set_component_caps() - propagate FW capabilities to components
 * @psoc: Object manager psoc
 *
 * Single entry point for pushing FW service capabilities down to the
 * component modules; currently only the PMO caps are set here.
 *
 * Return: none
 */
static void wma_set_component_caps(struct wlan_objmgr_psoc *psoc)
{
	wma_set_pmo_caps(psoc);
}
5630 
5631 /**
5632  * wma_rx_service_ready_event() - event handler to process
5633  *                                wmi rx sevice ready event.
5634  * @handle: wma handle
5635  * @cmd_param_info: command params info
5636  *
5637  * Return: none
5638  */
5639 int wma_rx_service_ready_event(void *handle, uint8_t *cmd_param_info,
5640 			       uint32_t length)
5641 {
5642 	tp_wma_handle wma_handle = (tp_wma_handle) handle;
5643 	WMI_SERVICE_READY_EVENTID_param_tlvs *param_buf;
5644 	wmi_service_ready_event_fixed_param *ev;
5645 	QDF_STATUS status;
5646 	uint32_t *ev_wlan_dbs_hw_mode_list;
5647 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
5648 	struct target_psoc_info *tgt_hdl;
5649 	struct wlan_psoc_target_capability_info *tgt_cap_info;
5650 	target_resource_config *wlan_res_cfg;
5651 	struct wmi_unified *wmi_handle;
5652 	uint32_t *service_bitmap;
5653 
5654 	WMA_LOGD("%s: Enter", __func__);
5655 
5656 	if (!handle) {
5657 		WMA_LOGE("%s: wma_handle passed is NULL", __func__);
5658 		return -EINVAL;
5659 	}
5660 
5661 	tgt_hdl = wlan_psoc_get_tgt_if_handle(wma_handle->psoc);
5662 	if (!tgt_hdl) {
5663 		WMA_LOGE("%s: target psoc info is NULL", __func__);
5664 		return -EINVAL;
5665 	}
5666 
5667 	wlan_res_cfg = target_psoc_get_wlan_res_cfg(tgt_hdl);
5668 	tgt_cap_info = target_psoc_get_target_caps(tgt_hdl);
5669 	service_bitmap = target_psoc_get_service_bitmap(tgt_hdl);
5670 
5671 	param_buf = (WMI_SERVICE_READY_EVENTID_param_tlvs *) cmd_param_info;
5672 	if (!param_buf) {
5673 		WMA_LOGE("%s: Invalid arguments", __func__);
5674 		return -EINVAL;
5675 	}
5676 
5677 	ev = param_buf->fixed_param;
5678 	if (!ev) {
5679 		WMA_LOGE("%s: Invalid buffer", __func__);
5680 		return -EINVAL;
5681 	}
5682 
5683 	wmi_handle = get_wmi_unified_hdl_from_psoc(wma_handle->psoc);
5684 	if (!wmi_handle) {
5685 		WMA_LOGE("%s: wmi handle is NULL", __func__);
5686 		return -EINVAL;
5687 	}
5688 
5689 	WMA_LOGD("WMA <-- WMI_SERVICE_READY_EVENTID");
5690 
5691 	if (ev->num_dbs_hw_modes > param_buf->num_wlan_dbs_hw_mode_list) {
5692 		WMA_LOGE("FW dbs_hw_mode entry %d more than value %d in TLV hdr",
5693 			 ev->num_dbs_hw_modes,
5694 			 param_buf->num_wlan_dbs_hw_mode_list);
5695 		return -EINVAL;
5696 	}
5697 
5698 	wma_handle->num_dbs_hw_modes = ev->num_dbs_hw_modes;
5699 	ev_wlan_dbs_hw_mode_list = param_buf->wlan_dbs_hw_mode_list;
5700 	wma_handle->hw_mode.hw_mode_list =
5701 		qdf_mem_malloc(sizeof(*wma_handle->hw_mode.hw_mode_list) *
5702 				wma_handle->num_dbs_hw_modes);
5703 	if (!wma_handle->hw_mode.hw_mode_list) {
5704 		WMA_LOGE("%s: Memory allocation failed for DBS", __func__);
5705 		/* Continuing with the rest of the processing */
5706 	}
5707 
5708 	if (wma_handle->hw_mode.hw_mode_list)
5709 		qdf_mem_copy(wma_handle->hw_mode.hw_mode_list,
5710 			     ev_wlan_dbs_hw_mode_list,
5711 			     (sizeof(*wma_handle->hw_mode.hw_mode_list) *
5712 			      wma_handle->num_dbs_hw_modes));
5713 
5714 	policy_mgr_init_dbs_hw_mode(wma_handle->psoc,
5715 	ev->num_dbs_hw_modes, ev_wlan_dbs_hw_mode_list);
5716 	wma_dump_dbs_hw_mode(wma_handle);
5717 
5718 	/* Initializes the fw_mode and scan_config to zero.
5719 	 * If ext service ready event is present it will set
5720 	 * the actual values of these two params.
5721 	 * This is to ensure that no garbage values would be
5722 	 * present in the absence of ext service ready event.
5723 	 */
5724 	wma_init_scan_fw_mode_config(wma_handle->psoc, 0, 0);
5725 
5726 	qdf_mem_copy(&wma_handle->reg_cap, param_buf->hal_reg_capabilities,
5727 				 sizeof(HAL_REG_CAPABILITIES));
5728 
5729 	wma_handle->vht_supp_mcs = ev->vht_supp_mcs;
5730 
5731 	wma_handle->new_hw_mode_index = tgt_cap_info->default_dbs_hw_mode_index;
5732 	policy_mgr_update_new_hw_mode_index(wma_handle->psoc,
5733 	tgt_cap_info->default_dbs_hw_mode_index);
5734 
5735 	WMA_LOGD("%s: Firmware default hw mode index : %d",
5736 		 __func__, tgt_cap_info->default_dbs_hw_mode_index);
5737 	WMA_LOGI("%s: Firmware build version : %08x",
5738 		 __func__, ev->fw_build_vers);
5739 	WMA_LOGD("FW fine time meas cap: 0x%x",
5740 		 tgt_cap_info->wmi_fw_sub_feat_caps);
5741 
5742 	wma_handle->hw_bd_id = ev->hw_bd_id;
5743 
5744 	wma_handle->hw_bd_info[BDF_VERSION] =
5745 		WMI_GET_BDF_VERSION(ev->hw_bd_info);
5746 	wma_handle->hw_bd_info[REF_DESIGN_ID] =
5747 		WMI_GET_REF_DESIGN(ev->hw_bd_info);
5748 	wma_handle->hw_bd_info[CUSTOMER_ID] =
5749 		WMI_GET_CUSTOMER_ID(ev->hw_bd_info);
5750 	wma_handle->hw_bd_info[PROJECT_ID] =
5751 		WMI_GET_PROJECT_ID(ev->hw_bd_info);
5752 	wma_handle->hw_bd_info[BOARD_DATA_REV] =
5753 		WMI_GET_BOARD_DATA_REV(ev->hw_bd_info);
5754 
5755 	WMA_LOGI("%s: Board id: %x, Board version: %x %x %x %x %x",
5756 		 __func__, wma_handle->hw_bd_id,
5757 		 wma_handle->hw_bd_info[BDF_VERSION],
5758 		 wma_handle->hw_bd_info[REF_DESIGN_ID],
5759 		 wma_handle->hw_bd_info[CUSTOMER_ID],
5760 		 wma_handle->hw_bd_info[PROJECT_ID],
5761 		 wma_handle->hw_bd_info[BOARD_DATA_REV]);
5762 
5763 	/* wmi service is ready */
5764 	qdf_mem_copy(wma_handle->wmi_service_bitmap,
5765 		     service_bitmap,
5766 		     sizeof(wma_handle->wmi_service_bitmap));
5767 
5768 	cdp_cfg_tx_set_is_mgmt_over_wmi_enabled(soc,
5769 		wmi_service_enabled(wmi_handle, wmi_service_mgmt_tx_wmi));
5770 	cdp_set_desc_global_pool_size(soc, ev->num_msdu_desc);
5771 	/* SWBA event handler for beacon transmission */
5772 	status = wmi_unified_register_event_handler(wmi_handle,
5773 						    wmi_host_swba_event_id,
5774 						    wma_beacon_swba_handler,
5775 						    WMA_RX_SERIALIZER_CTX);
5776 	if (QDF_IS_STATUS_ERROR(status)) {
5777 		WMA_LOGE("Failed to register swba beacon event cb");
5778 		goto free_hw_mode_list;
5779 	}
5780 #ifdef WLAN_FEATURE_LPSS
5781 	wma_handle->lpss_support =
5782 		wmi_service_enabled(wmi_handle, wmi_service_lpass);
5783 #endif /* WLAN_FEATURE_LPSS */
5784 
5785 	/*
5786 	 * This Service bit is added to check for ARP/NS Offload
5787 	 * support for LL/HL targets
5788 	 */
5789 	wma_handle->ap_arpns_support =
5790 		wmi_service_enabled(wmi_handle, wmi_service_ap_arpns_offload);
5791 
5792 	wma_handle->bpf_enabled = (wma_handle->bpf_packet_filter_enable &&
5793 		wmi_service_enabled(wmi_handle, wmi_service_bpf_offload));
5794 	wma_update_ra_limit(wma_handle);
5795 
5796 	if (wmi_service_enabled(wmi_handle, wmi_service_csa_offload)) {
5797 		WMA_LOGD("%s: FW support CSA offload capability", __func__);
5798 		status = wmi_unified_register_event_handler(
5799 						wmi_handle,
5800 						wmi_csa_handling_event_id,
5801 						wma_csa_offload_handler,
5802 						WMA_RX_SERIALIZER_CTX);
5803 		if (QDF_IS_STATUS_ERROR(status)) {
5804 			WMA_LOGE("Failed to register CSA offload event cb");
5805 			goto free_hw_mode_list;
5806 		}
5807 	}
5808 
5809 	if (wmi_service_enabled(wmi_handle, wmi_service_mgmt_tx_wmi)) {
5810 		WMA_LOGD("Firmware supports management TX over WMI,use WMI interface instead of HTT for management Tx");
5811 		/*
5812 		 * Register Tx completion event handler for MGMT Tx over WMI
5813 		 * case
5814 		 */
5815 		status = wmi_unified_register_event_handler(
5816 					wmi_handle,
5817 					wmi_mgmt_tx_completion_event_id,
5818 					wma_mgmt_tx_completion_handler,
5819 					WMA_RX_SERIALIZER_CTX);
5820 		if (QDF_IS_STATUS_ERROR(status)) {
5821 			WMA_LOGE("Failed to register MGMT over WMI completion handler");
5822 			goto free_hw_mode_list;
5823 		}
5824 
5825 		status = wmi_unified_register_event_handler(
5826 				wmi_handle,
5827 				wmi_mgmt_tx_bundle_completion_event_id,
5828 				wma_mgmt_tx_bundle_completion_handler,
5829 				WMA_RX_SERIALIZER_CTX);
5830 		if (QDF_IS_STATUS_ERROR(status)) {
5831 			WMA_LOGE("Failed to register MGMT over WMI completion handler");
5832 			goto free_hw_mode_list;
5833 		}
5834 
5835 	} else {
5836 		WMA_LOGE("FW doesnot support WMI_SERVICE_MGMT_TX_WMI, Use HTT interface for Management Tx");
5837 	}
5838 
5839 #ifdef WLAN_FEATURE_GTK_OFFLOAD
5840 	if (wmi_service_enabled(wmi_handle, wmi_service_gtk_offload)) {
5841 		status = wmi_unified_register_event_handler(
5842 					wma_handle->wmi_handle,
5843 					wmi_gtk_offload_status_event_id,
5844 					target_if_pmo_gtk_offload_status_event,
5845 					WMA_RX_WORK_CTX);
5846 		if (QDF_IS_STATUS_ERROR(status)) {
5847 			WMA_LOGE("Failed to register GTK offload event cb");
5848 			goto free_hw_mode_list;
5849 		}
5850 	}
5851 #endif /* WLAN_FEATURE_GTK_OFFLOAD */
5852 
5853 	status = wmi_unified_register_event_handler(wmi_handle,
5854 				wmi_tbttoffset_update_event_id,
5855 				wma_tbttoffset_update_event_handler,
5856 				WMA_RX_SERIALIZER_CTX);
5857 	if (QDF_IS_STATUS_ERROR(status)) {
5858 		WMA_LOGE("Failed to register WMI_TBTTOFFSET_UPDATE_EVENTID callback");
5859 		goto free_hw_mode_list;
5860 	}
5861 
5862 	if (wmi_service_enabled(wma_handle->wmi_handle,
5863 				   wmi_service_rcpi_support)) {
5864 		/* register for rcpi response event */
5865 		status = wmi_unified_register_event_handler(
5866 							wmi_handle,
5867 							wmi_update_rcpi_event_id,
5868 							wma_rcpi_event_handler,
5869 							WMA_RX_SERIALIZER_CTX);
5870 		if (QDF_IS_STATUS_ERROR(status)) {
5871 			WMA_LOGE("Failed to register RCPI event handler");
5872 			goto free_hw_mode_list;
5873 		}
5874 		wma_handle->rcpi_enabled = true;
5875 	}
5876 
5877 	/* mac_id is replaced with pdev_id in converged firmware to have
5878 	 * multi-radio support. In order to maintain backward compatibility
5879 	 * with old fw, host needs to check WMI_SERVICE_DEPRECATED_REPLACE
5880 	 * in service bitmap from FW and host needs to set use_pdev_id in
5881 	 * wmi_resource_config to true. If WMI_SERVICE_DEPRECATED_REPLACE
5882 	 * service is not set, then host shall not expect MAC ID from FW in
5883 	 * VDEV START RESPONSE event and host shall use PDEV ID.
5884 	 */
5885 	if (wmi_service_enabled(wmi_handle, wmi_service_deprecated_replace))
5886 		wlan_res_cfg->use_pdev_id = true;
5887 	else
5888 		wlan_res_cfg->use_pdev_id = false;
5889 
5890 	wlan_res_cfg->max_num_dbs_scan_duty_cycle = CDS_DBS_SCAN_CLIENTS_MAX;
5891 
5892 	/* Initialize the log supported event handler */
5893 	status = wmi_unified_register_event_handler(wmi_handle,
5894 			wmi_diag_event_id_log_supported_event_id,
5895 			wma_log_supported_evt_handler,
5896 			WMA_RX_SERIALIZER_CTX);
5897 	if (QDF_IS_STATUS_ERROR(status)) {
5898 		WMA_LOGE("Failed to register log supported event cb");
5899 		goto free_hw_mode_list;
5900 	}
5901 
5902 	cdp_mark_first_wakeup_packet(soc,
5903 		wmi_service_enabled(wmi_handle,
5904 			wmi_service_mark_first_wakeup_packet));
5905 	wma_handle->is_dfs_offloaded =
5906 		wmi_service_enabled(wmi_handle,
5907 			wmi_service_dfs_phyerr_offload);
5908 	wma_handle->nan_datapath_enabled =
5909 		wmi_service_enabled(wma_handle->wmi_handle,
5910 			wmi_service_nan_data);
5911 
5912 	wma_set_component_caps(wma_handle->psoc);
5913 
5914 	wma_update_fw_config(wma_handle->psoc, tgt_hdl);
5915 
5916 	status = wmi_unified_save_fw_version_cmd(wmi_handle, param_buf);
5917 	if (QDF_IS_STATUS_ERROR(status)) {
5918 		WMA_LOGE("Failed to send WMI_INIT_CMDID command");
5919 		goto free_hw_mode_list;
5920 	}
5921 
5922 	if (wmi_service_enabled(wmi_handle, wmi_service_ext_msg)) {
5923 		status = qdf_mc_timer_start(
5924 				&wma_handle->service_ready_ext_timer,
5925 				WMA_SERVICE_READY_EXT_TIMEOUT);
5926 		if (QDF_IS_STATUS_ERROR(status))
5927 			WMA_LOGE("Failed to start the service ready ext timer");
5928 	}
5929 	wma_handle->tx_bfee_8ss_enabled =
5930 		wmi_service_enabled(wmi_handle, wmi_service_8ss_tx_bfee);
5931 
5932 	target_psoc_set_num_radios(tgt_hdl, 1);
5933 
5934 	return 0;
5935 
5936 free_hw_mode_list:
5937 	if (wma_handle->hw_mode.hw_mode_list) {
5938 		qdf_mem_free(wma_handle->hw_mode.hw_mode_list);
5939 		wma_handle->hw_mode.hw_mode_list = NULL;
5940 		WMA_LOGD("%s: DBS list is freed", __func__);
5941 	}
5942 
5943 	return -EINVAL;
5944 
5945 }
5946 
5947 /**
5948  * wma_get_phyid_for_given_band() - to get phyid for band
5949  *
5950  * @wma_handle: Pointer to wma handle
5951 *  @tgt_hdl: target psoc information
5952  * @band: enum value of for 2G or 5G band
5953  * @phyid: Pointer to phyid which needs to be filled
5954  *
5955  * This API looks in to the map to find out which particular phy supports
5956  * provided band and return the idx (also called phyid) of that phy. Caller
5957  * use this phyid to fetch various caps of that phy
5958  *
5959  * Return: QDF_STATUS
5960  */
5961 static QDF_STATUS wma_get_phyid_for_given_band(
5962 			tp_wma_handle wma_handle,
5963 			struct target_psoc_info *tgt_hdl,
5964 			enum cds_band_type band, uint8_t *phyid)
5965 {
5966 	uint8_t idx, i, num_radios;
5967 	struct wlan_psoc_host_mac_phy_caps *mac_phy_cap;
5968 
5969 	if (!wma_handle) {
5970 		WMA_LOGE("Invalid wma handle");
5971 		return QDF_STATUS_E_FAILURE;
5972 	}
5973 
5974 	idx = 0;
5975 	*phyid = idx;
5976 	num_radios = target_psoc_get_num_radios(tgt_hdl);
5977 	mac_phy_cap = target_psoc_get_mac_phy_cap(tgt_hdl);
5978 
5979 	for (i = 0; i < num_radios; i++) {
5980 		if ((band == CDS_BAND_2GHZ) &&
5981 		(WLAN_2G_CAPABILITY == mac_phy_cap[idx + i].supported_bands)) {
5982 			*phyid = idx + i;
5983 			WMA_LOGD("Select 2G capable phyid[%d]", *phyid);
5984 			return QDF_STATUS_SUCCESS;
5985 		} else if ((band == CDS_BAND_5GHZ) &&
5986 		(WLAN_5G_CAPABILITY == mac_phy_cap[idx + i].supported_bands)) {
5987 			*phyid = idx + i;
5988 			WMA_LOGD("Select 5G capable phyid[%d]", *phyid);
5989 			return QDF_STATUS_SUCCESS;
5990 		}
5991 	}
5992 	WMA_LOGD("Using default single hw mode phyid[%d]", *phyid);
5993 	return QDF_STATUS_SUCCESS;
5994 }
5995 
5996 /**
5997  * wma_get_caps_for_phyidx_hwmode() - to fetch caps for given hw mode and band
5998  * @caps_per_phy: Pointer to capabilities structure which needs to be filled
5999  * @hw_mode: Provided hardware mode
6000  * @band: Provide band i.e. 2G or 5G
6001  *
6002  * This API finds cap which suitable for provided hw mode and band. If user
6003  * is provides some invalid hw mode then it will automatically falls back to
6004  * default hw mode
6005  *
6006  * Return: QDF_STATUS
6007  */
6008 QDF_STATUS wma_get_caps_for_phyidx_hwmode(struct wma_caps_per_phy *caps_per_phy,
6009 		enum hw_mode_dbs_capab hw_mode, enum cds_band_type band)
6010 {
6011 	t_wma_handle *wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
6012 	struct target_psoc_info *tgt_hdl;
6013 	int ht_cap_info, vht_cap_info;
6014 	uint8_t phyid, our_hw_mode = hw_mode, num_hw_modes;
6015 	struct wlan_psoc_host_mac_phy_caps *mac_phy_cap;
6016 
6017 	if (!wma_handle) {
6018 		WMA_LOGE("Invalid wma handle");
6019 		return QDF_STATUS_E_FAILURE;
6020 	}
6021 
6022 	tgt_hdl = wlan_psoc_get_tgt_if_handle(wma_handle->psoc);
6023 	if (!tgt_hdl) {
6024 		WMA_LOGE("%s: target psoc info is NULL", __func__);
6025 		return -EINVAL;
6026 	}
6027 
6028 	ht_cap_info = target_if_get_ht_cap_info(tgt_hdl);
6029 	vht_cap_info = target_if_get_vht_cap_info(tgt_hdl);
6030 	num_hw_modes = target_psoc_get_num_hw_modes(tgt_hdl);
6031 	mac_phy_cap = target_psoc_get_mac_phy_cap(tgt_hdl);
6032 
6033 	if (!num_hw_modes) {
6034 		WMA_LOGD("Invalid number of hw modes, use legacy HT/VHT caps");
6035 		caps_per_phy->ht_2g = ht_cap_info;
6036 		caps_per_phy->ht_5g = ht_cap_info;
6037 		caps_per_phy->vht_2g = vht_cap_info;
6038 		caps_per_phy->vht_5g = vht_cap_info;
6039 		/* legacy platform doesn't support HE IE */
6040 		caps_per_phy->he_2g = 0;
6041 		caps_per_phy->he_5g = 0;
6042 
6043 		return QDF_STATUS_SUCCESS;
6044 	}
6045 
6046 	if (!policy_mgr_is_dbs_enable(wma_handle->psoc))
6047 		our_hw_mode = HW_MODE_DBS_NONE;
6048 
6049 	if (!caps_per_phy) {
6050 		WMA_LOGE("Invalid caps pointer");
6051 		return QDF_STATUS_E_FAILURE;
6052 	}
6053 
6054 	if (QDF_STATUS_SUCCESS !=
6055 		wma_get_phyid_for_given_band(wma_handle, tgt_hdl, band, &phyid)) {
6056 		WMA_LOGE("Invalid phyid");
6057 		return QDF_STATUS_E_FAILURE;
6058 	}
6059 
6060 	caps_per_phy->ht_2g = mac_phy_cap[phyid].ht_cap_info_2G;
6061 	caps_per_phy->ht_5g = mac_phy_cap[phyid].ht_cap_info_5G;
6062 	caps_per_phy->vht_2g = mac_phy_cap[phyid].vht_cap_info_2G;
6063 	caps_per_phy->vht_5g = mac_phy_cap[phyid].vht_cap_info_5G;
6064 	caps_per_phy->he_2g = mac_phy_cap[phyid].he_cap_info_2G;
6065 	caps_per_phy->he_5g = mac_phy_cap[phyid].he_cap_info_5G;
6066 
6067 	caps_per_phy->tx_chain_mask_2G = mac_phy_cap->tx_chain_mask_2G;
6068 	caps_per_phy->rx_chain_mask_2G = mac_phy_cap->rx_chain_mask_2G;
6069 	caps_per_phy->tx_chain_mask_5G = mac_phy_cap->tx_chain_mask_5G;
6070 	caps_per_phy->rx_chain_mask_5G = mac_phy_cap->rx_chain_mask_5G;
6071 
6072 	return QDF_STATUS_SUCCESS;
6073 }
6074 
6075 /**
6076  * wma_is_rx_ldpc_supported_for_channel() - to find out if ldpc is supported
6077  *
6078  * @channel: Channel number for which it needs to check if rx ldpc is enabled
6079  *
6080  * This API takes channel number as argument and takes default hw mode as DBS
6081  * to check if rx LDPC support is enabled for that channel or no
6082  */
6083 bool wma_is_rx_ldpc_supported_for_channel(uint32_t channel)
6084 {
6085 	t_wma_handle *wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
6086 	struct target_psoc_info *tgt_hdl;
6087 	struct wma_caps_per_phy caps_per_phy = {0};
6088 	enum cds_band_type band;
6089 	bool status;
6090 	uint8_t num_hw_modes;
6091 
6092 	if (!wma_handle) {
6093 		WMA_LOGE("Invalid wma handle");
6094 		return false;
6095 	}
6096 
6097 	tgt_hdl = wlan_psoc_get_tgt_if_handle(wma_handle->psoc);
6098 	if (!tgt_hdl) {
6099 		WMA_LOGE("Target handle is NULL");
6100 		return QDF_STATUS_E_FAILURE;
6101 	}
6102 
6103 	num_hw_modes = target_psoc_get_num_hw_modes(tgt_hdl);
6104 
6105 	if (!WLAN_REG_IS_24GHZ_CH(channel))
6106 		band = CDS_BAND_5GHZ;
6107 	else
6108 		band = CDS_BAND_2GHZ;
6109 
6110 	if (QDF_STATUS_SUCCESS != wma_get_caps_for_phyidx_hwmode(
6111 						&caps_per_phy,
6112 						HW_MODE_DBS, band)) {
6113 		return false;
6114 	}
6115 
6116 	/*
6117 	 * Legacy platforms like Rome set WMI_HT_CAP_LDPC to specify RX LDPC
6118 	 * capability. But new platforms like Helium set WMI_HT_CAP_RX_LDPC
6119 	 * instead.
6120 	 */
6121 	if (0 == num_hw_modes) {
6122 		status = (!!(caps_per_phy.ht_2g & WMI_HT_CAP_LDPC));
6123 	} else {
6124 		if (WLAN_REG_IS_24GHZ_CH(channel))
6125 			status = (!!(caps_per_phy.ht_2g & WMI_HT_CAP_RX_LDPC));
6126 		else
6127 			status = (!!(caps_per_phy.ht_5g & WMI_HT_CAP_RX_LDPC));
6128 	}
6129 
6130 	return status;
6131 }
6132 
6133 /**
6134  * wma_print_mac_phy_capabilities() - Prints MAC PHY capabilities
6135  * @cap: pointer to WMI_MAC_PHY_CAPABILITIES
6136  * @index: MAC_PHY index
6137  *
6138  * Return: none
6139  */
6140 static void wma_print_mac_phy_capabilities(struct wlan_psoc_host_mac_phy_caps
6141 					   *cap, int index)
6142 {
6143 	uint32_t mac_2G, mac_5G;
6144 	uint32_t phy_2G[WMI_MAX_HECAP_PHY_SIZE];
6145 	uint32_t phy_5G[WMI_MAX_HECAP_PHY_SIZE];
6146 	struct wlan_psoc_host_ppe_threshold ppet_2G, ppet_5G;
6147 
6148 	WMA_LOGI("\t: index [%d]", index);
6149 	WMA_LOGI("\t: cap for hw_mode_id[%d]", cap->hw_mode_id);
6150 	WMA_LOGI("\t: pdev_id[%d]", cap->pdev_id);
6151 	WMA_LOGI("\t: phy_id[%d]", cap->phy_id);
6152 	WMA_LOGI("\t: supports_11b[%d]", cap->supports_11b);
6153 	WMA_LOGI("\t: supports_11g[%d]", cap->supports_11g);
6154 	WMA_LOGI("\t: supports_11a[%d]", cap->supports_11a);
6155 	WMA_LOGI("\t: supports_11n[%d]", cap->supports_11n);
6156 	WMA_LOGI("\t: supports_11ac[%d]", cap->supports_11ac);
6157 	WMA_LOGI("\t: supports_11ax[%d]", cap->supports_11ax);
6158 	WMA_LOGI("\t: supported_bands[%d]", cap->supported_bands);
6159 	WMA_LOGI("\t: ampdu_density[%d]", cap->ampdu_density);
6160 	WMA_LOGI("\t: max_bw_supported_2G[%d]", cap->max_bw_supported_2G);
6161 	WMA_LOGI("\t: ht_cap_info_2G[%d]", cap->ht_cap_info_2G);
6162 	WMA_LOGI("\t: vht_cap_info_2G[%d]", cap->vht_cap_info_2G);
6163 	WMA_LOGI("\t: vht_supp_mcs_2G[%d]", cap->vht_supp_mcs_2G);
6164 	WMA_LOGI("\t: tx_chain_mask_2G[%d]", cap->tx_chain_mask_2G);
6165 	WMA_LOGI("\t: rx_chain_mask_2G[%d]", cap->rx_chain_mask_2G);
6166 	WMA_LOGI("\t: max_bw_supported_5G[%d]", cap->max_bw_supported_5G);
6167 	WMA_LOGI("\t: ht_cap_info_5G[%d]", cap->ht_cap_info_5G);
6168 	WMA_LOGI("\t: vht_cap_info_5G[%d]", cap->vht_cap_info_5G);
6169 	WMA_LOGI("\t: vht_supp_mcs_5G[%d]", cap->vht_supp_mcs_5G);
6170 	WMA_LOGI("\t: tx_chain_mask_5G[%d]", cap->tx_chain_mask_5G);
6171 	WMA_LOGI("\t: rx_chain_mask_5G[%d]", cap->rx_chain_mask_5G);
6172 	WMA_LOGI("\t: he_cap_info_2G[%08x]", cap->he_cap_info_2G);
6173 	WMA_LOGI("\t: he_supp_mcs_2G[%08x]", cap->he_supp_mcs_2G);
6174 	WMA_LOGI("\t: he_cap_info_5G[%08x]", cap->he_cap_info_5G);
6175 	WMA_LOGI("\t: he_supp_mcs_5G[%08x]", cap->he_supp_mcs_5G);
6176 	mac_2G = cap->he_cap_info_2G;
6177 	mac_5G = cap->he_cap_info_5G;
6178 	qdf_mem_copy(phy_2G, cap->he_cap_phy_info_2G,
6179 		     WMI_MAX_HECAP_PHY_SIZE * 4);
6180 	qdf_mem_copy(phy_5G, cap->he_cap_phy_info_5G,
6181 		     WMI_MAX_HECAP_PHY_SIZE * 4);
6182 	ppet_2G = cap->he_ppet2G;
6183 	ppet_5G = cap->he_ppet5G;
6184 
6185 	wma_print_he_mac_cap(mac_2G);
6186 	wma_print_he_phy_cap(phy_2G);
6187 	wma_print_he_ppet(&ppet_2G);
6188 	wma_print_he_mac_cap(mac_5G);
6189 	wma_print_he_phy_cap(phy_5G);
6190 	wma_print_he_ppet(&ppet_5G);
6191 }
6192 
6193 /**
6194  * wma_print_populate_soc_caps() - Prints all the caps populated per hw mode
6195  * @tgt_info: target related info
6196  *
6197  * This function prints all the caps populater per hw mode and per PHY
6198  *
6199  * Return: none
6200  */
6201 static void wma_print_populate_soc_caps(struct target_psoc_info *tgt_hdl)
6202 {
6203 	int i, num_hw_modes, total_mac_phy_cnt;
6204 	struct wlan_psoc_host_mac_phy_caps *mac_phy_cap, *tmp;
6205 
6206 	num_hw_modes = target_psoc_get_num_hw_modes(tgt_hdl);
6207 	total_mac_phy_cnt = target_psoc_get_total_mac_phy_cnt(tgt_hdl);
6208 
6209 	/* print number of hw modes */
6210 	WMA_LOGD("%s: num of hw modes [%d]", __func__, num_hw_modes);
6211 	WMA_LOGD("%s: num mac_phy_cnt [%d]", __func__, total_mac_phy_cnt);
6212 	mac_phy_cap = target_psoc_get_mac_phy_cap(tgt_hdl);
6213 	WMA_LOGD("%s: <====== HW mode cap printing starts ======>", __func__);
6214 	/* print cap of each hw mode */
6215 	for (i = 0; i < total_mac_phy_cnt; i++) {
6216 		WMA_LOGD("====>: hw mode id[%d], phy_id map[%d]",
6217 				mac_phy_cap[i].hw_mode_id,
6218 				mac_phy_cap[i].phy_id);
6219 		tmp = &mac_phy_cap[i];
6220 		wma_print_mac_phy_capabilities(tmp, i);
6221 	}
6222 	WMA_LOGI("%s: <====== HW mode cap printing ends ======>\n", __func__);
6223 }
6224 
6225 /**
6226  * wma_map_wmi_channel_width_to_hw_mode_bw() - returns bandwidth
6227  * in terms of hw_mode_bandwidth
6228  * @width: bandwidth in terms of wmi_channel_width
6229  *
6230  * This function returns the bandwidth in terms of hw_mode_bandwidth.
6231  *
6232  * Return: BW in terms of hw_mode_bandwidth.
6233  */
6234 static enum hw_mode_bandwidth wma_map_wmi_channel_width_to_hw_mode_bw(
6235 			wmi_channel_width width)
6236 {
6237 	switch (width) {
6238 	case WMI_CHAN_WIDTH_20:
6239 		return HW_MODE_20_MHZ;
6240 	case WMI_CHAN_WIDTH_40:
6241 		return HW_MODE_40_MHZ;
6242 	case WMI_CHAN_WIDTH_80:
6243 		return HW_MODE_80_MHZ;
6244 	case WMI_CHAN_WIDTH_160:
6245 		return HW_MODE_160_MHZ;
6246 	case WMI_CHAN_WIDTH_80P80:
6247 		return HW_MODE_80_PLUS_80_MHZ;
6248 	case WMI_CHAN_WIDTH_5:
6249 		return HW_MODE_5_MHZ;
6250 	case WMI_CHAN_WIDTH_10:
6251 		return HW_MODE_10_MHZ;
6252 	default:
6253 		return HW_MODE_BW_NONE;
6254 	}
6255 
6256 	return HW_MODE_BW_NONE;
6257 }
6258 
6259 /**
6260  * wma_get_hw_mode_params() - get TX-RX stream and bandwidth
6261  * supported from the capabilities.
6262  * @caps: PHY capability
6263  * @info: param to store TX-RX stream and BW information
6264  *
6265  * This function will calculate TX-RX stream and bandwidth supported
6266  * as per the PHY capability, and assign to mac_ss_bw_info.
6267  *
6268  * Return: none
6269  */
6270 static void wma_get_hw_mode_params(struct wlan_psoc_host_mac_phy_caps *caps,
6271 			struct mac_ss_bw_info *info)
6272 {
6273 	if (!caps) {
6274 		WMA_LOGE("%s: Invalid capabilities", __func__);
6275 		return;
6276 	}
6277 
6278 	info->mac_tx_stream = wma_get_num_of_setbits_from_bitmask(
6279 				QDF_MAX(caps->tx_chain_mask_2G,
6280 					caps->tx_chain_mask_5G));
6281 	info->mac_rx_stream = wma_get_num_of_setbits_from_bitmask(
6282 				QDF_MAX(caps->rx_chain_mask_2G,
6283 					caps->rx_chain_mask_5G));
6284 	info->mac_bw = wma_map_wmi_channel_width_to_hw_mode_bw(
6285 				QDF_MAX(caps->max_bw_supported_2G,
6286 					caps->max_bw_supported_5G));
6287 }
6288 
6289 /**
6290  * wma_set_hw_mode_params() - sets TX-RX stream, bandwidth and
6291  * DBS in hw_mode_list
6292  * @wma_handle: pointer to wma global structure
6293  * @mac0_ss_bw_info: TX-RX streams, BW for MAC0
6294  * @mac1_ss_bw_info: TX-RX streams, BW for MAC1
6295  * @pos: refers to hw_mode_index
6296  * @dbs_mode: dbs_mode for the dbs_hw_mode
6297  * @sbs_mode: sbs_mode for the sbs_hw_mode
6298  *
6299  * This function sets TX-RX stream, bandwidth and DBS mode in
6300  * hw_mode_list.
6301  *
6302  * Return: none
6303  */
6304 static void wma_set_hw_mode_params(t_wma_handle *wma_handle,
6305 			struct mac_ss_bw_info mac0_ss_bw_info,
6306 			struct mac_ss_bw_info mac1_ss_bw_info,
6307 			uint32_t pos, uint32_t dbs_mode,
6308 			uint32_t sbs_mode)
6309 {
6310 	WMA_HW_MODE_MAC0_TX_STREAMS_SET(
6311 		wma_handle->hw_mode.hw_mode_list[pos],
6312 		mac0_ss_bw_info.mac_tx_stream);
6313 	WMA_HW_MODE_MAC0_RX_STREAMS_SET(
6314 		wma_handle->hw_mode.hw_mode_list[pos],
6315 		mac0_ss_bw_info.mac_rx_stream);
6316 	WMA_HW_MODE_MAC0_BANDWIDTH_SET(
6317 		wma_handle->hw_mode.hw_mode_list[pos],
6318 		mac0_ss_bw_info.mac_bw);
6319 	WMA_HW_MODE_MAC1_TX_STREAMS_SET(
6320 		wma_handle->hw_mode.hw_mode_list[pos],
6321 		mac1_ss_bw_info.mac_tx_stream);
6322 	WMA_HW_MODE_MAC1_RX_STREAMS_SET(
6323 		wma_handle->hw_mode.hw_mode_list[pos],
6324 		mac1_ss_bw_info.mac_rx_stream);
6325 	WMA_HW_MODE_MAC1_BANDWIDTH_SET(
6326 		wma_handle->hw_mode.hw_mode_list[pos],
6327 		mac1_ss_bw_info.mac_bw);
6328 	WMA_HW_MODE_DBS_MODE_SET(
6329 		wma_handle->hw_mode.hw_mode_list[pos],
6330 		dbs_mode);
6331 	WMA_HW_MODE_AGILE_DFS_SET(
6332 		wma_handle->hw_mode.hw_mode_list[pos],
6333 		HW_MODE_AGILE_DFS_NONE);
6334 	WMA_HW_MODE_SBS_MODE_SET(
6335 		wma_handle->hw_mode.hw_mode_list[pos],
6336 		sbs_mode);
6337 }
6338 
6339 /**
6340  * wma_update_hw_mode_list() - updates hw_mode_list
6341  * @wma_handle: pointer to wma global structure
6342  * @tgt_hdl - target psoc information
6343  *
6344  * This function updates hw_mode_list with tx_streams, rx_streams,
6345  * bandwidth, dbs and agile dfs for each hw_mode.
6346  *
6347  * Returns: 0 for success else failure.
6348  */
6349 static QDF_STATUS wma_update_hw_mode_list(t_wma_handle *wma_handle,
6350 					  struct target_psoc_info *tgt_hdl)
6351 {
6352 	struct wlan_psoc_host_mac_phy_caps *tmp, *mac_phy_cap;
6353 	uint32_t i, hw_config_type, j = 0;
6354 	uint32_t dbs_mode, sbs_mode;
6355 	struct mac_ss_bw_info mac0_ss_bw_info = {0};
6356 	struct mac_ss_bw_info mac1_ss_bw_info = {0};
6357 	WMI_PHY_CAPABILITY new_supported_band = 0;
6358 	bool supported_band_update_failure = false;
6359 	struct wlan_psoc_target_capability_info *tgt_cap_info;
6360 	int num_hw_modes;
6361 
6362 	if (!wma_handle) {
6363 		WMA_LOGE("%s: Invalid wma handle", __func__);
6364 		return QDF_STATUS_E_FAILURE;
6365 	}
6366 
6367 	num_hw_modes = target_psoc_get_num_hw_modes(tgt_hdl);
6368 	mac_phy_cap = target_psoc_get_mac_phy_cap(tgt_hdl);
6369 	tgt_cap_info = target_psoc_get_target_caps(tgt_hdl);
6370 	/*
6371 	 * This list was updated as part of service ready event. Re-populate
6372 	 * HW mode list from the device capabilities.
6373 	 */
6374 	if (wma_handle->hw_mode.hw_mode_list) {
6375 		qdf_mem_free(wma_handle->hw_mode.hw_mode_list);
6376 		wma_handle->hw_mode.hw_mode_list = NULL;
6377 		WMA_LOGD("%s: DBS list is freed", __func__);
6378 	}
6379 
6380 	wma_handle->hw_mode.hw_mode_list =
6381 		qdf_mem_malloc(sizeof(*wma_handle->hw_mode.hw_mode_list) *
6382 			       num_hw_modes);
6383 	if (!wma_handle->hw_mode.hw_mode_list) {
6384 		WMA_LOGE("%s: Memory allocation failed for DBS", __func__);
6385 		return QDF_STATUS_E_FAILURE;
6386 	}
6387 
6388 	WMA_LOGD("%s: Updated HW mode list: Num modes:%d",
6389 		 __func__, num_hw_modes);
6390 
6391 	for (i = 0; i < num_hw_modes; i++) {
6392 		/* Update for MAC0 */
6393 		tmp = &mac_phy_cap[j++];
6394 		wma_get_hw_mode_params(tmp, &mac0_ss_bw_info);
6395 		hw_config_type = mac_phy_cap[j].hw_mode_config_type;
6396 		dbs_mode = HW_MODE_DBS_NONE;
6397 		sbs_mode = HW_MODE_SBS_NONE;
6398 		mac1_ss_bw_info.mac_tx_stream = 0;
6399 		mac1_ss_bw_info.mac_rx_stream = 0;
6400 		mac1_ss_bw_info.mac_bw = 0;
6401 		if (wma_update_supported_bands(tmp->supported_bands,
6402 						&new_supported_band)
6403 		   != QDF_STATUS_SUCCESS)
6404 			supported_band_update_failure = true;
6405 
6406 		/* SBS and DBS have dual MAC. Upto 2 MACs are considered. */
6407 		if ((hw_config_type == WMI_HW_MODE_DBS) ||
6408 		    (hw_config_type == WMI_HW_MODE_SBS_PASSIVE) ||
6409 		    (hw_config_type == WMI_HW_MODE_SBS)) {
6410 			/* Update for MAC1 */
6411 			tmp = &mac_phy_cap[j++];
6412 			wma_get_hw_mode_params(tmp, &mac1_ss_bw_info);
6413 			if (hw_config_type == WMI_HW_MODE_DBS)
6414 				dbs_mode = HW_MODE_DBS;
6415 			if ((hw_config_type == WMI_HW_MODE_SBS_PASSIVE) ||
6416 			    (hw_config_type == WMI_HW_MODE_SBS))
6417 				sbs_mode = HW_MODE_SBS;
6418 			if (QDF_STATUS_SUCCESS !=
6419 			wma_update_supported_bands(tmp->supported_bands,
6420 						&new_supported_band))
6421 				supported_band_update_failure = true;
6422 		}
6423 
6424 		/* Updating HW mode list */
6425 		wma_set_hw_mode_params(wma_handle, mac0_ss_bw_info,
6426 				       mac1_ss_bw_info, i, dbs_mode,
6427 				       sbs_mode);
6428 	}
6429 
6430 	/* overwrite phy_capability which we got from service ready event */
6431 	if (!supported_band_update_failure) {
6432 		WMA_LOGD("%s: updating supported band from old[%d] to new[%d]",
6433 			 __func__, target_if_get_phy_capability(tgt_hdl),
6434 			 new_supported_band);
6435 		target_if_set_phy_capability(tgt_hdl, new_supported_band);
6436 	}
6437 
6438 	if (QDF_STATUS_SUCCESS !=
6439 			policy_mgr_update_hw_mode_list(wma_handle->psoc,
6440 						       tgt_hdl))
6441 		WMA_LOGE("%s: failed to update policy manager", __func__);
6442 	wma_dump_dbs_hw_mode(wma_handle);
6443 	return QDF_STATUS_SUCCESS;
6444 }
6445 
6446 static void wma_init_wifi_pos_dma_rings(t_wma_handle *wma_handle,
6447 					uint8_t num_mac, void *buf)
6448 {
6449 	struct hif_softc *hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
6450 	void *hal_soc;
6451 
6452 	if (!hif_ctx) {
6453 		WMA_LOGE("invalid hif context");
6454 		return;
6455 	}
6456 
6457 	hal_soc = hif_get_hal_handle(hif_ctx);
6458 
6459 	wifi_pos_init_cir_cfr_rings(wma_handle->psoc, hal_soc, num_mac, buf);
6460 }
6461 
6462 /**
6463  * wma_populate_soc_caps() - populate entire SOC's capabilities
6464  * @wma_handle: pointer to wma global structure
6465  * @tgt_hdl: target psoc information
6466  * @param_buf: pointer to param of service ready extension event from fw
6467  *
6468  * This API populates all capabilities of entire SOC. For example,
6469  * how many number of hw modes are supported by this SOC, what are the
6470  * capabilities of each phy per hw mode, what are HAL reg capabilities per
6471  * phy.
6472  *
6473  * Return: none
6474  */
6475 static void wma_populate_soc_caps(t_wma_handle *wma_handle,
6476 				  struct target_psoc_info *tgt_hdl,
6477 			WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *param_buf)
6478 {
6479 
6480 	WMA_LOGD("%s: Enter", __func__);
6481 
6482 	wma_init_wifi_pos_dma_rings(wma_handle,
6483 				    param_buf->num_oem_dma_ring_caps,
6484 				    param_buf->oem_dma_ring_caps);
6485 
6486 	wma_print_populate_soc_caps(tgt_hdl);
6487 }
6488 
6489 /**
6490  * wma_rx_service_ready_ext_event() - evt handler for sevice ready ext event.
6491  * @handle: wma handle
6492  * @event: params of the service ready extended event
6493  * @length: param length
6494  *
6495  * Return: none
6496  */
6497 int wma_rx_service_ready_ext_event(void *handle, uint8_t *event,
6498 					uint32_t length)
6499 {
6500 	tp_wma_handle wma_handle = (tp_wma_handle) handle;
6501 	WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *param_buf;
6502 	wmi_service_ready_ext_event_fixed_param *ev;
6503 	QDF_STATUS ret;
6504 	struct target_psoc_info *tgt_hdl;
6505 	uint32_t conc_scan_config_bits, fw_config_bits;
6506 
6507 	WMA_LOGD("%s: Enter", __func__);
6508 
6509 	if (!wma_handle) {
6510 		WMA_LOGE("%s: Invalid WMA handle", __func__);
6511 		return -EINVAL;
6512 	}
6513 
6514 	tgt_hdl = wlan_psoc_get_tgt_if_handle(wma_handle->psoc);
6515 	if (!tgt_hdl) {
6516 		WMA_LOGE("%s: target psoc info is NULL", __func__);
6517 		return -EINVAL;
6518 	}
6519 
6520 	param_buf = (WMI_SERVICE_READY_EXT_EVENTID_param_tlvs *) event;
6521 	if (!param_buf) {
6522 		WMA_LOGE("%s: Invalid event", __func__);
6523 		return -EINVAL;
6524 	}
6525 
6526 	ev = param_buf->fixed_param;
6527 	if (!ev) {
6528 		WMA_LOGE("%s: Invalid buffer", __func__);
6529 		return -EINVAL;
6530 	}
6531 
6532 	WMA_LOGD("WMA <-- WMI_SERVICE_READY_EXT_EVENTID");
6533 
6534 	fw_config_bits = target_if_get_fw_config_bits(tgt_hdl);
6535 	conc_scan_config_bits = target_if_get_conc_scan_config_bits(tgt_hdl);
6536 
6537 	WMA_LOGD("%s: Defaults: scan config:%x FW mode config:%x",
6538 		__func__, conc_scan_config_bits, fw_config_bits);
6539 
6540 	ret = qdf_mc_timer_stop(&wma_handle->service_ready_ext_timer);
6541 	if (!QDF_IS_STATUS_SUCCESS(ret)) {
6542 		WMA_LOGE("Failed to stop the service ready ext timer");
6543 		return -EINVAL;
6544 	}
6545 	wma_populate_soc_caps(wma_handle, tgt_hdl, param_buf);
6546 
6547 	ret = wma_update_hw_mode_list(wma_handle, tgt_hdl);
6548 	if (QDF_IS_STATUS_ERROR(ret)) {
6549 		WMA_LOGE("Failed to update hw mode list");
6550 		return -EINVAL;
6551 	}
6552 
6553 	WMA_LOGD("WMA --> WMI_INIT_CMDID");
6554 
6555 	wma_init_scan_fw_mode_config(wma_handle->psoc, conc_scan_config_bits,
6556 				     fw_config_bits);
6557 
6558 	target_psoc_set_num_radios(tgt_hdl, 1);
6559 	return 0;
6560 }
6561 
6562 /**
6563  * wma_rx_ready_event() - event handler to process
6564  *                        wmi rx ready event.
6565  * @handle: wma handle
6566  * @cmd_param_info: command params info
6567  * @length: param length
6568  *
6569  * Return: none
6570  */
6571 int wma_rx_ready_event(void *handle, uint8_t *cmd_param_info,
6572 					uint32_t length)
6573 {
6574 	tp_wma_handle wma_handle = (tp_wma_handle) handle;
6575 	WMI_READY_EVENTID_param_tlvs *param_buf = NULL;
6576 	wmi_ready_event_fixed_param *ev = NULL;
6577 
6578 	WMA_LOGD("%s: Enter", __func__);
6579 
6580 	param_buf = (WMI_READY_EVENTID_param_tlvs *) cmd_param_info;
6581 	if (!(wma_handle && param_buf)) {
6582 		WMA_LOGE("%s: Invalid arguments", __func__);
6583 		QDF_ASSERT(0);
6584 		return -EINVAL;
6585 	}
6586 
6587 	WMA_LOGD("WMA <-- WMI_READY_EVENTID");
6588 
6589 	ev = param_buf->fixed_param;
6590 	/* Indicate to the waiting thread that the ready
6591 	 * event was received
6592 	 */
6593 	wma_handle->sub_20_support =
6594 		wmi_service_enabled(wma_handle->wmi_handle,
6595 				wmi_service_half_rate_quarter_rate_support);
6596 	wma_handle->wmi_ready = true;
6597 	wma_handle->wlan_init_status = ev->status;
6598 
6599 	/* copy the mac addr */
6600 	WMI_MAC_ADDR_TO_CHAR_ARRAY(&ev->mac_addr, wma_handle->myaddr);
6601 	WMI_MAC_ADDR_TO_CHAR_ARRAY(&ev->mac_addr, wma_handle->hwaddr);
6602 	wma_update_hdd_cfg(wma_handle);
6603 	WMA_LOGD("Exit");
6604 
6605 	return 0;
6606 }
6607 
6608 /**
6609  * wma_setneedshutdown() - setting wma needshutdown flag
6610  *
6611  * Return: none
6612  */
6613 void wma_setneedshutdown(void)
6614 {
6615 	tp_wma_handle wma_handle;
6616 
6617 	WMA_LOGD("%s: Enter", __func__);
6618 
6619 	wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
6620 
6621 	if (NULL == wma_handle) {
6622 		WMA_LOGE("%s: Invalid arguments", __func__);
6623 		QDF_ASSERT(0);
6624 		return;
6625 	}
6626 
6627 	wma_handle->needShutdown = true;
6628 	WMA_LOGD("%s: Exit", __func__);
6629 }
6630 
6631 /**
6632  * wma_needshutdown() - Is wma needs shutdown?
6633  *
6634  * Return: returns true/false
6635  */
6636 bool wma_needshutdown(void)
6637 {
6638 	tp_wma_handle wma_handle;
6639 
6640 	WMA_LOGD("%s: Enter", __func__);
6641 
6642 	wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
6643 
6644 	if (NULL == wma_handle) {
6645 		WMA_LOGE("%s: Invalid arguments", __func__);
6646 		QDF_ASSERT(0);
6647 		return false;
6648 	}
6649 
6650 	WMA_LOGD("%s: Exit", __func__);
6651 	return wma_handle->needShutdown;
6652 }
6653 
6654 /**
6655  * wma_wait_for_ready_event() - wait for wma ready event
6656  * @handle: wma handle
6657  *
6658  * Return: 0 for success or QDF error
6659  */
6660 QDF_STATUS wma_wait_for_ready_event(WMA_HANDLE handle)
6661 {
6662 	tp_wma_handle wma_handle = (tp_wma_handle) handle;
6663 	QDF_STATUS qdf_status;
6664 	struct target_psoc_info *tgt_hdl;
6665 	int timeleft;
6666 
6667 	tgt_hdl = wlan_psoc_get_tgt_if_handle(wma_handle->psoc);
6668 	if (!tgt_hdl) {
6669 		WMA_LOGE("%s: target psoc info is NULL", __func__);
6670 		return QDF_STATUS_E_INVAL;
6671 	}
6672 
6673 	timeleft = qdf_wait_queue_timeout(
6674 			tgt_hdl->info.event_queue,
6675 			((tgt_hdl->info.wmi_service_ready) &&
6676 			(tgt_hdl->info.wmi_ready)),
6677 			WMA_READY_EVENTID_TIMEOUT);
6678 	if (!timeleft) {
6679 		WMA_LOGE("%s: Timeout waiting for ready event from FW",
6680 			 __func__);
6681 		qdf_status = QDF_STATUS_E_FAILURE;
6682 	} else {
6683 		WMA_LOGI("%s Ready event received from FW", __func__);
6684 		qdf_status = QDF_STATUS_SUCCESS;
6685 	}
6686 
6687 	return qdf_status;
6688 }
6689 
6690 /**
6691  * wma_set_ppsconfig() - set pps config in fw
6692  * @vdev_id: vdev id
6693  * @pps_param: pps params
6694  * @val : param value
6695  *
6696  * Return: 0 for success or QDF error
6697  */
6698 QDF_STATUS wma_set_ppsconfig(uint8_t vdev_id, uint16_t pps_param,
6699 				    int val)
6700 {
6701 	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
6702 	int ret = -EIO;
6703 	uint32_t pps_val;
6704 
6705 	if (NULL == wma) {
6706 		WMA_LOGE("%s: Failed to get wma", __func__);
6707 		return QDF_STATUS_E_INVAL;
6708 	}
6709 
6710 	switch (pps_param) {
6711 	case WMA_VHT_PPS_PAID_MATCH:
6712 		pps_val = ((val << 31) & 0xffff0000) |
6713 			  (PKT_PWR_SAVE_PAID_MATCH & 0xffff);
6714 		goto pkt_pwr_save_config;
6715 	case WMA_VHT_PPS_GID_MATCH:
6716 		pps_val = ((val << 31) & 0xffff0000) |
6717 			  (PKT_PWR_SAVE_GID_MATCH & 0xffff);
6718 		goto pkt_pwr_save_config;
6719 	case WMA_VHT_PPS_DELIM_CRC_FAIL:
6720 		pps_val = ((val << 31) & 0xffff0000) |
6721 			  (PKT_PWR_SAVE_DELIM_CRC_FAIL & 0xffff);
6722 		goto pkt_pwr_save_config;
6723 
6724 		/* Enable the code below as and when the functionality
6725 		 * is supported/added in host.
6726 		 */
6727 #ifdef NOT_YET
6728 	case WMA_VHT_PPS_EARLY_TIM_CLEAR:
6729 		pps_val = ((val << 31) & 0xffff0000) |
6730 			  (PKT_PWR_SAVE_EARLY_TIM_CLEAR & 0xffff);
6731 		goto pkt_pwr_save_config;
6732 	case WMA_VHT_PPS_EARLY_DTIM_CLEAR:
6733 		pps_val = ((val << 31) & 0xffff0000) |
6734 			  (PKT_PWR_SAVE_EARLY_DTIM_CLEAR & 0xffff);
6735 		goto pkt_pwr_save_config;
6736 	case WMA_VHT_PPS_EOF_PAD_DELIM:
6737 		pps_val = ((val << 31) & 0xffff0000) |
6738 			  (PKT_PWR_SAVE_EOF_PAD_DELIM & 0xffff);
6739 		goto pkt_pwr_save_config;
6740 	case WMA_VHT_PPS_MACADDR_MISMATCH:
6741 		pps_val = ((val << 31) & 0xffff0000) |
6742 			  (PKT_PWR_SAVE_MACADDR_MISMATCH & 0xffff);
6743 		goto pkt_pwr_save_config;
6744 	case WMA_VHT_PPS_GID_NSTS_ZERO:
6745 		pps_val = ((val << 31) & 0xffff0000) |
6746 			  (PKT_PWR_SAVE_GID_NSTS_ZERO & 0xffff);
6747 		goto pkt_pwr_save_config;
6748 	case WMA_VHT_PPS_RSSI_CHECK:
6749 		pps_val = ((val << 31) & 0xffff0000) |
6750 			  (PKT_PWR_SAVE_RSSI_CHECK & 0xffff);
6751 		goto pkt_pwr_save_config;
6752 #endif /* NOT_YET */
6753 pkt_pwr_save_config:
6754 		WMA_LOGD("vdev_id:%d val:0x%x pps_val:0x%x", vdev_id,
6755 			 val, pps_val);
6756 		ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
6757 					      WMI_VDEV_PARAM_PACKET_POWERSAVE,
6758 					      pps_val);
6759 		break;
6760 	default:
6761 		WMA_LOGE("%s:INVALID PPS CONFIG", __func__);
6762 	}
6763 
6764 	return (ret) ? QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
6765 }
6766 
6767 /**
6768  * wma_process_set_mas() - Function to enable/disable MAS
6769  * @wma:	Pointer to WMA handle
6770  * @mas_val:	1-Enable MAS, 0-Disable MAS
6771  *
6772  * This function enables/disables the MAS value
6773  *
6774  * Return: QDF_SUCCESS for success otherwise failure
6775  */
6776 static QDF_STATUS wma_process_set_mas(tp_wma_handle wma,
6777 				      uint32_t *mas_val)
6778 {
6779 	uint32_t val;
6780 
6781 	if (NULL == wma || NULL == mas_val) {
6782 		WMA_LOGE("%s: Invalid input to enable/disable MAS", __func__);
6783 		return QDF_STATUS_E_FAILURE;
6784 	}
6785 
6786 	val = (*mas_val);
6787 
6788 	if (QDF_STATUS_SUCCESS !=
6789 			wma_set_enable_disable_mcc_adaptive_scheduler(val)) {
6790 		WMA_LOGE("%s: Unable to enable/disable MAS", __func__);
6791 		return QDF_STATUS_E_FAILURE;
6792 	}
6793 	WMA_LOGE("%s: Value is %d", __func__, val);
6794 	return QDF_STATUS_SUCCESS;
6795 }
6796 
6797 /**
6798  * wma_process_set_miracast() - Function to set miracast value in WMA
6799  * @wma:		Pointer to WMA handle
6800  * @miracast_val:	0-Disabled,1-Source,2-Sink
6801  *
6802  * This function stores the miracast value in WMA
6803  *
6804  * Return: QDF_SUCCESS for success otherwise failure
6805  *
6806  */
6807 static QDF_STATUS wma_process_set_miracast(tp_wma_handle wma,
6808 					   uint32_t *miracast_val)
6809 {
6810 	if (NULL == wma || NULL == miracast_val) {
6811 		WMA_LOGE("%s: Invalid input to store miracast value", __func__);
6812 		return QDF_STATUS_E_FAILURE;
6813 	}
6814 
6815 	wma->miracast_value = *miracast_val;
6816 	WMA_LOGE("%s: Miracast value is %d", __func__, wma->miracast_value);
6817 
6818 	return QDF_STATUS_SUCCESS;
6819 }
6820 
6821 /**
6822  * wma_config_stats_factor() - Function to configure stats avg. factor
6823  * @wma:  pointer to WMA handle
6824  * @avg_factor:	stats. avg. factor passed down by userspace
6825  *
6826  * This function configures the avg. stats value in firmware
6827  *
6828  * Return: QDF_STATUS_SUCCESS for success otherwise failure
6829  *
6830  */
6831 static QDF_STATUS wma_config_stats_factor(tp_wma_handle wma,
6832 				      struct sir_stats_avg_factor *avg_factor)
6833 {
6834 	QDF_STATUS ret;
6835 
6836 	if (NULL == wma || NULL == avg_factor) {
6837 		WMA_LOGE("%s: Invalid input of stats avg factor", __func__);
6838 		return QDF_STATUS_E_FAILURE;
6839 	}
6840 
6841 	ret = wma_vdev_set_param(wma->wmi_handle,
6842 					    avg_factor->vdev_id,
6843 					    WMI_VDEV_PARAM_STATS_AVG_FACTOR,
6844 					    avg_factor->stats_avg_factor);
6845 	if (QDF_IS_STATUS_ERROR(ret)) {
6846 		WMA_LOGE(" failed to set avg_factor for vdev_id %d",
6847 			 avg_factor->vdev_id);
6848 	}
6849 
6850 	WMA_LOGD("%s: Set stats_avg_factor %d for vdev_id %d", __func__,
6851 		 avg_factor->stats_avg_factor, avg_factor->vdev_id);
6852 
6853 	return ret;
6854 }
6855 
6856 /**
6857  * wma_config_guard_time() - Function to set guard time in firmware
6858  * @wma:  pointer to WMA handle
6859  * @guard_time:  guard time passed down by userspace
6860  *
6861  * This function configures the guard time in firmware
6862  *
6863  * Return: QDF_STATUS_SUCCESS for success otherwise failure
6864  *
6865  */
6866 static QDF_STATUS wma_config_guard_time(tp_wma_handle wma,
6867 				   struct sir_guard_time_request *guard_time)
6868 {
6869 	QDF_STATUS ret;
6870 
6871 	if (NULL == wma || NULL == guard_time) {
6872 		WMA_LOGE("%s: Invalid input of guard time", __func__);
6873 		return QDF_STATUS_E_FAILURE;
6874 	}
6875 
6876 	ret = wma_vdev_set_param(wma->wmi_handle,
6877 					      guard_time->vdev_id,
6878 					      WMI_VDEV_PARAM_RX_LEAK_WINDOW,
6879 					      guard_time->guard_time);
6880 	if (QDF_IS_STATUS_ERROR(ret)) {
6881 		WMA_LOGE(" failed to set guard time for vdev_id %d",
6882 			 guard_time->vdev_id);
6883 	}
6884 
6885 	WMA_LOGD("Set guard time %d for vdev_id %d",
6886 		 guard_time->guard_time, guard_time->vdev_id);
6887 
6888 	return ret;
6889 }
6890 
6891 /**
6892  * wma_enable_specific_fw_logs() - Start/Stop logging of diag event/log id
6893  * @wma_handle: WMA handle
6894  * @start_log: Start logging related parameters
6895  *
6896  * Send the command to the FW based on which specific logging of diag
6897  * event/log id can be started/stopped
6898  *
6899  * Return: None
6900  */
6901 static void wma_enable_specific_fw_logs(tp_wma_handle wma_handle,
6902 					struct sir_wifi_start_log *start_log)
6903 {
6904 
6905 	if (!start_log) {
6906 		WMA_LOGE("%s: start_log pointer is NULL", __func__);
6907 		return;
6908 	}
6909 	if (!wma_handle) {
6910 		WMA_LOGE("%s: Invalid wma handle", __func__);
6911 		return;
6912 	}
6913 
6914 	if (!((start_log->ring_id == RING_ID_CONNECTIVITY) ||
6915 			(start_log->ring_id == RING_ID_FIRMWARE_DEBUG))) {
6916 		WMA_LOGD("%s: Not connectivity or fw debug ring: %d",
6917 				__func__, start_log->ring_id);
6918 		return;
6919 	}
6920 
6921 	wmi_unified_enable_specific_fw_logs_cmd(wma_handle->wmi_handle,
6922 				(struct wmi_wifi_start_log *)start_log);
6923 }
6924 
6925 #define MEGABYTE	(1024 * 1024)
6926 /**
6927  * wma_set_wifi_start_packet_stats() - Start/stop packet stats
6928  * @wma_handle: WMA handle
6929  * @start_log: Struture containing the start wifi logger params
6930  *
6931  * This function is used to send the WMA commands to start/stop logging
6932  * of per packet statistics
6933  *
6934  * Return: None
6935  *
6936  */
6937 #ifdef REMOVE_PKT_LOG
6938 static void wma_set_wifi_start_packet_stats(void *wma_handle,
6939 					struct sir_wifi_start_log *start_log)
6940 {
6941 }
6942 
6943 #else
6944 static void wma_set_wifi_start_packet_stats(void *wma_handle,
6945 					struct sir_wifi_start_log *start_log)
6946 {
6947 	struct hif_opaque_softc *scn;
6948 	uint32_t log_state;
6949 
6950 	if (!start_log) {
6951 		WMA_LOGE("%s: start_log pointer is NULL", __func__);
6952 		return;
6953 	}
6954 	if (!wma_handle) {
6955 		WMA_LOGE("%s: Invalid wma handle", __func__);
6956 		return;
6957 	}
6958 
6959 	/* No need to register for ring IDs other than packet stats */
6960 	if (start_log->ring_id != RING_ID_PER_PACKET_STATS) {
6961 		WMA_LOGD("%s: Ring id is not for per packet stats: %d",
6962 			__func__, start_log->ring_id);
6963 		return;
6964 	}
6965 
6966 	scn = cds_get_context(QDF_MODULE_ID_HIF);
6967 	if (scn == NULL) {
6968 		WMA_LOGE("%s: Invalid HIF handle", __func__);
6969 		return;
6970 	}
6971 
6972 #ifdef HELIUMPLUS
6973 	log_state = ATH_PKTLOG_ANI | ATH_PKTLOG_RCUPDATE | ATH_PKTLOG_RCFIND |
6974 		ATH_PKTLOG_RX | ATH_PKTLOG_TX |
6975 		ATH_PKTLOG_TEXT | ATH_PKTLOG_SW_EVENT;
6976 #else
6977 	log_state = ATH_PKTLOG_LITE_T2H | ATH_PKTLOG_LITE_RX;
6978 #endif
6979 	if (start_log->size != 0) {
6980 		pktlog_setsize(scn, start_log->size * MEGABYTE);
6981 		return;
6982 	} else if (start_log->is_pktlog_buff_clear == true) {
6983 		pktlog_clearbuff(scn, start_log->is_pktlog_buff_clear);
6984 		return;
6985 	}
6986 
6987 	if (start_log->verbose_level == WLAN_LOG_LEVEL_ACTIVE) {
6988 		pktlog_enable(scn, log_state, start_log->ini_triggered,
6989 			      start_log->user_triggered,
6990 			      start_log->is_iwpriv_command);
6991 		WMA_LOGD("%s: Enabling per packet stats", __func__);
6992 	} else {
6993 		pktlog_enable(scn, 0, start_log->ini_triggered,
6994 				start_log->user_triggered,
6995 				start_log->is_iwpriv_command);
6996 		WMA_LOGD("%s: Disabling per packet stats", __func__);
6997 	}
6998 }
6999 #endif
7000 
7001 /**
7002  * wma_send_flush_logs_to_fw() - Send log flush command to FW
7003  * @wma_handle: WMI handle
7004  *
7005  * This function is used to send the flush command to the FW,
7006  * that will flush the fw logs that are residue in the FW
7007  *
7008  * Return: None
7009  */
7010 void wma_send_flush_logs_to_fw(tp_wma_handle wma_handle)
7011 {
7012 	QDF_STATUS status;
7013 	int ret;
7014 
7015 	ret = wmi_unified_flush_logs_to_fw_cmd(wma_handle->wmi_handle);
7016 	if (ret != EOK)
7017 		return;
7018 
7019 	status = qdf_mc_timer_start(&wma_handle->log_completion_timer,
7020 			WMA_LOG_COMPLETION_TIMER);
7021 	if (status != QDF_STATUS_SUCCESS)
7022 		WMA_LOGE("Failed to start the log completion timer");
7023 }
7024 
7025 /**
7026  * wma_update_wep_default_key - To update default key id
7027  * @wma: pointer to wma handler
7028  * @update_def_key: pointer to wep_update_default_key_idx
7029  *
7030  * This function makes a copy of default key index to txrx node
7031  *
7032  * Return: Success
7033  */
7034 static QDF_STATUS wma_update_wep_default_key(tp_wma_handle wma,
7035 			struct wep_update_default_key_idx *update_def_key)
7036 {
7037 	struct wma_txrx_node *iface =
7038 		&wma->interfaces[update_def_key->session_id];
7039 	iface->wep_default_key_idx = update_def_key->default_idx;
7040 
7041 	return QDF_STATUS_SUCCESS;
7042 }
7043 
7044 /**
7045  * wma_update_tx_fail_cnt_th() - Set threshold for TX pkt fail
7046  * @wma_handle: WMA handle
7047  * @tx_fail_cnt_th: sme_tx_fail_cnt_threshold parameter
7048  *
7049  * This function is used to set Tx pkt fail count threshold,
7050  * FW will do disconnect with station once this threshold is reached.
7051  *
7052  * Return: VOS_STATUS_SUCCESS on success, error number otherwise
7053  */
7054 static QDF_STATUS wma_update_tx_fail_cnt_th(tp_wma_handle wma,
7055 			struct sme_tx_fail_cnt_threshold *tx_fail_cnt_th)
7056 {
7057 	u_int8_t vdev_id;
7058 	u_int32_t tx_fail_disconn_th;
7059 	int ret = -EIO;
7060 
7061 	if (!wma || !wma->wmi_handle) {
7062 		WMA_LOGE(FL("WMA is closed, can not issue Tx pkt fail count threshold"));
7063 		return QDF_STATUS_E_INVAL;
7064 	}
7065 	vdev_id = tx_fail_cnt_th->session_id;
7066 	tx_fail_disconn_th = tx_fail_cnt_th->tx_fail_cnt_threshold;
7067 	WMA_LOGD("Set TX pkt fail count threshold  vdevId %d count %d",
7068 			vdev_id, tx_fail_disconn_th);
7069 
7070 
7071 	ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
7072 			WMI_VDEV_PARAM_DISCONNECT_TH,
7073 			tx_fail_disconn_th);
7074 
7075 	if (ret) {
7076 		WMA_LOGE(FL("Failed to send TX pkt fail count threshold command"));
7077 		return QDF_STATUS_E_FAILURE;
7078 	}
7079 
7080 	return QDF_STATUS_SUCCESS;
7081 }
7082 
7083 /**
7084  * wma_update_short_retry_limit() - Set retry limit for short frames
7085  * @wma_handle: WMA handle
7086  * @short_retry_limit_th: retry limir count for Short frames.
7087  *
7088  * This function is used to configure the transmission retry limit at which
7089  * short frames needs to be retry.
7090  *
7091  * Return: VOS_STATUS_SUCCESS on success, error number otherwise
7092  */
7093 static QDF_STATUS wma_update_short_retry_limit(tp_wma_handle wma,
7094 		struct sme_short_retry_limit *short_retry_limit_th)
7095 {
7096 	uint8_t vdev_id;
7097 	uint32_t short_retry_limit;
7098 	int ret;
7099 
7100 	if (!wma || !wma->wmi_handle) {
7101 		WMA_LOGE("WMA is closed, can not issue short retry limit threshold");
7102 		return QDF_STATUS_E_INVAL;
7103 	}
7104 	vdev_id = short_retry_limit_th->session_id;
7105 	short_retry_limit = short_retry_limit_th->short_retry_limit;
7106 	WMA_LOGD("Set short retry limit threshold  vdevId %d count %d",
7107 		vdev_id, short_retry_limit);
7108 
7109 	ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
7110 		WMI_VDEV_PARAM_NON_AGG_SW_RETRY_TH,
7111 		short_retry_limit);
7112 
7113 	if (ret) {
7114 		WMA_LOGE("Failed to send short limit threshold command");
7115 		return QDF_STATUS_E_FAILURE;
7116 	}
7117 	return QDF_STATUS_SUCCESS;
7118 }
7119 
7120 /**
7121  * wma_update_long_retry_limit() - Set retry limit for long frames
7122  * @wma_handle: WMA handle
7123  * @long_retry_limit_th: retry limir count for long frames
7124  *
7125  * This function is used to configure the transmission retry limit at which
7126  * long frames needs to be retry
7127  *
7128  * Return: VOS_STATUS_SUCCESS on success, error number otherwise
7129  */
7130 static QDF_STATUS wma_update_long_retry_limit(tp_wma_handle wma,
7131 		struct sme_long_retry_limit  *long_retry_limit_th)
7132 {
7133 	uint8_t vdev_id;
7134 	uint32_t long_retry_limit;
7135 	int ret;
7136 
7137 	if (!wma || !wma->wmi_handle) {
7138 		WMA_LOGE("WMA is closed, can not issue long retry limit threshold");
7139 		return QDF_STATUS_E_INVAL;
7140 	}
7141 	vdev_id = long_retry_limit_th->session_id;
7142 	long_retry_limit = long_retry_limit_th->long_retry_limit;
7143 	WMA_LOGD("Set TX pkt fail count threshold  vdevId %d count %d",
7144 		vdev_id, long_retry_limit);
7145 
7146 	ret  = wma_vdev_set_param(wma->wmi_handle, vdev_id,
7147 			WMI_VDEV_PARAM_AGG_SW_RETRY_TH,
7148 			long_retry_limit);
7149 
7150 	if (ret) {
7151 		WMA_LOGE("Failed to send long limit threshold command");
7152 		return QDF_STATUS_E_FAILURE;
7153 	}
7154 
7155 	return QDF_STATUS_SUCCESS;
7156 }
7157 
7158 /*
7159  * wma_update_sta_inactivity_timeout() - Set sta_inactivity_timeout to fw
7160  * @wma_handle: WMA handle
7161  * @sta_inactivity_timer: sme_sta_inactivity_timeout
7162  *
7163  * This function is used to set sta_inactivity_timeout.
7164  * If a station does not send anything in sta_inactivity_timeout seconds, an
7165  * empty data frame is sent to it in order to verify whether it is
7166  * still in range. If this frame is not ACKed, the station will be
7167  * disassociated and then deauthenticated.
7168  *
7169  * Return: None
7170  */
7171 void wma_update_sta_inactivity_timeout(tp_wma_handle wma,
7172 		struct sme_sta_inactivity_timeout  *sta_inactivity_timer)
7173 {
7174 	uint8_t vdev_id;
7175 	uint32_t max_unresponsive_time;
7176 	uint32_t min_inactive_time, max_inactive_time;
7177 
7178 	if (!wma || !wma->wmi_handle) {
7179 		WMA_LOGE("WMA is closed, can not issue sta_inactivity_timeout");
7180 		return;
7181 	}
7182 	vdev_id = sta_inactivity_timer->session_id;
7183 	max_unresponsive_time = sta_inactivity_timer->sta_inactivity_timeout;
7184 	max_inactive_time = max_unresponsive_time * TWO_THIRD;
7185 	min_inactive_time = max_unresponsive_time - max_inactive_time;
7186 
7187 	if (wma_vdev_set_param(wma->wmi_handle, vdev_id,
7188 			WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
7189 			min_inactive_time))
7190 		WMA_LOGE("Failed to Set AP MIN IDLE INACTIVE TIME");
7191 
7192 	if (wma_vdev_set_param(wma->wmi_handle, vdev_id,
7193 			WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
7194 			max_inactive_time))
7195 		WMA_LOGE("Failed to Set AP MAX IDLE INACTIVE TIME");
7196 
7197 	if (wma_vdev_set_param(wma->wmi_handle, vdev_id,
7198 		WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
7199 		max_unresponsive_time))
7200 		WMA_LOGE("Failed to Set MAX UNRESPONSIVE TIME");
7201 
7202 	WMA_LOGD("%s:vdev_id:%d min_inactive_time: %u max_inactive_time: %u max_unresponsive_time: %u",
7203 			__func__, vdev_id,
7204 			min_inactive_time, max_inactive_time,
7205 			max_unresponsive_time);
7206 }
7207 
7208 #ifdef WLAN_FEATURE_WOW_PULSE
7209 
7210 
7211 #define WMI_WOW_HOSTWAKEUP_GPIO_CMD_FIXED_PARAM \
7212 WMI_WOW_HOSTWAKEUP_GPIO_PIN_PATTERN_CONFIG_CMD_fixed_param
7213 
7214 
7215 #define WMITLV_TAG_HOSTWAKEUP_GPIO_CMD_FIXED_PARAM \
7216 WMITLV_TAG_STRUC_wmi_wow_hostwakeup_gpio_pin_pattern_config_cmd_fixed_param
7217 
7218 /**
7219  * wma_send_wow_pulse_cmd() - send wmi cmd of wow pulse cmd
7220  * information to fw.
7221  * @wma_handle: wma handler
7222  * @udp_response: wow_pulse_mode pointer
7223  *
7224  * Return: Return QDF_STATUS
7225  */
7226 static QDF_STATUS wma_send_wow_pulse_cmd(tp_wma_handle wma_handle,
7227 					struct wow_pulse_mode *wow_pulse_cmd)
7228 {
7229 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7230 	wmi_buf_t buf;
7231 	WMI_WOW_HOSTWAKEUP_GPIO_CMD_FIXED_PARAM *cmd;
7232 	u_int16_t len;
7233 
7234 	len = sizeof(*cmd);
7235 	buf = wmi_buf_alloc(wma_handle->wmi_handle, len);
7236 	if (!buf) {
7237 		WMA_LOGE("wmi_buf_alloc failed");
7238 		return QDF_STATUS_E_NOMEM;
7239 	}
7240 
7241 	cmd = (WMI_WOW_HOSTWAKEUP_GPIO_CMD_FIXED_PARAM *)wmi_buf_data(buf);
7242 	qdf_mem_zero(cmd, len);
7243 
7244 	WMITLV_SET_HDR(&cmd->tlv_header,
7245 		WMITLV_TAG_HOSTWAKEUP_GPIO_CMD_FIXED_PARAM,
7246 		WMITLV_GET_STRUCT_TLVLEN(
7247 			WMI_WOW_HOSTWAKEUP_GPIO_CMD_FIXED_PARAM));
7248 
7249 	cmd->enable = wow_pulse_cmd->wow_pulse_enable;
7250 	cmd->pin = wow_pulse_cmd->wow_pulse_pin;
7251 	cmd->interval_low = wow_pulse_cmd->wow_pulse_interval_low;
7252 	cmd->interval_high = wow_pulse_cmd->wow_pulse_interval_high;
7253 	cmd->repeat_cnt = WMI_WOW_PULSE_REPEAT_CNT;
7254 
7255 	if (wmi_unified_cmd_send(wma_handle->wmi_handle, buf, len,
7256 		WMI_WOW_HOSTWAKEUP_GPIO_PIN_PATTERN_CONFIG_CMDID)) {
7257 		WMA_LOGE("Failed to send send wow pulse");
7258 		wmi_buf_free(buf);
7259 		status = QDF_STATUS_E_FAILURE;
7260 	}
7261 
7262 	WMA_LOGD("%s: Exit", __func__);
7263 	return status;
7264 }
7265 
7266 #undef WMI_WOW_HOSTWAKEUP_GPIO_CMD_FIXED_PARAM
7267 #undef WMITLV_TAG_HOSTWAKEUP_GPIO_CMD_FIXED_PARAM
7268 #undef WMI_WOW_PULSE_REPEAT_CNT
7269 
7270 #else
7271 static inline QDF_STATUS wma_send_wow_pulse_cmd(tp_wma_handle wma_handle,
7272 					struct wow_pulse_mode *wow_pulse_cmd)
7273 {
7274 	return QDF_STATUS_E_FAILURE;
7275 }
7276 #endif
7277 
7278 
7279 /**
7280  * wma_process_power_debug_stats_req() - Process the Chip Power stats collect
7281  * request and pass the Power stats request to Fw
7282  * @wma_handle: WMA handle
7283  *
7284  * Return: QDF_STATUS
7285  */
7286 #ifdef WLAN_POWER_DEBUGFS
7287 static QDF_STATUS wma_process_power_debug_stats_req(tp_wma_handle wma_handle)
7288 {
7289 	wmi_pdev_get_chip_power_stats_cmd_fixed_param *cmd;
7290 	int32_t len;
7291 	wmi_buf_t buf;
7292 	uint8_t *buf_ptr;
7293 	int ret;
7294 
7295 	if (!wma_handle) {
7296 		WMA_LOGE("%s: input pointer is NULL", __func__);
7297 		return QDF_STATUS_E_FAILURE;
7298 	}
7299 
7300 	len = sizeof(*cmd);
7301 	buf = wmi_buf_alloc(wma_handle->wmi_handle, len);
7302 	if (!buf) {
7303 		WMA_LOGE("%s: Failed allocate wmi buffer", __func__);
7304 		return QDF_STATUS_E_NOMEM;
7305 	}
7306 
7307 	buf_ptr = (u_int8_t *) wmi_buf_data(buf);
7308 	cmd = (wmi_pdev_get_chip_power_stats_cmd_fixed_param *) buf_ptr;
7309 
7310 	WMITLV_SET_HDR(&cmd->tlv_header,
7311 		WMITLV_TAG_STRUC_wmi_get_chip_power_stats_cmd_fixed_param,
7312 		WMITLV_GET_STRUCT_TLVLEN(
7313 			wmi_pdev_get_chip_power_stats_cmd_fixed_param));
7314 	cmd->pdev_id = 0;
7315 
7316 	WMA_LOGD("POWER_DEBUG_STATS - Get Request Params; Pdev id - %d",
7317 			cmd->pdev_id);
7318 	ret = wmi_unified_cmd_send(wma_handle->wmi_handle, buf, len,
7319 			WMI_PDEV_GET_CHIP_POWER_STATS_CMDID);
7320 	if (ret) {
7321 		WMA_LOGE("%s: Failed to send power debug stats request",
7322 				__func__);
7323 		wmi_buf_free(buf);
7324 		return QDF_STATUS_E_FAILURE;
7325 	}
7326 	return QDF_STATUS_SUCCESS;
7327 }
7328 #else
7329 static QDF_STATUS wma_process_power_debug_stats_req(tp_wma_handle wma_handle)
7330 {
7331 	return QDF_STATUS_SUCCESS;
7332 }
7333 #endif
7334 
7335 /**
7336  * wma_set_arp_req_stats() - process set arp stats request command to fw
7337  * @wma_handle: WMA handle
7338  * @req_buf: set srp stats request buffer
7339  *
7340  * Return: None
7341  */
7342 static void wma_set_arp_req_stats(WMA_HANDLE handle,
7343 				  struct set_arp_stats_params *req_buf)
7344 {
7345 	int status;
7346 	struct set_arp_stats *arp_stats;
7347 	tp_wma_handle wma_handle = (tp_wma_handle) handle;
7348 
7349 	if (!wma_handle || !wma_handle->wmi_handle) {
7350 		WMA_LOGE("%s: WMA is closed, cannot send per roam config",
7351 			 __func__);
7352 		return;
7353 	}
7354 	if (!wma_is_vdev_valid(req_buf->vdev_id)) {
7355 		WMA_LOGE("vdev id not active or not valid");
7356 		return;
7357 	}
7358 
7359 	arp_stats = (struct set_arp_stats *)req_buf;
7360 	status = wmi_unified_set_arp_stats_req(wma_handle->wmi_handle,
7361 					       arp_stats);
7362 	if (status != EOK)
7363 		WMA_LOGE("%s: failed to set arp stats to FW",
7364 			 __func__);
7365 }
7366 
7367 /**
7368  * wma_get_arp_req_stats() - process get arp stats request command to fw
7369  * @wma_handle: WMA handle
7370  * @req_buf: get srp stats request buffer
7371  *
7372  * Return: None
7373  */
7374 static void wma_get_arp_req_stats(WMA_HANDLE handle,
7375 				  struct get_arp_stats_params *req_buf)
7376 {
7377 	int status;
7378 	struct get_arp_stats *arp_stats;
7379 	tp_wma_handle wma_handle = (tp_wma_handle) handle;
7380 
7381 	if (!wma_handle || !wma_handle->wmi_handle) {
7382 		WMA_LOGE("%s: WMA is closed, cannot send per roam config",
7383 			 __func__);
7384 		return;
7385 	}
7386 	if (!wma_is_vdev_valid(req_buf->vdev_id)) {
7387 		WMA_LOGE("vdev id not active or not valid");
7388 		return;
7389 	}
7390 
7391 	arp_stats = (struct get_arp_stats *)req_buf;
7392 	status = wmi_unified_get_arp_stats_req(wma_handle->wmi_handle,
7393 					       arp_stats);
7394 	if (status != EOK)
7395 		WMA_LOGE("%s: failed to send get arp stats to FW",
7396 			 __func__);
7397 }
7398 
7399 /**
7400  * wma_set_del_pmkid_cache() - API to set/delete PMKID cache entry in fw
7401  * @handle: WMA handle
7402  * @pmk_cache: PMK cache entry
7403  *
7404  * Return: None
7405  */
7406 static void wma_set_del_pmkid_cache(WMA_HANDLE handle,
7407 				    struct wmi_unified_pmk_cache *pmk_cache)
7408 {
7409 	int status;
7410 	tp_wma_handle wma_handle = (tp_wma_handle) handle;
7411 
7412 	if (!wma_handle || !wma_handle->wmi_handle) {
7413 		WMA_LOGE("WMA is closed, cannot send set del pmkid");
7414 		return;
7415 	}
7416 
7417 	status = wmi_unified_set_del_pmkid_cache(wma_handle->wmi_handle,
7418 						 pmk_cache);
7419 	if (status != EOK)
7420 		WMA_LOGE("failed to send set/del pmkid cmd to fw");
7421 }
7422 
7423 /**
7424  * wma_send_invoke_neighbor_report() - API to send invoke neighbor report
7425  * command to fw
7426  *
7427  * @handle: WMA handle
7428  * @params: Pointer to invoke neighbor report params
7429  *
7430  * Return: None
7431  */
7432 static
7433 void wma_send_invoke_neighbor_report(WMA_HANDLE handle,
7434 			struct wmi_invoke_neighbor_report_params *params)
7435 {
7436 	QDF_STATUS status;
7437 	tp_wma_handle wma_handle = (tp_wma_handle) handle;
7438 
7439 	if (!wma_handle || !wma_handle->wmi_handle) {
7440 		WMA_LOGE("WMA is closed, cannot send invoke neighbor report");
7441 		return;
7442 	}
7443 
7444 	status = wmi_unified_invoke_neighbor_report_cmd(wma_handle->wmi_handle,
7445 							params);
7446 
7447 	if (status != QDF_STATUS_SUCCESS)
7448 		WMA_LOGE("failed to send invoke neighbor report command");
7449 }
7450 
7451 QDF_STATUS wma_set_rx_reorder_timeout_val(tp_wma_handle wma_handle,
7452 	struct sir_set_rx_reorder_timeout_val *reorder_timeout)
7453 {
7454 	wmi_pdev_set_reorder_timeout_val_cmd_fixed_param *cmd;
7455 	uint32_t len;
7456 	wmi_buf_t buf;
7457 	int ret;
7458 
7459 	if (!reorder_timeout) {
7460 		WMA_LOGE(FL("invalid pointer"));
7461 		return QDF_STATUS_E_INVAL;
7462 	}
7463 
7464 	if (!wma_handle) {
7465 		WMA_LOGE(FL("WMA context is invald!"));
7466 		return QDF_STATUS_E_INVAL;
7467 	}
7468 	len = sizeof(*cmd);
7469 	buf = wmi_buf_alloc(wma_handle->wmi_handle, len);
7470 
7471 	if (!buf) {
7472 		WMA_LOGE(FL("Failed allocate wmi buffer"));
7473 		return QDF_STATUS_E_NOMEM;
7474 	}
7475 	cmd = (wmi_pdev_set_reorder_timeout_val_cmd_fixed_param *)
7476 		wmi_buf_data(buf);
7477 
7478 	WMITLV_SET_HDR(&cmd->tlv_header,
7479 	WMITLV_TAG_STRUC_wmi_pdev_set_reorder_timeout_val_cmd_fixed_param,
7480 	WMITLV_GET_STRUCT_TLVLEN(wmi_pdev_set_reorder_timeout_val_cmd_fixed_param));
7481 
7482 	memcpy(cmd->rx_timeout_pri, reorder_timeout->rx_timeout_pri,
7483 		sizeof(reorder_timeout->rx_timeout_pri));
7484 
7485 	WMA_LOGD("rx aggr record timeout: VO: %d, VI: %d, BE: %d, BK: %d",
7486 		cmd->rx_timeout_pri[0], cmd->rx_timeout_pri[1],
7487 		cmd->rx_timeout_pri[2], cmd->rx_timeout_pri[3]);
7488 
7489 	ret = wmi_unified_cmd_send(wma_handle->wmi_handle, buf, len,
7490 			WMI_PDEV_SET_REORDER_TIMEOUT_VAL_CMDID);
7491 	if (ret) {
7492 		WMA_LOGE(FL("Failed to send aggregation timeout"));
7493 		wmi_buf_free(buf);
7494 		return QDF_STATUS_E_FAILURE;
7495 	}
7496 
7497 	return QDF_STATUS_SUCCESS;
7498 }
7499 
7500 QDF_STATUS wma_set_rx_blocksize(tp_wma_handle wma_handle,
7501 	struct sir_peer_set_rx_blocksize *peer_rx_blocksize)
7502 {
7503 	wmi_peer_set_rx_blocksize_cmd_fixed_param *cmd;
7504 	int32_t len;
7505 	wmi_buf_t buf;
7506 	u_int8_t *buf_ptr;
7507 	int ret;
7508 
7509 	if (!peer_rx_blocksize) {
7510 		WMA_LOGE(FL("invalid pointer"));
7511 		return QDF_STATUS_E_INVAL;
7512 	}
7513 
7514 	if (!wma_handle) {
7515 		WMA_LOGE(FL(" WMA context is invald!"));
7516 		return QDF_STATUS_E_INVAL;
7517 	}
7518 
7519 	len = sizeof(*cmd);
7520 	buf = wmi_buf_alloc(wma_handle->wmi_handle, len);
7521 
7522 	if (!buf) {
7523 		WMA_LOGE(FL("Failed allocate wmi buffer"));
7524 		return QDF_STATUS_E_NOMEM;
7525 	}
7526 
7527 	buf_ptr = (u_int8_t *) wmi_buf_data(buf);
7528 	cmd = (wmi_peer_set_rx_blocksize_cmd_fixed_param *) buf_ptr;
7529 
7530 	WMITLV_SET_HDR(&cmd->tlv_header,
7531 	WMITLV_TAG_STRUC_wmi_peer_set_rx_blocksize_cmd_fixed_param,
7532 	WMITLV_GET_STRUCT_TLVLEN(wmi_peer_set_rx_blocksize_cmd_fixed_param));
7533 
7534 	cmd->vdev_id = peer_rx_blocksize->vdev_id;
7535 	cmd->rx_block_ack_win_limit =
7536 		peer_rx_blocksize->rx_block_ack_win_limit;
7537 	WMI_CHAR_ARRAY_TO_MAC_ADDR(peer_rx_blocksize->peer_macaddr.bytes,
7538 		&cmd->peer_macaddr);
7539 
7540 	WMA_LOGD("rx aggr blocksize: %d", cmd->rx_block_ack_win_limit);
7541 
7542 	ret = wmi_unified_cmd_send(wma_handle->wmi_handle, buf, len,
7543 			WMI_PEER_SET_RX_BLOCKSIZE_CMDID);
7544 	if (ret) {
7545 		WMA_LOGE(FL("Failed to send aggregation size command"));
7546 		wmi_buf_free(buf);
7547 		return QDF_STATUS_E_FAILURE;
7548 	}
7549 
7550 	return QDF_STATUS_SUCCESS;
7551 }
7552 
7553 QDF_STATUS wma_get_chain_rssi(tp_wma_handle wma_handle,
7554 		struct get_chain_rssi_req_params *req_params)
7555 {
7556 	wmi_pdev_div_get_rssi_antid_fixed_param *cmd;
7557 	wmi_buf_t wmi_buf;
7558 	uint32_t len = sizeof(wmi_pdev_div_get_rssi_antid_fixed_param);
7559 	u_int8_t *buf_ptr;
7560 
7561 	if (!wma_handle) {
7562 		WMA_LOGE(FL("WMA is closed, can not issue cmd"));
7563 		return QDF_STATUS_E_INVAL;
7564 	}
7565 
7566 	wmi_buf = wmi_buf_alloc(wma_handle->wmi_handle, len);
7567 	if (!wmi_buf) {
7568 		WMA_LOGE(FL("wmi_buf_alloc failed"));
7569 		return QDF_STATUS_E_NOMEM;
7570 	}
7571 
7572 	buf_ptr = (u_int8_t *)wmi_buf_data(wmi_buf);
7573 
7574 	cmd = (wmi_pdev_div_get_rssi_antid_fixed_param *)buf_ptr;
7575 	WMITLV_SET_HDR(&cmd->tlv_header,
7576 		WMITLV_TAG_STRUC_wmi_pdev_div_get_rssi_antid_fixed_param,
7577 		WMITLV_GET_STRUCT_TLVLEN(
7578 		wmi_pdev_div_get_rssi_antid_fixed_param));
7579 	cmd->pdev_id = 0;
7580 	WMI_CHAR_ARRAY_TO_MAC_ADDR(req_params->peer_macaddr.bytes,
7581 				&cmd->macaddr);
7582 
7583 	if (wmi_unified_cmd_send(wma_handle->wmi_handle, wmi_buf, len,
7584 				 WMI_PDEV_DIV_GET_RSSI_ANTID_CMDID)) {
7585 		WMA_LOGE(FL("failed to send get chain rssi command"));
7586 		wmi_buf_free(wmi_buf);
7587 		return QDF_STATUS_E_FAILURE;
7588 	}
7589 
7590 	return QDF_STATUS_SUCCESS;
7591 }
7592 
7593 #if defined(WLAN_FEATURE_FILS_SK)
7594 /**
7595  * wma_roam_scan_send_hlp() - API to send HLP IE info to fw
7596  * @wma_handle: WMA handle
7597  * @req: HLP params
7598  *
7599  * Return: QDF_STATUS
7600  */
7601 static QDF_STATUS wma_roam_scan_send_hlp(tp_wma_handle wma_handle,
7602 					 struct hlp_params *req)
7603 {
7604 	struct hlp_params *params;
7605 	QDF_STATUS status;
7606 
7607 	params = qdf_mem_malloc(sizeof(*params));
7608 	if (!params) {
7609 		WMA_LOGE("%s : Memory allocation failed", __func__);
7610 		return QDF_STATUS_E_NOMEM;
7611 	}
7612 
7613 	params->vdev_id = req->vdev_id;
7614 	params->hlp_ie_len = req->hlp_ie_len;
7615 	qdf_mem_copy(params->hlp_ie, req->hlp_ie, req->hlp_ie_len);
7616 	status = wmi_unified_roam_send_hlp_cmd(wma_handle->wmi_handle, params);
7617 
7618 	WMA_LOGD("Send HLP status %d vdev id %d", status, params->vdev_id);
7619 	qdf_trace_hex_dump(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG,
7620 				params->hlp_ie, 10);
7621 
7622 	qdf_mem_free(params);
7623 	return status;
7624 }
7625 #else
7626 static QDF_STATUS wma_roam_scan_send_hlp(tp_wma_handle wma_handle,
7627 					 struct hlp_params *req)
7628 {
7629 	return QDF_STATUS_SUCCESS;
7630 }
7631 #endif
7632 
7633 /**
7634  * wma_process_set_limit_off_chan() - set limit off chanel parameters
7635  * @wma_handle: pointer to wma handle
7636  * @param: pointer to sir_limit_off_chan
7637  *
7638  * Return: QDF_STATUS_SUCCESS for success or error code.
7639  */
7640 static QDF_STATUS wma_process_limit_off_chan(tp_wma_handle wma_handle,
7641 	struct sir_limit_off_chan *param)
7642 {
7643 	int32_t err;
7644 	struct wmi_limit_off_chan_param limit_off_chan_param;
7645 
7646 	if (param->vdev_id >= wma_handle->max_bssid) {
7647 		WMA_LOGE(FL("Invalid vdev_id: %d"), param->vdev_id);
7648 		return QDF_STATUS_E_INVAL;
7649 	}
7650 	if (!wma_is_vdev_up(param->vdev_id)) {
7651 		WMA_LOGE("vdev %d is not up skipping limit_off_chan_param",
7652 			param->vdev_id);
7653 		return QDF_STATUS_E_INVAL;
7654 	}
7655 
7656 	limit_off_chan_param.vdev_id = param->vdev_id;
7657 	limit_off_chan_param.status = param->is_tos_active;
7658 	limit_off_chan_param.max_offchan_time = param->max_off_chan_time;
7659 	limit_off_chan_param.rest_time = param->rest_time;
7660 	limit_off_chan_param.skip_dfs_chans = param->skip_dfs_chans;
7661 
7662 	err = wmi_unified_send_limit_off_chan_cmd(wma_handle->wmi_handle,
7663 			&limit_off_chan_param);
7664 	if (err) {
7665 		WMA_LOGE("\n failed to set limit off chan cmd");
7666 		return QDF_STATUS_E_FAILURE;
7667 	}
7668 
7669 	return QDF_STATUS_SUCCESS;
7670 }
7671 
7672 static QDF_STATUS wma_process_obss_color_collision_req(tp_wma_handle wma_handle,
7673 		struct wmi_obss_color_collision_cfg_param *cfg)
7674 {
7675 	QDF_STATUS status;
7676 
7677 	if (cfg->vdev_id >= wma_handle->max_bssid) {
7678 		WMA_LOGE(FL("Invalid vdev_id: %d"), cfg->vdev_id);
7679 		return QDF_STATUS_E_INVAL;
7680 	}
7681 	if (!wma_is_vdev_up(cfg->vdev_id)) {
7682 		WMA_LOGE("vdev %d is not up skipping obss color collision req",
7683 			 cfg->vdev_id);
7684 		return QDF_STATUS_E_INVAL;
7685 	}
7686 
7687 	status = wmi_unified_send_obss_color_collision_cfg_cmd(wma_handle->
7688 							       wmi_handle, cfg);
7689 	if (QDF_IS_STATUS_ERROR(status))
7690 		WMA_LOGE("Failed to send obss color collision cfg");
7691 
7692 	return status;
7693 }
7694 
7695 /**
7696  * wma_send_obss_detection_cfg() - send obss detection cfg to firmware
7697  * @wma_handle: pointer to wma handle
7698  * @cfg: obss detection configuration
7699  *
7700  * Send obss detection configuration to firmware.
7701  *
7702  * Return: None
7703  */
7704 static void wma_send_obss_detection_cfg(tp_wma_handle wma_handle,
7705 					struct wmi_obss_detection_cfg_param
7706 					*cfg)
7707 {
7708 	QDF_STATUS status;
7709 
7710 	if (cfg->vdev_id >= wma_handle->max_bssid) {
7711 		WMA_LOGE(FL("Invalid vdev_id: %d"), cfg->vdev_id);
7712 		return;
7713 	}
7714 	if (!wma_is_vdev_up(cfg->vdev_id)) {
7715 		WMA_LOGE("vdev %d is not up skipping obss detection req",
7716 			 cfg->vdev_id);
7717 		return;
7718 	}
7719 
7720 	status = wmi_unified_send_obss_detection_cfg_cmd(wma_handle->wmi_handle,
7721 							 cfg);
7722 	if (QDF_IS_STATUS_ERROR(status))
7723 		WMA_LOGE("Failed to send obss detection cfg");
7724 
7725 	return;
7726 }
7727 
7728 /**
7729  * wma_mc_process_msg() - process wma messages and call appropriate function.
7730  * @msg: message
7731  *
 * Return: QDF_STATUS_SUCCESS on success, otherwise a failure status
7733  */
7734 static QDF_STATUS wma_mc_process_msg(struct scheduler_msg *msg)
7735 {
7736 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
7737 	tp_wma_handle wma_handle;
7738 	struct cdp_vdev *txrx_vdev_handle = NULL;
7739 
7740 	extern uint8_t *mac_trace_get_wma_msg_string(uint16_t wmaMsg);
7741 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
7742 
7743 	if (NULL == msg) {
7744 		WMA_LOGE("msg is NULL");
7745 		QDF_ASSERT(0);
7746 		qdf_status = QDF_STATUS_E_INVAL;
7747 		goto end;
7748 	}
7749 
7750 	WMA_LOGD("msg->type = %x %s", msg->type,
7751 		 mac_trace_get_wma_msg_string(msg->type));
7752 
7753 	wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
7754 
7755 	if (NULL == wma_handle) {
7756 		WMA_LOGE("%s: wma_handle is NULL", __func__);
7757 		QDF_ASSERT(0);
7758 		qdf_mem_free(msg->bodyptr);
7759 		qdf_status = QDF_STATUS_E_INVAL;
7760 		goto end;
7761 	}
7762 
7763 	switch (msg->type) {
7764 
7765 	/* Message posted by wmi for all control path related
7766 	 * FW events to serialize through mc_thread.
7767 	 */
7768 	case WMA_PROCESS_FW_EVENT:
7769 		wma_process_fw_event(wma_handle,
7770 				(wma_process_fw_event_params *) msg->bodyptr);
7771 		qdf_mem_free(msg->bodyptr);
7772 		break;
7773 
7774 #ifdef FEATURE_WLAN_ESE
7775 	case WMA_TSM_STATS_REQ:
7776 		WMA_LOGD("McThread: WMA_TSM_STATS_REQ");
7777 		wma_process_tsm_stats_req(wma_handle, (void *)msg->bodyptr);
7778 		break;
7779 #endif /* FEATURE_WLAN_ESE */
7780 	case WNI_CFG_DNLD_REQ:
7781 		WMA_LOGD("McThread: WNI_CFG_DNLD_REQ");
7782 		qdf_status = wma_wni_cfg_dnld(wma_handle);
7783 		if (QDF_IS_STATUS_SUCCESS(qdf_status))
7784 			cds_wma_complete_cback();
7785 		else
7786 			WMA_LOGD("config download failure");
7787 		break;
7788 	case WMA_ADD_STA_SELF_REQ:
7789 		txrx_vdev_handle =
7790 			wma_vdev_attach(wma_handle,
7791 				(struct add_sta_self_params *) msg->
7792 				bodyptr, 1);
7793 		if (!txrx_vdev_handle) {
7794 			WMA_LOGE("Failed to attach vdev");
7795 		} else {
7796 			/* Register with TxRx Module for Data Ack Complete Cb */
7797 			if (soc) {
7798 				cdp_data_tx_cb_set(soc, txrx_vdev_handle,
7799 						wma_data_tx_ack_comp_hdlr,
7800 						wma_handle);
7801 			} else {
7802 				WMA_LOGE("%s: SOC context is NULL", __func__);
7803 				qdf_status = QDF_STATUS_E_FAILURE;
7804 				goto end;
7805 			}
7806 		}
7807 		break;
7808 	case WMA_DEL_STA_SELF_REQ:
7809 		wma_vdev_detach(wma_handle,
7810 				(struct del_sta_self_params *) msg->bodyptr, 1);
7811 		break;
7812 	case WMA_UPDATE_CHAN_LIST_REQ:
7813 		wma_update_channel_list(wma_handle,
7814 					(tSirUpdateChanList *) msg->bodyptr);
7815 		qdf_mem_free(msg->bodyptr);
7816 		break;
7817 	case WMA_SET_LINK_STATE:
7818 		wma_set_linkstate(wma_handle, (tpLinkStateParams) msg->bodyptr);
7819 		break;
7820 	case WMA_CHNL_SWITCH_REQ:
7821 		wma_set_channel(wma_handle,
7822 				(tpSwitchChannelParams) msg->bodyptr);
7823 		break;
7824 	case WMA_ADD_BSS_REQ:
7825 		wma_add_bss(wma_handle, (tpAddBssParams) msg->bodyptr);
7826 		break;
7827 	case WMA_ADD_STA_REQ:
7828 		wma_add_sta(wma_handle, (tpAddStaParams) msg->bodyptr);
7829 		break;
7830 	case WMA_SET_BSSKEY_REQ:
7831 		wma_set_bsskey(wma_handle, (tpSetBssKeyParams) msg->bodyptr);
7832 		break;
7833 	case WMA_SET_STAKEY_REQ:
7834 		wma_set_stakey(wma_handle, (tpSetStaKeyParams) msg->bodyptr);
7835 		break;
7836 	case WMA_DELETE_STA_REQ:
7837 		wma_delete_sta(wma_handle, (tpDeleteStaParams) msg->bodyptr);
7838 		break;
7839 	case WMA_DELETE_BSS_HO_FAIL_REQ:
7840 		wma_delete_bss_ho_fail(wma_handle,
7841 			(tpDeleteBssParams) msg->bodyptr);
7842 		break;
7843 	case WMA_DELETE_BSS_REQ:
7844 		wma_delete_bss(wma_handle, (tpDeleteBssParams) msg->bodyptr);
7845 		break;
7846 	case WMA_UPDATE_EDCA_PROFILE_IND:
7847 		wma_process_update_edca_param_req(wma_handle,
7848 						  (tEdcaParams *) msg->bodyptr);
7849 		qdf_mem_free(msg->bodyptr);
7850 		break;
7851 	case WMA_SEND_BEACON_REQ:
7852 		wma_send_beacon(wma_handle, (tpSendbeaconParams) msg->bodyptr);
7853 		qdf_mem_free(msg->bodyptr);
7854 		break;
7855 	case WMA_SEND_PROBE_RSP_TMPL:
7856 		wma_send_probe_rsp_tmpl(wma_handle,
7857 					(tpSendProbeRespParams) msg->bodyptr);
7858 		qdf_mem_free(msg->bodyptr);
7859 		break;
7860 	case WMA_CLI_SET_CMD:
7861 		wma_process_cli_set_cmd(wma_handle,
7862 					(wma_cli_set_cmd_t *) msg->bodyptr);
7863 		qdf_mem_free(msg->bodyptr);
7864 		break;
7865 	case WMA_SET_PDEV_IE_REQ:
7866 		wma_process_set_pdev_ie_req(wma_handle,
7867 				(struct set_ie_param *)msg->bodyptr);
7868 		qdf_mem_free(msg->bodyptr);
7869 		break;
7870 #if !defined(REMOVE_PKT_LOG)
7871 	case WMA_PKTLOG_ENABLE_REQ:
7872 		wma_pktlog_wmi_send_cmd(wma_handle,
7873 			(struct ath_pktlog_wmi_params *)msg->bodyptr);
7874 		qdf_mem_free(msg->bodyptr);
7875 		break;
7876 #endif /* REMOVE_PKT_LOG */
7877 	case WMA_ENTER_PS_REQ:
7878 		wma_enable_sta_ps_mode(wma_handle,
7879 				       (tpEnablePsParams) msg->bodyptr);
7880 		qdf_mem_free(msg->bodyptr);
7881 		break;
7882 	case WMA_EXIT_PS_REQ:
7883 		wma_disable_sta_ps_mode(wma_handle,
7884 					(tpDisablePsParams) msg->bodyptr);
7885 		qdf_mem_free(msg->bodyptr);
7886 		break;
7887 	case WMA_ENABLE_UAPSD_REQ:
7888 		wma_enable_uapsd_mode(wma_handle,
7889 				      (tpEnableUapsdParams) msg->bodyptr);
7890 		qdf_mem_free(msg->bodyptr);
7891 		break;
7892 	case WMA_DISABLE_UAPSD_REQ:
7893 		wma_disable_uapsd_mode(wma_handle,
7894 				       (tpDisableUapsdParams) msg->bodyptr);
7895 		qdf_mem_free(msg->bodyptr);
7896 		break;
7897 	case WMA_SET_DTIM_PERIOD:
7898 		wma_set_dtim_period(wma_handle,
7899 				    (struct set_dtim_params *)msg->bodyptr);
7900 		qdf_mem_free(msg->bodyptr);
7901 		break;
7902 	case WMA_SET_TX_POWER_REQ:
7903 		wma_set_tx_power(wma_handle, (tpMaxTxPowerParams) msg->bodyptr);
7904 		break;
7905 	case WMA_SET_MAX_TX_POWER_REQ:
7906 		wma_set_max_tx_power(wma_handle,
7907 				     (tpMaxTxPowerParams) msg->bodyptr);
7908 		break;
7909 	case WMA_SET_KEEP_ALIVE:
7910 		wma_set_keepalive_req(wma_handle,
7911 				      (tSirKeepAliveReq *) msg->bodyptr);
7912 		break;
7913 #ifdef FEATURE_WLAN_ESE
7914 	case WMA_SET_PLM_REQ:
7915 		wma_config_plm(wma_handle, (tpSirPlmReq) msg->bodyptr);
7916 		break;
7917 #endif
7918 	case WMA_GET_STATISTICS_REQ:
7919 		wma_get_stats_req(wma_handle,
7920 				  (tAniGetPEStatsReq *) msg->bodyptr);
7921 		break;
7922 
7923 	case WMA_CONFIG_PARAM_UPDATE_REQ:
7924 		wma_update_cfg_params(wma_handle,  msg);
7925 		break;
7926 
7927 	case WMA_UPDATE_OP_MODE:
7928 		wma_process_update_opmode(wma_handle,
7929 					  (tUpdateVHTOpMode *) msg->bodyptr);
7930 		qdf_mem_free(msg->bodyptr);
7931 		break;
7932 	case WMA_UPDATE_RX_NSS:
7933 		wma_process_update_rx_nss(wma_handle,
7934 					  (tUpdateRxNss *) msg->bodyptr);
7935 		qdf_mem_free(msg->bodyptr);
7936 		break;
7937 	case WMA_UPDATE_MEMBERSHIP:
7938 		wma_process_update_membership(wma_handle,
7939 			(tUpdateMembership *) msg->bodyptr);
7940 		break;
7941 	case WMA_UPDATE_USERPOS:
7942 		wma_process_update_userpos(wma_handle,
7943 					   (tUpdateUserPos *) msg->bodyptr);
7944 		break;
7945 	case WMA_UPDATE_BEACON_IND:
7946 		wma_process_update_beacon_params(wma_handle,
7947 			(tUpdateBeaconParams *) msg->bodyptr);
7948 		qdf_mem_free(msg->bodyptr);
7949 		break;
7950 
7951 	case WMA_ADD_TS_REQ:
7952 		wma_add_ts_req(wma_handle, (tAddTsParams *) msg->bodyptr);
7953 		break;
7954 
7955 	case WMA_DEL_TS_REQ:
7956 		wma_del_ts_req(wma_handle, (tDelTsParams *) msg->bodyptr);
7957 		break;
7958 
7959 	case WMA_AGGR_QOS_REQ:
7960 		wma_aggr_qos_req(wma_handle, (tAggrAddTsParams *) msg->bodyptr);
7961 		break;
7962 
7963 	case WMA_8023_MULTICAST_LIST_REQ:
7964 		wma_process_mcbc_set_filter_req(wma_handle,
7965 				(tpSirRcvFltMcAddrList) msg->bodyptr);
7966 		qdf_mem_free(msg->bodyptr);
7967 		break;
7968 	case WMA_ROAM_SCAN_OFFLOAD_REQ:
7969 		/*
7970 		 * Main entry point or roaming directives from CSR.
7971 		 */
7972 		wma_process_roaming_config(wma_handle,
7973 				(tSirRoamOffloadScanReq *) msg->bodyptr);
7974 		break;
7975 
7976 	case WMA_RATE_UPDATE_IND:
7977 		wma_process_rate_update_indicate(wma_handle,
7978 				(tSirRateUpdateInd *) msg->bodyptr);
7979 		break;
7980 
7981 #ifdef FEATURE_WLAN_TDLS
7982 	case WMA_UPDATE_TDLS_PEER_STATE:
7983 		wma_update_tdls_peer_state(wma_handle,
7984 				(tTdlsPeerStateParams *) msg->bodyptr);
7985 		break;
7986 	case WMA_TDLS_SET_OFFCHAN_MODE:
7987 		wma_set_tdls_offchan_mode(wma_handle,
7988 			(tdls_chan_switch_params *)msg->bodyptr);
7989 		break;
7990 #endif /* FEATURE_WLAN_TDLS */
7991 	case WMA_ADD_PERIODIC_TX_PTRN_IND:
7992 		wma_process_add_periodic_tx_ptrn_ind(wma_handle,
7993 				(tSirAddPeriodicTxPtrn *) msg->bodyptr);
7994 		qdf_mem_free(msg->bodyptr);
7995 		break;
7996 	case WMA_DEL_PERIODIC_TX_PTRN_IND:
7997 		wma_process_del_periodic_tx_ptrn_ind(wma_handle,
7998 				(tSirDelPeriodicTxPtrn *) msg->bodyptr);
7999 		qdf_mem_free(msg->bodyptr);
8000 		break;
8001 	case WMA_TX_POWER_LIMIT:
8002 		wma_process_tx_power_limits(wma_handle,
8003 					    (tSirTxPowerLimit *) msg->bodyptr);
8004 		qdf_mem_free(msg->bodyptr);
8005 		break;
8006 	case WMA_SEND_ADDBA_REQ:
8007 		wma_process_send_addba_req(wma_handle,
8008 				(struct send_add_ba_req *)msg->bodyptr);
8009 		break;
8010 
8011 #ifdef FEATURE_WLAN_CH_AVOID
8012 	case WMA_CH_AVOID_UPDATE_REQ:
8013 		wma_process_ch_avoid_update_req(wma_handle,
8014 				(tSirChAvoidUpdateReq *) msg->bodyptr);
8015 		qdf_mem_free(msg->bodyptr);
8016 		break;
8017 #endif /* FEATURE_WLAN_CH_AVOID */
8018 #ifdef FEATURE_WLAN_AUTO_SHUTDOWN
8019 	case WMA_SET_AUTO_SHUTDOWN_TIMER_REQ:
8020 		wma_set_auto_shutdown_timer_req(wma_handle, msg->bodyptr);
8021 		qdf_mem_free(msg->bodyptr);
8022 		break;
8023 #endif /* FEATURE_WLAN_AUTO_SHUTDOWN */
8024 	case WMA_DHCP_START_IND:
8025 	case WMA_DHCP_STOP_IND:
8026 		wma_process_dhcp_ind(wma_handle, (tAniDHCPInd *) msg->bodyptr);
8027 		qdf_mem_free(msg->bodyptr);
8028 		break;
8029 
8030 	case WMA_IBSS_CESIUM_ENABLE_IND:
8031 		wma_process_cesium_enable_ind(wma_handle);
8032 		break;
8033 	case WMA_GET_IBSS_PEER_INFO_REQ:
8034 		wma_process_get_peer_info_req(wma_handle,
8035 					      (tSirIbssGetPeerInfoReqParams *)
8036 					      msg->bodyptr);
8037 		qdf_mem_free(msg->bodyptr);
8038 		break;
8039 	case WMA_TX_FAIL_MONITOR_IND:
8040 		wma_process_tx_fail_monitor_ind(wma_handle,
8041 				(tAniTXFailMonitorInd *) msg->bodyptr);
8042 		qdf_mem_free(msg->bodyptr);
8043 		break;
8044 
8045 	case WMA_RMC_ENABLE_IND:
8046 		wma_process_rmc_enable_ind(wma_handle);
8047 		break;
8048 	case WMA_RMC_DISABLE_IND:
8049 		wma_process_rmc_disable_ind(wma_handle);
8050 		break;
8051 	case WMA_RMC_ACTION_PERIOD_IND:
8052 		wma_process_rmc_action_period_ind(wma_handle);
8053 		break;
8054 	case WMA_INIT_THERMAL_INFO_CMD:
8055 		wma_process_init_thermal_info(wma_handle,
8056 					      (t_thermal_mgmt *) msg->bodyptr);
8057 		qdf_mem_free(msg->bodyptr);
8058 		break;
8059 
8060 	case WMA_SET_THERMAL_LEVEL:
8061 		wma_process_set_thermal_level(wma_handle, msg->bodyval);
8062 		break;
8063 #ifdef CONFIG_HL_SUPPORT
8064 	case WMA_INIT_BAD_PEER_TX_CTL_INFO_CMD:
8065 		wma_process_init_bad_peer_tx_ctl_info(
8066 			wma_handle,
8067 			(struct t_bad_peer_txtcl_config *)msg->bodyptr);
8068 		qdf_mem_free(msg->bodyptr);
8069 			break;
8070 #endif
8071 	case WMA_SET_MIMOPS_REQ:
8072 		wma_process_set_mimops_req(wma_handle,
8073 					   (tSetMIMOPS *) msg->bodyptr);
8074 		qdf_mem_free(msg->bodyptr);
8075 		break;
8076 	case WMA_SET_SAP_INTRABSS_DIS:
8077 		wma_set_vdev_intrabss_fwd(wma_handle,
8078 					  (tDisableIntraBssFwd *) msg->bodyptr);
8079 		qdf_mem_free(msg->bodyptr);
8080 		break;
8081 	case WMA_GET_PEER_INFO:
8082 		wma_get_peer_info(wma_handle, msg->bodyptr);
8083 		qdf_mem_free(msg->bodyptr);
8084 		break;
8085 	case WMA_GET_PEER_INFO_EXT:
8086 		wma_get_peer_info_ext(wma_handle, msg->bodyptr);
8087 		qdf_mem_free(msg->bodyptr);
8088 		break;
8089 	case WMA_MODEM_POWER_STATE_IND:
8090 		wma_notify_modem_power_state(wma_handle,
8091 				(tSirModemPowerStateInd *) msg->bodyptr);
8092 		qdf_mem_free(msg->bodyptr);
8093 		break;
8094 #ifdef WLAN_FEATURE_STATS_EXT
8095 	case WMA_STATS_EXT_REQUEST:
8096 		wma_stats_ext_req(wma_handle,
8097 				  (tpStatsExtRequest) (msg->bodyptr));
8098 		qdf_mem_free(msg->bodyptr);
8099 		break;
8100 #endif /* WLAN_FEATURE_STATS_EXT */
8101 	case WMA_HIDDEN_SSID_VDEV_RESTART:
8102 		wma_hidden_ssid_vdev_restart(wma_handle,
8103 				(tHalHiddenSsidVdevRestart *) msg->bodyptr);
8104 		break;
8105 #ifdef WLAN_FEATURE_EXTWOW_SUPPORT
8106 	case WMA_WLAN_EXT_WOW:
8107 		wma_enable_ext_wow(wma_handle,
8108 				   (tSirExtWoWParams *) msg->bodyptr);
8109 		qdf_mem_free(msg->bodyptr);
8110 		break;
8111 	case WMA_WLAN_SET_APP_TYPE1_PARAMS:
8112 		wma_set_app_type1_params_in_fw(wma_handle,
8113 				(tSirAppType1Params *) msg->bodyptr);
8114 		qdf_mem_free(msg->bodyptr);
8115 		break;
8116 	case WMA_WLAN_SET_APP_TYPE2_PARAMS:
8117 		wma_set_app_type2_params_in_fw(wma_handle,
8118 				(tSirAppType2Params *) msg->bodyptr);
8119 		qdf_mem_free(msg->bodyptr);
8120 		break;
8121 #endif /* WLAN_FEATURE_EXTWOW_SUPPORT */
8122 #ifdef FEATURE_WLAN_EXTSCAN
8123 	case WMA_EXTSCAN_START_REQ:
8124 		wma_start_extscan(wma_handle,
8125 				  (tSirWifiScanCmdReqParams *) msg->bodyptr);
8126 		qdf_mem_free(msg->bodyptr);
8127 		break;
8128 	case WMA_EXTSCAN_STOP_REQ:
8129 		wma_stop_extscan(wma_handle,
8130 				 (tSirExtScanStopReqParams *) msg->bodyptr);
8131 		qdf_mem_free(msg->bodyptr);
8132 		break;
8133 	case WMA_EXTSCAN_SET_BSSID_HOTLIST_REQ:
8134 		wma_extscan_start_hotlist_monitor(wma_handle,
8135 			(tSirExtScanSetBssidHotListReqParams *) msg->bodyptr);
8136 		qdf_mem_free(msg->bodyptr);
8137 		break;
8138 	case WMA_EXTSCAN_RESET_BSSID_HOTLIST_REQ:
8139 		wma_extscan_stop_hotlist_monitor(wma_handle,
8140 			(tSirExtScanResetBssidHotlistReqParams *) msg->bodyptr);
8141 		qdf_mem_free(msg->bodyptr);
8142 		break;
8143 	case WMA_EXTSCAN_SET_SIGNF_CHANGE_REQ:
8144 		wma_extscan_start_change_monitor(wma_handle,
8145 			(tSirExtScanSetSigChangeReqParams *) msg->bodyptr);
8146 		qdf_mem_free(msg->bodyptr);
8147 		break;
8148 	case WMA_EXTSCAN_RESET_SIGNF_CHANGE_REQ:
8149 		wma_extscan_stop_change_monitor(wma_handle,
8150 			(tSirExtScanResetSignificantChangeReqParams *)
8151 							msg->bodyptr);
8152 		qdf_mem_free(msg->bodyptr);
8153 		break;
8154 	case WMA_EXTSCAN_GET_CACHED_RESULTS_REQ:
8155 		wma_extscan_get_cached_results(wma_handle,
8156 			(tSirExtScanGetCachedResultsReqParams *) msg->bodyptr);
8157 		qdf_mem_free(msg->bodyptr);
8158 		break;
8159 	case WMA_EXTSCAN_GET_CAPABILITIES_REQ:
8160 		wma_extscan_get_capabilities(wma_handle,
8161 			(tSirGetExtScanCapabilitiesReqParams *) msg->bodyptr);
8162 		qdf_mem_free(msg->bodyptr);
8163 		break;
8164 	case WMA_SET_EPNO_LIST_REQ:
8165 		wma_set_epno_network_list(wma_handle,
8166 			(struct wifi_epno_params *)msg->bodyptr);
8167 		qdf_mem_free(msg->bodyptr);
8168 		break;
8169 	case WMA_SET_PER_ROAM_CONFIG_CMD:
8170 		wma_update_per_roam_config(wma_handle,
8171 			(struct wmi_per_roam_config_req *)msg->bodyptr);
8172 		qdf_mem_free(msg->bodyptr);
8173 		break;
8174 	case WMA_SET_PASSPOINT_LIST_REQ:
8175 		/* Issue reset passpoint network list first and clear
8176 		 * the entries
8177 		 */
8178 		wma_reset_passpoint_network_list(wma_handle,
8179 			(struct wifi_passpoint_req *)msg->bodyptr);
8180 
8181 		wma_set_passpoint_network_list(wma_handle,
8182 			(struct wifi_passpoint_req *)msg->bodyptr);
8183 		qdf_mem_free(msg->bodyptr);
8184 		break;
8185 	case WMA_RESET_PASSPOINT_LIST_REQ:
8186 		wma_reset_passpoint_network_list(wma_handle,
8187 			(struct wifi_passpoint_req *)msg->bodyptr);
8188 		break;
8189 #endif /* FEATURE_WLAN_EXTSCAN */
8190 	case WMA_SET_SCAN_MAC_OUI_REQ:
8191 		wma_scan_probe_setoui(wma_handle, msg->bodyptr);
8192 		qdf_mem_free(msg->bodyptr);
8193 		break;
8194 #ifdef WLAN_FEATURE_LINK_LAYER_STATS
8195 	case WMA_LINK_LAYER_STATS_CLEAR_REQ:
8196 		wma_process_ll_stats_clear_req(wma_handle,
8197 			(tpSirLLStatsClearReq) msg->bodyptr);
8198 		qdf_mem_free(msg->bodyptr);
8199 		break;
8200 	case WMA_LINK_LAYER_STATS_SET_REQ:
8201 		wma_process_ll_stats_set_req(wma_handle,
8202 					     (tpSirLLStatsSetReq) msg->bodyptr);
8203 		qdf_mem_free(msg->bodyptr);
8204 		break;
8205 	case WMA_LINK_LAYER_STATS_GET_REQ:
8206 		wma_process_ll_stats_get_req(wma_handle,
8207 					     (tpSirLLStatsGetReq) msg->bodyptr);
8208 		qdf_mem_free(msg->bodyptr);
8209 		break;
8210 	case WDA_LINK_LAYER_STATS_SET_THRESHOLD:
8211 		wma_config_stats_ext_threshold(wma_handle,
8212 			(struct sir_ll_ext_stats_threshold *)msg->bodyptr);
8213 		qdf_mem_free(msg->bodyptr);
8214 		break;
8215 #endif /* WLAN_FEATURE_LINK_LAYER_STATS */
8216 #ifdef WLAN_FEATURE_ROAM_OFFLOAD
8217 	case WMA_ROAM_OFFLOAD_SYNCH_FAIL:
8218 		wma_process_roam_synch_fail(wma_handle,
8219 			(struct roam_offload_synch_fail *)msg->bodyptr);
8220 		qdf_mem_free(msg->bodyptr);
8221 		break;
8222 	case SIR_HAL_ROAM_INVOKE:
8223 		wma_process_roam_invoke(wma_handle,
8224 			(struct wma_roam_invoke_cmd *)msg->bodyptr);
8225 		qdf_mem_free(msg->bodyptr);
8226 		break;
8227 #endif /* WLAN_FEATURE_ROAM_OFFLOAD */
8228 #ifdef WLAN_FEATURE_NAN
8229 	case WMA_NAN_REQUEST:
8230 		wma_nan_req(wma_handle, (tNanRequest *) msg->bodyptr);
8231 		qdf_mem_free(msg->bodyptr);
8232 		break;
8233 #endif /* WLAN_FEATURE_NAN */
8234 	case SIR_HAL_SET_BASE_MACADDR_IND:
8235 		wma_set_base_macaddr_indicate(wma_handle,
8236 					      (tSirMacAddr *) msg->bodyptr);
8237 		qdf_mem_free(msg->bodyptr);
8238 		break;
8239 	case WMA_LINK_STATUS_GET_REQ:
8240 		wma_process_link_status_req(wma_handle,
8241 					    (tAniGetLinkStatus *) msg->bodyptr);
8242 		break;
8243 	case WMA_GET_TEMPERATURE_REQ:
8244 		wma_get_temperature(wma_handle);
8245 		qdf_mem_free(msg->bodyptr);
8246 		break;
8247 	case WMA_TSF_GPIO_PIN:
8248 		wma_set_tsf_gpio_pin(wma_handle, msg->bodyval);
8249 		break;
8250 
8251 #ifdef DHCP_SERVER_OFFLOAD
8252 	case WMA_SET_DHCP_SERVER_OFFLOAD_CMD:
8253 		wma_process_dhcpserver_offload(wma_handle,
8254 			(tSirDhcpSrvOffloadInfo *) msg->bodyptr);
8255 		qdf_mem_free(msg->bodyptr);
8256 		break;
8257 #endif /* DHCP_SERVER_OFFLOAD */
8258 #ifdef WLAN_FEATURE_GPIO_LED_FLASHING
8259 	case WMA_LED_FLASHING_REQ:
8260 		wma_set_led_flashing(wma_handle, msg->bodyptr);
8261 		qdf_mem_free(msg->bodyptr);
8262 		break;
8263 #endif /* WLAN_FEATURE_GPIO_LED_FLASHING */
8264 	case SIR_HAL_SET_MAS:
8265 		wma_process_set_mas(wma_handle,
8266 				(uint32_t *)msg->bodyptr);
8267 		qdf_mem_free(msg->bodyptr);
8268 		break;
8269 	case SIR_HAL_SET_MIRACAST:
8270 		wma_process_set_miracast(wma_handle,
8271 				(uint32_t *)msg->bodyptr);
8272 		qdf_mem_free(msg->bodyptr);
8273 		break;
8274 	case SIR_HAL_CONFIG_STATS_FACTOR:
8275 		wma_config_stats_factor(wma_handle,
8276 					(struct sir_stats_avg_factor *)
8277 					msg->bodyptr);
8278 		qdf_mem_free(msg->bodyptr);
8279 		break;
8280 	case SIR_HAL_CONFIG_GUARD_TIME:
8281 		wma_config_guard_time(wma_handle,
8282 				      (struct sir_guard_time_request *)
8283 				      msg->bodyptr);
8284 		qdf_mem_free(msg->bodyptr);
8285 		break;
8286 	case SIR_HAL_START_STOP_LOGGING:
8287 		wma_set_wifi_start_packet_stats(wma_handle,
8288 				(struct sir_wifi_start_log *)msg->bodyptr);
8289 		wma_enable_specific_fw_logs(wma_handle,
8290 				(struct sir_wifi_start_log *)msg->bodyptr);
8291 		qdf_mem_free(msg->bodyptr);
8292 		break;
8293 	case SIR_HAL_FLUSH_LOG_TO_FW:
8294 		wma_send_flush_logs_to_fw(wma_handle);
8295 		/* Body ptr is NULL here */
8296 		break;
8297 	case WMA_SET_RSSI_MONITOR_REQ:
8298 		wma_set_rssi_monitoring(wma_handle,
8299 			(struct rssi_monitor_req *)msg->bodyptr);
8300 		qdf_mem_free(msg->bodyptr);
8301 		break;
8302 	case SIR_HAL_PDEV_SET_PCL_TO_FW:
8303 		wma_send_pdev_set_pcl_cmd(wma_handle,
8304 				(struct wmi_pcl_chan_weights *)msg->bodyptr);
8305 		qdf_mem_free(msg->bodyptr);
8306 		break;
8307 	case SIR_HAL_PDEV_SET_HW_MODE:
8308 		wma_send_pdev_set_hw_mode_cmd(wma_handle,
8309 				(struct policy_mgr_hw_mode *)msg->bodyptr);
8310 		qdf_mem_free(msg->bodyptr);
8311 		break;
8312 	case WMA_OCB_SET_CONFIG_CMD:
8313 		wma_ocb_set_config_req(wma_handle,
8314 			(struct sir_ocb_config *)msg->bodyptr);
8315 		qdf_mem_free(msg->bodyptr);
8316 		break;
8317 	case WMA_OCB_SET_UTC_TIME_CMD:
8318 		wma_ocb_set_utc_time(wma_handle,
8319 			(struct sir_ocb_utc *)msg->bodyptr);
8320 		qdf_mem_free(msg->bodyptr);
8321 		break;
8322 	case WMA_OCB_START_TIMING_ADVERT_CMD:
8323 		wma_ocb_start_timing_advert(wma_handle,
8324 			(struct sir_ocb_timing_advert *)msg->bodyptr);
8325 		qdf_mem_free(msg->bodyptr);
8326 		break;
8327 	case WMA_OCB_STOP_TIMING_ADVERT_CMD:
8328 		wma_ocb_stop_timing_advert(wma_handle,
8329 			(struct sir_ocb_timing_advert *)msg->bodyptr);
8330 		qdf_mem_free(msg->bodyptr);
8331 		break;
8332 	case WMA_DCC_CLEAR_STATS_CMD:
8333 		wma_dcc_clear_stats(wma_handle,
8334 			(struct sir_dcc_clear_stats *)msg->bodyptr);
8335 		qdf_mem_free(msg->bodyptr);
8336 		break;
8337 	case WMA_OCB_GET_TSF_TIMER_CMD:
8338 		wma_ocb_get_tsf_timer(wma_handle,
8339 			(struct sir_ocb_get_tsf_timer *)msg->bodyptr);
8340 		qdf_mem_free(msg->bodyptr);
8341 		break;
8342 	case WMA_SET_WISA_PARAMS:
8343 		wma_set_wisa_params(wma_handle,
8344 			(struct sir_wisa_params *)msg->bodyptr);
8345 		qdf_mem_free(msg->bodyptr);
8346 		break;
8347 	case WMA_DCC_GET_STATS_CMD:
8348 		wma_dcc_get_stats(wma_handle,
8349 			(struct sir_dcc_get_stats *)msg->bodyptr);
8350 		qdf_mem_free(msg->bodyptr);
8351 		break;
8352 	case WMA_DCC_UPDATE_NDL_CMD:
8353 		wma_dcc_update_ndl(wma_handle,
8354 			(struct sir_dcc_update_ndl *)msg->bodyptr);
8355 		qdf_mem_free(msg->bodyptr);
8356 		break;
8357 	case SIR_HAL_PDEV_DUAL_MAC_CFG_REQ:
8358 		wma_send_pdev_set_dual_mac_config(wma_handle,
8359 				(struct policy_mgr_dual_mac_config *)msg->bodyptr);
8360 		qdf_mem_free(msg->bodyptr);
8361 		break;
8362 	case WMA_SET_IE_INFO:
8363 		wma_process_set_ie_info(wma_handle,
8364 			(struct vdev_ie_info *)msg->bodyptr);
8365 		qdf_mem_free(msg->bodyptr);
8366 		break;
8367 	case SIR_HAL_SOC_ANTENNA_MODE_REQ:
8368 		wma_send_pdev_set_antenna_mode(wma_handle,
8369 			(struct sir_antenna_mode_param *)msg->bodyptr);
8370 		qdf_mem_free(msg->bodyptr);
8371 		break;
8372 	case WMA_LRO_CONFIG_CMD:
8373 		wma_lro_config_cmd(wma_handle,
8374 			(struct cdp_lro_hash_config *)msg->bodyptr);
8375 		qdf_mem_free(msg->bodyptr);
8376 		break;
8377 	case WMA_GW_PARAM_UPDATE_REQ:
8378 		wma_set_gateway_params(wma_handle,
8379 			(struct gateway_param_update_req *)msg->bodyptr);
8380 		qdf_mem_free(msg->bodyptr);
8381 		break;
8382 	case WMA_SET_ADAPT_DWELLTIME_CONF_PARAMS:
8383 		wma_send_adapt_dwelltime_params(wma_handle,
8384 			(struct adaptive_dwelltime_params *)msg->bodyptr);
8385 		qdf_mem_free(msg->bodyptr);
8386 		break;
8387 	case WMA_HT40_OBSS_SCAN_IND:
8388 		wma_send_ht40_obss_scanind(wma_handle,
8389 			(struct obss_ht40_scanind *)msg->bodyptr);
8390 		qdf_mem_free(msg->bodyptr);
8391 		break;
8392 	case WMA_ADD_BCN_FILTER_CMDID:
8393 		wma_add_beacon_filter(wma_handle, msg->bodyptr);
8394 		qdf_mem_free(msg->bodyptr);
8395 		break;
8396 	case WMA_REMOVE_BCN_FILTER_CMDID:
8397 		wma_remove_beacon_filter(wma_handle, msg->bodyptr);
8398 		qdf_mem_free(msg->bodyptr);
8399 		break;
8400 	case WDA_BPF_GET_CAPABILITIES_REQ:
8401 		wma_get_bpf_capabilities(wma_handle);
8402 		break;
8403 	case WDA_BPF_SET_INSTRUCTIONS_REQ:
8404 		wma_set_bpf_instructions(wma_handle, msg->bodyptr);
8405 		qdf_mem_free(msg->bodyptr);
8406 		break;
8407 	case SIR_HAL_POWER_DBG_CMD:
8408 		wma_process_hal_pwr_dbg_cmd(wma_handle,
8409 					    msg->bodyptr);
8410 		qdf_mem_free(msg->bodyptr);
8411 		break;
8412 	case WMA_UPDATE_WEP_DEFAULT_KEY:
8413 		wma_update_wep_default_key(wma_handle,
8414 			(struct wep_update_default_key_idx *)msg->bodyptr);
8415 		qdf_mem_free(msg->bodyptr);
8416 		break;
8417 	case WMA_SEND_FREQ_RANGE_CONTROL_IND:
8418 		wma_enable_disable_caevent_ind(wma_handle, msg->bodyval);
8419 		break;
8420 	case SIR_HAL_UPDATE_TX_FAIL_CNT_TH:
8421 		wma_update_tx_fail_cnt_th(wma_handle, msg->bodyptr);
8422 		qdf_mem_free(msg->bodyptr);
8423 		break;
8424 	case SIR_HAL_LONG_RETRY_LIMIT_CNT:
8425 		wma_update_long_retry_limit(wma_handle, msg->bodyptr);
8426 		qdf_mem_free(msg->bodyptr);
8427 		break;
8428 	case SIR_HAL_SHORT_RETRY_LIMIT_CNT:
8429 		wma_update_short_retry_limit(wma_handle, msg->bodyptr);
8430 		qdf_mem_free(msg->bodyptr);
8431 		break;
8432 	case SIR_HAL_POWER_DEBUG_STATS_REQ:
8433 		wma_process_power_debug_stats_req(wma_handle);
8434 		break;
8435 	case WMA_GET_RCPI_REQ:
8436 		wma_get_rcpi_req(wma_handle,
8437 				 (struct sme_rcpi_req *)msg->bodyptr);
8438 		qdf_mem_free(msg->bodyptr);
8439 		break;
8440 	case WMA_SET_WOW_PULSE_CMD:
8441 		wma_send_wow_pulse_cmd(wma_handle,
8442 			(struct wow_pulse_mode *)msg->bodyptr);
8443 		qdf_mem_free(msg->bodyptr);
8444 		break;
8445 	case WMA_SET_DBS_SCAN_SEL_CONF_PARAMS:
8446 		wma_send_dbs_scan_selection_params(wma_handle,
8447 			(struct wmi_dbs_scan_sel_params *)msg->bodyptr);
8448 		qdf_mem_free(msg->bodyptr);
8449 		break;
8450 	case WMA_SET_ARP_STATS_REQ:
8451 		wma_set_arp_req_stats(wma_handle,
8452 			(struct set_arp_stats_params *)msg->bodyptr);
8453 		qdf_mem_free(msg->bodyptr);
8454 		break;
8455 	case WMA_GET_ARP_STATS_REQ:
8456 		wma_get_arp_req_stats(wma_handle,
8457 			(struct get_arp_stats_params *)msg->bodyptr);
8458 		qdf_mem_free(msg->bodyptr);
8459 		break;
8460 	case SIR_HAL_SET_DEL_PMKID_CACHE:
8461 		wma_set_del_pmkid_cache(wma_handle,
8462 			(struct wmi_unified_pmk_cache *) msg->bodyptr);
8463 		qdf_mem_free(msg->bodyptr);
8464 		break;
8465 	case SIR_HAL_HLP_IE_INFO:
8466 		wma_roam_scan_send_hlp(wma_handle,
8467 			(struct hlp_params *)msg->bodyptr);
8468 		qdf_mem_free(msg->bodyptr);
8469 		break;
8470 	case WMA_SET_LIMIT_OFF_CHAN:
8471 		wma_process_limit_off_chan(wma_handle, msg->bodyptr);
8472 		qdf_mem_free(msg->bodyptr);
8473 		break;
8474 	case WMA_OBSS_DETECTION_REQ:
8475 		wma_send_obss_detection_cfg(wma_handle, msg->bodyptr);
8476 		qdf_mem_free(msg->bodyptr);
8477 		break;
8478 	case WMA_INVOKE_NEIGHBOR_REPORT:
8479 		wma_send_invoke_neighbor_report(wma_handle, msg->bodyptr);
8480 		qdf_mem_free(msg->bodyptr);
8481 		break;
8482 	case WMA_OBSS_COLOR_COLLISION_REQ:
8483 		wma_process_obss_color_collision_req(wma_handle, msg->bodyptr);
8484 		qdf_mem_free(msg->bodyptr);
8485 		break;
8486 	default:
8487 		WMA_LOGE("Unhandled WMA message of type %d", msg->type);
8488 		if (msg->bodyptr)
8489 			qdf_mem_free(msg->bodyptr);
8490 	}
8491 end:
8492 	return qdf_status;
8493 }
8494 
8495 QDF_STATUS wma_mc_process_handler(struct scheduler_msg *msg)
8496 {
8497 	return wma_mc_process_msg(msg);
8498 }
8499 
8500 /**
8501  * wma_log_completion_timeout() - Log completion timeout
8502  * @data: Timeout handler data
8503  *
8504  * This function is called when log completion timer expires
8505  *
8506  * Return: None
8507  */
8508 void wma_log_completion_timeout(void *data)
8509 {
8510 	tp_wma_handle wma_handle;
8511 
8512 	WMA_LOGE("%s: Timeout occurred for log completion command", __func__);
8513 
8514 	wma_handle = (tp_wma_handle) data;
8515 	if (!wma_handle)
8516 		WMA_LOGE("%s: Invalid WMA handle", __func__);
8517 
8518 	/* Though we did not receive any event from FW,
8519 	 * we can flush whatever logs we have with us
8520 	 */
8521 	cds_logging_set_fw_flush_complete();
8522 }
8523 
8524 /**
8525  * wma_map_pcl_weights() - Map PCL weights
8526  * @pcl_weight: Internal PCL weights
8527  *
8528  * Maps the internal weights of PCL to the weights needed by FW
8529  *
8530  * Return: Mapped channel weight of type wmi_pcl_chan_weight
8531  */
8532 static wmi_pcl_chan_weight wma_map_pcl_weights(uint32_t pcl_weight)
8533 {
8534 	switch (pcl_weight) {
8535 	case WEIGHT_OF_GROUP1_PCL_CHANNELS:
8536 		return WMI_PCL_WEIGHT_VERY_HIGH;
8537 	case WEIGHT_OF_GROUP2_PCL_CHANNELS:
8538 		return WMI_PCL_WEIGHT_HIGH;
8539 	case WEIGHT_OF_GROUP3_PCL_CHANNELS:
8540 		return WMI_PCL_WEIGHT_MEDIUM;
8541 	case WEIGHT_OF_NON_PCL_CHANNELS:
8542 		return WMI_PCL_WEIGHT_LOW;
8543 	default:
8544 		return WMI_PCL_WEIGHT_DISALLOW;
8545 	}
8546 }
8547 
8548 /**
8549  * wma_send_pdev_set_pcl_cmd() - Send WMI_SOC_SET_PCL_CMDID to FW
8550  * @wma_handle: WMA handle
8551  * @msg: PCL structure containing the PCL and the number of channels
8552  *
8553  * WMI_PDEV_SET_PCL_CMDID provides a Preferred Channel List (PCL) to the WLAN
8554  * firmware. The DBS Manager is the consumer of this information in the WLAN
8555  * firmware. The channel list will be used when a Virtual DEVice (VDEV) needs
8556  * to migrate to a new channel without host driver involvement. An example of
8557  * this behavior is Legacy Fast Roaming (LFR 3.0). Generally, the host will
8558  * manage the channel selection without firmware involvement.
8559  *
8560  * WMI_PDEV_SET_PCL_CMDID will carry only the weight list and not the actual
8561  * channel list. The weights corresponds to the channels sent in
8562  * WMI_SCAN_CHAN_LIST_CMDID. The channels from PCL would be having a higher
8563  * weightage compared to the non PCL channels.
8564  *
8565  * Return: Success if the cmd is sent successfully to the firmware
8566  */
8567 QDF_STATUS wma_send_pdev_set_pcl_cmd(tp_wma_handle wma_handle,
8568 				struct wmi_pcl_chan_weights *msg)
8569 {
8570 	uint32_t i;
8571 	QDF_STATUS status;
8572 
8573 	if (!wma_handle) {
8574 		WMA_LOGE("%s: WMA handle is NULL. Cannot issue command",
8575 				__func__);
8576 		return QDF_STATUS_E_NULL_VALUE;
8577 	}
8578 
8579 	for (i = 0; i < wma_handle->saved_chan.num_channels; i++) {
8580 		msg->saved_chan_list[i] =
8581 			wma_handle->saved_chan.channel_list[i];
8582 	}
8583 
8584 	msg->saved_num_chan = wma_handle->saved_chan.num_channels;
8585 	status = policy_mgr_get_valid_chan_weights(wma_handle->psoc,
8586 		(struct policy_mgr_pcl_chan_weights *)msg);
8587 
8588 	for (i = 0; i < msg->saved_num_chan; i++) {
8589 		msg->weighed_valid_list[i] =
8590 			wma_map_pcl_weights(msg->weighed_valid_list[i]);
8591 		/* Dont allow roaming on 2G when 5G_ONLY configured */
8592 		if ((wma_handle->bandcapability == BAND_5G) &&
8593 			(msg->saved_chan_list[i] <= MAX_24GHZ_CHANNEL)) {
8594 			msg->weighed_valid_list[i] =
8595 				WEIGHT_OF_DISALLOWED_CHANNELS;
8596 		}
8597 		WMA_LOGD("%s: chan:%d weight[%d]=%d", __func__,
8598 			 msg->saved_chan_list[i], i,
8599 			 msg->weighed_valid_list[i]);
8600 	}
8601 
8602 	if (!QDF_IS_STATUS_SUCCESS(status)) {
8603 		WMA_LOGE("%s: Error in creating weighed pcl", __func__);
8604 		return status;
8605 	}
8606 
8607 	if (wmi_unified_pdev_set_pcl_cmd(wma_handle->wmi_handle, msg))
8608 		return QDF_STATUS_E_FAILURE;
8609 
8610 	return QDF_STATUS_SUCCESS;
8611 }
8612 
8613 /**
8614  * wma_send_pdev_set_hw_mode_cmd() - Send WMI_PDEV_SET_HW_MODE_CMDID to FW
8615  * @wma_handle: WMA handle
8616  * @msg: Structure containing the following parameters
8617  *
8618  * - hw_mode_index: The HW_Mode field is a enumerated type that is selected
8619  * from the HW_Mode table, which is returned in the WMI_SERVICE_READY_EVENTID.
8620  *
8621  * Provides notification to the WLAN firmware that host driver is requesting a
8622  * HardWare (HW) Mode change. This command is needed to support iHelium in the
8623  * configurations that include the Dual Band Simultaneous (DBS) feature.
8624  *
8625  * Return: Success if the cmd is sent successfully to the firmware
8626  */
8627 QDF_STATUS wma_send_pdev_set_hw_mode_cmd(tp_wma_handle wma_handle,
8628 					 struct policy_mgr_hw_mode *msg)
8629 {
8630 	struct sir_set_hw_mode_resp *param;
8631 	struct wma_target_req *timeout_msg;
8632 
8633 	if (!wma_handle) {
8634 		WMA_LOGE("%s: WMA handle is NULL. Cannot issue command",
8635 				__func__);
8636 		/* Handle is NULL. Will not be able to send failure
8637 		 * response as well
8638 		 */
8639 		return QDF_STATUS_E_NULL_VALUE;
8640 	}
8641 
8642 	if (!msg) {
8643 		WMA_LOGE("%s: Set HW mode param is NULL", __func__);
8644 		/* Lets try to free the active command list */
8645 		goto fail;
8646 	}
8647 
8648 	wma_acquire_wakelock(&wma_handle->wmi_cmd_rsp_wake_lock,
8649 			     WMA_VDEV_HW_MODE_REQUEST_TIMEOUT);
8650 	if (wmi_unified_soc_set_hw_mode_cmd(wma_handle->wmi_handle,
8651 					    msg->hw_mode_index)) {
8652 		wma_release_wakelock(&wma_handle->wmi_cmd_rsp_wake_lock);
8653 		goto fail;
8654 	}
8655 	timeout_msg = wma_fill_hold_req(wma_handle, 0,
8656 			SIR_HAL_PDEV_SET_HW_MODE,
8657 			WMA_PDEV_SET_HW_MODE_RESP, NULL,
8658 			WMA_VDEV_HW_MODE_REQUEST_TIMEOUT - 1);
8659 	if (!timeout_msg) {
8660 		WMA_LOGE("Failed to allocate request for SIR_HAL_PDEV_SET_HW_MODE");
8661 		wma_remove_req(wma_handle, 0, WMA_PDEV_SET_HW_MODE_RESP);
8662 	}
8663 
8664 	return QDF_STATUS_SUCCESS;
8665 fail:
8666 	param = qdf_mem_malloc(sizeof(*param));
8667 	if (!param) {
8668 		WMA_LOGE("%s: Memory allocation failed", __func__);
8669 		return QDF_STATUS_E_NULL_VALUE;
8670 	}
8671 	param->status = SET_HW_MODE_STATUS_ECANCELED;
8672 	param->cfgd_hw_mode_index = 0;
8673 	param->num_vdev_mac_entries = 0;
8674 	WMA_LOGE("%s: Sending HW mode fail response to LIM", __func__);
8675 	wma_send_msg(wma_handle, SIR_HAL_PDEV_SET_HW_MODE_RESP,
8676 			(void *) param, 0);
8677 	return QDF_STATUS_SUCCESS;
8678 }
8679 
8680 /**
8681  * wma_send_pdev_set_dual_mac_config() - Set dual mac config to FW
8682  * @wma_handle: WMA handle
8683  * @msg: Dual MAC config parameters
8684  *
8685  * Configures WLAN firmware with the dual MAC features
8686  *
8687  * Return: QDF_STATUS. 0 on success.
8688  */
8689 QDF_STATUS wma_send_pdev_set_dual_mac_config(tp_wma_handle wma_handle,
8690 		struct policy_mgr_dual_mac_config *msg)
8691 {
8692 	QDF_STATUS status;
8693 
8694 	if (!wma_handle) {
8695 		WMA_LOGE("%s: WMA handle is NULL. Cannot issue command",
8696 				__func__);
8697 		return QDF_STATUS_E_NULL_VALUE;
8698 	}
8699 
8700 	if (!msg) {
8701 		WMA_LOGE("%s: Set dual mode config is NULL", __func__);
8702 		return QDF_STATUS_E_NULL_VALUE;
8703 	}
8704 
8705 	/*
8706 	 * aquire the wake lock here and release it in response handler function
8707 	 * In error condition, release the wake lock right away
8708 	 */
8709 	wma_acquire_wakelock(&wma_handle->wmi_cmd_rsp_wake_lock,
8710 			     WMA_VDEV_PLCY_MGR_CMD_TIMEOUT);
8711 	status = wmi_unified_pdev_set_dual_mac_config_cmd(
8712 				wma_handle->wmi_handle,
8713 				(struct policy_mgr_dual_mac_config *)msg);
8714 	if (QDF_IS_STATUS_ERROR(status)) {
8715 		WMA_LOGE("%s: Failed to send WMI_PDEV_SET_DUAL_MAC_CONFIG_CMDID: %d",
8716 				__func__, status);
8717 		wma_release_wakelock(&wma_handle->wmi_cmd_rsp_wake_lock);
8718 		return status;
8719 	}
8720 	policy_mgr_update_dbs_req_config(wma_handle->psoc,
8721 	msg->scan_config, msg->fw_mode_config);
8722 
8723 	return QDF_STATUS_SUCCESS;
8724 }
8725 
8726 /**
8727  * wma_send_pdev_set_antenna_mode() - Set antenna mode to FW
8728  * @wma_handle: WMA handle
8729  * @msg: Antenna mode parameters
8730  *
8731  * Send WMI_PDEV_SET_ANTENNA_MODE_CMDID to FW requesting to
8732  * modify the number of TX/RX chains from host
8733  *
8734  * Return: QDF_STATUS. 0 on success.
8735  */
8736 QDF_STATUS wma_send_pdev_set_antenna_mode(tp_wma_handle wma_handle,
8737 		struct sir_antenna_mode_param *msg)
8738 {
8739 	wmi_pdev_set_antenna_mode_cmd_fixed_param *cmd;
8740 	wmi_buf_t buf;
8741 	uint32_t len;
8742 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8743 	struct sir_antenna_mode_resp *param;
8744 
8745 	if (!wma_handle) {
8746 		WMA_LOGE("%s: WMA handle is NULL. Cannot issue command",
8747 				__func__);
8748 		return QDF_STATUS_E_NULL_VALUE;
8749 	}
8750 
8751 	if (!msg) {
8752 		WMA_LOGE("%s: Set antenna mode param is NULL", __func__);
8753 		return QDF_STATUS_E_NULL_VALUE;
8754 	}
8755 
8756 	len = sizeof(*cmd);
8757 
8758 	buf = wmi_buf_alloc(wma_handle->wmi_handle, len);
8759 	if (!buf) {
8760 		WMA_LOGE("%s: wmi_buf_alloc failed", __func__);
8761 		status = QDF_STATUS_E_NOMEM;
8762 		goto resp;
8763 	}
8764 
8765 	cmd = (wmi_pdev_set_antenna_mode_cmd_fixed_param *) wmi_buf_data(buf);
8766 	WMITLV_SET_HDR(&cmd->tlv_header,
8767 		WMITLV_TAG_STRUC_wmi_pdev_set_antenna_mode_cmd_fixed_param,
8768 		WMITLV_GET_STRUCT_TLVLEN(
8769 			wmi_pdev_set_antenna_mode_cmd_fixed_param));
8770 
8771 	cmd->pdev_id = WMI_PDEV_ID_SOC;
8772 	/* Bits 0-15 is num of RX chains 16-31 is num of TX chains */
8773 	cmd->num_txrx_chains = msg->num_rx_chains;
8774 	cmd->num_txrx_chains |= (msg->num_tx_chains << 16);
8775 
8776 	WMA_LOGD("%s: Num of chains TX: %d RX: %d txrx_chains: 0x%x",
8777 		 __func__, msg->num_tx_chains,
8778 		 msg->num_rx_chains, cmd->num_txrx_chains);
8779 
8780 	if (wmi_unified_cmd_send(wma_handle->wmi_handle, buf, len,
8781 				 WMI_PDEV_SET_ANTENNA_MODE_CMDID)) {
8782 		WMA_LOGE("%s: Failed to send WMI_PDEV_SET_ANTENNA_MODE_CMDID",
8783 				__func__);
8784 		wmi_buf_free(buf);
8785 		status = QDF_STATUS_E_FAILURE;
8786 		goto resp;
8787 	}
8788 	status = QDF_STATUS_SUCCESS;
8789 
8790 resp:
8791 	param = qdf_mem_malloc(sizeof(*param));
8792 	if (!param) {
8793 		WMA_LOGE("%s: Memory allocation failed", __func__);
8794 		return QDF_STATUS_E_NOMEM;
8795 	}
8796 	param->status = (status) ?
8797 		SET_ANTENNA_MODE_STATUS_ECANCELED :
8798 		SET_ANTENNA_MODE_STATUS_OK;
8799 	WMA_LOGE("%s: Send antenna mode resp to LIM status: %d",
8800 		 __func__, param->status);
8801 	wma_send_msg(wma_handle, SIR_HAL_SOC_ANTENNA_MODE_RESP,
8802 			(void *) param, 0);
8803 	return status;
8804 }
8805 
8806 /**
8807  * wma_crash_inject() - sends command to FW to simulate crash
8808  * @wma_handle:         pointer of WMA context
8809  * @type:               subtype of the command
8810  * @delay_time_ms:      time in milliseconds for FW to delay the crash
8811  *
8812  * This function will send a command to FW in order to simulate different
8813  * kinds of FW crashes.
8814  *
8815  * Return: QDF_STATUS_SUCCESS for success or error code
8816  */
8817 QDF_STATUS wma_crash_inject(WMA_HANDLE wma_handle, uint32_t type,
8818 			    uint32_t delay_time_ms)
8819 {
8820 	struct crash_inject param;
8821 	tp_wma_handle wma = (tp_wma_handle)wma_handle;
8822 
8823 	param.type = type;
8824 	param.delay_time_ms = delay_time_ms;
8825 	return wmi_crash_inject(wma->wmi_handle, &param);
8826 }
8827 
8828 #ifdef RECEIVE_OFFLOAD
8829 int wma_lro_init(struct cdp_lro_hash_config *lro_config)
8830 {
8831 	struct scheduler_msg msg = {0};
8832 	struct cdp_lro_hash_config *iwcmd;
8833 
8834 	iwcmd = qdf_mem_malloc(sizeof(*iwcmd));
8835 	if (!iwcmd) {
8836 		WMA_LOGE("memory allocation for WMA_LRO_CONFIG_CMD failed!");
8837 		return -ENOMEM;
8838 	}
8839 
8840 	*iwcmd = *lro_config;
8841 
8842 	msg.type = WMA_LRO_CONFIG_CMD;
8843 	msg.reserved = 0;
8844 	msg.bodyptr = iwcmd;
8845 
8846 	if (QDF_STATUS_SUCCESS !=
8847 		scheduler_post_msg(QDF_MODULE_ID_WMA, &msg)) {
8848 		WMA_LOGE("Failed to post WMA_LRO_CONFIG_CMD msg!");
8849 		qdf_mem_free(iwcmd);
8850 		return -EAGAIN;
8851 	}
8852 
8853 	WMA_LOGD("sending the LRO configuration to the fw");
8854 	return 0;
8855 }
8856 #endif
8857 
8858 void wma_peer_set_default_routing(void *scn_handle, uint8_t *peer_macaddr,
8859 	uint8_t vdev_id, bool hash_based, uint8_t ring_num)
8860 {
8861 	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
8862 	struct peer_set_params param;
8863 
8864 	if (!wma) {
8865 		WMA_LOGE("%s:wma_handle is NULL", __func__);
8866 		return;
8867 	}
8868 
8869 	/* TODO: Need bit definitions for ring number and hash based routing
8870 	 * fields in common wmi header file
8871 	 */
8872 	param.param_id = WMI_HOST_PEER_SET_DEFAULT_ROUTING;
8873 	param.vdev_id = vdev_id;
8874 	param.param_value = ((hash_based) ? 1 : 0) | (ring_num << 1);
8875 	WMA_LOGD("%s: param_value 0x%x", __func__, param.param_value);
8876 	wmi_set_peer_param_send(wma->wmi_handle, peer_macaddr, &param);
8877 }
8878 
8879 int wma_peer_rx_reorder_queue_setup(void *scn_handle,
8880 	uint8_t vdev_id, uint8_t *peer_macaddr, qdf_dma_addr_t hw_qdesc,
8881 	int tid, uint16_t queue_no)
8882 {
8883 	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
8884 	struct rx_reorder_queue_setup_params param;
8885 
8886 	if (!wma) {
8887 		WMA_LOGE("%s:wma_handle is NULL", __func__);
8888 		return QDF_STATUS_E_FAILURE;
8889 	}
8890 
8891 	param.tid = tid;
8892 	param.vdev_id = vdev_id;
8893 	param.peer_macaddr = peer_macaddr;
8894 	param.hw_qdesc_paddr_lo = hw_qdesc & 0xffffffff;
8895 	param.hw_qdesc_paddr_hi = (uint64_t)hw_qdesc >> 32;
8896 	param.queue_no = queue_no;
8897 
8898 	return wmi_unified_peer_rx_reorder_queue_setup_send(wma->wmi_handle,
8899 		&param);
8900 }
8901 
8902 int wma_peer_rx_reorder_queue_remove(void *scn_handle,
8903 	uint8_t vdev_id, uint8_t *peer_macaddr, uint32_t peer_tid_bitmap)
8904 {
8905 	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
8906 	struct rx_reorder_queue_remove_params param;
8907 
8908 	if (!wma) {
8909 		WMA_LOGE("%s:wma_handle is NULL", __func__);
8910 		return QDF_STATUS_E_FAILURE;
8911 	}
8912 
8913 	param.vdev_id = vdev_id;
8914 	param.peer_macaddr = peer_macaddr;
8915 	param.peer_tid_bitmap = peer_tid_bitmap;
8916 
8917 	return wmi_unified_peer_rx_reorder_queue_remove_send(wma->wmi_handle,
8918 		&param);
8919 }
8920 
8921 QDF_STATUS wma_configure_smps_params(uint32_t vdev_id, uint32_t param_id,
8922 							uint32_t param_val)
8923 {
8924 	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
8925 	int smps_cmd_value;
8926 	int status = QDF_STATUS_E_INVAL;
8927 
8928 	if (!wma) {
8929 		WMA_LOGE("%s: Failed to get wma", __func__);
8930 		return status;
8931 	}
8932 
8933 	smps_cmd_value = param_id << WMI_SMPS_PARAM_VALUE_S;
8934 	smps_cmd_value = smps_cmd_value | param_val;
8935 
8936 	status = wma_set_smps_params(wma, vdev_id, smps_cmd_value);
8937 	if (status)
8938 		WMA_LOGE("Failed to set SMPS Param");
8939 
8940 	return status;
8941 }
8942 
8943 
8944 void wma_ipa_uc_stat_request(wma_cli_set_cmd_t *privcmd)
8945 {
8946 	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
8947 
8948 	if (!wma) {
8949 		WMA_LOGE("%s: Failed to get wma", __func__);
8950 		return;
8951 	}
8952 
8953 	if (wma_set_priv_cfg(wma, privcmd))
8954 		WMA_LOGE("Failed to set wma priv congiuration");
8955 }
8956 
8957 /**
8958  * wma_config_bmiss_bcnt_params() - set bmiss config parameters
8959  * @vdev_id: virtual device for the command
8960  * @first_cnt: bmiss first value
8961  * @final_cnt: bmiss final value
8962  *
8963  * Return: QDF_STATUS_SUCCESS or non-zero on failure
8964  */
8965 QDF_STATUS wma_config_bmiss_bcnt_params(uint32_t vdev_id, uint32_t first_cnt,
8966 		uint32_t final_cnt)
8967 {
8968 	tp_wma_handle wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
8969 	int status = QDF_STATUS_E_INVAL;
8970 
8971 	if (!wma_handle) {
8972 		WMA_LOGE("%s: Failed to get wma", __func__);
8973 		return status;
8974 	}
8975 
8976 	status = wma_roam_scan_bmiss_cnt(wma_handle, first_cnt, final_cnt,
8977 			vdev_id);
8978 
8979 	if (status)
8980 		WMA_LOGE("Failed to set Bmiss Param");
8981 
8982 	return status;
8983 }
8984 
8985