xref: /wlan-dirver/qcacld-3.0/core/wma/src/wma_mgmt.c (revision a4715adff446403c8635800dda08025c59945475)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  *  DOC:  wma_mgmt.c
22  *
23  *  This file contains STA/SAP and protocol related functions.
24  */
25 
26 /* Header files */
27 
28 #include "wma.h"
29 #include "wma_api.h"
30 #include "cds_api.h"
31 #include "wmi_unified_api.h"
32 #include "wlan_qct_sys.h"
33 #include "wni_api.h"
34 #include "ani_global.h"
35 #include "wmi_unified.h"
36 #include "wni_cfg.h"
37 
38 #include "qdf_nbuf.h"
39 #include "qdf_types.h"
40 #include "qdf_mem.h"
41 
42 #include "wma_types.h"
43 #include "lim_api.h"
44 #include "lim_session_utils.h"
45 
46 #include "cds_utils.h"
47 #include "wlan_dlm_api.h"
48 #if defined(CONNECTIVITY_PKTLOG) || !defined(REMOVE_PKT_LOG)
49 #include "pktlog_ac.h"
50 #else
51 #include "pktlog_ac_fmt.h"
52 #endif /* REMOVE_PKT_LOG */
53 
54 #include "dbglog_host.h"
55 #include "csr_api.h"
56 #include "ol_fw.h"
57 #include "wma_internal.h"
58 #include "wlan_policy_mgr_api.h"
59 #include "cdp_txrx_flow_ctrl_legacy.h"
60 #include <cdp_txrx_peer_ops.h>
61 #include <cdp_txrx_pmf.h>
62 #include <cdp_txrx_cfg.h>
63 #include <cdp_txrx_cmn.h>
64 #include <cdp_txrx_misc.h>
66 #include "wlan_mgmt_txrx_tgt_api.h"
67 #include "wlan_objmgr_psoc_obj.h"
68 #include "wlan_objmgr_pdev_obj.h"
69 #include "wlan_objmgr_vdev_obj.h"
70 #include "wlan_lmac_if_api.h"
71 #include <cdp_txrx_handle.h>
72 #include "wma_he.h"
73 #include "wma_eht.h"
74 #include <qdf_crypto.h>
75 #include "wma_twt.h"
76 #include "wlan_p2p_cfg_api.h"
77 #include "cfg_ucfg_api.h"
78 #include "cfg_mlme_sta.h"
79 #include "wlan_mlme_api.h"
80 #include "wmi_unified_bcn_api.h"
81 #include <wlan_crypto_global_api.h>
82 #include <wlan_mlme_main.h>
83 #include <../../core/src/vdev_mgr_ops.h>
84 #include "wlan_pkt_capture_ucfg_api.h"
85 
86 #if defined(CONNECTIVITY_PKTLOG) || !defined(REMOVE_PKT_LOG)
87 #include <wlan_logging_sock_svc.h>
88 #endif
89 #include "wlan_cm_roam_api.h"
90 #include "wlan_cm_api.h"
91 #include "wlan_mlo_link_force.h"
92 #include <target_if_spatial_reuse.h>
93 
94 /* Max debug string size for WMM in bytes */
95 #define WMA_WMM_DEBUG_STRING_SIZE    512
96 
97 /**
98  * wma_send_bcn_buf_ll() - prepare and send beacon buffer to fw for LL
99  * @wma: wma handle
100  * @vdev_id: vdev id
101  * @param_buf: SWBA parameters
102  *
103  * Return: none
104  */
105 #ifdef WLAN_WMI_BCN
106 static void wma_send_bcn_buf_ll(tp_wma_handle wma,
107 				uint8_t vdev_id,
108 				WMI_HOST_SWBA_EVENTID_param_tlvs *param_buf)
109 {
110 	struct ieee80211_frame *wh;
111 	struct beacon_info *bcn;
112 	wmi_tim_info *tim_info = param_buf->tim_info;
113 	uint8_t *bcn_payload;
114 	QDF_STATUS ret;
115 	struct beacon_tim_ie *tim_ie;
116 	wmi_p2p_noa_info *p2p_noa_info = param_buf->p2p_noa_info;
117 	struct p2p_sub_element_noa noa_ie;
118 	struct wmi_bcn_send_from_host params;
119 	uint8_t i;
120 
121 	bcn = wma->interfaces[vdev_id].beacon;
122 	if (!bcn || !bcn->buf) {
123 		wma_err("Invalid beacon buffer");
124 		return;
125 	}
126 
127 	if (!param_buf->tim_info || !param_buf->p2p_noa_info) {
128 		wma_err("Invalid tim info or p2p noa info");
129 		return;
130 	}
131 
132 	if (WMI_UNIFIED_NOA_ATTR_NUM_DESC_GET(p2p_noa_info) >
133 			WMI_P2P_MAX_NOA_DESCRIPTORS) {
134 		wma_err("Too many descriptors %d",
135 			WMI_UNIFIED_NOA_ATTR_NUM_DESC_GET(p2p_noa_info));
136 		return;
137 	}
138 
139 	qdf_spin_lock_bh(&bcn->lock);
140 
141 	bcn_payload = qdf_nbuf_data(bcn->buf);
142 
143 	tim_ie = (struct beacon_tim_ie *)(&bcn_payload[bcn->tim_ie_offset]);
144 
145 	if (tim_info->tim_changed) {
146 		if (tim_info->tim_num_ps_pending)
147 			qdf_mem_copy(&tim_ie->tim_bitmap, tim_info->tim_bitmap,
148 				     WMA_TIM_SUPPORTED_PVB_LENGTH);
149 		else
150 			qdf_mem_zero(&tim_ie->tim_bitmap,
151 				     WMA_TIM_SUPPORTED_PVB_LENGTH);
152 		/*
153 		 * Currently we support a fixed number of
154 		 * peers, as limited by HAL_NUM_STA.
155 		 * The TIM offset is always 0.
156 		 */
157 		tim_ie->tim_bitctl = 0;
158 	}
159 
160 	/* Update DTIM Count */
161 	if (tim_ie->dtim_count == 0)
162 		tim_ie->dtim_count = tim_ie->dtim_period - 1;
163 	else
164 		tim_ie->dtim_count--;
165 
166 	/*
167 	 * The DTIM count needs to be backed up so that,
168 	 * when umac updates the beacon template, the
169 	 * current dtim count can be carried over properly
170 	 */
171 	bcn->dtim_count = tim_ie->dtim_count;
172 
173 	/* update state for buffered multicast frames on DTIM */
174 	if (tim_info->tim_mcast && (tim_ie->dtim_count == 0 ||
175 				    tim_ie->dtim_period == 1))
176 		tim_ie->tim_bitctl |= 1;
177 	else
178 		tim_ie->tim_bitctl &= ~1;
179 
180 	/* To avoid SW frames reusing HW-generated sequence numbers, values
181 	 * lower than MIN_SW_SEQ are reserved for HW-generated frames.
182 	 */
183 	if ((bcn->seq_no & IEEE80211_SEQ_MASK) < MIN_SW_SEQ)
184 		bcn->seq_no = MIN_SW_SEQ;
185 
186 	wh = (struct ieee80211_frame *)bcn_payload;
187 	*(uint16_t *) &wh->i_seq[0] = htole16(bcn->seq_no
188 					      << IEEE80211_SEQ_SEQ_SHIFT);
189 	bcn->seq_no++;
190 
191 	if (WMI_UNIFIED_NOA_ATTR_IS_MODIFIED(p2p_noa_info)) {
192 		qdf_mem_zero(&noa_ie, sizeof(noa_ie));
193 
194 		noa_ie.index =
195 			(uint8_t) WMI_UNIFIED_NOA_ATTR_INDEX_GET(p2p_noa_info);
196 		noa_ie.oppPS =
197 			(uint8_t) WMI_UNIFIED_NOA_ATTR_OPP_PS_GET(p2p_noa_info);
198 		noa_ie.ctwindow =
199 			(uint8_t) WMI_UNIFIED_NOA_ATTR_CTWIN_GET(p2p_noa_info);
200 		noa_ie.num_descriptors = (uint8_t)
201 				WMI_UNIFIED_NOA_ATTR_NUM_DESC_GET(p2p_noa_info);
202 		wma_debug("index %u, oppPs %u, ctwindow %u, num_descriptors = %u",
203 			 noa_ie.index,
204 			 noa_ie.oppPS, noa_ie.ctwindow, noa_ie.num_descriptors);
205 		for (i = 0; i < noa_ie.num_descriptors; i++) {
206 			noa_ie.noa_descriptors[i].type_count =
207 				(uint8_t) p2p_noa_info->noa_descriptors[i].
208 				type_count;
209 			noa_ie.noa_descriptors[i].duration =
210 				p2p_noa_info->noa_descriptors[i].duration;
211 			noa_ie.noa_descriptors[i].interval =
212 				p2p_noa_info->noa_descriptors[i].interval;
213 			noa_ie.noa_descriptors[i].start_time =
214 				p2p_noa_info->noa_descriptors[i].start_time;
215 			wma_debug("NoA descriptor[%d] type_count %u, duration %u, interval %u, start_time = %u",
216 				 i,
217 				 noa_ie.noa_descriptors[i].type_count,
218 				 noa_ie.noa_descriptors[i].duration,
219 				 noa_ie.noa_descriptors[i].interval,
220 				 noa_ie.noa_descriptors[i].start_time);
221 		}
222 		wma_update_noa(bcn, &noa_ie);
223 
224 		/* Send a msg to LIM to update the NoA IE in probe response
225 		 * frames transmitted by the host
226 		 */
227 		wma_update_probe_resp_noa(wma, &noa_ie);
228 	}
229 
230 	if (bcn->dma_mapped) {
231 		qdf_nbuf_unmap_single(wma->qdf_dev, bcn->buf, QDF_DMA_TO_DEVICE);
232 		bcn->dma_mapped = 0;
233 	}
234 	ret = qdf_nbuf_map_single(wma->qdf_dev, bcn->buf, QDF_DMA_TO_DEVICE);
235 	if (ret != QDF_STATUS_SUCCESS) {
236 		wma_err("failed map beacon buf to DMA region");
237 		qdf_spin_unlock_bh(&bcn->lock);
238 		return;
239 	}
240 
241 	bcn->dma_mapped = 1;
242 	params.vdev_id = vdev_id;
243 	params.data_len = bcn->len;
244 	params.frame_ctrl = *((A_UINT16 *) wh->i_fc);
245 	params.frag_ptr = qdf_nbuf_get_frag_paddr(bcn->buf, 0);
246 	params.dtim_flag = 0;
247 	/* notify Firmware of DTIM and mcast/bcast traffic */
248 	if (tim_ie->dtim_count == 0) {
249 		params.dtim_flag |= WMI_BCN_SEND_DTIM_ZERO;
250 		/* deliver mcast/bcast traffic in next DTIM beacon */
251 		if (tim_ie->tim_bitctl & 0x01)
252 			params.dtim_flag |= WMI_BCN_SEND_DTIM_BITCTL_SET;
253 	}
254 
255 	wmi_unified_bcn_buf_ll_cmd(wma->wmi_handle,
256 					&params);
257 
258 	qdf_spin_unlock_bh(&bcn->lock);
259 }
260 #else
261 static inline void
262 wma_send_bcn_buf_ll(tp_wma_handle wma,
263 		    uint8_t vdev_id,
264 		    WMI_HOST_SWBA_EVENTID_param_tlvs *param_buf)
265 {
266 }
267 #endif
268 /**
269  * wma_beacon_swba_handler() - swba event handler
270  * @handle: wma handle
271  * @event: event data
272  * @len: data length
273  *
274  * The SWBA event is an alert to the host, requesting it to queue a beacon
275  * for transmission. It is used only in host beacon mode.
276  *
277  * Return: 0 for success or error code
278  */
279 #ifdef WLAN_WMI_BCN
280 int wma_beacon_swba_handler(void *handle, uint8_t *event, uint32_t len)
281 {
282 	tp_wma_handle wma = (tp_wma_handle) handle;
283 	WMI_HOST_SWBA_EVENTID_param_tlvs *param_buf;
284 	wmi_host_swba_event_fixed_param *swba_event;
285 	uint32_t vdev_map;
286 	uint8_t vdev_id = 0;
287 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
288 
289 	param_buf = (WMI_HOST_SWBA_EVENTID_param_tlvs *) event;
290 	if (!param_buf) {
291 		wma_err("Invalid swba event buffer");
292 		return -EINVAL;
293 	}
294 	swba_event = param_buf->fixed_param;
295 	vdev_map = swba_event->vdev_map;
296 
297 	wma_debug("vdev_map = %d", vdev_map);
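	/*
	 * vdev_map is a bitmask with one bit per vdev id; walk it from the
	 * LSB and hand the first vdev with a pending SWBA to the
	 * low-latency beacon path below.
	 */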
298 	for (; vdev_map && vdev_id < wma->max_bssid;
299 			vdev_id++, vdev_map >>= 1) {
300 		if (!(vdev_map & 0x1))
301 			continue;
302 		if (!cdp_cfg_is_high_latency(soc,
303 			(struct cdp_cfg *)cds_get_context(QDF_MODULE_ID_CFG)))
304 			wma_send_bcn_buf_ll(wma, vdev_id, param_buf);
305 		break;
306 	}
307 	return 0;
308 }
309 #else
310 static inline int
311 wma_beacon_swba_handler(void *handle, uint8_t *event, uint32_t len)
312 {
313 	return 0;
314 }
315 #endif
316 
317 #ifdef FEATURE_WLAN_DIAG_SUPPORT
318 void wma_sta_kickout_event(uint32_t kickout_reason, uint8_t vdev_id,
319 			   uint8_t *macaddr)
320 {
321 	WLAN_HOST_DIAG_EVENT_DEF(sta_kickout, struct host_event_wlan_kickout);
322 	qdf_mem_zero(&sta_kickout, sizeof(sta_kickout));
323 	sta_kickout.reasoncode = kickout_reason;
324 	sta_kickout.vdev_id = vdev_id;
325 	if (macaddr)
326 		qdf_mem_copy(sta_kickout.peer_mac, macaddr,
327 			     QDF_MAC_ADDR_SIZE);
328 	WLAN_HOST_DIAG_EVENT_REPORT(&sta_kickout, EVENT_WLAN_STA_KICKOUT);
329 }
330 #endif
331 
332 int wma_peer_sta_kickout_event_handler(void *handle, uint8_t *event,
333 				       uint32_t len)
334 {
335 	tp_wma_handle wma = (tp_wma_handle) handle;
336 	WMI_PEER_STA_KICKOUT_EVENTID_param_tlvs *param_buf = NULL;
337 	wmi_peer_sta_kickout_event_fixed_param *kickout_event = NULL;
338 	uint8_t vdev_id, macaddr[QDF_MAC_ADDR_SIZE];
339 	tpDeleteStaContext del_sta_ctx;
340 	uint8_t *addr, *bssid;
341 	struct wlan_objmgr_vdev *vdev;
342 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
343 
344 	param_buf = (WMI_PEER_STA_KICKOUT_EVENTID_param_tlvs *) event;
345 	kickout_event = param_buf->fixed_param;
346 	WMI_MAC_ADDR_TO_CHAR_ARRAY(&kickout_event->peer_macaddr, macaddr);
347 	if (cdp_peer_get_vdevid(soc, macaddr, &vdev_id) !=
348 			QDF_STATUS_SUCCESS) {
349 		wma_err("Not able to find BSSID for peer ["QDF_MAC_ADDR_FMT"]",
350 			 QDF_MAC_ADDR_REF(macaddr));
351 		return -EINVAL;
352 	}
353 
354 	if (!wma_is_vdev_valid(vdev_id))
355 		return -EINVAL;
356 
357 	vdev = wma->interfaces[vdev_id].vdev;
358 	if (!vdev) {
359 		wma_err("Not able to find vdev for VDEV_%d", vdev_id);
360 		return -EINVAL;
361 	}
362 	addr = wlan_vdev_mlme_get_macaddr(vdev);
363 
364 	wma_nofl_info("STA kickout for "QDF_MAC_ADDR_FMT", on mac "QDF_MAC_ADDR_FMT", vdev %d, reason:%d",
365 		      QDF_MAC_ADDR_REF(macaddr), QDF_MAC_ADDR_REF(addr),
366 		      vdev_id, kickout_event->reason);
367 
368 	if (wma_is_roam_in_progress(vdev_id)) {
369 		wma_err("vdev_id %d: Ignore STA kick out since roaming is in progress",
370 			vdev_id);
371 		return -EINVAL;
372 	}
373 	bssid = wma_get_vdev_bssid(vdev);
374 	if (!bssid) {
375 		wma_err("Failed to get bssid for vdev_%d", vdev_id);
376 		return -ENOMEM;
377 	}
378 
379 	switch (kickout_event->reason) {
380 	case WMI_PEER_STA_KICKOUT_REASON_IBSS_DISCONNECT:
381 		goto exit_handler;
382 #ifdef FEATURE_WLAN_TDLS
383 	case WMI_PEER_STA_KICKOUT_REASON_TDLS_DISCONNECT:
384 		del_sta_ctx = (tpDeleteStaContext)
385 			qdf_mem_malloc(sizeof(tDeleteStaContext));
386 		if (!del_sta_ctx) {
387 			wma_err("mem alloc failed for struct del_sta_context for TDLS peer: "QDF_MAC_ADDR_FMT,
388 				QDF_MAC_ADDR_REF(macaddr));
389 			return -ENOMEM;
390 		}
391 
392 		del_sta_ctx->is_tdls = true;
393 		del_sta_ctx->vdev_id = vdev_id;
394 		qdf_mem_copy(del_sta_ctx->addr2, macaddr, QDF_MAC_ADDR_SIZE);
395 		qdf_mem_copy(del_sta_ctx->bssId, bssid,
396 			     QDF_MAC_ADDR_SIZE);
397 		del_sta_ctx->reasonCode = HAL_DEL_STA_REASON_CODE_KEEP_ALIVE;
398 		wma_send_msg(wma, SIR_LIM_DELETE_STA_CONTEXT_IND,
399 			     (void *)del_sta_ctx, 0);
400 		goto exit_handler;
401 #endif /* FEATURE_WLAN_TDLS */
402 
403 	case WMI_PEER_STA_KICKOUT_REASON_UNSPECIFIED:
404 		/*
405 		 * Default legacy value used by original firmware implementation
406 		 */
407 		if (wma->interfaces[vdev_id].type == WMI_VDEV_TYPE_STA &&
408 		    (wma->interfaces[vdev_id].sub_type == 0 ||
409 		     wma->interfaces[vdev_id].sub_type ==
410 		     WMI_UNIFIED_VDEV_SUBTYPE_P2P_CLIENT) &&
411 		    !qdf_mem_cmp(bssid,
412 				    macaddr, QDF_MAC_ADDR_SIZE)) {
413 			wma_sta_kickout_event(
414 			HOST_STA_KICKOUT_REASON_UNSPECIFIED, vdev_id, macaddr);
415 			/*
416 			 * KICKOUT event is for current station-AP connection.
417 			 * Treat it like final beacon miss. Station may not have
418 			 * missed beacons but not able to transmit frames to AP
419 			 * for a long time. Must disconnect to get out of
420 			 * this sticky situation.
421 			 * In future implementation, roaming module will also
422 			 * handle this event and perform a scan.
423 			 */
424 			wma_warn("WMI_PEER_STA_KICKOUT_REASON_UNSPECIFIED event for STA");
425 			wma_beacon_miss_handler(wma, vdev_id,
426 						kickout_event->rssi);
427 			goto exit_handler;
428 		}
429 		break;
430 
431 	case WMI_PEER_STA_KICKOUT_REASON_XRETRY:
432 	case WMI_PEER_STA_KICKOUT_REASON_INACTIVITY:
433 	/*
434 	 * SA query timeout kickout is handled the same way as inactivity
435 	 * kickout. This could be for either the STA or SAP role.
436 	 */
437 	case WMI_PEER_STA_KICKOUT_REASON_SA_QUERY_TIMEOUT:
438 	default:
439 		break;
440 	}
441 
442 	/*
443 	 * default action is to send delete station context indication to LIM
444 	 */
445 	del_sta_ctx =
446 		(tDeleteStaContext *) qdf_mem_malloc(sizeof(tDeleteStaContext));
447 	if (!del_sta_ctx) {
448 		wma_err("QDF MEM Alloc Failed for struct del_sta_context");
449 		return -ENOMEM;
450 	}
451 
452 	del_sta_ctx->is_tdls = false;
453 	del_sta_ctx->vdev_id = vdev_id;
454 	qdf_mem_copy(del_sta_ctx->addr2, macaddr, QDF_MAC_ADDR_SIZE);
455 	qdf_mem_copy(del_sta_ctx->bssId, addr, QDF_MAC_ADDR_SIZE);
456 	if (kickout_event->reason ==
457 		WMI_PEER_STA_KICKOUT_REASON_SA_QUERY_TIMEOUT)
458 		del_sta_ctx->reasonCode =
459 			HAL_DEL_STA_REASON_CODE_SA_QUERY_TIMEOUT;
460 	else if (kickout_event->reason == WMI_PEER_STA_KICKOUT_REASON_XRETRY)
461 		del_sta_ctx->reasonCode = HAL_DEL_STA_REASON_CODE_XRETRY;
462 	else
463 		del_sta_ctx->reasonCode = HAL_DEL_STA_REASON_CODE_KEEP_ALIVE;
464 
465 	if (wmi_service_enabled(wma->wmi_handle,
466 				wmi_service_hw_db2dbm_support))
467 		del_sta_ctx->rssi = kickout_event->rssi;
468 	else
469 		del_sta_ctx->rssi = kickout_event->rssi +
470 					WMA_TGT_NOISE_FLOOR_DBM;
471 	wma_sta_kickout_event(del_sta_ctx->reasonCode, vdev_id, macaddr);
472 	wma_send_msg(wma, SIR_LIM_DELETE_STA_CONTEXT_IND, (void *)del_sta_ctx,
473 		     0);
474 	wma_lost_link_info_handler(wma, vdev_id, del_sta_ctx->rssi);
475 
476 exit_handler:
477 	return 0;
478 }
479 
480 int wma_unified_bcntx_status_event_handler(void *handle,
481 					   uint8_t *cmd_param_info,
482 					   uint32_t len)
483 {
484 	tp_wma_handle wma = (tp_wma_handle) handle;
485 	WMI_OFFLOAD_BCN_TX_STATUS_EVENTID_param_tlvs *param_buf;
486 	wmi_offload_bcn_tx_status_event_fixed_param *resp_event;
487 	tSirFirstBeaconTxCompleteInd *beacon_tx_complete_ind;
488 
489 	param_buf =
490 		(WMI_OFFLOAD_BCN_TX_STATUS_EVENTID_param_tlvs *) cmd_param_info;
491 	if (!param_buf) {
492 		wma_err("Invalid bcn tx response event buffer");
493 		return -EINVAL;
494 	}
495 
496 	resp_event = param_buf->fixed_param;
497 
498 	if (resp_event->vdev_id >= wma->max_bssid) {
499 		wma_err("received invalid vdev_id %d", resp_event->vdev_id);
500 		return -EINVAL;
501 	}
502 
503 	/* Check for a valid handle to ensure the session has not been
504 	 * deleted in a race
505 	 */
506 	if (!wma->interfaces[resp_event->vdev_id].vdev) {
507 		wma_err("vdev is NULL for vdev_%d", resp_event->vdev_id);
508 		return -EINVAL;
509 	}
510 
511 	/* Beacon Tx Indication supports only AP mode. Ignore in other modes */
512 	if (wma_is_vdev_in_ap_mode(wma, resp_event->vdev_id) == false) {
513 		wma_debug("Beacon Tx Indication does not support type %d and sub_type %d",
514 			 wma->interfaces[resp_event->vdev_id].type,
515 			 wma->interfaces[resp_event->vdev_id].sub_type);
516 		return 0;
517 	}
518 
519 	beacon_tx_complete_ind = (tSirFirstBeaconTxCompleteInd *)
520 			qdf_mem_malloc(sizeof(tSirFirstBeaconTxCompleteInd));
521 	if (!beacon_tx_complete_ind) {
522 		wma_err("Failed to alloc beacon_tx_complete_ind");
523 		return -ENOMEM;
524 	}
525 
526 	beacon_tx_complete_ind->messageType = WMA_DFS_BEACON_TX_SUCCESS_IND;
527 	beacon_tx_complete_ind->length = sizeof(tSirFirstBeaconTxCompleteInd);
528 	beacon_tx_complete_ind->bss_idx = resp_event->vdev_id;
529 
530 	wma_send_msg(wma, WMA_DFS_BEACON_TX_SUCCESS_IND,
531 		     (void *)beacon_tx_complete_ind, 0);
532 	return 0;
533 }
534 
535 /**
536  * wma_get_go_probe_timeout() - get P2P GO probe timeout
537  * @mac: UMAC handler
538  * @max_inactive_time: return max inactive time
539  * @max_unresponsive_time: return max unresponsive time
540  *
541  * Return: none
542  */
543 #ifdef CONVERGED_P2P_ENABLE
544 static inline void
545 wma_get_go_probe_timeout(struct mac_context *mac,
546 			 uint32_t *max_inactive_time,
547 			 uint32_t *max_unresponsive_time)
548 {
549 	uint32_t keep_alive;
550 	QDF_STATUS status;
551 
552 	status = cfg_p2p_get_go_link_monitor_period(mac->psoc,
553 						    max_inactive_time);
554 	if (QDF_IS_STATUS_ERROR(status)) {
555 		wma_err("Failed to get go monitor period");
556 		*max_inactive_time = WMA_LINK_MONITOR_DEFAULT_TIME_SECS;
557 	}
558 	status = cfg_p2p_get_go_keepalive_period(mac->psoc,
559 						 &keep_alive);
560 	if (QDF_IS_STATUS_ERROR(status)) {
561 		wma_err("Failed to read go keep alive");
562 		keep_alive = WMA_KEEP_ALIVE_DEFAULT_TIME_SECS;
563 	}
564 
565 	*max_unresponsive_time = *max_inactive_time + keep_alive;
566 }
567 #else
568 static inline void
569 wma_get_go_probe_timeout(struct mac_context *mac,
570 			 uint32_t *max_inactive_time,
571 			 uint32_t *max_unresponsive_time)
572 {
573 }
574 #endif
575 
576 /**
577  * wma_get_link_probe_timeout() - get link timeout based on sub type
578  * @mac: UMAC handler
579  * @sub_type: vdev sub type
580  * @max_inactive_time: return max inactive time
581  * @max_unresponsive_time: return max unresponsive time
582  *
583  * Return: none
584  */
585 static inline void
586 wma_get_link_probe_timeout(struct mac_context *mac,
587 			   uint32_t sub_type,
588 			   uint32_t *max_inactive_time,
589 			   uint32_t *max_unresponsive_time)
590 {
591 	if (sub_type == WMI_UNIFIED_VDEV_SUBTYPE_P2P_GO) {
592 		wma_get_go_probe_timeout(mac, max_inactive_time,
593 					 max_unresponsive_time);
594 	} else {
595 		*max_inactive_time =
596 			mac->mlme_cfg->timeouts.ap_link_monitor_timeout;
597 		*max_unresponsive_time = *max_inactive_time +
598 			mac->mlme_cfg->timeouts.ap_keep_alive_timeout;
599 	}
600 }
601 
602 /**
603  * wma_verify_rate_code() - verify if rate code is valid.
604  * @rate_code:     rate code
605  * @band:     band information
606  *
607  * Return: verify result
608  */
609 static bool wma_verify_rate_code(u_int32_t rate_code, enum cds_band_type band)
610 {
611 	uint8_t preamble, nss, rate;
612 	bool valid = true;
613 
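	/*
	 * A rate code packs the preamble type in bits [7:6], the NSS in
	 * bits [5:4] and the rate index in bits [3:0]; e.g. 0x43 decodes to
	 * preamble 1, NSS 0, rate index 3.
	 */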
614 	preamble = (rate_code & 0xc0) >> 6;
615 	nss = (rate_code & 0x30) >> 4;
616 	rate = rate_code & 0xf;
617 
618 	switch (preamble) {
619 	case WMI_RATE_PREAMBLE_CCK:
620 		if (nss != 0 || rate > 3 || band == CDS_BAND_5GHZ)
621 			valid = false;
622 		break;
623 	case WMI_RATE_PREAMBLE_OFDM:
624 		if (nss != 0 || rate > 7)
625 			valid = false;
626 		break;
627 	case WMI_RATE_PREAMBLE_HT:
628 		if (nss != 0 || rate > 7)
629 			valid = false;
630 		break;
631 	case WMI_RATE_PREAMBLE_VHT:
632 		if (nss != 0 || rate > 9)
633 			valid = false;
634 		break;
635 	default:
636 		break;
637 	}
638 	return valid;
639 }
640 
641 #define TX_MGMT_RATE_2G_ENABLE_OFFSET 30
642 #define TX_MGMT_RATE_5G_ENABLE_OFFSET 31
643 #define TX_MGMT_RATE_2G_OFFSET 0
644 #define TX_MGMT_RATE_5G_OFFSET 12
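/*
 * Layout of the wmi_vdev_param_per_band_mgmt_tx_rate value assembled below:
 * bits [10:0] carry the 2.4 GHz rate code, bits [22:12] the 5 GHz rate code,
 * and bits 30/31 mark the 2.4 GHz/5 GHz fields as valid.
 */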
645 
646 #define MAX_VDEV_MGMT_RATE_PARAMS 2
647 /* params being sent:
648  * wmi_vdev_param_mgmt_tx_rate
649  * wmi_vdev_param_per_band_mgmt_tx_rate
650  */
651 
652 /**
653  * wma_set_vdev_mgmt_rate() - set vdev mgmt rate.
654  * @wma:     wma handle
655  * @vdev_id: vdev id
656  *
657  * Return: None
658  */
659 void wma_set_vdev_mgmt_rate(tp_wma_handle wma, uint8_t vdev_id)
660 {
661 	uint32_t cfg_val;
662 	uint32_t per_band_mgmt_tx_rate = 0;
663 	enum cds_band_type band = 0;
664 	struct mac_context *mac = cds_get_context(QDF_MODULE_ID_PE);
665 	struct dev_set_param setparam[MAX_VDEV_MGMT_RATE_PARAMS] = {};
666 	uint8_t index = 0;
667 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
668 
669 	if (!mac) {
670 		wma_err("Failed to get mac");
671 		return;
672 	}
673 
674 	cfg_val = mac->mlme_cfg->sap_cfg.rate_tx_mgmt;
675 	band = CDS_BAND_ALL;
676 	if ((cfg_val == MLME_CFG_TX_MGMT_RATE_DEF) ||
677 	    !wma_verify_rate_code(cfg_val, band)) {
678 		wma_nofl_debug("default WNI_CFG_RATE_FOR_TX_MGMT, ignore");
679 	} else {
680 		status = mlme_check_index_setparam(setparam,
681 						   wmi_vdev_param_mgmt_tx_rate,
682 						   cfg_val, index++,
683 						   MAX_VDEV_MGMT_RATE_PARAMS);
684 		if (QDF_IS_STATUS_ERROR(status)) {
685 			wma_err("failed at wmi_vdev_param_mgmt_tx_rate");
686 			goto error;
687 		}
688 	}
689 
690 	cfg_val = mac->mlme_cfg->sap_cfg.rate_tx_mgmt_2g;
691 	band = CDS_BAND_2GHZ;
692 	if ((cfg_val == MLME_CFG_TX_MGMT_2G_RATE_DEF) ||
693 	    !wma_verify_rate_code(cfg_val, band)) {
694 		wma_nofl_debug("use default 2G MGMT rate.");
695 		per_band_mgmt_tx_rate &=
696 		    ~(1 << TX_MGMT_RATE_2G_ENABLE_OFFSET);
697 	} else {
698 		per_band_mgmt_tx_rate |=
699 		    (1 << TX_MGMT_RATE_2G_ENABLE_OFFSET);
700 		per_band_mgmt_tx_rate |=
701 		    ((cfg_val & 0x7FF) << TX_MGMT_RATE_2G_OFFSET);
702 	}
703 
704 	cfg_val = mac->mlme_cfg->sap_cfg.rate_tx_mgmt_5g;
705 	band = CDS_BAND_5GHZ;
706 	if ((cfg_val == MLME_CFG_TX_MGMT_5G_RATE_DEF) ||
707 	    !wma_verify_rate_code(cfg_val, band)) {
708 		wma_nofl_debug("use default 5G MGMT rate.");
709 		per_band_mgmt_tx_rate &=
710 		    ~(1 << TX_MGMT_RATE_5G_ENABLE_OFFSET);
711 	} else {
712 		per_band_mgmt_tx_rate |=
713 		    (1 << TX_MGMT_RATE_5G_ENABLE_OFFSET);
714 		per_band_mgmt_tx_rate |=
715 		    ((cfg_val & 0x7FF) << TX_MGMT_RATE_5G_OFFSET);
716 	}
717 
718 	status = mlme_check_index_setparam(setparam,
719 					   wmi_vdev_param_per_band_mgmt_tx_rate,
720 					   per_band_mgmt_tx_rate, index++,
721 					   MAX_VDEV_MGMT_RATE_PARAMS);
722 	if (QDF_IS_STATUS_ERROR(status)) {
723 		wma_err("failed at wmi_vdev_param_per_band_mgmt_tx_rate");
724 		goto error;
725 	}
726 
727 	status = wma_send_multi_pdev_vdev_set_params(MLME_VDEV_SETPARAM,
728 						     vdev_id, setparam, index);
729 	if (QDF_IS_STATUS_ERROR(status))
730 		wma_debug("failed to send MGMT_TX_RATE vdev set params stat:%d",
731 			  status);
732 error:
733 	return;
734 }
735 
736 #define MAX_VDEV_SAP_KEEPALIVE_PARAMS 3
737 /* params being sent:
738  * wmi_vdev_param_ap_keepalive_min_idle_inactive_time_secs
739  * wmi_vdev_param_ap_keepalive_max_idle_inactive_time_secs
740  * wmi_vdev_param_ap_keepalive_max_unresponsive_time_secs
741  */
742 
743 /**
744  * wma_set_sap_keepalive() - set SAP keep alive parameters to fw
745  * @wma: wma handle
746  * @vdev_id: vdev id
747  *
748  * Return: none
749  */
750 void wma_set_sap_keepalive(tp_wma_handle wma, uint8_t vdev_id)
751 {
752 	uint32_t min_inactive_time, max_inactive_time, max_unresponsive_time;
753 	struct mac_context *mac = cds_get_context(QDF_MODULE_ID_PE);
754 	QDF_STATUS status;
755 	struct dev_set_param setparam[MAX_VDEV_SAP_KEEPALIVE_PARAMS] = {};
756 	uint8_t index = 0;
757 
758 	if (!mac) {
759 		wma_err("Failed to get mac");
760 		return;
761 	}
762 
763 	wma_get_link_probe_timeout(mac, wma->interfaces[vdev_id].sub_type,
764 				   &max_inactive_time, &max_unresponsive_time);
765 
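	/*
	 * The minimum idle threshold is simply half of the configured
	 * maximum; per the parameter names, firmware starts probing a peer
	 * once it has been idle for the minimum time and declares it lost
	 * after max_unresponsive_time.
	 */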
766 	min_inactive_time = max_inactive_time / 2;
767 	status = mlme_check_index_setparam(
768 			setparam,
769 			wmi_vdev_param_ap_keepalive_min_idle_inactive_time_secs,
770 			min_inactive_time, index++,
771 			MAX_VDEV_SAP_KEEPALIVE_PARAMS);
772 	if (QDF_IS_STATUS_ERROR(status)) {
773 		wma_err("failed to set wmi_vdev_param_ap_keepalive_min_idle_inactive_time_secs");
774 		goto error;
775 	}
776 	status = mlme_check_index_setparam(
777 			setparam,
778 			wmi_vdev_param_ap_keepalive_max_idle_inactive_time_secs,
779 			max_inactive_time, index++,
780 			MAX_VDEV_SAP_KEEPALIVE_PARAMS);
781 	if (QDF_IS_STATUS_ERROR(status)) {
782 		wma_err("failed to set wmi_vdev_param_ap_keepalive_max_idle_inactive_time_secs");
783 		goto error;
784 	}
785 	status = mlme_check_index_setparam(
786 			setparam,
787 			wmi_vdev_param_ap_keepalive_max_unresponsive_time_secs,
788 			max_unresponsive_time, index++,
789 			MAX_VDEV_SAP_KEEPALIVE_PARAMS);
790 	if (QDF_IS_STATUS_ERROR(status)) {
791 		wma_err("failed to set wmi_vdev_param_ap_keepalive_max_unresponsive_time_secs");
792 		goto error;
793 	}
794 
795 	status = wma_send_multi_pdev_vdev_set_params(MLME_VDEV_SETPARAM,
796 						     vdev_id, setparam, index);
797 	if (QDF_IS_STATUS_ERROR(status))
798 		wma_err("Failed to Set AP MIN/MAX IDLE INACTIVE TIME, MAX UNRESPONSIVE TIME:%d", status);
799 	else
800 		wma_debug("vdev_id:%d min_inactive_time: %u max_inactive_time: %u max_unresponsive_time: %u",
801 			  vdev_id, min_inactive_time, max_inactive_time,
802 			  max_unresponsive_time);
803 error:
804 	return;
805 }
806 
807 /**
808  * wma_set_sta_sa_query_param() - set sta sa query parameters
809  * @wma: wma handle
810  * @vdev_id: vdev id
 811  *
 812  * This function sets the STA SA query related parameters in fw.
813  *
814  * Return: none
815  */
816 
817 void wma_set_sta_sa_query_param(tp_wma_handle wma,
818 				  uint8_t vdev_id)
819 {
820 	struct mac_context *mac = cds_get_context(QDF_MODULE_ID_PE);
821 	uint8_t max_retries;
822 	uint16_t retry_interval;
823 
824 	if (!mac) {
825 		wma_err("mac context is NULL");
826 		return;
827 	}
828 
829 	max_retries = mac->mlme_cfg->gen.pmf_sa_query_max_retries;
830 	retry_interval = mac->mlme_cfg->gen.pmf_sa_query_retry_interval;
831 
832 	wmi_unified_set_sta_sa_query_param_cmd(wma->wmi_handle,
833 						vdev_id,
834 						max_retries,
835 						retry_interval);
836 }
837 
838 /**
839  * wma_set_sta_keep_alive() - set sta keep alive parameters
840  * @wma: wma handle
841  * @vdev_id: vdev id
842  * @method: method for keep alive
843  * @timeperiod: time period
844  * @hostv4addr: host ipv4 address
845  * @destv4addr: dst ipv4 address
846  * @destmac: destination mac
847  *
848  * This function sets keep alive related parameters in fw.
849  *
850  * Return: none
851  */
852 void wma_set_sta_keep_alive(tp_wma_handle wma, uint8_t vdev_id,
853 			    uint32_t method, uint32_t timeperiod,
854 			    uint8_t *hostv4addr, uint8_t *destv4addr,
855 			    uint8_t *destmac)
856 {
857 	struct sta_keep_alive_params params = { 0 };
858 	struct wma_txrx_node *intr;
859 
860 	if (wma_validate_handle(wma))
861 		return;
862 
863 	intr = &wma->interfaces[vdev_id];
864 	if (timeperiod > cfg_max(CFG_INFRA_STA_KEEP_ALIVE_PERIOD)) {
865 		wmi_err("Invalid period %d Max limit %d", timeperiod,
866 			 cfg_max(CFG_INFRA_STA_KEEP_ALIVE_PERIOD));
867 		return;
868 	}
869 
870 	params.vdev_id = vdev_id;
871 	params.method = method;
872 	params.timeperiod = timeperiod;
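	/*
	 * A BSS max idle period advertised by the AP overrides the configured
	 * keepalive interval; NULL-data keepalives are also switched to
	 * management-frame keepalives in that case (presumably to satisfy the
	 * BSS max idle period mechanism).
	 */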
873 	if (intr) {
874 		if (intr->bss_max_idle_period) {
875 			params.timeperiod = intr->bss_max_idle_period;
876 			if (method == WMI_KEEP_ALIVE_NULL_PKT)
877 				params.method = WMI_KEEP_ALIVE_MGMT_FRAME;
878 		}
879 	}
880 
881 	if (hostv4addr)
882 		qdf_mem_copy(params.hostv4addr, hostv4addr, QDF_IPV4_ADDR_SIZE);
883 	if (destv4addr)
884 		qdf_mem_copy(params.destv4addr, destv4addr, QDF_IPV4_ADDR_SIZE);
885 	if (destmac)
886 		qdf_mem_copy(params.destmac, destmac, QDF_MAC_ADDR_SIZE);
887 
888 	wmi_unified_set_sta_keep_alive_cmd(wma->wmi_handle, &params);
889 }
890 
891 /*
892  * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
893  *   0 for no restriction
894  *   1 for 1/4 us - Our lower layer calculations limit our precision to 1 msec
895  *   2 for 1/2 us - Our lower layer calculations limit our precision to 1 msec
896  *   3 for 1 us
897  *   4 for 2 us
898  *   5 for 4 us
899  *   6 for 8 us
900  *   7 for 16 us
901  */
902 static const uint8_t wma_mpdu_spacing[] = { 0, 1, 1, 1, 2, 4, 8, 16 };
903 
904 /**
905  * wma_parse_mpdudensity() - convert mpdu density to mpdu spacing
906  * @mpdudensity: mpdu density
907  *
908  * Return: mpdu spacing or 0 for error
909  */
910 static inline uint8_t wma_parse_mpdudensity(uint8_t mpdudensity)
911 {
912 	if (mpdudensity < sizeof(wma_mpdu_spacing))
913 		return wma_mpdu_spacing[mpdudensity];
914 	else
915 		return 0;
916 }
917 
918 #define CFG_CTRL_MASK              0xFF00
919 #define CFG_DATA_MASK              0x00FF
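/*
 * max_htmcs_txdata packs two bytes: a non-zero control byte (CFG_CTRL_MASK)
 * enables the limit and the data byte (CFG_DATA_MASK) holds the highest HT
 * MCS index allowed for transmitted data frames.
 */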
920 
921 /**
922  * wma_mask_tx_ht_rate() - mask tx ht rate based on config
923  * @wma:     wma handle
924  * @mcs_set: mcs set buffer
925  *
926  * Return: None
927  */
928 static void wma_mask_tx_ht_rate(tp_wma_handle wma, uint8_t *mcs_set)
929 {
930 	uint32_t i, j;
931 	uint16_t mcs_limit;
932 	uint8_t *rate_pos = mcs_set;
933 	struct mac_context *mac = wma->mac_context;
934 
935 	/*
936 	 * Get MCS limit from ini configure, and map it to rate parameters
937 	 * This will limit HT rate upper bound. CFG_CTRL_MASK is used to
938 	 * check whether ini config is enabled and CFG_DATA_MASK to get the
939 	 * MCS value.
940 	 */
941 	mcs_limit = mac->mlme_cfg->rates.max_htmcs_txdata;
942 
943 	if (mcs_limit & CFG_CTRL_MASK) {
944 		wma_debug("set mcs_limit %x", mcs_limit);
945 
946 		mcs_limit &= CFG_DATA_MASK;
947 		for (i = 0, j = 0; i < MAX_SUPPORTED_RATES;) {
948 			if (j < mcs_limit / 8) {
949 				rate_pos[j] = 0xff;
950 				j++;
951 				i += 8;
952 			} else if (j < mcs_limit / 8 + 1) {
953 				if (i <= mcs_limit)
954 					rate_pos[i / 8] |= 1 << (i % 8);
955 				else
956 					rate_pos[i / 8] &= ~(1 << (i % 8));
957 				i++;
958 
959 				if (i >= (j + 1) * 8)
960 					j++;
961 			} else {
962 				rate_pos[j++] = 0;
963 				i += 8;
964 			}
965 		}
966 	}
967 }
968 
969 #if SUPPORT_11AX
970 /**
971  * wma_fw_to_host_phymode_11ax() - convert fw to host phymode for 11ax phymodes
972  * @phymode: phymode to convert
973  *
974  * Return: one of the 11ax values defined in enum wlan_phymode;
975  *         or WLAN_PHYMODE_AUTO if the input is not an 11ax phymode
976  */
977 static enum wlan_phymode
978 wma_fw_to_host_phymode_11ax(WMI_HOST_WLAN_PHY_MODE phymode)
979 {
980 	switch (phymode) {
981 	default:
982 		return WLAN_PHYMODE_AUTO;
983 	case WMI_HOST_MODE_11AX_HE20:
984 		return WLAN_PHYMODE_11AXA_HE20;
985 	case WMI_HOST_MODE_11AX_HE40:
986 		return WLAN_PHYMODE_11AXA_HE40;
987 	case WMI_HOST_MODE_11AX_HE80:
988 		return WLAN_PHYMODE_11AXA_HE80;
989 	case WMI_HOST_MODE_11AX_HE80_80:
990 		return WLAN_PHYMODE_11AXA_HE80_80;
991 	case WMI_HOST_MODE_11AX_HE160:
992 		return WLAN_PHYMODE_11AXA_HE160;
993 	case WMI_HOST_MODE_11AX_HE20_2G:
994 		return WLAN_PHYMODE_11AXG_HE20;
995 	case WMI_HOST_MODE_11AX_HE40_2G:
996 		return WLAN_PHYMODE_11AXG_HE40;
997 	case WMI_HOST_MODE_11AX_HE80_2G:
998 		return WLAN_PHYMODE_11AXG_HE80;
999 	}
1000 	return WLAN_PHYMODE_AUTO;
1001 }
1002 #else
1003 static enum wlan_phymode
1004 wma_fw_to_host_phymode_11ax(WMI_HOST_WLAN_PHY_MODE phymode)
1005 {
1006 	return WLAN_PHYMODE_AUTO;
1007 }
1008 #endif
1009 
1010 #ifdef WLAN_FEATURE_11BE
1011 /**
1012  * wma_fw_to_host_phymode_11be() - convert fw to host phymode for 11be phymodes
1013  * @phymode: phymode to convert
1014  *
1015  * Return: one of the 11be values defined in enum wlan_phymode;
1016  *         or WLAN_PHYMODE_AUTO if the input is not an 11be phymode
1017  */
1018 static enum wlan_phymode
1019 wma_fw_to_host_phymode_11be(WMI_HOST_WLAN_PHY_MODE phymode)
1020 {
1021 	switch (phymode) {
1022 	default:
1023 		return WLAN_PHYMODE_AUTO;
1024 	case WMI_HOST_MODE_11BE_EHT20:
1025 		return WLAN_PHYMODE_11BEA_EHT20;
1026 	case WMI_HOST_MODE_11BE_EHT40:
1027 		return WLAN_PHYMODE_11BEA_EHT40;
1028 	case WMI_HOST_MODE_11BE_EHT80:
1029 		return WLAN_PHYMODE_11BEA_EHT80;
1030 	case WMI_HOST_MODE_11BE_EHT160:
1031 		return WLAN_PHYMODE_11BEA_EHT160;
1032 	case WMI_HOST_MODE_11BE_EHT320:
1033 		return WLAN_PHYMODE_11BEA_EHT320;
1034 	case WMI_HOST_MODE_11BE_EHT20_2G:
1035 		return WLAN_PHYMODE_11BEG_EHT20;
1036 	case WMI_HOST_MODE_11BE_EHT40_2G:
1037 		return WLAN_PHYMODE_11BEG_EHT40;
1038 	}
1039 	return WLAN_PHYMODE_AUTO;
1040 }
1041 
1042 static inline bool wma_is_phymode_eht(enum wlan_phymode phymode)
1043 {
1044 	return IS_WLAN_PHYMODE_EHT(phymode);
1045 }
1046 #else
1047 static enum wlan_phymode
1048 wma_fw_to_host_phymode_11be(WMI_HOST_WLAN_PHY_MODE phymode)
1049 {
1050 	return WLAN_PHYMODE_AUTO;
1051 }
1052 
1053 static inline bool wma_is_phymode_eht(enum wlan_phymode phymode)
1054 {
1055 	return false;
1056 }
1057 #endif
1058 
1059 #ifdef CONFIG_160MHZ_SUPPORT
1060 /**
1061  * wma_fw_to_host_phymode_160() - convert fw to host phymode for 160 mhz
1062  * phymodes
1063  * @phymode: phymode to convert
1064  *
1065  * Return: one of the 160 mhz values defined in enum wlan_phymode;
1066  *         or WLAN_PHYMODE_AUTO if the input is not a 160 mhz phymode
1067  */
1068 static enum wlan_phymode
1069 wma_fw_to_host_phymode_160(WMI_HOST_WLAN_PHY_MODE phymode)
1070 {
1071 	switch (phymode) {
1072 	default:
1073 		return WLAN_PHYMODE_AUTO;
1074 	case WMI_HOST_MODE_11AC_VHT80_80:
1075 		return WLAN_PHYMODE_11AC_VHT80_80;
1076 	case WMI_HOST_MODE_11AC_VHT160:
1077 		return WLAN_PHYMODE_11AC_VHT160;
1078 	}
1079 }
1080 #else
1081 static enum wlan_phymode
1082 wma_fw_to_host_phymode_160(WMI_HOST_WLAN_PHY_MODE phymode)
1083 {
1084 	return WLAN_PHYMODE_AUTO;
1085 }
1086 #endif
1087 
1088 enum wlan_phymode wma_fw_to_host_phymode(WMI_HOST_WLAN_PHY_MODE phymode)
1089 {
1090 	enum wlan_phymode host_phymode;
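	/*
	 * Legacy, HT and VHT modes up to 80 MHz are mapped inline below;
	 * 160 MHz, 11ax and 11be modes are delegated to the feature-gated
	 * helpers, each of which returns WLAN_PHYMODE_AUTO when the mode is
	 * not one it handles.
	 */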
1091 	switch (phymode) {
1092 	default:
1093 		host_phymode = wma_fw_to_host_phymode_160(phymode);
1094 		if (host_phymode != WLAN_PHYMODE_AUTO)
1095 			return host_phymode;
1096 		host_phymode = wma_fw_to_host_phymode_11ax(phymode);
1097 		if (host_phymode != WLAN_PHYMODE_AUTO)
1098 			return host_phymode;
1099 		return wma_fw_to_host_phymode_11be(phymode);
1100 	case WMI_HOST_MODE_11A:
1101 		return WLAN_PHYMODE_11A;
1102 	case WMI_HOST_MODE_11G:
1103 		return WLAN_PHYMODE_11G;
1104 	case WMI_HOST_MODE_11B:
1105 		return WLAN_PHYMODE_11B;
1106 	case WMI_HOST_MODE_11GONLY:
1107 		return WLAN_PHYMODE_11G_ONLY;
1108 	case WMI_HOST_MODE_11NA_HT20:
1109 		return WLAN_PHYMODE_11NA_HT20;
1110 	case WMI_HOST_MODE_11NG_HT20:
1111 		return WLAN_PHYMODE_11NG_HT20;
1112 	case WMI_HOST_MODE_11NA_HT40:
1113 		return WLAN_PHYMODE_11NA_HT40;
1114 	case WMI_HOST_MODE_11NG_HT40:
1115 		return WLAN_PHYMODE_11NG_HT40;
1116 	case WMI_HOST_MODE_11AC_VHT20:
1117 		return WLAN_PHYMODE_11AC_VHT20;
1118 	case WMI_HOST_MODE_11AC_VHT40:
1119 		return WLAN_PHYMODE_11AC_VHT40;
1120 	case WMI_HOST_MODE_11AC_VHT80:
1121 		return WLAN_PHYMODE_11AC_VHT80;
1122 	case WMI_HOST_MODE_11AC_VHT20_2G:
1123 		return WLAN_PHYMODE_11AC_VHT20_2G;
1124 	case WMI_HOST_MODE_11AC_VHT40_2G:
1125 		return WLAN_PHYMODE_11AC_VHT40_2G;
1126 	case WMI_HOST_MODE_11AC_VHT80_2G:
1127 		return WLAN_PHYMODE_11AC_VHT80_2G;
1128 	}
1129 }
1130 
1131 #ifdef WLAN_FEATURE_11BE
1132 static void wma_populate_peer_puncture(struct peer_assoc_params *peer,
1133 				       struct wlan_channel *des_chan)
1134 {
1135 	peer->puncture_bitmap = des_chan->puncture_bitmap;
1136 	wma_debug("Peer EHT puncture bitmap %d", peer->puncture_bitmap);
1137 }
1138 
1139 static void wma_populate_peer_mlo_cap(struct peer_assoc_params *peer,
1140 				      tpAddStaParams params)
1141 {
1142 	struct peer_assoc_ml_partner_links *ml_links;
1143 	struct peer_assoc_mlo_params *mlo_params;
1144 	struct peer_ml_info *ml_info;
1145 	uint8_t i;
1146 
1147 	ml_info = &params->ml_info;
1148 	mlo_params = &peer->mlo_params;
1149 	ml_links = &peer->ml_links;
1150 
1151 	/* Assoc link info */
1152 	mlo_params->vdev_id = ml_info->vdev_id;
1153 	mlo_params->ieee_link_id = ml_info->link_id;
1154 	qdf_mem_copy(&mlo_params->chan, &ml_info->channel_info,
1155 		     sizeof(struct wlan_channel));
1156 	qdf_mem_copy(&mlo_params->bssid, &ml_info->link_addr,
1157 		     QDF_MAC_ADDR_SIZE);
1158 	qdf_mem_copy(&mlo_params->mac_addr, &ml_info->self_mac_addr,
1159 		     QDF_MAC_ADDR_SIZE);
1160 
1161 	mlo_params->rec_max_simultaneous_links =
1162 		ml_info->rec_max_simultaneous_links;
1163 
1164 	/* Fill partner link info */
1165 	ml_links->num_links = ml_info->num_links;
1166 	for (i = 0; i < ml_links->num_links; i++) {
1167 		ml_links->partner_info[i].vdev_id =
1168 					ml_info->partner_info[i].vdev_id;
1169 		ml_links->partner_info[i].link_id =
1170 					ml_info->partner_info[i].link_id;
1171 		qdf_mem_copy(&ml_links->partner_info[i].chan,
1172 			     &ml_info->partner_info[i].channel_info,
1173 			     sizeof(struct wlan_channel));
1174 		qdf_mem_copy(&ml_links->partner_info[i].bssid,
1175 			     &ml_info->partner_info[i].link_addr,
1176 			     QDF_MAC_ADDR_SIZE);
1177 		qdf_mem_copy(&ml_links->partner_info[i].mac_addr,
1178 			     &ml_info->partner_info[i].self_mac_addr,
1179 			     QDF_MAC_ADDR_SIZE);
1180 	}
1181 }
1182 #else
1183 static void wma_populate_peer_puncture(struct peer_assoc_params *peer,
1184 				       struct wlan_channel *des_chan)
1185 {
1186 }
1187 
1188 static void wma_populate_peer_mlo_cap(struct peer_assoc_params *peer,
1189 				      tpAddStaParams params)
1190 {
1191 }
1192 #endif
1193 
1194 void wma_objmgr_set_peer_mlme_nss(tp_wma_handle wma, uint8_t *mac_addr,
1195 				  uint8_t nss)
1196 {
1197 	uint8_t pdev_id;
1198 	struct wlan_objmgr_peer *peer;
1199 	struct peer_mlme_priv_obj *peer_priv;
1200 	struct wlan_objmgr_psoc *psoc = wma->psoc;
1201 
1202 	pdev_id = wlan_objmgr_pdev_get_pdev_id(wma->pdev);
1203 	peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr,
1204 				    WLAN_LEGACY_WMA_ID);
1205 	if (!peer)
1206 		return;
1207 
1208 	peer_priv = wlan_objmgr_peer_get_comp_private_obj(peer,
1209 							  WLAN_UMAC_COMP_MLME);
1210 	if (!peer_priv) {
1211 		wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
1212 		return;
1213 	}
1214 
1215 	peer_priv->nss = nss;
1216 	wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
1217 }
1218 
1219 uint8_t wma_objmgr_get_peer_mlme_nss(tp_wma_handle wma, uint8_t *mac_addr)
1220 {
1221 	uint8_t pdev_id;
1222 	struct wlan_objmgr_peer *peer;
1223 	struct peer_mlme_priv_obj *peer_priv;
1224 	struct wlan_objmgr_psoc *psoc = wma->psoc;
1225 	uint8_t nss;
1226 
1227 	pdev_id = wlan_objmgr_pdev_get_pdev_id(wma->pdev);
1228 	peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr,
1229 				    WLAN_LEGACY_WMA_ID);
1230 	if (!peer)
1231 		return 0;
1232 
1233 	peer_priv = wlan_objmgr_peer_get_comp_private_obj(peer,
1234 							  WLAN_UMAC_COMP_MLME);
1235 	if (!peer_priv) {
1236 		wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
1237 		return 0;
1238 	}
1239 
1240 	nss = peer_priv->nss;
1241 	wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
1242 	return nss;
1243 }
1244 
1245 void wma_objmgr_set_peer_mlme_phymode(tp_wma_handle wma, uint8_t *mac_addr,
1246 				      enum wlan_phymode phymode)
1247 {
1248 	uint8_t pdev_id;
1249 	struct wlan_objmgr_peer *peer;
1250 	struct wlan_objmgr_psoc *psoc = wma->psoc;
1251 
1252 	pdev_id = wlan_objmgr_pdev_get_pdev_id(wma->pdev);
1253 	peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr,
1254 				    WLAN_LEGACY_WMA_ID);
1255 	if (!peer)
1256 		return;
1257 
1258 	wlan_peer_obj_lock(peer);
1259 	wlan_peer_set_phymode(peer, phymode);
1260 	wlan_peer_obj_unlock(peer);
1261 	wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
1262 }
1263 
1264 /**
1265  * wma_objmgr_set_peer_mlme_type() - set peer type to peer object
1266  * @wma:      wma handle
1267  * @mac_addr: mac addr of peer
1268  * @peer_type:  peer type value to set
1269  *
1270  * Return: None
1271  */
1272 static void wma_objmgr_set_peer_mlme_type(tp_wma_handle wma,
1273 					  uint8_t *mac_addr,
1274 					  enum wlan_peer_type peer_type)
1275 {
1276 	uint8_t pdev_id;
1277 	struct wlan_objmgr_peer *peer;
1278 	struct wlan_objmgr_psoc *psoc = wma->psoc;
1279 
1280 	pdev_id = wlan_objmgr_pdev_get_pdev_id(wma->pdev);
1281 	peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr,
1282 				    WLAN_LEGACY_WMA_ID);
1283 	if (!peer)
1284 		return;
1285 
1286 	wlan_peer_obj_lock(peer);
1287 	wlan_peer_set_peer_type(peer, peer_type);
1288 	wlan_peer_obj_unlock(peer);
1289 	wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
1290 }
1291 
1292 #ifdef WLAN_FEATURE_11BE_MLO
1293 
1294 #define MIN_TIMEOUT_VAL 0
1295 #define MAX_TIMEOUT_VAL 11
1296 
1297 #define TIMEOUT_TO_US 6
1298 
1299 /**
1300  * wma_convert_trans_timeout_us() - convert the EMLSR transition timeout
1301  * to microseconds. Refer to Table 9-401h of the IEEE 802.11be
1302  * specification.
1303  * @timeout: EMLSR transition timeout
1304  *
1305  * Return: Timeout value in microseconds
1306  */
1307 static inline uint32_t
1308 wma_convert_trans_timeout_us(uint16_t timeout)
1309 {
1310 	uint32_t us = 0;
1311 
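	/*
	 * Encoded values 1..10 map to 2^(timeout + 6) us, i.e. 128 us for a
	 * value of 1 up to 65536 us for 10; values of 0 or above 10 yield 0.
	 */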
1312 	if (timeout > MIN_TIMEOUT_VAL && timeout < MAX_TIMEOUT_VAL) {
1313 		/* timeout = 1 is for 128us*/
1314 		us = (1 << (timeout + TIMEOUT_TO_US));
1315 	}
1316 
1317 	return us;
1318 }
1319 
1320 /**
1321  * wma_set_mlo_capability() - set MLO caps to the peer assoc request
1322  * @wma: wma handle
1323  * @vdev: vdev object
1324  * @params: Add sta params
1325  * @req: peer assoc request parameters
1326  *
1327  * Return: None
1328  */
1329 static void wma_set_mlo_capability(tp_wma_handle wma,
1330 				   struct wlan_objmgr_vdev *vdev,
1331 				   tpAddStaParams params,
1332 				   struct peer_assoc_params *req)
1333 {
1334 	uint8_t pdev_id;
1335 	struct wlan_objmgr_peer *peer;
1336 	struct wlan_objmgr_psoc *psoc = wma->psoc;
1337 	uint16_t link_id_bitmap;
1338 
1339 	pdev_id = wlan_objmgr_pdev_get_pdev_id(wma->pdev);
1340 	peer = wlan_objmgr_get_peer(psoc, pdev_id, req->peer_mac,
1341 				    WLAN_LEGACY_WMA_ID);
1342 
1343 	if (!peer) {
1344 		wma_err("peer not valid");
1345 		return;
1346 	}
1347 
1348 	if (!qdf_is_macaddr_zero((struct qdf_mac_addr *)peer->mldaddr)) {
1349 		req->mlo_params.mlo_enabled = true;
1350 		req->mlo_params.mlo_assoc_link =
1351 					wlan_peer_mlme_is_assoc_peer(peer);
1352 		WLAN_ADDR_COPY(req->mlo_params.mld_mac, peer->mldaddr);
1353 		if (policy_mgr_ml_link_vdev_need_to_be_disabled(psoc, vdev,
1354 								true) ||
1355 		    policy_mgr_is_emlsr_sta_concurrency_present(psoc)) {
1356 			req->mlo_params.mlo_force_link_inactive = 1;
1357 			link_id_bitmap = 1 << params->link_id;
1358 			ml_nlink_set_curr_force_inactive_state(
1359 					psoc, vdev, link_id_bitmap, LINK_ADD);
1360 		}
1361 		wma_debug("assoc_link %d" QDF_MAC_ADDR_FMT ", force inactive %d link id %d",
1362 			  req->mlo_params.mlo_assoc_link,
1363 			  QDF_MAC_ADDR_REF(peer->mldaddr),
1364 			  req->mlo_params.mlo_force_link_inactive,
1365 			  params->link_id);
1366 
1367 		req->mlo_params.emlsr_support = params->emlsr_support;
1368 		req->mlo_params.ieee_link_id = params->link_id;
1369 		if (req->mlo_params.emlsr_support) {
1370 			req->mlo_params.trans_timeout_us =
1371 			wma_convert_trans_timeout_us(params->emlsr_trans_timeout);
1372 		}
1373 		req->mlo_params.msd_cap_support = params->msd_caps_present;
1374 		req->mlo_params.medium_sync_duration =
1375 				params->msd_caps.med_sync_duration;
1376 		req->mlo_params.medium_sync_ofdm_ed_thresh =
1377 				params->msd_caps.med_sync_ofdm_ed_thresh;
1378 		req->mlo_params.medium_sync_max_txop_num =
1379 				params->msd_caps.med_sync_max_txop_num;
1380 		req->mlo_params.link_switch_in_progress =
1381 			wlan_vdev_mlme_is_mlo_link_switch_in_progress(vdev);
1382 		/*
1383 		 * Set max simultaneous links = 1 for MLSR, 2 for MLMR. The +1
1384 		 * is added as per the agreement with FW for backward
1385 		 * compatibility purposes. Our internal structures still
1386 		 * conform to the values as per spec i.e. 0 = MLSR, 1 = MLMR.
1387 		 */
1388 		req->mlo_params.max_num_simultaneous_links =
1389 			wlan_mlme_get_sta_mlo_simultaneous_links(psoc) + 1;
1390 	} else {
1391 		wma_debug("Peer MLO context is NULL");
1392 		req->mlo_params.mlo_enabled = false;
1393 		req->mlo_params.emlsr_support = false;
1394 	}
1395 	wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
1396 }
1397 
1398 static void wma_set_mlo_assoc_vdev(struct wlan_objmgr_vdev *vdev,
1399 				   struct peer_assoc_params *req)
1400 {
1401 	if (wlan_vdev_mlme_is_mlo_vdev(vdev) &&
1402 	    !wlan_vdev_mlme_is_mlo_link_vdev(vdev))
1403 		req->is_assoc_vdev = true;
1404 }
1405 #else
1406 static inline void wma_set_mlo_capability(tp_wma_handle wma,
1407 					  struct wlan_objmgr_vdev *vdev,
1408 					  tpAddStaParams params,
1409 					  struct peer_assoc_params *req)
1410 {
1411 }
1412 
1413 static inline void wma_set_mlo_assoc_vdev(struct wlan_objmgr_vdev *vdev,
1414 					  struct peer_assoc_params *req)
1415 {
1416 }
1417 #endif
1418 
1419 /**
1420  * wma_send_peer_assoc() - send peer assoc command to fw
1421  * @wma: wma handle
1422  * @nw_type: nw type
1423  * @params: add sta params
1424  *
1425  * This function sends the peer assoc command to firmware with
1426  * the relevant parameters.
1427  *
1428  * Return: QDF_STATUS
1429  */
1430 QDF_STATUS wma_send_peer_assoc(tp_wma_handle wma,
1431 				    tSirNwType nw_type,
1432 				    tpAddStaParams params)
1433 {
1434 	struct peer_assoc_params *cmd;
1435 	int32_t ret, max_rates, i;
1436 	uint8_t *rate_pos;
1437 	wmi_rate_set peer_legacy_rates, peer_ht_rates;
1438 	uint32_t num_peer_11b_rates = 0;
1439 	uint32_t num_peer_11a_rates = 0;
1440 	enum wlan_phymode phymode, vdev_phymode;
1441 	uint32_t peer_nss = 1;
1442 	struct wma_txrx_node *intr = NULL;
1443 	bool is_he;
1444 	bool is_eht;
1445 	QDF_STATUS status;
1446 	struct mac_context *mac = wma->mac_context;
1447 	struct wlan_channel *des_chan;
1448 	int32_t keymgmt, uccipher, authmode;
1449 
1450 	cmd = qdf_mem_malloc(sizeof(struct peer_assoc_params));
1451 	if (!cmd) {
1452 		wma_err("Failed to allocate peer_assoc_params param");
1453 		return QDF_STATUS_E_NOMEM;
1454 	}
1455 
1456 	intr = &wma->interfaces[params->smesessionId];
1457 
1458 	wma_mask_tx_ht_rate(wma, params->supportedRates.supportedMCSSet);
1459 
1460 	qdf_mem_zero(&peer_legacy_rates, sizeof(wmi_rate_set));
1461 	qdf_mem_zero(&peer_ht_rates, sizeof(wmi_rate_set));
1462 	qdf_mem_zero(cmd, sizeof(struct peer_assoc_params));
1463 
1464 	is_he = wma_is_peer_he_capable(params);
1465 	is_eht = wma_is_peer_eht_capable(params);
1466 	if ((params->ch_width > CH_WIDTH_40MHZ) &&
1467 	    ((nw_type == eSIR_11G_NW_TYPE) ||
1468 	     (nw_type == eSIR_11B_NW_TYPE))) {
1469 		wma_err("ch_width %d sent in 11G, configure to 40MHz",
1470 			params->ch_width);
1471 		params->ch_width = CH_WIDTH_40MHZ;
1472 	}
1473 	phymode = wma_peer_phymode(nw_type, params->staType,
1474 				   params->htCapable, params->ch_width,
1475 				   params->vhtCapable, is_he, is_eht);
1476 
1477 	des_chan = wlan_vdev_mlme_get_des_chan(intr->vdev);
1478 	vdev_phymode = des_chan->ch_phymode;
1479 	if ((intr->type == WMI_VDEV_TYPE_AP) && (phymode > vdev_phymode)) {
1480 		wma_nofl_debug("Peer phymode %d is not allowed. Set it equal to sap/go phymode %d",
1481 			       phymode, vdev_phymode);
1482 		phymode = vdev_phymode;
1483 	}
1484 
1485 	if (!mac->mlme_cfg->rates.disable_abg_rate_txdata &&
1486 	    !WLAN_REG_IS_6GHZ_CHAN_FREQ(des_chan->ch_freq)) {
1487 		/* Legacy Rateset */
1488 		rate_pos = (uint8_t *) peer_legacy_rates.rates;
1489 		for (i = 0; i < SIR_NUM_11B_RATES; i++) {
1490 			if (!params->supportedRates.llbRates[i])
1491 				continue;
1492 			rate_pos[peer_legacy_rates.num_rates++] =
1493 				params->supportedRates.llbRates[i];
1494 			num_peer_11b_rates++;
1495 		}
1496 		for (i = 0; i < SIR_NUM_11A_RATES; i++) {
1497 			if (!params->supportedRates.llaRates[i])
1498 				continue;
1499 			rate_pos[peer_legacy_rates.num_rates++] =
1500 				params->supportedRates.llaRates[i];
1501 			num_peer_11a_rates++;
1502 		}
1503 	}
1504 
1505 	if ((phymode == WLAN_PHYMODE_11A && num_peer_11a_rates == 0) ||
1506 	    (phymode == WLAN_PHYMODE_11B && num_peer_11b_rates == 0)) {
1507 		wma_warn("Invalid phy rates. phymode 0x%x, 11b_rates %d, 11a_rates %d",
1508 			phymode, num_peer_11b_rates,
1509 			num_peer_11a_rates);
1510 		qdf_mem_free(cmd);
1511 		return QDF_STATUS_E_INVAL;
1512 	}
1513 
1514 	/* HT Rateset */
1515 	max_rates = sizeof(peer_ht_rates.rates) /
1516 		    sizeof(peer_ht_rates.rates[0]);
1517 	rate_pos = (uint8_t *) peer_ht_rates.rates;
1518 	for (i = 0; i < MAX_SUPPORTED_RATES; i++) {
1519 		if (params->supportedRates.supportedMCSSet[i / 8] &
1520 		    (1 << (i % 8))) {
1521 			rate_pos[peer_ht_rates.num_rates++] = i;
1522 			if (i >= 8) {
1523 				/* MCS8 or higher rate is present, must be 2x2 */
1524 				peer_nss = 2;
1525 			}
1526 		}
1527 		if (peer_ht_rates.num_rates == max_rates)
1528 			break;
1529 	}
1530 
1531 	if (params->htCapable && !peer_ht_rates.num_rates) {
1532 		uint8_t temp_ni_rates[8] = { 0x0, 0x1, 0x2, 0x3,
1533 					     0x4, 0x5, 0x6, 0x7};
1534 		/*
1535 		 * Workaround for EV 116382: The peer is marked HT but its
1536 		 * supported rx mcs set is 0. The 11n spec mandates MCS0-7
1537 		 * for a HT STA. So forcing the supported rx mcs rate to
1538 		 * MCS 0-7. This workaround will be removed once we get
1539 		 * clarification from WFA regarding this STA behavior.
1540 		 */
1541 
1542 		/* TODO: Do we really need this? */
1543 		wma_warn("Peer is marked as HT capable but supported mcs rate is 0");
1544 		peer_ht_rates.num_rates = sizeof(temp_ni_rates);
1545 		qdf_mem_copy((uint8_t *) peer_ht_rates.rates, temp_ni_rates,
1546 			     peer_ht_rates.num_rates);
1547 	}
1548 
1549 	/* In AP mode, and for a TDLS peer, use the mac address of the peer
1550 	 * at the other end as the new peer address; in STA mode, use the
1551 	 * bssid as the new peer address
1552 	 */
1553 	if ((wma_is_vdev_in_ap_mode(wma, params->smesessionId))
1554 #ifdef FEATURE_WLAN_TDLS
1555 	    || (STA_ENTRY_TDLS_PEER == params->staType)
1556 #endif /* FEATURE_WLAN_TDLS */
1557 	    ) {
1558 		qdf_mem_copy(cmd->peer_mac, params->staMac,
1559 						sizeof(cmd->peer_mac));
1560 	} else {
1561 		qdf_mem_copy(cmd->peer_mac, params->bssId,
1562 						sizeof(cmd->peer_mac));
1563 	}
1564 	wma_objmgr_set_peer_mlme_phymode(wma, cmd->peer_mac, phymode);
1565 
1566 	cmd->vdev_id = params->smesessionId;
1567 	cmd->peer_new_assoc = 1;
1568 	cmd->peer_associd = params->assocId;
1569 
1570 	cmd->is_wme_set = 1;
1571 
1572 	if (params->wmmEnabled)
1573 		cmd->qos_flag = 1;
1574 
1575 	if (params->uAPSD) {
1576 		cmd->apsd_flag = 1;
1577 		wma_nofl_debug("Set WMI_PEER_APSD: uapsd Mask %d",
1578 			       params->uAPSD);
1579 	}
1580 
1581 	if (params->htCapable) {
1582 		cmd->ht_flag = 1;
1583 		cmd->qos_flag = 1;
1584 		cmd->peer_rate_caps |= WMI_RC_HT_FLAG;
1585 	}
1586 
1587 	if (params->vhtCapable) {
1588 		cmd->ht_flag = 1;
1589 		cmd->qos_flag = 1;
1590 		cmd->vht_flag = 1;
1591 		cmd->peer_rate_caps |= WMI_RC_HT_FLAG;
1592 	}
1593 
1594 	if (params->ch_width) {
1595 		cmd->peer_rate_caps |= WMI_RC_CW40_FLAG;
1596 		if (params->fShortGI40Mhz)
1597 			cmd->peer_rate_caps |= WMI_RC_SGI_FLAG;
1598 	} else if (params->fShortGI20Mhz) {
1599 		cmd->peer_rate_caps |= WMI_RC_SGI_FLAG;
1600 	}
1601 
1602 	switch (params->ch_width) {
1603 	case CH_WIDTH_320MHZ:
1604 		wma_set_peer_assoc_params_bw_320(cmd, params->ch_width);
1605 		fallthrough;
1606 	case CH_WIDTH_80P80MHZ:
1607 	case CH_WIDTH_160MHZ:
1608 		cmd->bw_160 = 1;
1609 		fallthrough;
1610 	case CH_WIDTH_80MHZ:
1611 		cmd->bw_80 = 1;
1612 		fallthrough;
1613 	case CH_WIDTH_40MHZ:
1614 		cmd->bw_40 = 1;
1615 		fallthrough;
1616 	default:
1617 		break;
1618 	}
1619 
1620 	cmd->peer_vht_caps = params->vht_caps;
1621 	if (params->p2pCapableSta) {
1622 		cmd->p2p_capable_sta = 1;
1623 		wma_objmgr_set_peer_mlme_type(wma, params->staMac,
1624 					      WLAN_PEER_P2P_CLI);
1625 	}
1626 
1627 	if (params->rmfEnabled)
1628 		cmd->is_pmf_enabled = 1;
1629 
1630 	if (params->stbc_capable)
1631 		cmd->stbc_flag = 1;
1632 
1633 	if (params->htLdpcCapable || params->vhtLdpcCapable)
1634 		cmd->ldpc_flag = 1;
1635 
1636 	switch (params->mimoPS) {
1637 	case eSIR_HT_MIMO_PS_STATIC:
1638 		cmd->static_mimops_flag = 1;
1639 		break;
1640 	case eSIR_HT_MIMO_PS_DYNAMIC:
1641 		cmd->dynamic_mimops_flag = 1;
1642 		break;
1643 	case eSIR_HT_MIMO_PS_NO_LIMIT:
1644 		cmd->spatial_mux_flag = 1;
1645 		break;
1646 	default:
1647 		break;
1648 	}
1649 
1650 	wma_set_twt_peer_caps(params, cmd);
1651 #ifdef FEATURE_WLAN_TDLS
1652 	if (STA_ENTRY_TDLS_PEER == params->staType)
1653 		cmd->auth_flag = 1;
1654 #endif /* FEATURE_WLAN_TDLS */
1655 
1656 	if (params->wpa_rsn
1657 #ifdef FEATURE_WLAN_WAPI
1658 	    || params->encryptType == eSIR_ED_WPI
1659 #endif /* FEATURE_WLAN_WAPI */
1660 	    ) {
1661 		if (!params->no_ptk_4_way) {
1662 			cmd->need_ptk_4_way = 1;
1663 			wlan_acquire_peer_key_wakelock(wma->pdev,
1664 						       cmd->peer_mac);
1665 		}
1666 	}
1667 
1668 	if (params->wpa_rsn >> 1)
1669 		cmd->need_gtk_2_way = 1;
1670 
1671 #ifdef FEATURE_WLAN_WAPI
1672 	if (params->encryptType == eSIR_ED_WPI) {
1673 		ret = wma_vdev_set_param(wma->wmi_handle, params->smesessionId,
1674 				      wmi_vdev_param_drop_unencry, false);
1675 		if (ret) {
1676 			wma_err("Set wmi_vdev_param_drop_unencry Param status:%d",
1677 				ret);
1678 			qdf_mem_free(cmd);
1679 			return ret;
1680 		}
1681 	}
1682 #endif /* FEATURE_WLAN_WAPI */
1683 
1684 	cmd->peer_caps = params->capab_info;
1685 	cmd->peer_listen_intval = params->listenInterval;
1686 	cmd->peer_ht_caps = params->ht_caps;
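	/*
	 * The HT max A-MPDU length computed below is
	 * 2^(IEEE80211_HTCAP_MAXRXAMPDU_FACTOR + max A-MPDU exponent) - 1
	 * octets, i.e. 8191..65535 bytes for the usual factor of 13.
	 */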
1687 	cmd->peer_max_mpdu = (1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR +
1688 				    params->maxAmpduSize)) - 1;
1689 	cmd->peer_mpdu_density = wma_parse_mpdudensity(params->maxAmpduDensity);
1690 
1691 	if (params->supportedRates.supportedMCSSet[1] &&
1692 	    params->supportedRates.supportedMCSSet[2])
1693 		cmd->peer_rate_caps |= WMI_RC_TS_FLAG;
1694 	else if (params->supportedRates.supportedMCSSet[1])
1695 		cmd->peer_rate_caps |= WMI_RC_DS_FLAG;
1696 
1697 	/* Update peer legacy rate information */
1698 	cmd->peer_legacy_rates.num_rates = peer_legacy_rates.num_rates;
1699 	qdf_mem_copy(cmd->peer_legacy_rates.rates, peer_legacy_rates.rates,
1700 		     peer_legacy_rates.num_rates);
1701 
1702 	/* Update peer HT rate information */
1703 	cmd->peer_ht_rates.num_rates = peer_ht_rates.num_rates;
1704 	qdf_mem_copy(cmd->peer_ht_rates.rates, peer_ht_rates.rates,
1705 				 peer_ht_rates.num_rates);
1706 
1707 	/* VHT Rates */
1708 
1709 	cmd->peer_nss = peer_nss;
1710 	/*
1711 	 * Because of DBS, a vdev may come up on either of the two MACs, which
1712 	 * have different capabilities. The STBC capability should be fetched
1713 	 * for the given hw_mode->MAC_id combination. It is planned that the
1714 	 * firmware will provide these device capabilities; for now the number
1715 	 * of tx streams is used to decide whether Tx STBC must be disabled.
1716 	 */
1717 	if (intr->tx_streams < 2) {
1718 		cmd->peer_vht_caps &= ~(1 << SIR_MAC_VHT_CAP_TXSTBC);
1719 		wma_nofl_debug("Num tx_streams: %d, Disabled txSTBC",
1720 			       intr->tx_streams);
1721 	}
1722 
1723 	cmd->vht_capable = params->vhtCapable;
1724 	if (params->vhtCapable) {
1725 #define VHT2x2MCSMASK 0xc
1726 		cmd->rx_max_rate = params->supportedRates.vhtRxHighestDataRate;
1727 		cmd->rx_mcs_set = params->supportedRates.vhtRxMCSMap;
1728 		cmd->tx_max_rate = params->supportedRates.vhtTxHighestDataRate;
1729 		cmd->tx_mcs_set = params->supportedRates.vhtTxMCSMap;
1730 		/*
1731 		 * tx_mcs_set is the intersection of the self Tx NSS and the peer Rx MCS map
1732 		 */
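		/*
		 * The VHT MCS map encodes 2 bits per NSS; a value of 3
		 * (VHT2x2MCSMASK for NSS 2) means "not supported", so if the
		 * NSS-2 field equals the mask the peer is single stream.
		 */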
1733 		if (params->vhtSupportedRxNss)
1734 			cmd->peer_nss = params->vhtSupportedRxNss;
1735 		else
1736 			cmd->peer_nss = ((cmd->tx_mcs_set & VHT2x2MCSMASK)
1737 					== VHT2x2MCSMASK) ? 1 : 2;
1738 
1739 		if (params->vht_mcs_10_11_supp) {
1740 			WMI_SET_BITS(cmd->tx_mcs_set, 16, cmd->peer_nss,
1741 				     ((1 << cmd->peer_nss) - 1));
1742 			WMI_VHT_MCS_NOTIFY_EXT_SS_SET(cmd->tx_mcs_set, 1);
1743 		}
1744 		if (params->vht_extended_nss_bw_cap &&
1745 		    (params->vht_160mhz_nss || params->vht_80p80mhz_nss)) {
1746 			/*
1747 			 * bit[2:0] : Represents value of Rx NSS for 160 MHz
1748 			 * bit[5:3] : Represents value of Rx NSS for 80_80 MHz
1749 			 *             Extended NSS support
1750 			 * bit[30:6]: Reserved
1751 			 * bit[31]  : MSB(0/1): 1 in case of valid data
1752 			 */
1753 			cmd->peer_bw_rxnss_override |= (1 << 31);
1754 			if (params->vht_160mhz_nss)
1755 				cmd->peer_bw_rxnss_override |=
1756 					(params->vht_160mhz_nss - 1);
1757 			if (params->vht_80p80mhz_nss)
1758 				cmd->peer_bw_rxnss_override |=
1759 					((params->vht_80p80mhz_nss - 1) << 3);
1760 			wma_debug("peer_bw_rxnss_override %0X",
1761 				  cmd->peer_bw_rxnss_override);
1762 		}
1763 	}
1764 
1765 	wma_set_mlo_capability(wma, intr->vdev, params, cmd);
1766 
1767 	wma_set_mlo_assoc_vdev(intr->vdev, cmd);
1768 
1769 	wma_debug("rx_max_rate %d, rx_mcs %x, tx_max_rate %d, tx_mcs: %x num rates %d need 4 way %d",
1770 		  cmd->rx_max_rate, cmd->rx_mcs_set, cmd->tx_max_rate,
1771 		  cmd->tx_mcs_set, peer_ht_rates.num_rates,
1772 		  cmd->need_ptk_4_way);
1773 
1774 	/*
1775 	 * Limit the NSS to the max number of RF chains supported by the
1776 	 * target, otherwise FW will crash.
1777 	 */
1778 	if (cmd->peer_nss > WMA_MAX_NSS) {
1779 		wma_err("peer Nss %d is more than supported", cmd->peer_nss);
1780 		cmd->peer_nss = WMA_MAX_NSS;
1781 	}
1782 
1783 	wma_populate_peer_he_cap(cmd, params);
1784 	wma_populate_peer_eht_cap(cmd, params);
1785 	wma_populate_peer_puncture(cmd, des_chan);
1786 	wma_populate_peer_mlo_cap(cmd, params);
1787 	if (!wma_is_vdev_in_ap_mode(wma, params->smesessionId))
1788 		intr->nss = cmd->peer_nss;
1789 	wma_objmgr_set_peer_mlme_nss(wma, cmd->peer_mac, cmd->peer_nss);
1790 
1791 	/* Until the conversion is done in WMI, we need to fill the fw phymode */
1792 	cmd->peer_phymode = wmi_host_to_fw_phymode(phymode);
1793 
1794 	keymgmt = wlan_crypto_get_param(intr->vdev, WLAN_CRYPTO_PARAM_KEY_MGMT);
1795 	authmode = wlan_crypto_get_param(intr->vdev,
1796 					 WLAN_CRYPTO_PARAM_AUTH_MODE);
1797 	uccipher = wlan_crypto_get_param(intr->vdev,
1798 					 WLAN_CRYPTO_PARAM_UCAST_CIPHER);
1799 
1800 	cmd->akm = cm_crypto_authmode_to_wmi_authmode(authmode,
1801 						      keymgmt,
1802 						      uccipher);
1803 
1804 	status = wmi_unified_peer_assoc_send(wma->wmi_handle,
1805 					 cmd);
1806 	if (QDF_IS_STATUS_ERROR(status))
1807 		wma_alert("Failed to send peer assoc command status = %d",
1808 			 status);
1809 	qdf_mem_free(cmd);
1810 
1811 	return status;
1812 }
1813 
1814 /**
1815  * wmi_unified_vdev_set_gtx_cfg_send() - set GTX params
1816  * @wmi_handle: wmi handle
1817  * @if_id: vdev id
1818  * @gtx_info: GTX config params
1819  *
1820  * This function sets GTX-related params in firmware.
1821  *
1822  * Return: 0 for success or error code
1823  */
1824 QDF_STATUS wmi_unified_vdev_set_gtx_cfg_send(wmi_unified_t wmi_handle,
1825 				  uint32_t if_id,
1826 				  gtx_config_t *gtx_info)
1827 {
1828 	struct wmi_gtx_config params;
1829 
1830 	params.gtx_rt_mask[0] = gtx_info->gtxRTMask[0];
1831 	params.gtx_rt_mask[1] = gtx_info->gtxRTMask[1];
1832 	params.gtx_usrcfg = gtx_info->gtxUsrcfg;
1833 	params.gtx_threshold = gtx_info->gtxPERThreshold;
1834 	params.gtx_margin = gtx_info->gtxPERMargin;
1835 	params.gtx_tpcstep = gtx_info->gtxTPCstep;
1836 	params.gtx_tpcmin = gtx_info->gtxTPCMin;
1837 	params.gtx_bwmask = gtx_info->gtxBWMask;
1838 
1839 	return wmi_unified_vdev_set_gtx_cfg_cmd(wmi_handle,
1840 						if_id, &params);
1841 
1842 }
1843 
1844 /**
1845  * wma_update_protection_mode() - update protection mode
1846  * @wma: wma handle
1847  * @vdev_id: vdev id
1848  * @llbcoexist: protection mode info
1849  *
1850  * This function sets the protection mode (RTS/CTS) in fw for the given vdev id.
1851  *
1852  * Return: none
1853  */
1854 void wma_update_protection_mode(tp_wma_handle wma, uint8_t vdev_id,
1855 			   uint8_t llbcoexist)
1856 {
1857 	QDF_STATUS ret;
1858 	enum ieee80211_protmode prot_mode;
1859 
1860 	prot_mode = llbcoexist ? IEEE80211_PROT_CTSONLY : IEEE80211_PROT_NONE;
1861 
1862 	ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
1863 					      wmi_vdev_param_protection_mode,
1864 					      prot_mode);
1865 
1866 	if (QDF_IS_STATUS_ERROR(ret))
1867 		wma_err("Failed to send wmi protection mode cmd");
1868 	else
1869 		wma_nofl_debug("Updated protection mode %d to target",
1870 			       prot_mode);
1871 }
1872 
1873 void
1874 wma_update_beacon_interval(tp_wma_handle wma, uint8_t vdev_id,
1875 			   uint16_t beaconInterval)
1876 {
1877 	QDF_STATUS ret;
1878 
1879 	ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
1880 					      wmi_vdev_param_beacon_interval,
1881 					      beaconInterval);
1882 
1883 	if (QDF_IS_STATUS_ERROR(ret))
1884 		wma_err("Failed to update beacon interval");
1885 	else
1886 		wma_info("Updated beacon interval %d for vdev %d",
1887 			 beaconInterval, vdev_id);
1888 }
1889 
1890 #ifdef WLAN_FEATURE_11AX_BSS_COLOR
1891 /**
1892  * wma_update_bss_color() - update beacon bss color in fw
1893  * @wma: wma handle
1894  * @vdev_id: vdev id
1895  * @bcn_params: beacon parameters; only the bss_color and
1896  * bss_color_disabled fields are used.
1897  *
1898  * Return: none
1899  */
1900 static void
1901 wma_update_bss_color(tp_wma_handle wma, uint8_t vdev_id,
1902 		     tUpdateBeaconParams *bcn_params)
1903 {
1904 	QDF_STATUS ret;
1905 	uint32_t dword_he_ops = 0;
1906 
1907 	WMI_HEOPS_COLOR_SET(dword_he_ops, bcn_params->bss_color);
1908 	WMI_HEOPS_BSSCOLORDISABLE_SET(dword_he_ops,
1909 				bcn_params->bss_color_disabled);
1910 	wma_nofl_debug("vdev: %d, update bss color, HE_OPS: 0x%x",
1911 		       vdev_id, dword_he_ops);
1912 	ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
1913 			      wmi_vdev_param_he_bss_color, dword_he_ops);
1914 	if (QDF_IS_STATUS_ERROR(ret))
1915 		wma_err("Failed to update HE operations");
1916 }
1917 #else
1918 static void wma_update_bss_color(tp_wma_handle wma, uint8_t vdev_id,
1919 			   tUpdateBeaconParams *bcn_params)
1920 {
1921 }
1922 #endif
1923 
1924 /**
1925  * wma_process_update_beacon_params() - update beacon parameters to target
1926  * @wma: wma handle
1927  * @bcn_params: beacon parameters
1928  *
1929  * Return: none
1930  */
1931 void
1932 wma_process_update_beacon_params(tp_wma_handle wma,
1933 				 tUpdateBeaconParams *bcn_params)
1934 {
1935 	if (!bcn_params) {
1936 		wma_err("bcn_params NULL");
1937 		return;
1938 	}
1939 
1940 	if (bcn_params->vdev_id >= wma->max_bssid) {
1941 		wma_err("Invalid vdev id %d", bcn_params->vdev_id);
1942 		return;
1943 	}
1944 
1945 	if (bcn_params->paramChangeBitmap & PARAM_BCN_INTERVAL_CHANGED) {
1946 		wma_update_beacon_interval(wma, bcn_params->vdev_id,
1947 					   bcn_params->beaconInterval);
1948 	}
1949 
1950 	if (bcn_params->paramChangeBitmap & PARAM_llBCOEXIST_CHANGED)
1951 		wma_update_protection_mode(wma, bcn_params->vdev_id,
1952 					   bcn_params->llbCoexist);
1953 
1954 	if (bcn_params->paramChangeBitmap & PARAM_BSS_COLOR_CHANGED)
1955 		wma_update_bss_color(wma, bcn_params->vdev_id,
1956 				     bcn_params);
1957 }
1958 
1959 void wma_update_rts_params(tp_wma_handle wma, uint32_t value)
1960 {
1961 	uint8_t vdev_id;
1962 	QDF_STATUS ret;
1963 	struct wlan_objmgr_vdev *vdev;
1964 
1965 	for (vdev_id = 0; vdev_id < wma->max_bssid; vdev_id++) {
1966 		vdev = wma->interfaces[vdev_id].vdev;
1967 		if (!vdev)
1968 			continue;
1969 		ret = wma_vdev_set_param(wma->wmi_handle,
1970 					 vdev_id,
1971 					 wmi_vdev_param_rts_threshold,
1972 					 value);
1973 		if (QDF_IS_STATUS_ERROR(ret))
1974 			wma_err("Update cfg param fail for vdevId %d", vdev_id);
1975 	}
1976 }
1977 
1978 void wma_update_frag_params(tp_wma_handle wma, uint32_t value)
1979 {
1980 	uint8_t vdev_id;
1981 	QDF_STATUS ret;
1982 	struct wlan_objmgr_vdev *vdev;
1983 
1984 	for (vdev_id = 0; vdev_id < wma->max_bssid; vdev_id++) {
1985 		vdev = wma->interfaces[vdev_id].vdev;
1986 		if (!vdev)
1987 			continue;
1988 		ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
1989 					 wmi_vdev_param_fragmentation_threshold,
1990 					 value);
1991 		if (QDF_IS_STATUS_ERROR(ret))
1992 			wma_err("Update cfg params failed for vdevId %d",
1993 				 vdev_id);
1994 	}
1995 }
1996 
1997 /**
1998  * wma_process_update_edca_param_req() - update EDCA params
1999  * @handle: wma handle
2000  * @edca_params: edca parameters
2001  *
2002  * This function sends the updated EDCA parameters to the target
2003  *
2004  * Return: QDF Status
2005  */
2006 QDF_STATUS wma_process_update_edca_param_req(WMA_HANDLE handle,
2007 					     tEdcaParams *edca_params)
2008 {
2009 	tp_wma_handle wma_handle = (tp_wma_handle) handle;
2010 	struct wmi_host_wme_vparams wmm_param[QCA_WLAN_AC_ALL];
2011 	tSirMacEdcaParamRecord *edca_record;
2012 	int ac;
2013 	struct ol_tx_wmm_param_t ol_tx_wmm_param;
2014 	uint8_t vdev_id;
2015 	QDF_STATUS status;
2016 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
2017 	uint8_t *debug_str;
2018 	uint32_t len = 0;
2019 
2020 	vdev_id = edca_params->vdev_id;
2021 	if (!wma_is_vdev_valid(vdev_id)) {
2022 		wma_err("vdev id:%d is not active ", vdev_id);
2023 		goto fail;
2024 	}
2025 
2026 	debug_str = qdf_mem_malloc(WMA_WMM_DEBUG_STRING_SIZE);
2027 	if (!debug_str)
2028 		goto fail;
2029 
2030 	for (ac = 0; ac < QCA_WLAN_AC_ALL; ac++) {
2031 		switch (ac) {
2032 		case QCA_WLAN_AC_BE:
2033 			edca_record = &edca_params->acbe;
2034 			break;
2035 		case QCA_WLAN_AC_BK:
2036 			edca_record = &edca_params->acbk;
2037 			break;
2038 		case QCA_WLAN_AC_VI:
2039 			edca_record = &edca_params->acvi;
2040 			break;
2041 		case QCA_WLAN_AC_VO:
2042 			edca_record = &edca_params->acvo;
2043 			break;
2044 		default:
2045 			qdf_mem_free(debug_str);
2046 			goto fail;
2047 		}
2048 
2049 		wma_update_edca_params_for_ac(edca_record, &wmm_param[ac], ac,
2050 					      edca_params->mu_edca_params,
2051 					      debug_str,
2052 					      WMA_WMM_DEBUG_STRING_SIZE, &len);
2053 
2054 		ol_tx_wmm_param.ac[ac].aifs = wmm_param[ac].aifs;
2055 		ol_tx_wmm_param.ac[ac].cwmin = wmm_param[ac].cwmin;
2056 		ol_tx_wmm_param.ac[ac].cwmax = wmm_param[ac].cwmax;
2057 	}
2058 
2059 	wma_nofl_debug("WMM params: %s", debug_str);
2060 	qdf_mem_free(debug_str);
2061 
2062 	status = wmi_unified_process_update_edca_param(wma_handle->wmi_handle,
2063 						vdev_id,
2064 						edca_params->mu_edca_params,
2065 						wmm_param);
2066 	if (status == QDF_STATUS_E_NOMEM)
2067 		return status;
2068 	else if (status == QDF_STATUS_E_FAILURE)
2069 		goto fail;
2070 
2071 	cdp_set_wmm_param(soc, WMI_PDEV_ID_SOC, ol_tx_wmm_param);
2072 
2073 	return QDF_STATUS_SUCCESS;
2074 
2075 fail:
2076 	wma_err("Failed to set WMM Parameters");
2077 	return QDF_STATUS_E_FAILURE;
2078 }
2079 
2080 /**
2081  * wmi_unified_probe_rsp_tmpl_send() - send probe response template to fw
2082  * @wma: wma handle
2083  * @vdev_id: vdev id
2084  * @probe_rsp_info: probe response info
2085  *
2086  * Return: 0 for success or error code
2087  */
2088 static int wmi_unified_probe_rsp_tmpl_send(tp_wma_handle wma,
2089 				   uint8_t vdev_id,
2090 				   tpSendProbeRespParams probe_rsp_info)
2091 {
2092 	uint64_t adjusted_tsf_le;
2093 	struct ieee80211_frame *wh;
2094 	struct wmi_probe_resp_params params;
2095 
2096 	/*
2097 	 * Make the TSF offset negative so probe responses in the same
2098 	 * staggered batch have the same TSF.
2099 	 */
2100 	adjusted_tsf_le = cpu_to_le64(0ULL -
2101 				      wma->interfaces[vdev_id].tsfadjust);
2102 	/* Update the timestamp in the probe response buffer with adjusted TSF */
2103 	wh = (struct ieee80211_frame *)probe_rsp_info->probeRespTemplate;
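	/* &wh[1] points just past the 802.11 MAC header, i.e. at the 8-byte
	 * Timestamp field of the probe response body.
	 */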
2104 	A_MEMCPY(&wh[1], &adjusted_tsf_le, sizeof(adjusted_tsf_le));
2105 
2106 	params.prb_rsp_template_len = probe_rsp_info->probeRespTemplateLen;
2107 	params.prb_rsp_template_frm = probe_rsp_info->probeRespTemplate;
2108 
2109 	return wmi_unified_probe_rsp_tmpl_send_cmd(wma->wmi_handle, vdev_id,
2110 						   &params);
2111 }
2112 
2113 #ifdef WLAN_FEATURE_11BE_MLO
2114 /**
2115  * wma_upt_mlo_partner_info() - update mlo info in beacon template
2116  * @params: beacon template params
2117  * @bcn_param: beacon params
2118  * @bytes_to_strip: bytes to strip
2119  *
2120  * Return: Void
2121  */
2122 static void wma_upt_mlo_partner_info(struct beacon_tmpl_params *params,
2123 				     const tpSendbeaconParams bcn_param,
2124 				     uint8_t bytes_to_strip)
2125 {
2126 	struct ml_bcn_partner_info *bcn_info;
2127 	struct ml_bcn_partner_info *info;
2128 	int link;
2129 
2130 	params->mlo_partner.num_links = bcn_param->mlo_partner.num_links;
2131 	for (link = 0; link < params->mlo_partner.num_links; link++) {
2132 		bcn_info = &bcn_param->mlo_partner.partner_info[link];
2133 		info = &params->mlo_partner.partner_info[link];
2134 		info->vdev_id = bcn_info->vdev_id;
2135 		info->beacon_interval = bcn_info->beacon_interval;
2136 		if (bcn_info->csa_switch_count_offset &&
2137 		    bcn_info->csa_switch_count_offset > bytes_to_strip)
2138 			info->csa_switch_count_offset =
2139 				bcn_info->csa_switch_count_offset -
2140 					bytes_to_strip;
2141 		if (bcn_info->ext_csa_switch_count_offset &&
2142 		    bcn_info->ext_csa_switch_count_offset > bytes_to_strip)
2143 			info->ext_csa_switch_count_offset =
2144 				bcn_info->ext_csa_switch_count_offset -
2145 					bytes_to_strip;
2146 	}
2147 }
2148 #else
2149 static void wma_upt_mlo_partner_info(struct beacon_tmpl_params *params,
2150 				     const tpSendbeaconParams bcn_param,
2151 				     uint8_t bytes_to_strip)
2152 {
2153 }
2154 #endif
2155 
2156 /**
2157  * wma_unified_bcn_tmpl_send() - send beacon template to fw
2158  * @wma:wma handle
2159  * @vdev_id: vdev id
2160  * @bcn_info: beacon info
2161  * @bytes_to_strip: bytes to strip
2162  *
2163  * Return: QDF_STATUS_SUCCESS for success or error code
2164  */
2165 static QDF_STATUS wma_unified_bcn_tmpl_send(tp_wma_handle wma,
2166 				     uint8_t vdev_id,
2167 				     const tpSendbeaconParams bcn_info,
2168 				     uint8_t bytes_to_strip)
2169 {
2170 	struct beacon_tmpl_params params = {0};
2171 	uint32_t tmpl_len, tmpl_len_aligned;
2172 	uint8_t *frm;
2173 	QDF_STATUS ret;
2174 	uint8_t *p2p_ie;
2175 	uint16_t p2p_ie_len = 0;
2176 	uint64_t adjusted_tsf_le;
2177 	struct ieee80211_frame *wh;
2178 
2179 	if (!wma_is_vdev_valid(vdev_id)) {
2180 		wma_err("vdev id:%d is not active ", vdev_id);
2181 		return QDF_STATUS_E_INVAL;
2182 	}
2183 
2184 	wma_nofl_debug("vdev %d: bcn update reason %d", vdev_id,
2185 		       bcn_info->reason);
2186 
2187 	if (bcn_info->p2pIeOffset) {
2188 		p2p_ie = bcn_info->beacon + bcn_info->p2pIeOffset;
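		/* p2p_ie[1] is the IE length; add 2 for the element ID and
		 * length octets.
		 */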
2189 		p2p_ie_len = (uint16_t) p2p_ie[1] + 2;
2190 	}
2191 
2192 	/*
2193 	 * XXX: The first word of the beacon buffer contains the beacon
2194 	 * length only when UMAC is sending the beacon template. In other
2195 	 * cases (e.g. from a TBTT update) the beacon length is read from
2196 	 * the beacon information.
2197 	 */
2198 	if (bytes_to_strip)
2199 		tmpl_len = *(uint32_t *) &bcn_info->beacon[0];
2200 	else
2201 		tmpl_len = bcn_info->beaconLength;
2202 
2203 	if (tmpl_len > WMI_BEACON_TX_BUFFER_SIZE) {
2204 		wma_err("tmpl_len: %d > %d. Invalid tmpl len", tmpl_len,
2205 			WMI_BEACON_TX_BUFFER_SIZE);
2206 		return QDF_STATUS_E_INVAL;
2207 	}
2208 
2209 	if (p2p_ie_len) {
2210 		if (tmpl_len <= p2p_ie_len) {
2211 			wma_err("tmpl_len %d <= p2p_ie_len %d, Invalid",
2212 				tmpl_len, p2p_ie_len);
2213 			return QDF_STATUS_E_INVAL;
2214 		}
2215 		tmpl_len -= (uint32_t) p2p_ie_len;
2216 	}
2217 
2218 	frm = bcn_info->beacon + bytes_to_strip;
2219 	tmpl_len_aligned = roundup(tmpl_len, sizeof(A_UINT32));
2220 	/*
2221 	 * Make the TSF offset negative so beacons in the same
2222 	 * staggered batch have the same TSF.
2223 	 */
2224 	adjusted_tsf_le = cpu_to_le64(0ULL -
2225 				      wma->interfaces[vdev_id].tsfadjust);
2226 	/* Update the timestamp in the beacon buffer with adjusted TSF */
2227 	wh = (struct ieee80211_frame *)frm;
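	/* &wh[1] is the start of the beacon body, i.e. the 8-byte
	 * Timestamp field.
	 */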
2228 	A_MEMCPY(&wh[1], &adjusted_tsf_le, sizeof(adjusted_tsf_le));
2229 
2232 	params.vdev_id = vdev_id;
2233 	params.tim_ie_offset = bcn_info->timIeOffset - bytes_to_strip;
2234 	params.tmpl_len = tmpl_len;
2235 	params.frm = frm;
2236 	params.tmpl_len_aligned = tmpl_len_aligned;
2237 	params.enable_bigtk =
2238 		mlme_get_bigtk_support(wma->interfaces[vdev_id].vdev);
2239 	if (bcn_info->csa_count_offset &&
2240 	    (bcn_info->csa_count_offset > bytes_to_strip))
2241 		params.csa_switch_count_offset =
2242 			bcn_info->csa_count_offset - bytes_to_strip;
2243 	if (bcn_info->ecsa_count_offset &&
2244 	    (bcn_info->ecsa_count_offset > bytes_to_strip))
2245 		params.ext_csa_switch_count_offset =
2246 			bcn_info->ecsa_count_offset - bytes_to_strip;
2247 
2248 	wma_upt_mlo_partner_info(&params, bcn_info, bytes_to_strip);
2249 
2250 	ret = wmi_unified_beacon_tmpl_send_cmd(wma->wmi_handle,
2251 				 &params);
2252 	if (QDF_IS_STATUS_ERROR(ret))
2253 		wma_err("Failed to send bcn tmpl: %d", ret);
2254 
2255 	return ret;
2256 }
2257 
2258 /**
2259  * wma_store_bcn_tmpl() - store beacon template
2260  * @wma: wma handle
2261  * @vdev_id: vdev id
2262  * @bcn_info: beacon params
2263  *
2264  * This function stores beacon template locally.
2265  * This will be sent to the target on the reception
2266  * of the SWBA event.
2267  *
2268  * Return: QDF status
2269  */
2270 static QDF_STATUS wma_store_bcn_tmpl(tp_wma_handle wma, uint8_t vdev_id,
2271 				     tpSendbeaconParams bcn_info)
2272 {
2273 	struct beacon_info *bcn;
2274 	uint32_t len;
2275 	uint8_t *bcn_payload;
2276 	struct beacon_tim_ie *tim_ie;
2277 
2278 	bcn = wma->interfaces[vdev_id].beacon;
2279 	if (!bcn || !bcn->buf) {
2280 		wma_err("Memory is not allocated to hold bcn template");
2281 		return QDF_STATUS_E_INVAL;
2282 	}
2283 
2284 	len = *(uint32_t *) &bcn_info->beacon[0];
2285 	if (len > SIR_MAX_BEACON_SIZE - sizeof(uint32_t)) {
2286 		wma_err("Received beacon len %u exceeding max limit %lu",
2287 			len, (unsigned long)(
2288 			 SIR_MAX_BEACON_SIZE - sizeof(uint32_t)));
2289 		return QDF_STATUS_E_INVAL;
2290 	}
2291 	qdf_spin_lock_bh(&bcn->lock);
2292 
2293 	/*
2294 	 * Copy the received beacon template content into the local buffer.
2295 	 * This will be sent to the target on reception of the SWBA
2296 	 * event from the target.
2297 	 */
2298 	qdf_nbuf_trim_tail(bcn->buf, qdf_nbuf_len(bcn->buf));
2299 	memcpy(qdf_nbuf_data(bcn->buf),
2300 	       bcn_info->beacon + 4 /* Exclude beacon length field */,
2301 	       len);
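	/*
	 * The stored copy excludes the 4-byte length word, so shift the IE
	 * and CSA countdown offsets back by 4 to keep them relative to it.
	 */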
2302 	if (bcn_info->timIeOffset > 3)
2303 		bcn->tim_ie_offset = bcn_info->timIeOffset - 4;
2304 	else
2305 		bcn->tim_ie_offset = bcn_info->timIeOffset;
2306 
2307 	if (bcn_info->p2pIeOffset > 3)
2308 		bcn->p2p_ie_offset = bcn_info->p2pIeOffset - 4;
2309 	else
2310 		bcn->p2p_ie_offset = bcn_info->p2pIeOffset;
2311 
2312 	if (bcn_info->csa_count_offset > 3)
2313 		bcn->csa_count_offset = bcn_info->csa_count_offset - 4;
2314 	else
2315 		bcn->csa_count_offset = bcn_info->csa_count_offset;
2316 
2317 	if (bcn_info->ecsa_count_offset > 3)
2318 		bcn->ecsa_count_offset = bcn_info->ecsa_count_offset - 4;
2319 	else
2320 		bcn->ecsa_count_offset = bcn_info->ecsa_count_offset;
2321 
2322 	bcn_payload = qdf_nbuf_data(bcn->buf);
2323 	if (bcn->tim_ie_offset) {
2324 		tim_ie = (struct beacon_tim_ie *)
2325 				(&bcn_payload[bcn->tim_ie_offset]);
2326 		/*
2327 		 * The initial value of bcn->dtim_count is 0. If the
2328 		 * beacon gets updated, the current DTIM count is
2329 		 * restored here.
2330 		 */
2331 		tim_ie->dtim_count = bcn->dtim_count;
2332 		tim_ie->tim_bitctl = 0;
2333 	}
2334 
2335 	qdf_nbuf_put_tail(bcn->buf, len);
2336 	bcn->len = len;
2337 
2338 	qdf_spin_unlock_bh(&bcn->lock);
2339 
2340 	return QDF_STATUS_SUCCESS;
2341 }
2342 
2343 int wma_tbttoffset_update_event_handler(void *handle, uint8_t *event,
2344 					       uint32_t len)
2345 {
2346 	tp_wma_handle wma = (tp_wma_handle) handle;
2347 	WMI_TBTTOFFSET_UPDATE_EVENTID_param_tlvs *param_buf;
2348 	wmi_tbtt_offset_event_fixed_param *tbtt_offset_event;
2349 	struct wma_txrx_node *intf;
2350 	struct beacon_info *bcn;
2351 	tSendbeaconParams bcn_info;
2352 	uint32_t *adjusted_tsf = NULL;
2353 	uint32_t if_id = 0, vdev_map;
2354 
2355 	if (wma_validate_handle(wma))
2356 		return -EINVAL;
2357 
2358 	param_buf = (WMI_TBTTOFFSET_UPDATE_EVENTID_param_tlvs *) event;
2359 	if (!param_buf) {
2360 		wma_err("Invalid tbtt update event buffer");
2361 		return -EINVAL;
2362 	}
2363 
2364 	tbtt_offset_event = param_buf->fixed_param;
2365 	intf = wma->interfaces;
2366 	vdev_map = tbtt_offset_event->vdev_map;
2367 	adjusted_tsf = param_buf->tbttoffset_list;
2368 	if (!adjusted_tsf) {
2369 		wma_err("Invalid adjusted_tsf");
2370 		return -EINVAL;
2371 	}
2372 
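	/* vdev_map is a bitmap of vdevs whose TBTT offset changed; walk it
	 * LSB first, one bit per vdev id.
	 */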
2373 	for (; (if_id < wma->max_bssid && vdev_map); vdev_map >>= 1, if_id++) {
2374 		if (!intf[if_id].vdev)
2375 			continue;
2376 
2377 		if (!(vdev_map & 0x1))
2378 			continue;
2379 
2380 		bcn = intf[if_id].beacon;
2381 		if (!bcn) {
2382 			wma_err("Invalid beacon");
2383 			return -EINVAL;
2384 		}
2385 		if (!bcn->buf) {
2386 			wma_err("Invalid beacon buffer");
2387 			return -EINVAL;
2388 		}
2389 		/* Save the adjusted TSF */
2390 		intf[if_id].tsfadjust = adjusted_tsf[if_id];
2391 
2392 		qdf_spin_lock_bh(&bcn->lock);
2393 		qdf_mem_zero(&bcn_info, sizeof(bcn_info));
2394 		qdf_mem_copy(bcn_info.beacon,
2395 			     qdf_nbuf_data(bcn->buf), bcn->len);
2396 		bcn_info.p2pIeOffset = bcn->p2p_ie_offset;
2397 		bcn_info.beaconLength = bcn->len;
2398 		bcn_info.timIeOffset = bcn->tim_ie_offset;
2399 		bcn_info.csa_count_offset = bcn->csa_count_offset;
2400 		bcn_info.ecsa_count_offset = bcn->ecsa_count_offset;
2401 		qdf_spin_unlock_bh(&bcn->lock);
2402 
2403 		wma_err_rl("Update beacon template for vdev %d due to TBTT offset update",
2404 			   if_id);
2405 		/* Update beacon template in firmware */
2406 		wma_unified_bcn_tmpl_send(wma, if_id, &bcn_info, 0);
2407 	}
2408 	return 0;
2409 }
2410 
2411 /**
2412  * wma_p2p_go_set_beacon_ie() - set beacon IE for p2p go
2413  * @wma_handle: wma handle
2414  * @vdev_id: vdev id
2415  * @p2pIe: p2p IE
2416  *
2417  * Return: 0 for success or error code
2418  */
2419 static int wma_p2p_go_set_beacon_ie(t_wma_handle *wma_handle,
2420 				    A_UINT32 vdev_id, uint8_t *p2pIe)
2421 {
2422 	if (wma_validate_handle(wma_handle))
2423 		return QDF_STATUS_E_FAILURE;
2424 
2425 	return wmi_unified_p2p_go_set_beacon_ie_cmd(wma_handle->wmi_handle,
2426 							vdev_id, p2pIe);
2427 }
2428 
2429 /**
2430  * wma_send_probe_rsp_tmpl() - send probe resp template
2431  * @wma: wma handle
2432  * @probe_rsp_info: probe response info
2433  *
2434  * This function sends the probe response template to the firmware,
2435  * which uses it when probe response offload is enabled.
2436  *
2437  * Return: none
2438  */
2439 void wma_send_probe_rsp_tmpl(tp_wma_handle wma,
2440 				    tpSendProbeRespParams probe_rsp_info)
2441 {
2442 	uint8_t vdev_id;
2443 	struct sAniProbeRspStruct *probe_rsp;
2444 
2445 	if (!probe_rsp_info) {
2446 		wma_err("probe_rsp_info is NULL");
2447 		return;
2448 	}
2449 
2450 	probe_rsp = (struct sAniProbeRspStruct *)
2451 			(probe_rsp_info->probeRespTemplate);
2452 	if (!probe_rsp) {
2453 		wma_err("probe_rsp is NULL");
2454 		return;
2455 	}
2456 
2457 	if (wma_find_vdev_id_by_addr(wma, probe_rsp->macHdr.sa, &vdev_id)) {
2458 		wma_err("failed to get vdev id");
2459 		return;
2460 	}
2461 
2462 	if (wmi_service_enabled(wma->wmi_handle,
2463 				   wmi_service_beacon_offload)) {
2464 		if (wmi_unified_probe_rsp_tmpl_send(wma, vdev_id,
2465 						    probe_rsp_info) < 0) {
2466 			wma_err("wmi_unified_probe_rsp_tmpl_send Failed");
2467 			return;
2468 		}
2469 	}
2470 }
2471 
2472 QDF_STATUS wma_set_ap_vdev_up(tp_wma_handle wma, uint8_t vdev_id)
2473 {
2474 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2475 	struct vdev_mlme_obj *mlme_obj;
2476 	struct wlan_objmgr_vdev *vdev;
2477 	struct wma_txrx_node *iface;
2478 
2479 	iface = &wma->interfaces[vdev_id];
2480 	vdev = iface->vdev;
2481 	mlme_obj = wlan_vdev_mlme_get_cmpt_obj(vdev);
2482 	if (!mlme_obj) {
2483 		wma_err("failed to get mlme_obj");
2484 		return QDF_STATUS_E_INVAL;
2485 	}
2486 	mlme_obj->proto.sta.assoc_id = 0;
2487 
2488 	status = vdev_mgr_up_send(mlme_obj);
2489 	if (QDF_IS_STATUS_ERROR(status)) {
2490 		wma_err("failed to send vdev up");
2491 		return status;
2492 	}
2493 	wma_set_sap_keepalive(wma, vdev_id);
2494 	wma_set_vdev_mgmt_rate(wma, vdev_id);
2495 	wma_vdev_set_he_bss_params(wma, vdev_id, &mlme_obj->proto.he_ops_info);
2496 	mlme_sr_update(vdev, true);
2497 
2498 	return status;
2499 }
2500 
2501 /**
2502  * wma_send_beacon() - send beacon template
2503  * @wma: wma handle
2504  * @bcn_info: beacon info
2505  *
2506  * This function stores the beacon template locally and
2507  * updates the keep alive parameters.
2508  *
2509  * Return: none
2510  */
2511 void wma_send_beacon(tp_wma_handle wma, tpSendbeaconParams bcn_info)
2512 {
2513 	uint8_t vdev_id;
2514 	QDF_STATUS status;
2515 	uint8_t *p2p_ie;
2516 	struct sAniBeaconStruct *beacon;
2517 
2518 	beacon = (struct sAniBeaconStruct *) (bcn_info->beacon);
2519 	if (wma_find_vdev_id_by_addr(wma, beacon->macHdr.sa, &vdev_id)) {
2520 		wma_err("failed to get vdev id");
2521 		status = QDF_STATUS_E_INVAL;
2522 		goto send_rsp;
2523 	}
2524 
2525 	if (wmi_service_enabled(wma->wmi_handle,
2526 				   wmi_service_beacon_offload)) {
2527 		status = wma_unified_bcn_tmpl_send(wma, vdev_id, bcn_info, 4);
2528 		if (QDF_IS_STATUS_ERROR(status)) {
2529 			wma_err("wma_unified_bcn_tmpl_send Failed");
2530 			goto send_rsp;
2531 		}
2532 
2533 		if (bcn_info->p2pIeOffset) {
2534 			p2p_ie = bcn_info->beacon + bcn_info->p2pIeOffset;
2535 			wma_debug("p2pIe is present - vdev_id %hu, p2p_ie = %pK, p2p ie len = %hu",
2536 				  vdev_id, p2p_ie, p2p_ie[1]);
2537 			if (wma_p2p_go_set_beacon_ie(wma, vdev_id,
2538 							 p2p_ie) < 0) {
2539 				wma_err("wma_p2p_go_set_beacon_ie Failed");
2540 				status = QDF_STATUS_E_INVAL;
2541 				goto send_rsp;
2542 			}
2543 		}
2544 	}
2545 	status = wma_store_bcn_tmpl(wma, vdev_id, bcn_info);
2546 	if (status != QDF_STATUS_SUCCESS) {
2547 		wma_err("wma_store_bcn_tmpl Failed");
2548 		goto send_rsp;
2549 	}
2550 
2551 send_rsp:
2552 	bcn_info->status = status;
2553 	wma_send_msg(wma, WMA_SEND_BCN_RSP, (void *)bcn_info, 0);
2554 }
2555 
2556 /**
2557  * wma_set_keepalive_req() - send keep alive request to fw
2558  * @wma: wma handle
2559  * @keepalive: keep alive parameters
2560  *
2561  * Return: none
2562  */
2563 void wma_set_keepalive_req(tp_wma_handle wma,
2564 			   struct keep_alive_req *keepalive)
2565 {
2566 	wma_nofl_debug("KEEPALIVE:PacketType:%d", keepalive->packetType);
2567 	wma_set_sta_keep_alive(wma, keepalive->sessionId,
2568 			       keepalive->packetType,
2569 			       keepalive->timePeriod,
2570 			       keepalive->hostIpv4Addr,
2571 			       keepalive->destIpv4Addr,
2572 			       keepalive->dest_macaddr.bytes);
2573 
2574 	qdf_mem_free(keepalive);
2575 }
2576 
2577 /**
2578  * wma_beacon_miss_handler() - beacon miss event handler
2579  * @wma: wma handle
2580  * @vdev_id: vdev id
2581  * @rssi: rssi value
2582  *
2583  * This function sends a beacon miss indication to upper layers.
2584  *
2585  * Return: none
2586  */
2587 void wma_beacon_miss_handler(tp_wma_handle wma, uint32_t vdev_id, int32_t rssi)
2588 {
2589 	struct missed_beacon_ind *beacon_miss_ind;
2590 	struct mac_context *mac = cds_get_context(QDF_MODULE_ID_PE);
2591 
2592 	beacon_miss_ind = qdf_mem_malloc(sizeof(*beacon_miss_ind));
2593 	if (!beacon_miss_ind)
2594 		return;
2595 
2596 	if (mac && mac->sme.tx_queue_cb)
2597 		mac->sme.tx_queue_cb(mac->hdd_handle, vdev_id,
2598 				     WLAN_STOP_ALL_NETIF_QUEUE,
2599 				     WLAN_CONTROL_PATH);
2600 	beacon_miss_ind->messageType = WMA_MISSED_BEACON_IND;
2601 	beacon_miss_ind->length = sizeof(*beacon_miss_ind);
2602 	beacon_miss_ind->bss_idx = vdev_id;
2603 	beacon_miss_ind->rssi = rssi;
2604 
2605 	wma_send_msg(wma, WMA_MISSED_BEACON_IND, beacon_miss_ind, 0);
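	/*
	 * Targets without hw db2dbm support report RSSI relative to the
	 * noise floor; convert it to dBm before reporting the lost link.
	 */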
2606 	if (!wmi_service_enabled(wma->wmi_handle,
2607 				 wmi_service_hw_db2dbm_support))
2608 		rssi += WMA_TGT_NOISE_FLOOR_DBM;
2609 	wma_lost_link_info_handler(wma, vdev_id, rssi);
2610 }
2611 
2612 void wlan_cm_send_beacon_miss(uint8_t vdev_id, int32_t rssi)
2613 {
2614 	tp_wma_handle wma;
2615 
2616 	wma = cds_get_context(QDF_MODULE_ID_WMA);
2617 	if (!wma)
2618 		return;
2619 
2620 	wma_beacon_miss_handler(wma, vdev_id, rssi);
2621 }
2622 
2623 /**
2624  * wma_get_status_str() - get string of tx status from firmware
2625  * @status: tx status
2626  *
2627  * Return: converted string of tx status
2628  */
2629 static const char *wma_get_status_str(uint32_t status)
2630 {
2631 	switch (status) {
2632 	default:
2633 		return "unknown";
2634 	CASE_RETURN_STRING(WMI_MGMT_TX_COMP_TYPE_COMPLETE_OK);
2635 	CASE_RETURN_STRING(WMI_MGMT_TX_COMP_TYPE_DISCARD);
2636 	CASE_RETURN_STRING(WMI_MGMT_TX_COMP_TYPE_INSPECT);
2637 	CASE_RETURN_STRING(WMI_MGMT_TX_COMP_TYPE_COMPLETE_NO_ACK);
2638 	CASE_RETURN_STRING(WMI_MGMT_TX_COMP_TYPE_MAX);
2639 	}
2640 }
2641 
2642 #ifdef CONFIG_HL_SUPPORT
2643 static inline void wma_mgmt_unmap_buf(tp_wma_handle wma_handle, qdf_nbuf_t buf)
2644 {
2645 }
2646 #else
2647 static inline void wma_mgmt_unmap_buf(tp_wma_handle wma_handle, qdf_nbuf_t buf)
2648 {
2649 	qdf_nbuf_unmap_single(wma_handle->qdf_dev, buf, QDF_DMA_TO_DEVICE);
2650 }
2651 #endif
2652 
2653 #if defined(CONNECTIVITY_PKTLOG) || !defined(REMOVE_PKT_LOG)
2654 /**
2655  * wma_mgmt_qdf_status_map() - map MGMT Tx completion status to
2656  * packet dump Tx status
2657  * @status: MGMT Tx completion status
2658  *
2659  * Return: packet dump tx_status enum
2660  */
2661 static inline enum qdf_dp_tx_rx_status
2662 wma_mgmt_qdf_status_map(WMI_MGMT_TX_COMP_STATUS_TYPE status)
2663 {
2664 	enum qdf_dp_tx_rx_status pktdump_status;
2665 
2666 	switch (status) {
2667 	case WMI_MGMT_TX_COMP_TYPE_COMPLETE_OK:
2668 		pktdump_status = QDF_TX_RX_STATUS_OK;
2669 		break;
2670 	case WMI_MGMT_TX_COMP_TYPE_DISCARD:
2671 		pktdump_status = QDF_TX_RX_STATUS_DROP;
2672 		break;
2673 	case WMI_MGMT_TX_COMP_TYPE_COMPLETE_NO_ACK:
2674 		pktdump_status = QDF_TX_RX_STATUS_NO_ACK;
2675 		break;
2676 	default:
2677 		pktdump_status = QDF_TX_RX_STATUS_DROP;
2678 		break;
2679 	}
2680 	return pktdump_status;
2681 }
2682 
2683 /**
2684  * wma_mgmt_pktdump_tx_handler() - calls tx cb if CONNECTIVITY_PKTLOG
2685  * feature is enabled
2686  * @wma_handle: wma handle
2687  * @buf: nbuf
2688  * @vdev_id : vdev id
2689  * @status : status
2690  *
2691  * Return: none
2692  */
2693 static inline void wma_mgmt_pktdump_tx_handler(tp_wma_handle wma_handle,
2694 					       qdf_nbuf_t buf, uint8_t vdev_id,
2695 					       uint32_t status)
2696 {
2697 	ol_txrx_pktdump_cb packetdump_cb;
2698 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
2699 	enum qdf_dp_tx_rx_status pktdump_status;
2700 
2701 	packetdump_cb = wma_handle->wma_mgmt_tx_packetdump_cb;
2702 	pktdump_status = wma_mgmt_qdf_status_map(status);
2703 	if (packetdump_cb)
2704 		packetdump_cb(soc, WMI_PDEV_ID_SOC, vdev_id,
2705 			      buf, pktdump_status, QDF_TX_MGMT_PKT);
2706 }
2707 
2708 /**
2709  * wma_mgmt_pktdump_rx_handler() - calls rx cb if CONNECTIVITY_PKTLOG
2710  * feature is enabled
2711  * @mgmt_rx_params: mgmt rx params
2712  * @rx_pkt: cds packet
2713  * @wma_handle: wma handle
2714  * @mgt_type: management type
2715  * @mgt_subtype: management subtype
2716  *
2717  * Return: none
2718  */
2719 static inline void wma_mgmt_pktdump_rx_handler(
2720 			struct mgmt_rx_event_params *mgmt_rx_params,
2721 			cds_pkt_t *rx_pkt, tp_wma_handle wma_handle,
2722 			uint8_t mgt_type, uint8_t mgt_subtype)
2723 {
2724 	ol_txrx_pktdump_cb packetdump_cb;
2725 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
2726 
2727 	packetdump_cb = wma_handle->wma_mgmt_rx_packetdump_cb;
2728 	if ((mgt_type == IEEE80211_FC0_TYPE_MGT &&
2729 	     mgt_subtype != MGMT_SUBTYPE_BEACON) &&
2730 	     packetdump_cb)
2731 		packetdump_cb(soc, mgmt_rx_params->pdev_id,
2732 			      rx_pkt->pkt_meta.session_id, rx_pkt->pkt_buf,
2733 			      QDF_TX_RX_STATUS_OK, QDF_RX_MGMT_PKT);
2734 }
2735 
2736 #else
2737 static inline void wma_mgmt_pktdump_tx_handler(tp_wma_handle wma_handle,
2738 					       qdf_nbuf_t buf, uint8_t vdev_id,
2739 					       uint32_t status)
2740 {
2741 }
2742 
2743 static inline void wma_mgmt_pktdump_rx_handler(
2744 			struct mgmt_rx_event_params *mgmt_rx_params,
2745 			cds_pkt_t *rx_pkt, tp_wma_handle wma_handle,
2746 			uint8_t mgt_type, uint8_t mgt_subtype)
2747 {
2748 }
2749 #endif
2750 
2751 /**
2752  * wma_process_mgmt_tx_completion() - process mgmt completion
2753  * @wma_handle: wma handle
2754  * @desc_id: descriptor id
2755  * @status: status
2756  *
2757  * Return: 0 for success or error code
2758  */
2759 static int wma_process_mgmt_tx_completion(tp_wma_handle wma_handle,
2760 					  uint32_t desc_id, uint32_t status)
2761 {
2762 	struct wlan_objmgr_pdev *pdev;
2763 	qdf_nbuf_t buf = NULL;
2764 	QDF_STATUS ret;
2765 	uint8_t vdev_id = 0;
2766 	struct wmi_mgmt_params mgmt_params = {};
2767 
2768 	if (wma_validate_handle(wma_handle))
2769 		return -EINVAL;
2770 
2771 	wma_debug("status: %s wmi_desc_id: %d",
2772 		  wma_get_status_str(status), desc_id);
2773 
2774 	pdev = wma_handle->pdev;
2775 	if (!pdev) {
2776 		wma_err("pdev ptr is NULL");
2777 		return -EINVAL;
2778 	}
2779 
2780 	buf = mgmt_txrx_get_nbuf(pdev, desc_id);
2781 
2783 	if (buf)
2784 		wma_mgmt_unmap_buf(wma_handle, buf);
2785 
2786 	vdev_id = mgmt_txrx_get_vdev_id(pdev, desc_id);
2787 	mgmt_params.vdev_id = vdev_id;
2788 
2789 	wma_mgmt_pktdump_tx_handler(wma_handle, buf, vdev_id, status);
2790 	ret = mgmt_txrx_tx_completion_handler(pdev, desc_id, status,
2791 					      &mgmt_params);
2792 
2793 	if (ret != QDF_STATUS_SUCCESS) {
2794 		wma_err("Failed to process mgmt tx completion");
2795 		return -EINVAL;
2796 	}
2797 
2798 	return 0;
2799 }
2800 
2801 /**
2802  * wma_extract_mgmt_offload_event_params() - Extract mgmt event params
2803  * @params: Management offload event params
2804  * @hdr: Management header to extract
2805  *
2806  * Return: None
2807  */
2808 static void wma_extract_mgmt_offload_event_params(
2809 				struct mgmt_offload_event_params *params,
2810 				wmi_mgmt_hdr *hdr)
2811 {
2812 	params->tsf_l32 = hdr->tsf_l32;
2813 	params->chan_freq = hdr->chan_freq;
2814 	params->rate_kbps = hdr->rate_kbps;
2815 	params->rssi = hdr->rssi;
2816 	params->buf_len = hdr->buf_len;
2817 	params->tx_status = hdr->tx_status;
2818 	params->tx_retry_cnt = hdr->tx_retry_cnt;
2819 }
2820 
2821 /**
2822  * wma_mgmt_tx_completion_handler() - wma mgmt Tx completion event handler
2823  * @handle: wma handle
2824  * @cmpl_event_params: completion event handler data
2825  * @len: length of @cmpl_event_params
2826  *
2827  * Return: 0 on success; error number otherwise
2828  */
2829 
2831 				   uint32_t len)
2832 {
2833 	tp_wma_handle wma_handle = (tp_wma_handle)handle;
2834 	WMI_MGMT_TX_COMPLETION_EVENTID_param_tlvs *param_buf;
2835 	wmi_mgmt_tx_compl_event_fixed_param *cmpl_params;
2836 
2837 	param_buf = (WMI_MGMT_TX_COMPLETION_EVENTID_param_tlvs *)
2838 		cmpl_event_params;
2839 	if (!param_buf || !wma_handle) {
2840 		wma_err("Invalid mgmt Tx completion event");
2841 		return -EINVAL;
2842 	}
2843 	cmpl_params = param_buf->fixed_param;
2844 
2845 	if ((ucfg_pkt_capture_get_pktcap_mode(wma_handle->psoc) &
2846 	    PKT_CAPTURE_MODE_MGMT_ONLY) && param_buf->mgmt_hdr) {
2847 		struct mgmt_offload_event_params params = {0};
2848 
2849 		wma_extract_mgmt_offload_event_params(
2850 					&params,
2851 					(wmi_mgmt_hdr *)param_buf->mgmt_hdr);
2852 		ucfg_pkt_capture_mgmt_tx_completion(wma_handle->pdev,
2853 						    cmpl_params->desc_id,
2854 						    cmpl_params->status,
2855 						    &params);
2856 	}
2857 
2858 	wma_process_mgmt_tx_completion(wma_handle, cmpl_params->desc_id,
2859 				       cmpl_params->status);
2860 
2861 	return 0;
2862 }
2863 
2864 /**
2865  * wma_mgmt_tx_bundle_completion_handler() - mgmt bundle comp handler
2866  * @handle: wma handle
2867  * @buf: buffer
2868  * @len: length
2869  *
2870  * Return: 0 for success or error code
2871  */
2872 int wma_mgmt_tx_bundle_completion_handler(void *handle, uint8_t *buf,
2873 				   uint32_t len)
2874 {
2875 	tp_wma_handle wma_handle = (tp_wma_handle)handle;
2876 	WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID_param_tlvs *param_buf;
2877 	wmi_mgmt_tx_compl_bundle_event_fixed_param	*cmpl_params;
2878 	uint32_t num_reports;
2879 	uint32_t *desc_ids;
2880 	uint32_t *status;
2881 	uint32_t i, buf_len;
2882 	bool excess_data = false;
2883 
2884 	param_buf = (WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID_param_tlvs *)buf;
2885 	if (!param_buf || !wma_handle) {
2886 		wma_err("Invalid mgmt Tx completion event");
2887 		return -EINVAL;
2888 	}
2889 	cmpl_params = param_buf->fixed_param;
2890 	num_reports = cmpl_params->num_reports;
2891 	desc_ids = (uint32_t *)(param_buf->desc_ids);
2892 	status = (uint32_t *)(param_buf->status);
2893 
2894 	/* buf contains num_reports * sizeof(uint32) bytes of desc_ids and
2895 	 * num_reports * sizeof(uint32) bytes of status, so
2896 	 * 2 * (num_reports * sizeof(uint32)) must not exceed the max size
2897 	 */
2898 	if (cmpl_params->num_reports > (WMI_SVC_MSG_MAX_SIZE /
2899 	    (2 * sizeof(uint32_t))))
2900 		excess_data = true;
2901 	else
2902 		buf_len = cmpl_params->num_reports * (2 * sizeof(uint32_t));
2903 
2904 	if (excess_data || (sizeof(*cmpl_params) > (WMI_SVC_MSG_MAX_SIZE -
2905 	    buf_len))) {
2906 		wma_err("excess wmi buffer: num_reports %d",
2907 			cmpl_params->num_reports);
2908 		return -EINVAL;
2909 	}
2910 
2911 	if ((cmpl_params->num_reports > param_buf->num_desc_ids) ||
2912 	    (cmpl_params->num_reports > param_buf->num_status)) {
2913 		wma_err("Invalid num_reports %d, num_desc_ids %d, num_status %d",
2914 			 cmpl_params->num_reports, param_buf->num_desc_ids,
2915 			 param_buf->num_status);
2916 		return -EINVAL;
2917 	}
2918 
2919 	for (i = 0; i < num_reports; i++) {
2920 		if ((ucfg_pkt_capture_get_pktcap_mode(wma_handle->psoc) &
2921 		    PKT_CAPTURE_MODE_MGMT_ONLY) && param_buf->mgmt_hdr) {
2922 			struct mgmt_offload_event_params params = {0};
2923 
2924 			wma_extract_mgmt_offload_event_params(
2925 				&params,
2926 				&((wmi_mgmt_hdr *)param_buf->mgmt_hdr)[i]);
2927 			ucfg_pkt_capture_mgmt_tx_completion(
2928 				wma_handle->pdev, desc_ids[i],
2929 				status[i], &params);
2930 		}
2931 
2932 		wma_process_mgmt_tx_completion(wma_handle,
2933 					       desc_ids[i], status[i]);
2934 	}
2935 	return 0;
2936 }
2937 
2938 /**
2939  * wma_process_update_opmode() - process update VHT opmode cmd from UMAC
2940  * @wma_handle: wma handle
2941  * @update_vht_opmode: vht opmode
2942  *
2943  * Return: none
2944  */
2945 void wma_process_update_opmode(tp_wma_handle wma_handle,
2946 			       tUpdateVHTOpMode *update_vht_opmode)
2947 {
2948 	wmi_host_channel_width ch_width;
2949 	uint8_t pdev_id;
2950 	struct wlan_objmgr_peer *peer;
2951 	struct wlan_objmgr_psoc *psoc = wma_handle->psoc;
2952 	enum wlan_phymode peer_phymode;
2953 	uint32_t fw_phymode;
2954 	enum wlan_peer_type peer_type;
2955 
2956 	pdev_id = wlan_objmgr_pdev_get_pdev_id(wma_handle->pdev);
2957 	peer = wlan_objmgr_get_peer(psoc, pdev_id,
2958 				    update_vht_opmode->peer_mac,
2959 				    WLAN_LEGACY_WMA_ID);
2960 	if (!peer) {
2961 		wma_err("peer object invalid");
2962 		return;
2963 	}
2964 
2965 	peer_type = wlan_peer_get_peer_type(peer);
2966 	if (peer_type == WLAN_PEER_SELF) {
2967 		wma_err("self peer wrongly used");
2968 		wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
2969 		return;
2970 	}
2971 
2972 	wlan_peer_obj_lock(peer);
2973 	peer_phymode = wlan_peer_get_phymode(peer);
2974 	wlan_peer_obj_unlock(peer);
2975 	wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
2976 
2977 	fw_phymode = wmi_host_to_fw_phymode(peer_phymode);
2978 
2979 	ch_width = wmi_get_ch_width_from_phy_mode(wma_handle->wmi_handle,
2980 						  fw_phymode);
2981 	wma_debug("ch_width: %d, fw phymode: %d peer_phymode %d",
2982 		  ch_width, fw_phymode, peer_phymode);
2983 	if (ch_width < update_vht_opmode->opMode) {
2984 		wma_err("Invalid peer bw update %d, self bw %d",
2985 			update_vht_opmode->opMode, ch_width);
2986 		return;
2987 	}
2988 
2989 	wma_debug("opMode = %d", update_vht_opmode->opMode);
2990 	wma_set_peer_param(wma_handle, update_vht_opmode->peer_mac,
2991 			   WMI_HOST_PEER_CHWIDTH, update_vht_opmode->opMode,
2992 			   update_vht_opmode->smesessionId);
2993 }
2994 
2995 /**
2996  * wma_process_update_rx_nss() - process update RX NSS cmd from UMAC
2997  * @wma_handle: wma handle
2998  * @update_rx_nss: rx nss value
2999  *
3000  * Return: none
3001  */
3002 void wma_process_update_rx_nss(tp_wma_handle wma_handle,
3003 			       tUpdateRxNss *update_rx_nss)
3004 {
3005 	struct target_psoc_info *tgt_hdl;
3006 	struct wma_txrx_node *intr =
3007 		&wma_handle->interfaces[update_rx_nss->smesessionId];
3008 	int rx_nss = update_rx_nss->rxNss;
3009 	int num_rf_chains;
3010 
3011 	tgt_hdl = wlan_psoc_get_tgt_if_handle(wma_handle->psoc);
3012 	if (!tgt_hdl) {
3013 		wma_err("target psoc info is NULL");
3014 		return;
3015 	}
3016 
3017 	num_rf_chains = target_if_get_num_rf_chains(tgt_hdl);
3018 	if (rx_nss > num_rf_chains || rx_nss > WMA_MAX_NSS)
3019 		rx_nss = QDF_MIN(num_rf_chains, WMA_MAX_NSS);
3020 
3021 	intr->nss = (uint8_t)rx_nss;
3022 	update_rx_nss->rxNss = (uint32_t)rx_nss;
3023 
3024 	wma_debug("Rx Nss = %d", update_rx_nss->rxNss);
3025 
3026 	wma_set_peer_param(wma_handle, update_rx_nss->peer_mac,
3027 			   WMI_HOST_PEER_NSS, update_rx_nss->rxNss,
3028 			   update_rx_nss->smesessionId);
3029 }
3030 
3031 /**
3032  * wma_process_update_membership() - process update group membership cmd
3033  * @wma_handle: wma handle
3034  * @membership: group membership info
3035  *
3036  * Return: none
3037  */
3038 void wma_process_update_membership(tp_wma_handle wma_handle,
3039 				   tUpdateMembership *membership)
3040 {
3041 	wma_debug("membership = %x ", membership->membership);
3042 
3043 	wma_set_peer_param(wma_handle, membership->peer_mac,
3044 			   WMI_HOST_PEER_MEMBERSHIP, membership->membership,
3045 			   membership->smesessionId);
3046 }
3047 
3048 /**
3049  * wma_process_update_userpos() - process update user pos cmd from UMAC
3050  * @wma_handle: wma handle
3051  * @userpos: user pos value
3052  *
3053  * Return: none
3054  */
3055 void wma_process_update_userpos(tp_wma_handle wma_handle,
3056 				tUpdateUserPos *userpos)
3057 {
3058 	wma_debug("userPos = %x ", userpos->userPos);
3059 
3060 	wma_set_peer_param(wma_handle, userpos->peer_mac,
3061 			   WMI_HOST_PEER_USERPOS, userpos->userPos,
3062 			   userpos->smesessionId);
3063 
3064 	/* Now that membership/userpos is updated in fw,
3065 	 * enable GID PPS.
3066 	 */
3067 	wma_set_ppsconfig(userpos->smesessionId, WMA_VHT_PPS_GID_MATCH, 1);
3068 
3069 }
3070 
3071 QDF_STATUS wma_set_cts2self_for_p2p_go(void *wma_handle,
3072 				    uint32_t cts2self_for_p2p_go)
3073 {
3074 	int32_t ret;
3075 	tp_wma_handle wma = (tp_wma_handle)wma_handle;
3076 	struct pdev_params pdevparam = {};
3077 
3078 	pdevparam.param_id = wmi_pdev_param_cts2self_for_p2p_go_config;
3079 	pdevparam.param_value = cts2self_for_p2p_go;
3080 
3081 	ret = wmi_unified_pdev_param_send(wma->wmi_handle,
3082 			&pdevparam,
3083 			WMA_WILDCARD_PDEV_ID);
3084 	if (ret) {
3085 		wma_err("Fail to Set CTS2SELF for p2p GO %d",
3086 			cts2self_for_p2p_go);
3087 		return QDF_STATUS_E_FAILURE;
3088 	}
3089 
3090 	wma_nofl_debug("Successfully Set CTS2SELF for p2p GO %d",
3091 		       cts2self_for_p2p_go);
3092 
3093 	return QDF_STATUS_SUCCESS;
3094 }
3095 
3096 
3097 /**
3098  * wma_set_htconfig() - set ht config parameters to target
3099  * @vdev_id: vdev id
3100  * @ht_capab: ht capability
3101  * @value: value of ht param
3102  *
3103  * Return: QDF status
3104  */
3105 QDF_STATUS wma_set_htconfig(uint8_t vdev_id, uint16_t ht_capab, int value)
3106 {
3107 	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
3108 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
3109 
3110 	if (!wma)
3111 		return QDF_STATUS_E_INVAL;
3112 
3113 	switch (ht_capab) {
3114 	case WNI_CFG_HT_CAP_INFO_ADVANCE_CODING:
3115 		ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
3116 						      wmi_vdev_param_ldpc,
3117 						      value);
3118 		break;
3119 	case WNI_CFG_HT_CAP_INFO_TX_STBC:
3120 		ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
3121 						      wmi_vdev_param_tx_stbc,
3122 						      value);
3123 		break;
3124 	case WNI_CFG_HT_CAP_INFO_RX_STBC:
3125 		ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
3126 						      wmi_vdev_param_rx_stbc,
3127 						      value);
3128 		break;
3129 	case WNI_CFG_HT_CAP_INFO_SHORT_GI_20MHZ:
3130 	case WNI_CFG_HT_CAP_INFO_SHORT_GI_40MHZ:
3131 		wma_err("ht_capab = %d, value = %d", ht_capab,
3132 			 value);
3133 		ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
3134 						wmi_vdev_param_sgi, value);
3135 		if (ret == QDF_STATUS_SUCCESS)
3136 			wma->interfaces[vdev_id].config.shortgi = value;
3137 		break;
3138 	default:
3139 		wma_err("INVALID HT CONFIG");
3140 	}
3141 
3142 	return ret;
3143 }
3144 
3145 /**
3146  * wma_extract_ccmp_pn() - extract 6 byte PN from the CCMP header
3147  * @ccmp_ptr: CCMP header
3148  *
3149  * Return: PN extracted from header.
3150  */
3151 static uint64_t wma_extract_ccmp_pn(uint8_t *ccmp_ptr)
3152 {
3153 	uint8_t rsvd, key, pn[6];
3154 	uint64_t new_pn;
3155 
3156 	/*
3157 	 *   +-----+-----+------+----------+-----+-----+-----+-----+
3158 	 *   | PN0 | PN1 | rsvd | rsvd/key | PN2 | PN3 | PN4 | PN5 |
3159 	 *   +-----+-----+------+----------+-----+-----+-----+-----+
3160 	 *                   CCMP Header Format
3161 	 */
3162 
3163 	/* Extract individual bytes */
3164 	pn[0] = (uint8_t) *ccmp_ptr;
3165 	pn[1] = (uint8_t) *(ccmp_ptr + 1);
3166 	rsvd = (uint8_t) *(ccmp_ptr + 2);
3167 	key = (uint8_t) *(ccmp_ptr + 3);
3168 	pn[2] = (uint8_t) *(ccmp_ptr + 4);
3169 	pn[3] = (uint8_t) *(ccmp_ptr + 5);
3170 	pn[4] = (uint8_t) *(ccmp_ptr + 6);
3171 	pn[5] = (uint8_t) *(ccmp_ptr + 7);
3172 
3173 	/* Form 6 byte PN with 6 individual bytes of PN */
3174 	new_pn = ((uint64_t) pn[5] << 40) |
3175 		 ((uint64_t) pn[4] << 32) |
3176 		 ((uint64_t) pn[3] << 24) |
3177 		 ((uint64_t) pn[2] << 16) |
3178 		 ((uint64_t) pn[1] << 8) | ((uint64_t) pn[0] << 0);
3179 
3180 	return new_pn;
3181 }
3182 
3183 /**
3184  * wma_is_ccmp_pn_replay_attack() - detect replay attacking using PN in CCMP
3185  * @wma: wma context
3186  * @wh: 802.11 frame header
3187  * @ccmp_ptr: CCMP frame header
3188  *
3189  * Return: true/false
3190  */
3191 static bool
3192 wma_is_ccmp_pn_replay_attack(tp_wma_handle wma, struct ieee80211_frame *wh,
3193 			     uint8_t *ccmp_ptr)
3194 {
3195 	uint64_t new_pn;
3196 	bool ret = false;
3197 	struct peer_mlme_priv_obj *peer_priv;
3198 	struct wlan_objmgr_peer *peer;
3199 
3200 	new_pn = wma_extract_ccmp_pn(ccmp_ptr);
3201 
3202 	peer = wlan_objmgr_get_peer_by_mac(wma->psoc, wh->i_addr2,
3203 					   WLAN_LEGACY_WMA_ID);
3204 	if (!peer)
3205 		return ret;
3206 
3207 	peer_priv = wlan_objmgr_peer_get_comp_private_obj(peer,
3208 							  WLAN_UMAC_COMP_MLME);
3209 	if (!peer_priv) {
3210 		wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
3211 		return ret;
3212 	}
3213 
3214 	if (peer_priv->last_pn_valid) {
3215 		if (new_pn > peer_priv->last_pn) {
3216 			peer_priv->last_pn = new_pn;
3217 		} else {
3218 			wma_err_rl("PN Replay attack detected");
3219 			/* per 11W amendment, keeping track of replay attacks */
3220 			peer_priv->rmf_pn_replays += 1;
3221 			ret = true;
3222 		}
3223 	} else {
3224 		peer_priv->last_pn_valid = 1;
3225 		peer_priv->last_pn = new_pn;
3226 	}
3227 
3228 	wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
3229 
3230 	return ret;
3231 }
3232 
3233 /**
3234  * wma_process_bip() - process mmie in rmf frame
3235  * @wma_handle: wma handle
3236  * @iface: txrx node
3237  * @wh: 80211 frame
3238  * @wbuf: Buffer
3239  *
3240  * Return: 0 for success or error code
3241  */
3242 static
3243 int wma_process_bip(tp_wma_handle wma_handle, struct wma_txrx_node *iface,
3244 		    struct ieee80211_frame *wh, qdf_nbuf_t wbuf)
3245 {
3246 	uint16_t mmie_size;
3247 	uint8_t *efrm;
3248 	int32_t mgmtcipherset;
3249 	enum wlan_crypto_cipher_type key_cipher;
3250 
3251 	efrm = qdf_nbuf_data(wbuf) + qdf_nbuf_len(wbuf);
3252 
3253 	mgmtcipherset = wlan_crypto_get_param(iface->vdev,
3254 					      WLAN_CRYPTO_PARAM_MGMT_CIPHER);
3255 	if (mgmtcipherset <= 0) {
3256 		wma_err("Invalid key cipher %d", mgmtcipherset);
3257 		return -EINVAL;
3258 	}
3259 
3260 	if (mgmtcipherset & (1 << WLAN_CRYPTO_CIPHER_AES_CMAC)) {
3261 		key_cipher = WLAN_CRYPTO_CIPHER_AES_CMAC;
3262 		mmie_size = cds_get_mmie_size();
3263 	} else if (mgmtcipherset & (1 << WLAN_CRYPTO_CIPHER_AES_GMAC)) {
3264 		key_cipher = WLAN_CRYPTO_CIPHER_AES_GMAC;
3265 		mmie_size = cds_get_gmac_mmie_size();
3266 	} else if (mgmtcipherset & (1 << WLAN_CRYPTO_CIPHER_AES_GMAC_256)) {
3267 		key_cipher = WLAN_CRYPTO_CIPHER_AES_GMAC_256;
3268 		mmie_size = cds_get_gmac_mmie_size();
3269 	} else {
3270 		wma_err("Invalid key cipher %d", mgmtcipherset);
3271 		return -EINVAL;
3272 	}
3273 
3274 	/* Check if frame is invalid length */
3275 	if (efrm - (uint8_t *)wh < sizeof(*wh) + mmie_size) {
3276 		wma_err("Invalid frame length");
3277 		return -EINVAL;
3278 	}
3279 
3280 	switch (key_cipher) {
3281 	case WLAN_CRYPTO_CIPHER_AES_CMAC:
3282 		if (!wmi_service_enabled(wma_handle->wmi_handle,
3283 					 wmi_service_sta_pmf_offload)) {
3284 			if (!wlan_crypto_is_mmie_valid(iface->vdev,
3285 						       (uint8_t *)wh, efrm)) {
3286 				wma_debug("BC/MC MIC error or MMIE not present, dropping the frame");
3287 				return -EINVAL;
3288 			}
3289 		}
3290 		break;
3291 	case WLAN_CRYPTO_CIPHER_AES_GMAC:
3292 	case WLAN_CRYPTO_CIPHER_AES_GMAC_256:
3293 		if (!wmi_service_enabled(wma_handle->wmi_handle,
3294 					 wmi_service_gmac_offload_support)) {
3295 			if (!wlan_crypto_is_mmie_valid(iface->vdev,
3296 						       (uint8_t *)wh, efrm)) {
3297 				wma_debug("BC/MC GMAC MIC error or MMIE not present, dropping the frame");
3298 				return -EINVAL;
3299 			}
3300 		}
3301 		break;
3302 	default:
3303 		wma_err("Invalid key_type %d", key_cipher);
3304 		return -EINVAL;
3305 	}
3306 
3307 	qdf_nbuf_trim_tail(wbuf, mmie_size);
3308 
3309 	return 0;
3310 }
3311 
3312 /**
3313  * wma_process_rmf_frame() - process rmf frame
3314  * @wma_handle: wma handle
3315  * @iface: txrx node
3316  * @wh: 80211 frame
3317  * @rx_pkt: rx packet
3318  * @wbuf: Buffer
3319  *
3320  * Return: 0 for success or error code
3321  */
3322 static
3323 int wma_process_rmf_frame(tp_wma_handle wma_handle,
3324 	struct wma_txrx_node *iface,
3325 	struct ieee80211_frame *wh,
3326 	cds_pkt_t *rx_pkt,
3327 	qdf_nbuf_t wbuf)
3328 {
3329 	uint8_t *orig_hdr;
3330 	uint8_t *ccmp;
3331 	uint8_t mic_len, hdr_len, pdev_id;
3332 	QDF_STATUS status;
3333 
3334 	if ((wh)->i_fc[1] & IEEE80211_FC1_WEP) {
3335 		if (QDF_IS_ADDR_BROADCAST(wh->i_addr1) ||
3336 		    IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3337 			wma_err("Encrypted BC/MC frame dropping the frame");
3338 			cds_pkt_return_packet(rx_pkt);
3339 			return -EINVAL;
3340 		}
3341 
3342 		if (iface->type == WMI_VDEV_TYPE_NDI) {
3343 			hdr_len = IEEE80211_CCMP_HEADERLEN;
3344 			mic_len = IEEE80211_CCMP_MICLEN;
3345 		} else {
3346 			pdev_id =
3347 				wlan_objmgr_pdev_get_pdev_id(wma_handle->pdev);
3348 			status = mlme_get_peer_mic_len(wma_handle->psoc,
3349 						       pdev_id, wh->i_addr2,
3350 						       &mic_len, &hdr_len);
3351 			if (QDF_IS_STATUS_ERROR(status)) {
3352 				wma_err("Failed to get mic hdr and length");
3353 				cds_pkt_return_packet(rx_pkt);
3354 				return -EINVAL;
3355 			}
3356 		}
3357 
3358 		if (qdf_nbuf_len(wbuf) < (sizeof(*wh) + hdr_len + mic_len)) {
3359 			wma_err("Buffer length less than expected %d",
3360 				 (int)qdf_nbuf_len(wbuf));
3361 			cds_pkt_return_packet(rx_pkt);
3362 			return -EINVAL;
3363 		}
3364 
3365 		orig_hdr = (uint8_t *) qdf_nbuf_data(wbuf);
3366 		/* Pointer to head of CCMP header */
3367 		ccmp = orig_hdr + sizeof(*wh);
3368 		if (wma_is_ccmp_pn_replay_attack(wma_handle, wh, ccmp)) {
3369 			wma_err_rl("Dropping the frame");
3370 			cds_pkt_return_packet(rx_pkt);
3371 			return -EINVAL;
3372 		}
3373 
3374 		/* Strip privacy headers (and trailer)
3375 		 * for a received frame
3376 		 */
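		/* Slide the 802.11 header forward by hdr_len bytes so it sits
		 * directly before the decrypted payload, then drop the stale
		 * leading bytes and trim the MIC from the tail.
		 */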
3377 		qdf_mem_move(orig_hdr +
3378 			hdr_len, wh,
3379 			sizeof(*wh));
3380 		qdf_nbuf_pull_head(wbuf,
3381 			hdr_len);
3382 		qdf_nbuf_trim_tail(wbuf, mic_len);
3383 		/*
3384 		 * The CCMP header has been pulled off; reinitialize the
3385 		 * start pointer of the mac header to avoid accessing an
3386 		 * incorrect address.
3387 		 */
3388 		wh = (struct ieee80211_frame *) qdf_nbuf_data(wbuf);
3389 		rx_pkt->pkt_meta.mpdu_hdr_ptr =
3390 				qdf_nbuf_data(wbuf);
3391 		rx_pkt->pkt_meta.mpdu_len = qdf_nbuf_len(wbuf);
3392 		rx_pkt->pkt_buf = wbuf;
3393 		if (rx_pkt->pkt_meta.mpdu_len >=
3394 			rx_pkt->pkt_meta.mpdu_hdr_len) {
3395 			rx_pkt->pkt_meta.mpdu_data_len =
3396 				rx_pkt->pkt_meta.mpdu_len -
3397 				rx_pkt->pkt_meta.mpdu_hdr_len;
3398 		} else {
3399 			wma_err("mpdu len %d less than hdr %d, dropping frame",
3400 				rx_pkt->pkt_meta.mpdu_len,
3401 				rx_pkt->pkt_meta.mpdu_hdr_len);
3402 			cds_pkt_return_packet(rx_pkt);
3403 			return -EINVAL;
3404 		}
3405 
3406 		if (rx_pkt->pkt_meta.mpdu_data_len > MAX_MGMT_MPDU_LEN) {
3407 			wma_err("Data Len %d greater than max, dropping frame",
3408 				rx_pkt->pkt_meta.mpdu_data_len);
3409 			cds_pkt_return_packet(rx_pkt);
3410 			return -EINVAL;
3411 		}
3412 		rx_pkt->pkt_meta.mpdu_data_ptr =
3413 		rx_pkt->pkt_meta.mpdu_hdr_ptr +
3414 		rx_pkt->pkt_meta.mpdu_hdr_len;
3415 		wma_debug("BSSID: "QDF_MAC_ADDR_FMT" tsf_delta: %u",
3416 			  QDF_MAC_ADDR_REF(wh->i_addr3),
3417 			  rx_pkt->pkt_meta.tsf_delta);
3418 	} else {
3419 		if (QDF_IS_ADDR_BROADCAST(wh->i_addr1) ||
3420 		    IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3421 			if (0 != wma_process_bip(wma_handle, iface, wh, wbuf)) {
3422 				cds_pkt_return_packet(rx_pkt);
3423 				return -EINVAL;
3424 			}
3425 		} else {
3426 			wma_err_rl("Rx unprotected unicast mgmt frame");
3427 			rx_pkt->pkt_meta.dpuFeedback =
3428 				DPU_FEEDBACK_UNPROTECTED_ERROR;
3429 		}
3430 	}
3431 	return 0;
3432 }
3433 
3434 /**
3435  * wma_get_peer_pmf_status() - Get the PMF capability of peer
3436  * @wma: wma handle
3437  * @peer_mac: peer mac addr
3438  *
3439  * Return: True if PMF is enabled, false otherwise.
3440  */
3441 static bool
3442 wma_get_peer_pmf_status(tp_wma_handle wma, uint8_t *peer_mac)
3443 {
3444 	struct wlan_objmgr_peer *peer;
3445 	bool is_pmf_enabled;
3446 
3447 	if (!peer_mac) {
3448 		wma_err("peer_mac is NULL");
3449 		return false;
3450 	}
3451 
3452 	peer = wlan_objmgr_get_peer(wma->psoc,
3453 				    wlan_objmgr_pdev_get_pdev_id(wma->pdev),
3454 				    peer_mac, WLAN_LEGACY_WMA_ID);
3455 	if (!peer) {
3456 		wma_debug("Peer of peer_mac " QDF_MAC_ADDR_FMT " not found",
3457 			  QDF_MAC_ADDR_REF(peer_mac));
3458 		return false;
3459 	}
3460 	is_pmf_enabled = mlme_get_peer_pmf_status(peer);
3461 	wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
3462 	wma_nofl_debug("get is_pmf_enabled %d for "QDF_MAC_ADDR_FMT,
3463 		       is_pmf_enabled, QDF_MAC_ADDR_REF(peer_mac));
3464 
3465 	return is_pmf_enabled;
3466 }
3467 
3468 /**
3469  * wma_check_and_process_rmf_frame() - Process the frame if it is of rmf type
3470  * @wma_handle: wma handle
3471  * @vdev_id: vdev id
3472  * @wh: double pointer to 802.11 frame header which will be updated if the
3473  *	frame is of rmf type.
3474  * @rx_pkt: rx packet
3475  * @buf: Buffer
3476  *
3477  * Process the frame as rmf frame only if both DUT and peer are of PMF capable
3478  *
3479  * Return: 0 for success or error code
3480  */
3481 static int
3482 wma_check_and_process_rmf_frame(tp_wma_handle wma_handle,
3483 				uint8_t vdev_id,
3484 				struct ieee80211_frame **wh,
3485 				cds_pkt_t *rx_pkt,
3486 				qdf_nbuf_t buf)
3487 {
3488 	int status;
3489 	struct wma_txrx_node *iface;
3490 	struct ieee80211_frame *hdr = *wh;
3491 
3492 	iface = &(wma_handle->interfaces[vdev_id]);
3493 	if (iface->type != WMI_VDEV_TYPE_NDI && !iface->rmfEnabled)
3494 		return 0;
3495 
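	/*
	 * Treat the frame as robust management if it is group or broadcast
	 * addressed, if the peer advertises PMF, or (for NDI vdevs) if the
	 * protected-frame bit is set in the frame control field.
	 */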
3496 	if (qdf_is_macaddr_group((struct qdf_mac_addr *)(hdr->i_addr1)) ||
3497 	    qdf_is_macaddr_broadcast((struct qdf_mac_addr *)(hdr->i_addr1)) ||
3498 	    wma_get_peer_pmf_status(wma_handle, hdr->i_addr2) ||
3499 	    (iface->type == WMI_VDEV_TYPE_NDI &&
3500 	    (hdr->i_fc[1] & IEEE80211_FC1_WEP))) {
3501 		status = wma_process_rmf_frame(wma_handle, iface, hdr,
3502 					       rx_pkt, buf);
3503 		if (status)
3504 			return status;
3505 		/*
3506 		 * The CCMP header might have been pulled off; reinitialize
3507 		 * the mac header start pointer.
3508 		 */
3509 		*wh = (struct ieee80211_frame *)qdf_nbuf_data(buf);
3510 	}
3511 
3512 	return 0;
3513 }
3514 
3515 /**
3516  * wma_is_pkt_drop_candidate() - check if the mgmt frame should be dropped
3517  * @wma_handle: wma handle
3518  * @peer_addr: peer MAC address
3519  * @bssid: BSSID Address
3520  * @subtype: Management frame subtype
3521  *
3522  * This function decides whether a particular management frame should be
3523  * dropped to prevent a DoS attack (currently, beacons from NAN devices).
3524  *
3525  * Return: true if the packet should be dropped and false otherwise
3526  */
3527 static bool wma_is_pkt_drop_candidate(tp_wma_handle wma_handle,
3528 				      uint8_t *peer_addr, uint8_t *bssid,
3529 				      uint8_t subtype)
3530 {
3531 	bool should_drop = false;
3532 	uint8_t nan_addr[] = {0x50, 0x6F, 0x9A, 0x01, 0x00, 0x00};
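	/* NAN cluster IDs take the form 50:6F:9A:01:xx:xx */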
3533 
3534 	/* Drop the beacons from NAN device */
3535 	if ((subtype == MGMT_SUBTYPE_BEACON) &&
3536 		(!qdf_mem_cmp(nan_addr, bssid, NAN_CLUSTER_ID_BYTES))) {
3537 		should_drop = true;
3538 		goto end;
3539 	}
3540 end:
3541 	return should_drop;
3542 }
3543 
3544 #define RATE_LIMIT 16
3545 
3546 int wma_form_rx_packet(qdf_nbuf_t buf,
3547 			struct mgmt_rx_event_params *mgmt_rx_params,
3548 			cds_pkt_t *rx_pkt)
3549 {
3550 	uint8_t vdev_id = WMA_INVALID_VDEV_ID;
3551 	struct ieee80211_frame *wh;
3552 	uint8_t mgt_type, mgt_subtype;
3553 	int status;
3554 	tp_wma_handle wma_handle = (tp_wma_handle)
3555 				cds_get_context(QDF_MODULE_ID_WMA);
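	/*
	 * The counters start at RATE_LIMIT - 1 so that the first occurrence
	 * is logged immediately; thereafter only every RATE_LIMIT'th
	 * occurrence is logged.
	 */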
3556 	static uint8_t limit_prints_invalid_len = RATE_LIMIT - 1;
3557 	static uint8_t limit_prints_load_unload = RATE_LIMIT - 1;
3558 	static uint8_t limit_prints_recovery = RATE_LIMIT - 1;
3559 
3560 	if (!wma_handle) {
3561 		qdf_nbuf_free(buf);
3562 		qdf_mem_free(rx_pkt);
3563 		return -EINVAL;
3564 	}
3565 
3566 	if (!mgmt_rx_params) {
3567 		limit_prints_invalid_len++;
3568 		if (limit_prints_invalid_len == RATE_LIMIT) {
3569 			wma_debug("mgmt rx params is NULL");
3570 			limit_prints_invalid_len = 0;
3571 		}
3572 		qdf_nbuf_free(buf);
3573 		qdf_mem_free(rx_pkt);
3574 		return -EINVAL;
3575 	}
3576 
3577 	if (cds_is_load_or_unload_in_progress()) {
3578 		limit_prints_load_unload++;
3579 		if (limit_prints_load_unload == RATE_LIMIT) {
3580 			wma_debug("Load/Unload in progress");
3581 			limit_prints_load_unload = 0;
3582 		}
3583 		qdf_nbuf_free(buf);
3584 		qdf_mem_free(rx_pkt);
3585 		return -EINVAL;
3586 	}
3587 
3588 	if (cds_is_driver_recovering()) {
3589 		limit_prints_recovery++;
3590 		if (limit_prints_recovery == RATE_LIMIT) {
3591 			wma_debug("Recovery in progress");
3592 			limit_prints_recovery = 0;
3593 		}
3594 		qdf_nbuf_free(buf);
3595 		qdf_mem_free(rx_pkt);
3596 		return -EINVAL;
3597 	}
3598 
3599 	if (cds_is_driver_in_bad_state()) {
3600 		limit_prints_recovery++;
3601 		if (limit_prints_recovery == RATE_LIMIT) {
3602 			wma_debug("Driver in bad state");
3603 			limit_prints_recovery = 0;
3604 		}
3605 		qdf_nbuf_free(buf);
3606 		qdf_mem_free(rx_pkt);
3607 		return -EINVAL;
3608 	}
3609 
3610 	/*
3611 	 * Fill in meta information needed by pe/lim
3612 	 * TODO: Try to maintain rx metainfo as part of skb->data.
3613 	 */
3614 	rx_pkt->pkt_meta.frequency = mgmt_rx_params->chan_freq;
3615 	rx_pkt->pkt_meta.scan_src = mgmt_rx_params->flags;
3616 
3617 	/*
3618 	 * Derive the rssi value from the reported snr value
3619 	 * using the standard noise floor of -96 dBm.
3620 	 */
3621 	rx_pkt->pkt_meta.rssi = mgmt_rx_params->snr +
3622 				WMA_NOISE_FLOOR_DBM_DEFAULT;
3623 	rx_pkt->pkt_meta.snr = mgmt_rx_params->snr;
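	/*
	 * For example, with the default -96 dBm noise floor a reported snr
	 * of 30 dB gives rssi = 30 + (-96) = -66 dBm.
	 */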
3624 
3625 	/* If absolute rssi is available from firmware, use it */
3626 	if (mgmt_rx_params->rssi != 0)
3627 		rx_pkt->pkt_meta.rssi_raw = mgmt_rx_params->rssi;
3628 	else
3629 		rx_pkt->pkt_meta.rssi_raw = rx_pkt->pkt_meta.rssi;
3630 
3632 	/*
3633 	 * FIXME: Assigning the local timestamp since the hw timestamp is
3634 	 * not available. Need to check whether pe/lim really uses this data.
3635 	 */
3636 	rx_pkt->pkt_meta.timestamp = (uint32_t) jiffies;
3637 	rx_pkt->pkt_meta.mpdu_hdr_len = sizeof(struct ieee80211_frame);
3638 	rx_pkt->pkt_meta.mpdu_len = mgmt_rx_params->buf_len;
3639 
3640 	/*
3641 	 * The buf_len should be at least 802.11 header len
3642 	 */
3643 	if (mgmt_rx_params->buf_len < rx_pkt->pkt_meta.mpdu_hdr_len) {
3644 		wma_err("MPDU len %d less than header len %d",
3645 			 mgmt_rx_params->buf_len,
3646 			 rx_pkt->pkt_meta.mpdu_hdr_len);
3647 		qdf_nbuf_free(buf);
3648 		qdf_mem_free(rx_pkt);
3649 		return -EINVAL;
3650 	}
3651 
3652 	rx_pkt->pkt_meta.mpdu_data_len = mgmt_rx_params->buf_len -
3653 					 rx_pkt->pkt_meta.mpdu_hdr_len;
3654 
3655 	rx_pkt->pkt_meta.roamCandidateInd = 0;
3656 
3657 	wh = (struct ieee80211_frame *)qdf_nbuf_data(buf);
3658 
3659 	/*
3660 	 * If the mpdu_data_len is greater than Max (2k), drop the frame
3661 	 */
3662 	if (rx_pkt->pkt_meta.mpdu_data_len > MAX_MGMT_MPDU_LEN) {
3663 		wma_err("Data Len %d greater than max, dropping frame from "QDF_MAC_ADDR_FMT,
3664 			 rx_pkt->pkt_meta.mpdu_data_len,
3665 			 QDF_MAC_ADDR_REF(wh->i_addr3));
3666 		qdf_nbuf_free(buf);
3667 		qdf_mem_free(rx_pkt);
3668 		return -EINVAL;
3669 	}
3670 
3671 	rx_pkt->pkt_meta.mpdu_hdr_ptr = qdf_nbuf_data(buf);
3672 	rx_pkt->pkt_meta.mpdu_data_ptr = rx_pkt->pkt_meta.mpdu_hdr_ptr +
3673 					 rx_pkt->pkt_meta.mpdu_hdr_len;
3674 	rx_pkt->pkt_meta.tsf_delta = mgmt_rx_params->tsf_delta;
3675 	rx_pkt->pkt_buf = buf;
3676 	rx_pkt->pkt_meta.pkt_qdf_buf = buf;
3677 
3678 	/* Extract the frame type and subtype from the frame control field */
3679 	mgt_type = (wh)->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3680 	mgt_subtype = (wh)->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3681 
3682 	if (mgt_type == IEEE80211_FC0_TYPE_MGT &&
3683 	    (mgt_subtype == MGMT_SUBTYPE_DISASSOC ||
3684 	     mgt_subtype == MGMT_SUBTYPE_DEAUTH ||
3685 	     mgt_subtype == MGMT_SUBTYPE_ACTION)) {
3686 		if (wma_find_vdev_id_by_bssid(wma_handle, wh->i_addr3,
3687 					      &vdev_id) == QDF_STATUS_SUCCESS) {
3688 			status = wma_check_and_process_rmf_frame(wma_handle,
3689 								 vdev_id,
3690 								 &wh,
3691 								 rx_pkt,
3692 								 buf);
3693 			if (status)
3694 				return status;
3695 		} else if (wma_find_vdev_id_by_addr(wma_handle, wh->i_addr1,
3696 					      &vdev_id) == QDF_STATUS_SUCCESS) {
3697 			status = wma_check_and_process_rmf_frame(wma_handle,
3698 								 vdev_id,
3699 								 &wh,
3700 								 rx_pkt,
3701 								 buf);
3702 			if (status)
3703 				return status;
3704 		}
3705 	}
3706 
3707 	rx_pkt->pkt_meta.session_id =
3708 		(vdev_id == WMA_INVALID_VDEV_ID ? 0 : vdev_id);
3709 
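	/*
	 * Beacons and probe responses must carry at least the fixed fields
	 * (timestamp, beacon interval and capability) after the 802.11
	 * header; anything shorter is malformed and dropped.
	 */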
3710 	if (mgt_type == IEEE80211_FC0_TYPE_MGT &&
3711 	    (mgt_subtype == MGMT_SUBTYPE_BEACON ||
3712 	     mgt_subtype == MGMT_SUBTYPE_PROBE_RESP)) {
3713 		if (mgmt_rx_params->buf_len <=
3714 			(sizeof(struct ieee80211_frame) +
3715 			offsetof(struct wlan_bcn_frame, ie))) {
3716 			wma_debug("Dropping frame from "QDF_MAC_ADDR_FMT,
3717 				 QDF_MAC_ADDR_REF(wh->i_addr3));
3718 			cds_pkt_return_packet(rx_pkt);
3719 			return -EINVAL;
3720 		}
3721 	}
3722 
3723 	if (wma_is_pkt_drop_candidate(wma_handle, wh->i_addr2, wh->i_addr3,
3724 					mgt_subtype)) {
3725 		cds_pkt_return_packet(rx_pkt);
3726 		return -EINVAL;
3727 	}
3728 	wma_mgmt_pktdump_rx_handler(mgmt_rx_params, rx_pkt,
3729 				    wma_handle, mgt_type, mgt_subtype);
3730 
3731 	return 0;
3732 }
3733 
3734 /**
3735  * wma_mem_endianness_based_copy() - does memory copy from src to dst
3736  * @dst: destination address
3737  * @src: source address
3738  * @size: size to be copied
3739  *
3740  * This function copies @size bytes from the source address to the
3741  * destination address, byte-swapping each 32-bit word on big-endian hosts.
3742  *
3743  * Return: Nothing
3744  */
3745 #ifdef BIG_ENDIAN_HOST
3746 static void wma_mem_endianness_based_copy(
3747 			uint8_t *dst, uint8_t *src, uint32_t size)
3748 {
3749 	/*
3750 	 * On a big-endian host the copy engine byte_swap is enabled,
3751 	 * but the rx mgmt frame buffer content is already in network
3752 	 * byte order. Byte swap each 32-bit word of the buffer here so
3753 	 * that, after the copy engine performs its byte_swap, the host
3754 	 * sees the buffer content in the correct byte order.
3755 	 */
3756 
3757 	uint32_t i;
3758 	uint32_t *destp, *srcp;
3759 
3760 	destp = (uint32_t *) dst;
3761 	srcp = (uint32_t *) src;
3762 	for (i = 0; i < (roundup(size, sizeof(uint32_t)) / 4); i++) {
3763 		*destp = cpu_to_le32(*srcp);
3764 		destp++;
3765 		srcp++;
3766 	}
3767 }
3768 #else
3769 static void wma_mem_endianness_based_copy(
3770 			uint8_t *dst, uint8_t *src, uint32_t size)
3771 {
3772 	qdf_mem_copy(dst, src, size);
3773 }
3774 #endif
3775 
3776 #define RESERVE_BYTES                   100
3777 /**
3778  * wma_mgmt_rx_process() - process management rx frame.
3779  * @handle: wma handle
3780  * @data: rx data
3781  * @data_len: data length
3782  *
3783  * Return: 0 for success or error code
3784  */
3785 static int wma_mgmt_rx_process(void *handle, uint8_t *data,
3786 				  uint32_t data_len)
3787 {
3788 	tp_wma_handle wma_handle = (tp_wma_handle) handle;
3789 	struct mgmt_rx_event_params *mgmt_rx_params;
3790 	struct wlan_objmgr_psoc *psoc;
3791 	uint8_t *bufp;
3792 	qdf_nbuf_t wbuf;
3793 	QDF_STATUS status;
3794 
3795 	if (wma_validate_handle(wma_handle))
3796 		return -EINVAL;
3797 
3798 	mgmt_rx_params = qdf_mem_malloc(sizeof(*mgmt_rx_params));
3799 	if (!mgmt_rx_params)
3800 		return -ENOMEM;
3802 
3803 	if (wmi_extract_mgmt_rx_params(wma_handle->wmi_handle,
3804 			data, mgmt_rx_params, &bufp) != QDF_STATUS_SUCCESS) {
3805 		wma_err_rl("Extraction of mgmt rx params failed");
3806 		qdf_mem_free(mgmt_rx_params);
3807 		return -EINVAL;
3808 	}
3809 
3810 	if (mgmt_rx_params->buf_len > data_len ||
3811 	    !mgmt_rx_params->buf_len ||
3812 	    !bufp) {
3813 		wma_err_rl("Invalid data_len %u, buf_len %u bufp %pK",
3814 			   data_len, mgmt_rx_params->buf_len, bufp);
3815 		qdf_mem_free(mgmt_rx_params);
3816 		return -EINVAL;
3817 	}
3818 
3819 	if (!mgmt_rx_params->chan_freq) {
3820 		/*
3820 		 * A zero frequency indicates a legacy FW that operates on
3821 		 * channel numbers. It also implies that BAND_6G is not
3822 		 * supported, since BAND_6G works only on frequencies, so the
3823 		 * channel numbers here can be treated as unique.
3825 		 */
3826 		mgmt_rx_params->chan_freq = wlan_reg_legacy_chan_to_freq(
3827 					    wma_handle->pdev,
3828 					    mgmt_rx_params->channel);
3829 	}
3830 
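	/*
	 * This legacy WMA rx path services only a single pdev, hence
	 * pdev_id 0; no additional rx params are attached here.
	 */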
3831 	mgmt_rx_params->pdev_id = 0;
3832 	mgmt_rx_params->rx_params = NULL;
3833 
3834 	/*
3835 	 * Allocate the memory for this rx packet and add an extra 100 bytes to:
3836 	 *
3837 	 * 1.  Fill in the RSN capabilities that some APs omit: they account
3838 	 *     for the 2-byte capabilities field in the RSN IE length but do
3839 	 *     not include the data, which makes the unpack core fail with a
3840 	 *     length mismatch. See sir_validate_and_rectify_ies for details.
3841 	 *
3842 	 * 2.  In wma_process_rmf_frame() the driver trims the CCMP header by
3843 	 *     moving the IEEE header over the memory occupied by the CCMP
3844 	 *     header; an overflow is possible if the memory allocated to the
3845 	 *     frame is less than sizeof(struct ieee80211_frame) + CCMP
3846 	 *     header length. The extra 100 bytes avoid this as well.
3847 	 *
3848 	 * 3.  The CCMP header pointer is orig_hdr +
3849 	 *     sizeof(struct ieee80211_frame), which could also cause an OOB
3850 	 *     access if the data length is less than
3851 	 *     sizeof(struct ieee80211_frame); the extra bytes prevent this
3852 	 *     issue too.
3853 	 */
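	/*
	 * The allocation size is rounded up to a 4-byte multiple so the
	 * word-wise wma_mem_endianness_based_copy() stays within bounds;
	 * the padding beyond buf_len is zeroed below.
	 */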
3854 	wbuf = qdf_nbuf_alloc(NULL, roundup(mgmt_rx_params->buf_len +
3855 							RESERVE_BYTES,
3856 							4), 0, 4, false);
3857 	if (!wbuf) {
3858 		qdf_mem_free(mgmt_rx_params);
3859 		return -ENOMEM;
3860 	}
3861 
3862 	qdf_nbuf_put_tail(wbuf, mgmt_rx_params->buf_len);
3863 	qdf_nbuf_set_protocol(wbuf, ETH_P_CONTROL);
3864 
3865 	qdf_mem_zero(((uint8_t *)qdf_nbuf_data(wbuf) + mgmt_rx_params->buf_len),
3866 		     (roundup(mgmt_rx_params->buf_len + RESERVE_BYTES, 4) -
3867 		     mgmt_rx_params->buf_len));
3868 
3869 	wma_mem_endianness_based_copy(qdf_nbuf_data(wbuf),
3870 			bufp, mgmt_rx_params->buf_len);
3871 
3872 	psoc = (struct wlan_objmgr_psoc *)
3873 				wma_handle->psoc;
3874 	if (!psoc) {
3875 		wma_err("psoc ctx is NULL");
3876 		qdf_nbuf_free(wbuf);
3877 		qdf_mem_free(mgmt_rx_params);
3878 		return -EINVAL;
3879 	}
3880 
3881 	status = mgmt_txrx_rx_handler(psoc, wbuf, mgmt_rx_params);
3882 	if (status != QDF_STATUS_SUCCESS) {
3883 		qdf_mem_free(mgmt_rx_params);
3884 		return -EINVAL;
3885 	}
3886 
3887 	qdf_mem_free(mgmt_rx_params);
3888 	return 0;
3889 }
3890 
3891 /**
3892  * wma_de_register_mgmt_frm_client() - deregister the management frame client
3893  *
3894  * This function deregisters the event handler registered for
3895  * WMI_MGMT_RX_EVENTID.
3896  *
3897  * Return: QDF status
3898  */
3899 QDF_STATUS wma_de_register_mgmt_frm_client(void)
3900 {
3901 	tp_wma_handle wma_handle = (tp_wma_handle)
3902 				cds_get_context(QDF_MODULE_ID_WMA);
3903 
3904 	if (!wma_handle)
3905 		return QDF_STATUS_E_NULL_VALUE;
3906 
3907 #ifdef QCA_WIFI_FTM
3908 	if (cds_get_conparam() == QDF_GLOBAL_FTM_MODE)
3909 		return QDF_STATUS_SUCCESS;
3910 #endif
3911 
3912 	if (wmi_unified_unregister_event_handler(wma_handle->wmi_handle,
3913 						 wmi_mgmt_rx_event_id) != 0) {
3914 		wma_err("Failed to Unregister rx mgmt handler with wmi");
3915 		return QDF_STATUS_E_FAILURE;
3916 	}
3917 	return QDF_STATUS_SUCCESS;
3918 }
3919 
3920 #ifdef WLAN_FEATURE_ROAM_OFFLOAD
3921 /**
3922  * wma_register_roaming_callbacks() - Register roaming callbacks
3923  * @csr_roam_auth_event_handle_cb: CSR callback routine pointer
3924  * @pe_roam_synch_cb: PE roam synch callback routine pointer
 * @pe_disconnect_cb: PE callback invoked with the deauth/disassoc frame and
 *	reason code received during roaming
 * @pe_roam_set_ie_cb: PE callback used to set IEs (e.g. for roam invoke)
3925  *
3926  * Register the SME and PE callback routines with WMA for
3927  * handling roaming
3928  *
3929  * Return: Success or Failure Status
3930  */
3931 QDF_STATUS wma_register_roaming_callbacks(
3932 	QDF_STATUS (*csr_roam_auth_event_handle_cb)(struct mac_context *mac,
3933 						    uint8_t vdev_id,
3934 						    struct qdf_mac_addr bssid,
3935 						    uint32_t akm),
3936 	pe_roam_synch_fn_t pe_roam_synch_cb,
3937 	QDF_STATUS (*pe_disconnect_cb) (struct mac_context *mac,
3938 					uint8_t vdev_id,
3939 					uint8_t *deauth_disassoc_frame,
3940 					uint16_t deauth_disassoc_frame_len,
3941 					uint16_t reason_code),
3942 	set_ies_fn_t pe_roam_set_ie_cb)
3943 {
3945 	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
3946 
3947 	if (!wma)
3948 		return QDF_STATUS_E_FAILURE;
3949 
3950 	wma->csr_roam_auth_event_handle_cb = csr_roam_auth_event_handle_cb;
3951 	wma->pe_roam_synch_cb = pe_roam_synch_cb;
3952 	wma->pe_disconnect_cb = pe_disconnect_cb;
3953 	wma->pe_roam_set_ie_cb = pe_roam_set_ie_cb;
3954 	wma_debug("Registered roam synch callbacks with WMA successfully");
3955 
3956 	return QDF_STATUS_SUCCESS;
3957 }
3958 #endif
3959 
3960 /**
3961  * wma_register_mgmt_frm_client() - register management frame callback
3962  *
3963  * This function registers event handler for WMI_MGMT_RX_EVENTID.
3964  *
3965  * Return: QDF status
3966  */
3967 QDF_STATUS wma_register_mgmt_frm_client(void)
3968 {
3969 	tp_wma_handle wma_handle = (tp_wma_handle)
3970 				cds_get_context(QDF_MODULE_ID_WMA);
3971 
3972 	if (!wma_handle)
3973 		return QDF_STATUS_E_NULL_VALUE;
3974 
3975 	if (wmi_unified_register_event_handler(wma_handle->wmi_handle,
3976 					       wmi_mgmt_rx_event_id,
3977 					       wma_mgmt_rx_process,
3978 					       WMA_RX_WORK_CTX) != 0) {
3979 		wma_err("Failed to register rx mgmt handler with wmi");
3980 		return QDF_STATUS_E_FAILURE;
3981 	}
3982 
3983 	return QDF_STATUS_SUCCESS;
3984 }
3985 
3986 /**
3987  * wma_register_packetdump_callback() - stores tx and rx mgmt packet dump
3988  *   callback handler
3989  * @tx_cb: tx mgmt packetdump cb
3990  * @rx_cb: rx mgmt packetdump cb
3991  *
3992  * This function stores the tx and rx mgmt packet dump callbacks.
3993  *
3994  * Return: None
3995  *
3996  */
3997 void wma_register_packetdump_callback(
3998 	ol_txrx_pktdump_cb tx_cb,
3999 	ol_txrx_pktdump_cb rx_cb)
4000 {
4001 	tp_wma_handle wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
4002 
4003 	if (!wma_handle)
4004 		return;
4005 
4006 	wma_handle->wma_mgmt_tx_packetdump_cb = tx_cb;
4007 	wma_handle->wma_mgmt_rx_packetdump_cb = rx_cb;
4008 }
4009 
4010 /**
4011  * wma_deregister_packetdump_callback() - removes tx and rx mgmt packet dump
4012  *   callback handler
4013  *
4014  * This function removes the tx and rx mgmt packet dump callbacks.
4015  *
4016  * Return: None
4017  *
4018  */
4019 void wma_deregister_packetdump_callback(void)
4020 {
4021 	tp_wma_handle wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
4022 
4023 	if (!wma_handle)
4024 		return;
4025 
4026 	wma_handle->wma_mgmt_tx_packetdump_cb = NULL;
4027 	wma_handle->wma_mgmt_rx_packetdump_cb = NULL;
4028 }
4029 
4030 QDF_STATUS wma_mgmt_unified_cmd_send(struct wlan_objmgr_vdev *vdev,
4031 				qdf_nbuf_t buf, uint32_t desc_id,
4032 				void *mgmt_tx_params)
4033 {
4034 	tp_wma_handle wma_handle;
4035 	int ret;
4036 	QDF_STATUS status = QDF_STATUS_E_INVAL;
4037 	struct wmi_mgmt_params *mgmt_params =
4038 			(struct wmi_mgmt_params *)mgmt_tx_params;
4039 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
4040 
4041 	if (!mgmt_params) {
4042 		wma_err("mgmt_params ptr passed is NULL");
4043 		return QDF_STATUS_E_INVAL;
4044 	}
4045 	mgmt_params->desc_id = desc_id;
4046 
4047 	if (!vdev) {
4048 		wma_err("vdev ptr passed is NULL");
4049 		return QDF_STATUS_E_INVAL;
4050 	}
4051 
4052 	wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
4053 	if (!wma_handle)
4054 		return QDF_STATUS_E_INVAL;
4055 
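	/*
	 * If the FW advertises the mgmt-tx-over-WMI service, send the frame
	 * through the WMI path; otherwise fall back to the data path (CDP)
	 * mgmt send.
	 */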
4056 	if (wmi_service_enabled(wma_handle->wmi_handle,
4057 				   wmi_service_mgmt_tx_wmi)) {
4058 		status = wmi_mgmt_unified_cmd_send(wma_handle->wmi_handle,
4059 						   mgmt_params);
4060 	} else {
4061 		QDF_NBUF_CB_MGMT_TXRX_DESC_ID(buf)
4062 						= mgmt_params->desc_id;
4063 
4064 		ret = cdp_mgmt_send_ext(soc, mgmt_params->vdev_id, buf,
4065 					mgmt_params->tx_type,
4066 					mgmt_params->use_6mbps,
4067 					mgmt_params->chanfreq);
4068 		status = qdf_status_from_os_return(ret);
4069 	}
4070 
4071 	if (status != QDF_STATUS_SUCCESS) {
4072 		wma_err("mgmt tx failed");
4073 		return status;
4074 	}
4075 
4076 	return QDF_STATUS_SUCCESS;
4077 }
4078 
4079 #ifndef CONFIG_HL_SUPPORT
4080 void wma_mgmt_nbuf_unmap_cb(struct wlan_objmgr_pdev *pdev,
4081 			    qdf_nbuf_t buf)
4082 {
4083 	struct wlan_objmgr_psoc *psoc;
4084 	qdf_device_t dev;
4085 
4086 	if (!buf)
4087 		return;
4088 
4089 	psoc = wlan_pdev_get_psoc(pdev);
4090 	if (!psoc) {
4091 		wma_err("Psoc handle NULL");
4092 		return;
4093 	}
4094 
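	/* Release the to-device DMA mapping of the mgmt frame buffer */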
4095 	dev = wlan_psoc_get_qdf_dev(psoc);
4096 	qdf_nbuf_unmap_single(dev, buf, QDF_DMA_TO_DEVICE);
4097 }
4098 
4099 QDF_STATUS wma_mgmt_frame_fill_peer_cb(struct wlan_objmgr_peer *peer,
4100 				       qdf_nbuf_t buf)
4101 {
4102 	struct wlan_objmgr_psoc *psoc;
4103 	struct wlan_objmgr_pdev *pdev;
4104 
4105 	psoc = wlan_peer_get_psoc(peer);
4106 	if (!psoc) {
4107 		wma_err("Psoc handle NULL");
4108 		return QDF_STATUS_E_INVAL;
4109 	}
4110 
4111 	pdev = wlan_objmgr_get_pdev_by_id((struct wlan_objmgr_psoc *)psoc,
4112 					  wlan_peer_get_pdev_id(peer),
4113 					  WLAN_LEGACY_WMA_ID);
4114 	if (!pdev) {
4115 		wma_err("Pdev handle NULL");
4116 		return QDF_STATUS_E_INVAL;
4117 	}
4118 	wma_mgmt_nbuf_unmap_cb(pdev, buf);
4119 	wlan_objmgr_pdev_release_ref(pdev, WLAN_LEGACY_WMA_ID);
4120 
4121 	return QDF_STATUS_SUCCESS;
4122 }
4123 
4124 QDF_STATUS
4125 wma_update_edca_pifs_param(WMA_HANDLE handle,
4126 			   struct edca_pifs_vparam *edca_pifs_param)
4127 {
4128 	tp_wma_handle wma_handle = (tp_wma_handle) handle;
4129 	QDF_STATUS status;
4130 
4131 	status = wmi_unified_update_edca_pifs_param(wma_handle->wmi_handle,
4132 						    edca_pifs_param);
4133 
4134 	if (QDF_IS_STATUS_ERROR(status))
4135 		wma_err("Failed to set EDCA/PIFS Parameters");
4136 
4137 	return status;
4138 }
4139 #endif
4140 
4141 QDF_STATUS
4142 wma_update_bss_peer_phy_mode(struct wlan_channel *des_chan,
4143 			     struct wlan_objmgr_vdev *vdev)
4144 {
4145 	struct wlan_objmgr_peer *bss_peer;
4146 	enum wlan_phymode old_peer_phymode, new_phymode;
4147 	tSirNwType nw_type;
4148 	struct vdev_mlme_obj *mlme_obj;
4149 
4150 	bss_peer = wlan_objmgr_vdev_try_get_bsspeer(vdev, WLAN_LEGACY_WMA_ID);
4151 	if (!bss_peer) {
4152 		wma_err("not able to find bss peer for vdev %d",
4153 			wlan_vdev_get_id(vdev));
4154 		return QDF_STATUS_E_INVAL;
4155 	}
4156 
4157 	old_peer_phymode = wlan_peer_get_phymode(bss_peer);
4158 
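	/*
	 * Derive the network type from the new channel's band; on 2.4 GHz,
	 * 11B is retained only if either the new channel or the existing
	 * peer phymode is 11B.
	 */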
4159 	if (WLAN_REG_IS_24GHZ_CH_FREQ(des_chan->ch_freq)) {
4160 		if (des_chan->ch_phymode == WLAN_PHYMODE_11B ||
4161 		    old_peer_phymode == WLAN_PHYMODE_11B)
4162 			nw_type = eSIR_11B_NW_TYPE;
4163 		else
4164 			nw_type = eSIR_11G_NW_TYPE;
4165 	} else {
4166 		nw_type = eSIR_11A_NW_TYPE;
4167 	}
4168 
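	/*
	 * Recompute the peer phymode for the new channel width and band,
	 * carrying over the HT/VHT/HE/EHT capabilities implied by the old
	 * phymode.
	 */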
4169 	new_phymode = wma_peer_phymode(nw_type, STA_ENTRY_PEER,
4170 				       IS_WLAN_PHYMODE_HT(old_peer_phymode),
4171 				       des_chan->ch_width,
4172 				       IS_WLAN_PHYMODE_VHT(old_peer_phymode),
4173 				       IS_WLAN_PHYMODE_HE(old_peer_phymode),
4174 				       wma_is_phymode_eht(old_peer_phymode));
4175 
4176 	if (new_phymode == old_peer_phymode) {
4177 		wma_debug("Ignore update, old %d and new %d phymode are same, vdev_id : %d",
4178 			  old_peer_phymode, new_phymode,
4179 			  wlan_vdev_get_id(vdev));
4180 		wlan_objmgr_peer_release_ref(bss_peer, WLAN_LEGACY_WMA_ID);
4181 		return QDF_STATUS_SUCCESS;
4182 	}
4183 
4184 	mlme_obj = wlan_vdev_mlme_get_cmpt_obj(vdev);
4185 	if (!mlme_obj) {
4186 		wma_err("not able to get mlme_obj");
4187 		wlan_objmgr_peer_release_ref(bss_peer, WLAN_LEGACY_WMA_ID);
4188 		return QDF_STATUS_E_INVAL;
4189 	}
4190 
4191 	wlan_peer_obj_lock(bss_peer);
4192 	wlan_peer_set_phymode(bss_peer, new_phymode);
4193 	wlan_peer_obj_unlock(bss_peer);
4194 
4195 	wlan_objmgr_peer_release_ref(bss_peer, WLAN_LEGACY_WMA_ID);
4196 
4197 	mlme_obj->mgmt.generic.phy_mode = wmi_host_to_fw_phymode(new_phymode);
4198 	des_chan->ch_phymode = new_phymode;
4199 
4200 	return QDF_STATUS_SUCCESS;
4201 }
4202 
4203 QDF_STATUS
4204 cm_send_ies_for_roam_invoke(struct wlan_objmgr_vdev *vdev, uint16_t dot11_mode)
4205 {
4206 	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
4207 	enum QDF_OPMODE op_mode;
4208 	QDF_STATUS status;
4209 	uint8_t vdev_id;
4210 
4211 	if (!wma)
4212 		return QDF_STATUS_E_FAILURE;
4213 
4214 	vdev_id = wlan_vdev_get_id(vdev);
4215 	op_mode = wlan_vdev_mlme_get_opmode(vdev);
4216 
4217 	status = wma->pe_roam_set_ie_cb(wma->mac_context, vdev_id, dot11_mode,
4218 					op_mode);
4219 	return status;
4220 }
4221