xref: /wlan-dirver/qcacld-3.0/core/wma/src/wma_mgmt.c (revision bc1d0241e54731b3c898668b1f9d07f070bc456c)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  *  DOC:  wma_mgmt.c
22  *
23  *  This file contains STA/SAP and protocol related functions.
24  */
25 
26 /* Header files */
27 
28 #include "wma.h"
29 #include "wma_api.h"
30 #include "cds_api.h"
31 #include "wmi_unified_api.h"
32 #include "wlan_qct_sys.h"
33 #include "wni_api.h"
34 #include "ani_global.h"
35 #include "wmi_unified.h"
36 #include "wni_cfg.h"
37 
38 #include "qdf_nbuf.h"
39 #include "qdf_types.h"
40 #include "qdf_mem.h"
41 
42 #include "wma_types.h"
43 #include "lim_api.h"
44 #include "lim_session_utils.h"
45 
46 #include "cds_utils.h"
47 #include "wlan_dlm_api.h"
48 #if defined(CONNECTIVITY_PKTLOG) || !defined(REMOVE_PKT_LOG)
49 #include "pktlog_ac.h"
50 #else
51 #include "pktlog_ac_fmt.h"
52 #endif /* REMOVE_PKT_LOG */
53 
54 #include "dbglog_host.h"
55 #include "csr_api.h"
56 #include "ol_fw.h"
57 #include "wma_internal.h"
58 #include "wlan_policy_mgr_api.h"
59 #include "cdp_txrx_flow_ctrl_legacy.h"
60 #include <cdp_txrx_peer_ops.h>
61 #include <cdp_txrx_pmf.h>
62 #include <cdp_txrx_cfg.h>
63 #include <cdp_txrx_cmn.h>
64 #include <cdp_txrx_misc.h>
66 #include "wlan_mgmt_txrx_tgt_api.h"
67 #include "wlan_objmgr_psoc_obj.h"
68 #include "wlan_objmgr_pdev_obj.h"
69 #include "wlan_objmgr_vdev_obj.h"
70 #include "wlan_lmac_if_api.h"
71 #include <cdp_txrx_handle.h>
72 #include "wma_he.h"
73 #include "wma_eht.h"
74 #include <qdf_crypto.h>
75 #include "wma_twt.h"
76 #include "wlan_p2p_cfg_api.h"
77 #include "cfg_ucfg_api.h"
78 #include "cfg_mlme_sta.h"
79 #include "wlan_mlme_api.h"
80 #include "wmi_unified_bcn_api.h"
81 #include <wlan_crypto_global_api.h>
82 #include <wlan_mlme_main.h>
83 #include <../../core/src/vdev_mgr_ops.h>
84 #include "wlan_pkt_capture_ucfg_api.h"
85 
86 #if defined(CONNECTIVITY_PKTLOG) || !defined(REMOVE_PKT_LOG)
87 #include <wlan_logging_sock_svc.h>
88 #endif
89 #include "wlan_cm_roam_api.h"
90 #include "wlan_cm_api.h"
91 #include "wlan_mlo_link_force.h"
92 #include <target_if_spatial_reuse.h>
93 
94 /* Max debug string size for WMM in bytes */
95 #define WMA_WMM_DEBUG_STRING_SIZE    512
96 
97 /**
98  * wma_send_bcn_buf_ll() - prepare and send beacon buffer to fw for LL
99  * @wma: wma handle
100  * @vdev_id: vdev id
101  * @param_buf: SWBA parameters
102  *
103  * Return: none
104  */
105 #ifdef WLAN_WMI_BCN
106 static void wma_send_bcn_buf_ll(tp_wma_handle wma,
107 				uint8_t vdev_id,
108 				WMI_HOST_SWBA_EVENTID_param_tlvs *param_buf)
109 {
110 	struct ieee80211_frame *wh;
111 	struct beacon_info *bcn;
112 	wmi_tim_info *tim_info = param_buf->tim_info;
113 	uint8_t *bcn_payload;
114 	QDF_STATUS ret;
115 	struct beacon_tim_ie *tim_ie;
116 	wmi_p2p_noa_info *p2p_noa_info = param_buf->p2p_noa_info;
117 	struct p2p_sub_element_noa noa_ie;
118 	struct wmi_bcn_send_from_host params;
119 	uint8_t i;
120 
121 	bcn = wma->interfaces[vdev_id].beacon;
122 	if (!bcn || !bcn->buf) {
123 		wma_err("Invalid beacon buffer");
124 		return;
125 	}
126 
127 	if (!param_buf->tim_info || !param_buf->p2p_noa_info) {
128 		wma_err("Invalid tim info or p2p noa info");
129 		return;
130 	}
131 
132 	if (WMI_UNIFIED_NOA_ATTR_NUM_DESC_GET(p2p_noa_info) >
133 			WMI_P2P_MAX_NOA_DESCRIPTORS) {
134 		wma_err("Too many descriptors %d",
135 			WMI_UNIFIED_NOA_ATTR_NUM_DESC_GET(p2p_noa_info));
136 		return;
137 	}
138 
139 	qdf_spin_lock_bh(&bcn->lock);
140 
141 	bcn_payload = qdf_nbuf_data(bcn->buf);
142 
143 	tim_ie = (struct beacon_tim_ie *)(&bcn_payload[bcn->tim_ie_offset]);
144 
145 	if (tim_info->tim_changed) {
146 		if (tim_info->tim_num_ps_pending)
147 			qdf_mem_copy(&tim_ie->tim_bitmap, tim_info->tim_bitmap,
148 				     WMA_TIM_SUPPORTED_PVB_LENGTH);
149 		else
150 			qdf_mem_zero(&tim_ie->tim_bitmap,
151 				     WMA_TIM_SUPPORTED_PVB_LENGTH);
152 		/*
153 		 * Currently we support fixed number of
154 		 * peers as limited by HAL_NUM_STA.
155 		 * tim offset is always 0
156 		 */
157 		tim_ie->tim_bitctl = 0;
158 	}
159 
160 	/* Update DTIM Count */
161 	if (tim_ie->dtim_count == 0)
162 		tim_ie->dtim_count = tim_ie->dtim_period - 1;
163 	else
164 		tim_ie->dtim_count--;
165 
166 	/*
167 	 * The DTIM count needs to be backed up so that when umac updates
168 	 * the beacon template, the current DTIM count can be restored
169 	 * properly
170 	 */
171 	bcn->dtim_count = tim_ie->dtim_count;
172 
173 	/* update state for buffered multicast frames on DTIM */
174 	if (tim_info->tim_mcast && (tim_ie->dtim_count == 0 ||
175 				    tim_ie->dtim_period == 1))
176 		tim_ie->tim_bitctl |= 1;
177 	else
178 		tim_ie->tim_bitctl &= ~1;
179 
180 	/* To avoid SW-generated frames reusing H/W-generated sequence numbers,
181 	 * values below MIN_SW_SEQ are reserved for HW-generated frames
182 	 */
183 	if ((bcn->seq_no & IEEE80211_SEQ_MASK) < MIN_SW_SEQ)
184 		bcn->seq_no = MIN_SW_SEQ;
185 
186 	wh = (struct ieee80211_frame *)bcn_payload;
187 	*(uint16_t *) &wh->i_seq[0] = htole16(bcn->seq_no
188 					      << IEEE80211_SEQ_SEQ_SHIFT);
189 	bcn->seq_no++;
190 
191 	if (WMI_UNIFIED_NOA_ATTR_IS_MODIFIED(p2p_noa_info)) {
192 		qdf_mem_zero(&noa_ie, sizeof(noa_ie));
193 
194 		noa_ie.index =
195 			(uint8_t) WMI_UNIFIED_NOA_ATTR_INDEX_GET(p2p_noa_info);
196 		noa_ie.oppPS =
197 			(uint8_t) WMI_UNIFIED_NOA_ATTR_OPP_PS_GET(p2p_noa_info);
198 		noa_ie.ctwindow =
199 			(uint8_t) WMI_UNIFIED_NOA_ATTR_CTWIN_GET(p2p_noa_info);
200 		noa_ie.num_descriptors = (uint8_t)
201 				WMI_UNIFIED_NOA_ATTR_NUM_DESC_GET(p2p_noa_info);
202 		wma_debug("index %u, oppPs %u, ctwindow %u, num_descriptors = %u",
203 			 noa_ie.index,
204 			 noa_ie.oppPS, noa_ie.ctwindow, noa_ie.num_descriptors);
205 		for (i = 0; i < noa_ie.num_descriptors; i++) {
206 			noa_ie.noa_descriptors[i].type_count =
207 				(uint8_t) p2p_noa_info->noa_descriptors[i].
208 				type_count;
209 			noa_ie.noa_descriptors[i].duration =
210 				p2p_noa_info->noa_descriptors[i].duration;
211 			noa_ie.noa_descriptors[i].interval =
212 				p2p_noa_info->noa_descriptors[i].interval;
213 			noa_ie.noa_descriptors[i].start_time =
214 				p2p_noa_info->noa_descriptors[i].start_time;
215 			wma_debug("NoA descriptor[%d] type_count %u, duration %u, interval %u, start_time = %u",
216 				 i,
217 				 noa_ie.noa_descriptors[i].type_count,
218 				 noa_ie.noa_descriptors[i].duration,
219 				 noa_ie.noa_descriptors[i].interval,
220 				 noa_ie.noa_descriptors[i].start_time);
221 		}
222 		wma_update_noa(bcn, &noa_ie);
223 
224 		/* Send a msg to LIM to update the NoA IE in probe response
225 		 * frames transmitted by the host
226 		 */
227 		wma_update_probe_resp_noa(wma, &noa_ie);
228 	}
229 
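	/*
	 * Re-establish the DMA mapping on every SWBA so the device sees the
	 * beacon buffer updated above (TIM, DTIM count and NoA contents);
	 * any previous mapping is released first.
	 */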
230 	if (bcn->dma_mapped) {
231 		qdf_nbuf_unmap_single(wma->qdf_dev, bcn->buf, QDF_DMA_TO_DEVICE);
232 		bcn->dma_mapped = 0;
233 	}
234 	ret = qdf_nbuf_map_single(wma->qdf_dev, bcn->buf, QDF_DMA_TO_DEVICE);
235 	if (ret != QDF_STATUS_SUCCESS) {
236 		wma_err("failed to map beacon buf to DMA region");
237 		qdf_spin_unlock_bh(&bcn->lock);
238 		return;
239 	}
240 
241 	bcn->dma_mapped = 1;
242 	params.vdev_id = vdev_id;
243 	params.data_len = bcn->len;
244 	params.frame_ctrl = *((A_UINT16 *) wh->i_fc);
245 	params.frag_ptr = qdf_nbuf_get_frag_paddr(bcn->buf, 0);
246 	params.dtim_flag = 0;
247 	/* notify Firmware of DTIM and mcast/bcast traffic */
248 	if (tim_ie->dtim_count == 0) {
249 		params.dtim_flag |= WMI_BCN_SEND_DTIM_ZERO;
250 		/* deliver mcast/bcast traffic in next DTIM beacon */
251 		if (tim_ie->tim_bitctl & 0x01)
252 			params.dtim_flag |= WMI_BCN_SEND_DTIM_BITCTL_SET;
253 	}
254 
255 	wmi_unified_bcn_buf_ll_cmd(wma->wmi_handle,
256 					&params);
257 
258 	qdf_spin_unlock_bh(&bcn->lock);
259 }
260 #else
261 static inline void
262 wma_send_bcn_buf_ll(tp_wma_handle wma,
263 		    uint8_t vdev_id,
264 		    WMI_HOST_SWBA_EVENTID_param_tlvs *param_buf)
265 {
266 }
267 #endif
268 /**
269  * wma_beacon_swba_handler() - swba event handler
270  * @handle: wma handle
271  * @event: event data
272  * @len: data length
273  *
274  * The SWBA event is an alert from firmware requesting the host to queue a
275  * beacon for transmission; it is used only in host beacon mode
276  *
277  * Return: 0 for success or error code
278  */
279 #ifdef WLAN_WMI_BCN
280 int wma_beacon_swba_handler(void *handle, uint8_t *event, uint32_t len)
281 {
282 	tp_wma_handle wma = (tp_wma_handle) handle;
283 	WMI_HOST_SWBA_EVENTID_param_tlvs *param_buf;
284 	wmi_host_swba_event_fixed_param *swba_event;
285 	uint32_t vdev_map;
286 	uint8_t vdev_id = 0;
287 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
288 
289 	param_buf = (WMI_HOST_SWBA_EVENTID_param_tlvs *) event;
290 	if (!param_buf) {
291 		wma_err("Invalid swba event buffer");
292 		return -EINVAL;
293 	}
294 	swba_event = param_buf->fixed_param;
295 	vdev_map = swba_event->vdev_map;
296 
297 	wma_debug("vdev_map = %d", vdev_map);
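	/*
	 * vdev_map carries one bit per vdev id; only the first vdev whose
	 * bit is set is serviced here (and only for low-latency targets),
	 * after which the loop exits.
	 */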
298 	for (; vdev_map && vdev_id < wma->max_bssid;
299 			vdev_id++, vdev_map >>= 1) {
300 		if (!(vdev_map & 0x1))
301 			continue;
302 		if (!cdp_cfg_is_high_latency(soc,
303 			(struct cdp_cfg *)cds_get_context(QDF_MODULE_ID_CFG)))
304 			wma_send_bcn_buf_ll(wma, vdev_id, param_buf);
305 		break;
306 	}
307 	return 0;
308 }
309 #else
310 static inline int
311 wma_beacon_swba_handler(void *handle, uint8_t *event, uint32_t len)
312 {
313 	return 0;
314 }
315 #endif
316 
317 #ifdef FEATURE_WLAN_DIAG_SUPPORT
318 void wma_sta_kickout_event(uint32_t kickout_reason, uint8_t vdev_id,
319 			   uint8_t *macaddr)
320 {
321 	WLAN_HOST_DIAG_EVENT_DEF(sta_kickout, struct host_event_wlan_kickout);
322 	qdf_mem_zero(&sta_kickout, sizeof(sta_kickout));
323 	sta_kickout.reasoncode = kickout_reason;
324 	sta_kickout.vdev_id = vdev_id;
325 	if (macaddr)
326 		qdf_mem_copy(sta_kickout.peer_mac, macaddr,
327 			     QDF_MAC_ADDR_SIZE);
328 	WLAN_HOST_DIAG_EVENT_REPORT(&sta_kickout, EVENT_WLAN_STA_KICKOUT);
329 }
330 #endif
331 
332 int wma_peer_sta_kickout_event_handler(void *handle, uint8_t *event,
333 				       uint32_t len)
334 {
335 	tp_wma_handle wma = (tp_wma_handle) handle;
336 	WMI_PEER_STA_KICKOUT_EVENTID_param_tlvs *param_buf = NULL;
337 	wmi_peer_sta_kickout_event_fixed_param *kickout_event = NULL;
338 	uint8_t vdev_id, macaddr[QDF_MAC_ADDR_SIZE];
339 	tpDeleteStaContext del_sta_ctx;
340 	uint8_t *addr, *bssid;
341 	struct wlan_objmgr_vdev *vdev;
342 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
343 
344 	param_buf = (WMI_PEER_STA_KICKOUT_EVENTID_param_tlvs *) event;
345 	kickout_event = param_buf->fixed_param;
346 	WMI_MAC_ADDR_TO_CHAR_ARRAY(&kickout_event->peer_macaddr, macaddr);
347 	if (cdp_peer_get_vdevid(soc, macaddr, &vdev_id) !=
348 			QDF_STATUS_SUCCESS) {
349 		wma_err("Not able to find BSSID for peer ["QDF_MAC_ADDR_FMT"]",
350 			 QDF_MAC_ADDR_REF(macaddr));
351 		return -EINVAL;
352 	}
353 
354 	if (!wma_is_vdev_valid(vdev_id))
355 		return -EINVAL;
356 
357 	vdev = wma->interfaces[vdev_id].vdev;
358 	if (!vdev) {
359 		wma_err("Not able to find vdev for VDEV_%d", vdev_id);
360 		return -EINVAL;
361 	}
362 	addr = wlan_vdev_mlme_get_macaddr(vdev);
363 
364 	wma_nofl_info("STA kickout for "QDF_MAC_ADDR_FMT", on mac "QDF_MAC_ADDR_FMT", vdev %d, reason:%d",
365 		      QDF_MAC_ADDR_REF(macaddr), QDF_MAC_ADDR_REF(addr),
366 		      vdev_id, kickout_event->reason);
367 
368 	if (wma_is_roam_in_progress(vdev_id)) {
369 		wma_err("vdev_id %d: Ignore STA kick out since roaming is in progress",
370 			vdev_id);
371 		return -EINVAL;
372 	}
373 	bssid = wma_get_vdev_bssid(vdev);
374 	if (!bssid) {
375 		wma_err("Failed to get bssid for vdev_%d", vdev_id);
376 		return -ENOMEM;
377 	}
378 
379 	switch (kickout_event->reason) {
380 	case WMI_PEER_STA_KICKOUT_REASON_IBSS_DISCONNECT:
381 		goto exit_handler;
382 #ifdef FEATURE_WLAN_TDLS
383 	case WMI_PEER_STA_KICKOUT_REASON_TDLS_DISCONNECT:
384 		del_sta_ctx = (tpDeleteStaContext)
385 			qdf_mem_malloc(sizeof(tDeleteStaContext));
386 		if (!del_sta_ctx) {
387 			wma_err("mem alloc failed for struct del_sta_context for TDLS peer: "QDF_MAC_ADDR_FMT,
388 				QDF_MAC_ADDR_REF(macaddr));
389 			return -ENOMEM;
390 		}
391 
392 		del_sta_ctx->is_tdls = true;
393 		del_sta_ctx->vdev_id = vdev_id;
394 		qdf_mem_copy(del_sta_ctx->addr2, macaddr, QDF_MAC_ADDR_SIZE);
395 		qdf_mem_copy(del_sta_ctx->bssId, bssid,
396 			     QDF_MAC_ADDR_SIZE);
397 		del_sta_ctx->reasonCode = HAL_DEL_STA_REASON_CODE_KEEP_ALIVE;
398 		wma_send_msg(wma, SIR_LIM_DELETE_STA_CONTEXT_IND,
399 			     (void *)del_sta_ctx, 0);
400 		goto exit_handler;
401 #endif /* FEATURE_WLAN_TDLS */
402 
403 	case WMI_PEER_STA_KICKOUT_REASON_UNSPECIFIED:
404 		/*
405 		 * Default legacy value used by original firmware implementation
406 		 */
407 		if (wma->interfaces[vdev_id].type == WMI_VDEV_TYPE_STA &&
408 		    (wma->interfaces[vdev_id].sub_type == 0 ||
409 		     wma->interfaces[vdev_id].sub_type ==
410 		     WMI_UNIFIED_VDEV_SUBTYPE_P2P_CLIENT) &&
411 		    !qdf_mem_cmp(bssid,
412 				    macaddr, QDF_MAC_ADDR_SIZE)) {
413 			wma_sta_kickout_event(
414 			HOST_STA_KICKOUT_REASON_UNSPECIFIED, vdev_id, macaddr);
415 			/*
416 			 * KICKOUT event is for current station-AP connection.
417 			 * Treat it like a final beacon miss. The station may not
418 			 * have missed beacons but has been unable to transmit
419 			 * frames to the AP for a long time; it must disconnect
420 			 * to get out of this sticky situation.
421 			 * In future implementation, roaming module will also
422 			 * handle this event and perform a scan.
423 			 */
424 			wma_warn("WMI_PEER_STA_KICKOUT_REASON_UNSPECIFIED event for STA");
425 			wma_beacon_miss_handler(wma, vdev_id,
426 						kickout_event->rssi);
427 			goto exit_handler;
428 		}
429 		break;
430 
431 	case WMI_PEER_STA_KICKOUT_REASON_XRETRY:
432 	case WMI_PEER_STA_KICKOUT_REASON_INACTIVITY:
433 	/*
434 	 * SA query kickout is handled the same as inactivity kickout.
435 	 * This could be for STA or SAP role
436 	 */
437 	case WMI_PEER_STA_KICKOUT_REASON_SA_QUERY_TIMEOUT:
438 	default:
439 		break;
440 	}
441 
442 	/*
443 	 * default action is to send delete station context indication to LIM
444 	 */
445 	del_sta_ctx =
446 		(tDeleteStaContext *) qdf_mem_malloc(sizeof(tDeleteStaContext));
447 	if (!del_sta_ctx) {
448 		wma_err("QDF MEM Alloc Failed for struct del_sta_context");
449 		return -ENOMEM;
450 	}
451 
452 	del_sta_ctx->is_tdls = false;
453 	del_sta_ctx->vdev_id = vdev_id;
454 	qdf_mem_copy(del_sta_ctx->addr2, macaddr, QDF_MAC_ADDR_SIZE);
455 	qdf_mem_copy(del_sta_ctx->bssId, addr, QDF_MAC_ADDR_SIZE);
456 	if (kickout_event->reason ==
457 		WMI_PEER_STA_KICKOUT_REASON_SA_QUERY_TIMEOUT)
458 		del_sta_ctx->reasonCode =
459 			HAL_DEL_STA_REASON_CODE_SA_QUERY_TIMEOUT;
460 	else if (kickout_event->reason == WMI_PEER_STA_KICKOUT_REASON_XRETRY)
461 		del_sta_ctx->reasonCode = HAL_DEL_STA_REASON_CODE_XRETRY;
462 	else
463 		del_sta_ctx->reasonCode = HAL_DEL_STA_REASON_CODE_KEEP_ALIVE;
464 
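	/*
	 * With db2dbm support the target reports RSSI directly in dBm;
	 * otherwise the reported value is relative to the target noise
	 * floor and is converted by adding WMA_TGT_NOISE_FLOOR_DBM.
	 */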
465 	if (wmi_service_enabled(wma->wmi_handle,
466 				wmi_service_hw_db2dbm_support))
467 		del_sta_ctx->rssi = kickout_event->rssi;
468 	else
469 		del_sta_ctx->rssi = kickout_event->rssi +
470 					WMA_TGT_NOISE_FLOOR_DBM;
471 	wma_sta_kickout_event(del_sta_ctx->reasonCode, vdev_id, macaddr);
472 	wma_send_msg(wma, SIR_LIM_DELETE_STA_CONTEXT_IND, (void *)del_sta_ctx,
473 		     0);
474 	wma_lost_link_info_handler(wma, vdev_id, del_sta_ctx->rssi);
475 
476 exit_handler:
477 	return 0;
478 }
479 
480 int wma_unified_bcntx_status_event_handler(void *handle,
481 					   uint8_t *cmd_param_info,
482 					   uint32_t len)
483 {
484 	tp_wma_handle wma = (tp_wma_handle) handle;
485 	WMI_OFFLOAD_BCN_TX_STATUS_EVENTID_param_tlvs *param_buf;
486 	wmi_offload_bcn_tx_status_event_fixed_param *resp_event;
487 	tSirFirstBeaconTxCompleteInd *beacon_tx_complete_ind;
488 
489 	param_buf =
490 		(WMI_OFFLOAD_BCN_TX_STATUS_EVENTID_param_tlvs *) cmd_param_info;
491 	if (!param_buf) {
492 		wma_err("Invalid bcn tx response event buffer");
493 		return -EINVAL;
494 	}
495 
496 	resp_event = param_buf->fixed_param;
497 
498 	if (resp_event->vdev_id >= wma->max_bssid) {
499 		wma_err("received invalid vdev_id %d", resp_event->vdev_id);
500 		return -EINVAL;
501 	}
502 
503 	/* Check for valid handle to ensure session is not
504 	 * deleted in any race
505 	 */
506 	if (!wma->interfaces[resp_event->vdev_id].vdev) {
507 		wma_err("vdev is NULL for vdev_%d", resp_event->vdev_id);
508 		return -EINVAL;
509 	}
510 
511 	/* Beacon Tx Indication supports only AP mode. Ignore in other modes */
512 	if (wma_is_vdev_in_ap_mode(wma, resp_event->vdev_id) == false) {
513 		wma_debug("Beacon Tx Indication does not support type %d and sub_type %d",
514 			 wma->interfaces[resp_event->vdev_id].type,
515 			 wma->interfaces[resp_event->vdev_id].sub_type);
516 		return 0;
517 	}
518 
519 	beacon_tx_complete_ind = (tSirFirstBeaconTxCompleteInd *)
520 			qdf_mem_malloc(sizeof(tSirFirstBeaconTxCompleteInd));
521 	if (!beacon_tx_complete_ind) {
522 		wma_err("Failed to alloc beacon_tx_complete_ind");
523 		return -ENOMEM;
524 	}
525 
526 	beacon_tx_complete_ind->messageType = WMA_DFS_BEACON_TX_SUCCESS_IND;
527 	beacon_tx_complete_ind->length = sizeof(tSirFirstBeaconTxCompleteInd);
528 	beacon_tx_complete_ind->bss_idx = resp_event->vdev_id;
529 
530 	wma_send_msg(wma, WMA_DFS_BEACON_TX_SUCCESS_IND,
531 		     (void *)beacon_tx_complete_ind, 0);
532 	return 0;
533 }
534 
535 /**
536  * wma_get_go_probe_timeout() - get P2P GO probe timeout
537  * @mac: UMAC handler
538  * @max_inactive_time: return max inactive time
539  * @max_unresponsive_time: return max unresponsive time
540  *
541  * Return: none
542  */
543 #ifdef CONVERGED_P2P_ENABLE
544 static inline void
545 wma_get_go_probe_timeout(struct mac_context *mac,
546 			 uint32_t *max_inactive_time,
547 			 uint32_t *max_unresponsive_time)
548 {
549 	uint32_t keep_alive;
550 	QDF_STATUS status;
551 
552 	status = cfg_p2p_get_go_link_monitor_period(mac->psoc,
553 						    max_inactive_time);
554 	if (QDF_IS_STATUS_ERROR(status)) {
555 		wma_err("Failed to get GO link monitor period");
556 		*max_inactive_time = WMA_LINK_MONITOR_DEFAULT_TIME_SECS;
557 	}
558 	status = cfg_p2p_get_go_keepalive_period(mac->psoc,
559 						 &keep_alive);
560 	if (QDF_IS_STATUS_ERROR(status)) {
561 		wma_err("Failed to read go keep alive");
562 		keep_alive = WMA_KEEP_ALIVE_DEFAULT_TIME_SECS;
563 	}
564 
565 	*max_unresponsive_time = *max_inactive_time + keep_alive;
566 }
567 #else
568 static inline void
569 wma_get_go_probe_timeout(struct mac_context *mac,
570 			 uint32_t *max_inactive_time,
571 			 uint32_t *max_unresponsive_time)
572 {
573 }
574 #endif
575 
576 /**
577  * wma_get_link_probe_timeout() - get link timeout based on sub type
578  * @mac: UMAC handler
579  * @sub_type: vdev sub type
580  * @max_inactive_time: return max inactive time
581  * @max_unresponsive_time: return max unresponsive time
582  *
583  * Return: none
584  */
585 static inline void
586 wma_get_link_probe_timeout(struct mac_context *mac,
587 			   uint32_t sub_type,
588 			   uint32_t *max_inactive_time,
589 			   uint32_t *max_unresponsive_time)
590 {
591 	if (sub_type == WMI_UNIFIED_VDEV_SUBTYPE_P2P_GO) {
592 		wma_get_go_probe_timeout(mac, max_inactive_time,
593 					 max_unresponsive_time);
594 	} else {
595 		*max_inactive_time =
596 			mac->mlme_cfg->timeouts.ap_link_monitor_timeout;
597 		*max_unresponsive_time = *max_inactive_time +
598 			mac->mlme_cfg->timeouts.ap_keep_alive_timeout;
599 	}
600 }
601 
602 /**
603  * wma_verify_rate_code() - verify if rate code is valid.
604  * @rate_code:     rate code
605  * @band:     band information
606  *
607  * Return: verify result
608  */
609 static bool wma_verify_rate_code(u_int32_t rate_code, enum cds_band_type band)
610 {
611 	uint8_t preamble, nss, rate;
612 	bool valid = true;
613 
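	/*
	 * Rate code layout, as parsed below: bits 7:6 carry the preamble
	 * type, bits 5:4 the NSS field and bits 3:0 the rate index.
	 */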
614 	preamble = (rate_code & 0xc0) >> 6;
615 	nss = (rate_code & 0x30) >> 4;
616 	rate = rate_code & 0xf;
617 
618 	switch (preamble) {
619 	case WMI_RATE_PREAMBLE_CCK:
620 		if (nss != 0 || rate > 3 || band == CDS_BAND_5GHZ)
621 			valid = false;
622 		break;
623 	case WMI_RATE_PREAMBLE_OFDM:
624 		if (nss != 0 || rate > 7)
625 			valid = false;
626 		break;
627 	case WMI_RATE_PREAMBLE_HT:
628 		if (nss != 0 || rate > 7)
629 			valid = false;
630 		break;
631 	case WMI_RATE_PREAMBLE_VHT:
632 		if (nss != 0 || rate > 9)
633 			valid = false;
634 		break;
635 	default:
636 		break;
637 	}
638 	return valid;
639 }
640 
641 #define TX_MGMT_RATE_2G_ENABLE_OFFSET 30
642 #define TX_MGMT_RATE_5G_ENABLE_OFFSET 31
643 #define TX_MGMT_RATE_2G_OFFSET 0
644 #define TX_MGMT_RATE_5G_OFFSET 12
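/*
 * Layout of the wmi_vdev_param_per_band_mgmt_tx_rate value built in
 * wma_set_vdev_mgmt_rate(): the 2 GHz rate code sits at bit 0 and the
 * 5 GHz rate code at bit 12 (11 bits each, masked with 0x7FF), while
 * bits 30 and 31 enable the 2 GHz and 5 GHz overrides respectively.
 */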
645 
646 #define MAX_VDEV_MGMT_RATE_PARAMS 2
647 /* params being sent:
648  * wmi_vdev_param_mgmt_tx_rate
649  * wmi_vdev_param_per_band_mgmt_tx_rate
650  */
651 
652 /**
653  * wma_set_vdev_mgmt_rate() - set vdev mgmt rate.
654  * @wma:     wma handle
655  * @vdev_id: vdev id
656  *
657  * Return: None
658  */
659 void wma_set_vdev_mgmt_rate(tp_wma_handle wma, uint8_t vdev_id)
660 {
661 	uint32_t cfg_val;
662 	uint32_t per_band_mgmt_tx_rate = 0;
663 	enum cds_band_type band = 0;
664 	struct mac_context *mac = cds_get_context(QDF_MODULE_ID_PE);
665 	struct dev_set_param setparam[MAX_VDEV_MGMT_RATE_PARAMS] = {};
666 	uint8_t index = 0;
667 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
668 
669 	if (!mac) {
670 		wma_err("Failed to get mac");
671 		return;
672 	}
673 
674 	cfg_val = mac->mlme_cfg->sap_cfg.rate_tx_mgmt;
675 	band = CDS_BAND_ALL;
676 	if ((cfg_val == MLME_CFG_TX_MGMT_RATE_DEF) ||
677 	    !wma_verify_rate_code(cfg_val, band)) {
678 		wma_nofl_debug("default WNI_CFG_RATE_FOR_TX_MGMT, ignore");
679 	} else {
680 		status = mlme_check_index_setparam(setparam,
681 						   wmi_vdev_param_mgmt_tx_rate,
682 						   cfg_val, index++,
683 						   MAX_VDEV_MGMT_RATE_PARAMS);
684 		if (QDF_IS_STATUS_ERROR(status)) {
685 			wma_err("failed at wmi_vdev_param_mgmt_tx_rate");
686 			goto error;
687 		}
688 	}
689 
690 	cfg_val = mac->mlme_cfg->sap_cfg.rate_tx_mgmt_2g;
691 	band = CDS_BAND_2GHZ;
692 	if ((cfg_val == MLME_CFG_TX_MGMT_2G_RATE_DEF) ||
693 	    !wma_verify_rate_code(cfg_val, band)) {
694 		wma_nofl_debug("use default 2G MGMT rate.");
695 		per_band_mgmt_tx_rate &=
696 		    ~(1 << TX_MGMT_RATE_2G_ENABLE_OFFSET);
697 	} else {
698 		per_band_mgmt_tx_rate |=
699 		    (1 << TX_MGMT_RATE_2G_ENABLE_OFFSET);
700 		per_band_mgmt_tx_rate |=
701 		    ((cfg_val & 0x7FF) << TX_MGMT_RATE_2G_OFFSET);
702 	}
703 
704 	cfg_val = mac->mlme_cfg->sap_cfg.rate_tx_mgmt_5g;
705 	band = CDS_BAND_5GHZ;
706 	if ((cfg_val == MLME_CFG_TX_MGMT_5G_RATE_DEF) ||
707 	    !wma_verify_rate_code(cfg_val, band)) {
708 		wma_nofl_debug("use default 5G MGMT rate.");
709 		per_band_mgmt_tx_rate &=
710 		    ~(1 << TX_MGMT_RATE_5G_ENABLE_OFFSET);
711 	} else {
712 		per_band_mgmt_tx_rate |=
713 		    (1 << TX_MGMT_RATE_5G_ENABLE_OFFSET);
714 		per_band_mgmt_tx_rate |=
715 		    ((cfg_val & 0x7FF) << TX_MGMT_RATE_5G_OFFSET);
716 	}
717 
718 	status = mlme_check_index_setparam(setparam,
719 					   wmi_vdev_param_per_band_mgmt_tx_rate,
720 					   per_band_mgmt_tx_rate, index++,
721 					   MAX_VDEV_MGMT_RATE_PARAMS);
722 	if (QDF_IS_STATUS_ERROR(status)) {
723 		wma_err("failed at wmi_vdev_param_per_band_mgmt_tx_rate");
724 		goto error;
725 	}
726 
727 	status = wma_send_multi_pdev_vdev_set_params(MLME_VDEV_SETPARAM,
728 						     vdev_id, setparam, index);
729 	if (QDF_IS_STATUS_ERROR(status))
730 		wma_debug("failed to send MGMT_TX_RATE vdev set params stat:%d",
731 			  status);
732 error:
733 	return;
734 }
735 
736 #define MAX_VDEV_SAP_KEEPALIVE_PARAMS 3
737 /* params being sent:
738  * wmi_vdev_param_ap_keepalive_min_idle_inactive_time_secs
739  * wmi_vdev_param_ap_keepalive_max_idle_inactive_time_secs
740  * wmi_vdev_param_ap_keepalive_max_unresponsive_time_secs
741  */
742 
743 /**
744  * wma_set_sap_keepalive() - set SAP keep alive parameters to fw
745  * @wma: wma handle
746  * @vdev_id: vdev id
747  *
748  * Return: none
749  */
750 void wma_set_sap_keepalive(tp_wma_handle wma, uint8_t vdev_id)
751 {
752 	uint32_t min_inactive_time, max_inactive_time, max_unresponsive_time;
753 	struct mac_context *mac = cds_get_context(QDF_MODULE_ID_PE);
754 	QDF_STATUS status;
755 	struct dev_set_param setparam[MAX_VDEV_SAP_KEEPALIVE_PARAMS] = {};
756 	uint8_t index = 0;
757 
758 	if (!mac) {
759 		wma_err("Failed to get mac");
760 		return;
761 	}
762 
763 	wma_get_link_probe_timeout(mac, wma->interfaces[vdev_id].sub_type,
764 				   &max_inactive_time, &max_unresponsive_time);
765 
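	/*
	 * The minimum idle time is half of the configured maximum inactive
	 * time; firmware can start probing an idle peer once it crosses
	 * this threshold (exact probing behaviour is target dependent).
	 */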
766 	min_inactive_time = max_inactive_time / 2;
767 	status = mlme_check_index_setparam(
768 			setparam,
769 			wmi_vdev_param_ap_keepalive_min_idle_inactive_time_secs,
770 			min_inactive_time, index++,
771 			MAX_VDEV_SAP_KEEPALIVE_PARAMS);
772 	if (QDF_IS_STATUS_ERROR(status)) {
773 		wma_err("failed to set wmi_vdev_param_ap_keepalive_min_idle_inactive_time_secs");
774 		goto error;
775 	}
776 	status = mlme_check_index_setparam(
777 			setparam,
778 			wmi_vdev_param_ap_keepalive_max_idle_inactive_time_secs,
779 			max_inactive_time, index++,
780 			MAX_VDEV_SAP_KEEPALIVE_PARAMS);
781 	if (QDF_IS_STATUS_ERROR(status)) {
782 		wma_err("failed to set wmi_vdev_param_ap_keepalive_max_idle_inactive_time_secs");
783 		goto error;
784 	}
785 	status = mlme_check_index_setparam(
786 			setparam,
787 			wmi_vdev_param_ap_keepalive_max_unresponsive_time_secs,
788 			max_unresponsive_time, index++,
789 			MAX_VDEV_SAP_KEEPALIVE_PARAMS);
790 	if (QDF_IS_STATUS_ERROR(status)) {
791 		wma_err("failed to set wmi_vdev_param_ap_keepalive_max_unresponsive_time_secs");
792 		goto error;
793 	}
794 
795 	status = wma_send_multi_pdev_vdev_set_params(MLME_VDEV_SETPARAM,
796 						     vdev_id, setparam, index);
797 	if (QDF_IS_STATUS_ERROR(status))
798 		wma_err("Failed to Set AP MIN/MAX IDLE INACTIVE TIME, MAX UNRESPONSIVE TIME:%d", status);
799 	else
800 		wma_debug("vdev_id:%d min_inactive_time: %u max_inactive_time: %u max_unresponsive_time: %u",
801 			  vdev_id, min_inactive_time, max_inactive_time,
802 			  max_unresponsive_time);
803 error:
804 	return;
805 }
806 
807 /**
808  * wma_set_sta_sa_query_param() - set sta sa query parameters
809  * @wma: wma handle
810  * @vdev_id: vdev id
811  *
812  * This function sets STA SA query related parameters in fw.
813  *
814  * Return: none
815  */
816 
817 void wma_set_sta_sa_query_param(tp_wma_handle wma,
818 				  uint8_t vdev_id)
819 {
820 	struct mac_context *mac = cds_get_context(QDF_MODULE_ID_PE);
821 	uint8_t max_retries;
822 	uint16_t retry_interval;
823 
824 	if (!mac) {
825 		wma_err("mac context is NULL");
826 		return;
827 	}
828 
829 	max_retries = mac->mlme_cfg->gen.pmf_sa_query_max_retries;
830 	retry_interval = mac->mlme_cfg->gen.pmf_sa_query_retry_interval;
831 
832 	wmi_unified_set_sta_sa_query_param_cmd(wma->wmi_handle,
833 						vdev_id,
834 						max_retries,
835 						retry_interval);
836 }
837 
838 /**
839  * wma_set_sta_keep_alive() - set sta keep alive parameters
840  * @wma: wma handle
841  * @vdev_id: vdev id
842  * @method: method for keep alive
843  * @timeperiod: time period
844  * @hostv4addr: host ipv4 address
845  * @destv4addr: dst ipv4 address
846  * @destmac: destination mac
847  *
848  * This function sets keep alive related parameters in fw.
849  *
850  * Return: none
851  */
852 void wma_set_sta_keep_alive(tp_wma_handle wma, uint8_t vdev_id,
853 			    uint32_t method, uint32_t timeperiod,
854 			    uint8_t *hostv4addr, uint8_t *destv4addr,
855 			    uint8_t *destmac)
856 {
857 	struct sta_keep_alive_params params = { 0 };
858 	struct wma_txrx_node *intr;
859 
860 	if (wma_validate_handle(wma))
861 		return;
862 
863 	intr = &wma->interfaces[vdev_id];
864 	if (timeperiod > cfg_max(CFG_INFRA_STA_KEEP_ALIVE_PERIOD)) {
865 		wmi_err("Invalid period %d Max limit %d", timeperiod,
866 			 cfg_max(CFG_INFRA_STA_KEEP_ALIVE_PERIOD));
867 		return;
868 	}
869 
870 	params.vdev_id = vdev_id;
871 	params.method = method;
872 	params.timeperiod = timeperiod;
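	/*
	 * If a BSS max idle period is set for this interface (e.g. from the
	 * BSS Max Idle Period element), it overrides the requested keepalive
	 * period, and a NULL-data keepalive is promoted to a management-frame
	 * keepalive.
	 */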
873 	if (intr) {
874 		if (intr->bss_max_idle_period) {
875 			params.timeperiod = intr->bss_max_idle_period;
876 			if (method == WMI_KEEP_ALIVE_NULL_PKT)
877 				params.method = WMI_KEEP_ALIVE_MGMT_FRAME;
878 		}
879 	}
880 
881 	if (hostv4addr)
882 		qdf_mem_copy(params.hostv4addr, hostv4addr, QDF_IPV4_ADDR_SIZE);
883 	if (destv4addr)
884 		qdf_mem_copy(params.destv4addr, destv4addr, QDF_IPV4_ADDR_SIZE);
885 	if (destmac)
886 		qdf_mem_copy(params.destmac, destmac, QDF_MAC_ADDR_SIZE);
887 
888 	wmi_unified_set_sta_keep_alive_cmd(wma->wmi_handle, &params);
889 }
890 
891 /*
892  * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
893  *   0 for no restriction
894  *   1 for 1/4 us - Our lower layer calculations limit our precision to 1 msec
895  *   2 for 1/2 us - Our lower layer calculations limit our precision to 1 msec
896  *   3 for 1 us
897  *   4 for 2 us
898  *   5 for 4 us
899  *   6 for 8 us
900  *   7 for 16 us
901  */
902 static const uint8_t wma_mpdu_spacing[] = { 0, 1, 1, 1, 2, 4, 8, 16 };
903 
904 /**
905  * wma_parse_mpdudensity() - give mpdu spacing from mpdu density
906  * @mpdudensity: mpdu density
907  *
908  * Return: mpdu spacing or 0 for error
909  */
910 static inline uint8_t wma_parse_mpdudensity(uint8_t mpdudensity)
911 {
912 	if (mpdudensity < sizeof(wma_mpdu_spacing))
913 		return wma_mpdu_spacing[mpdudensity];
914 	else
915 		return 0;
916 }
917 
918 #define CFG_CTRL_MASK              0xFF00
919 #define CFG_DATA_MASK              0x00FF
920 
921 /**
922  * wma_mask_tx_ht_rate() - mask tx ht rate based on config
923  * @wma:     wma handle
924  * @mcs_set: mcs set buffer
925  *
926  * Return: None
927  */
928 static void wma_mask_tx_ht_rate(tp_wma_handle wma, uint8_t *mcs_set)
929 {
930 	uint32_t i, j;
931 	uint16_t mcs_limit;
932 	uint8_t *rate_pos = mcs_set;
933 	struct mac_context *mac = wma->mac_context;
934 
935 	/*
936 	 * Get MCS limit from ini configure, and map it to rate parameters
937 	 * This will limit HT rate upper bound. CFG_CTRL_MASK is used to
938 	 * check whether ini config is enabled and CFG_DATA_MASK to get the
939 	 * MCS value.
940 	 */
941 	mcs_limit = mac->mlme_cfg->rates.max_htmcs_txdata;
942 
943 	if (mcs_limit & CFG_CTRL_MASK) {
944 		wma_debug("set mcs_limit %x", mcs_limit);
945 
946 		mcs_limit &= CFG_DATA_MASK;
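		/*
		 * Build the MCS bitmap rate by rate: bytes entirely below
		 * the limit are set to 0xff, the byte containing the limit
		 * is filled bit by bit up to mcs_limit, and all remaining
		 * bytes are cleared.
		 */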
947 		for (i = 0, j = 0; i < MAX_SUPPORTED_RATES;) {
948 			if (j < mcs_limit / 8) {
949 				rate_pos[j] = 0xff;
950 				j++;
951 				i += 8;
952 			} else if (j < mcs_limit / 8 + 1) {
953 				if (i <= mcs_limit)
954 					rate_pos[i / 8] |= 1 << (i % 8);
955 				else
956 					rate_pos[i / 8] &= ~(1 << (i % 8));
957 				i++;
958 
959 				if (i >= (j + 1) * 8)
960 					j++;
961 			} else {
962 				rate_pos[j++] = 0;
963 				i += 8;
964 			}
965 		}
966 	}
967 }
968 
969 #if SUPPORT_11AX
970 /**
971  * wma_fw_to_host_phymode_11ax() - convert fw to host phymode for 11ax phymodes
972  * @phymode: phymode to convert
973  *
974  * Return: one of the 11ax values defined in enum wlan_phymode;
975  *         or WLAN_PHYMODE_AUTO if the input is not an 11ax phymode
976  */
977 static enum wlan_phymode
978 wma_fw_to_host_phymode_11ax(WMI_HOST_WLAN_PHY_MODE phymode)
979 {
980 	switch (phymode) {
981 	default:
982 		return WLAN_PHYMODE_AUTO;
983 	case WMI_HOST_MODE_11AX_HE20:
984 		return WLAN_PHYMODE_11AXA_HE20;
985 	case WMI_HOST_MODE_11AX_HE40:
986 		return WLAN_PHYMODE_11AXA_HE40;
987 	case WMI_HOST_MODE_11AX_HE80:
988 		return WLAN_PHYMODE_11AXA_HE80;
989 	case WMI_HOST_MODE_11AX_HE80_80:
990 		return WLAN_PHYMODE_11AXA_HE80_80;
991 	case WMI_HOST_MODE_11AX_HE160:
992 		return WLAN_PHYMODE_11AXA_HE160;
993 	case WMI_HOST_MODE_11AX_HE20_2G:
994 		return WLAN_PHYMODE_11AXG_HE20;
995 	case WMI_HOST_MODE_11AX_HE40_2G:
996 		return WLAN_PHYMODE_11AXG_HE40;
997 	case WMI_HOST_MODE_11AX_HE80_2G:
998 		return WLAN_PHYMODE_11AXG_HE80;
999 	}
1000 	return WLAN_PHYMODE_AUTO;
1001 }
1002 #else
1003 static enum wlan_phymode
1004 wma_fw_to_host_phymode_11ax(WMI_HOST_WLAN_PHY_MODE phymode)
1005 {
1006 	return WLAN_PHYMODE_AUTO;
1007 }
1008 #endif
1009 
1010 #ifdef WLAN_FEATURE_11BE
1011 /**
1012  * wma_fw_to_host_phymode_11be() - convert fw to host phymode for 11be phymodes
1013  * @phymode: phymode to convert
1014  *
1015  * Return: one of the 11be values defined in enum wlan_phymode;
1016  *         or WLAN_PHYMODE_AUTO if the input is not an 11be phymode
1017  */
1018 static enum wlan_phymode
1019 wma_fw_to_host_phymode_11be(WMI_HOST_WLAN_PHY_MODE phymode)
1020 {
1021 	switch (phymode) {
1022 	default:
1023 		return WLAN_PHYMODE_AUTO;
1024 	case WMI_HOST_MODE_11BE_EHT20:
1025 		return WLAN_PHYMODE_11BEA_EHT20;
1026 	case WMI_HOST_MODE_11BE_EHT40:
1027 		return WLAN_PHYMODE_11BEA_EHT40;
1028 	case WMI_HOST_MODE_11BE_EHT80:
1029 		return WLAN_PHYMODE_11BEA_EHT80;
1030 	case WMI_HOST_MODE_11BE_EHT160:
1031 		return WLAN_PHYMODE_11BEA_EHT160;
1032 	case WMI_HOST_MODE_11BE_EHT320:
1033 		return WLAN_PHYMODE_11BEA_EHT320;
1034 	case WMI_HOST_MODE_11BE_EHT20_2G:
1035 		return WLAN_PHYMODE_11BEG_EHT20;
1036 	case WMI_HOST_MODE_11BE_EHT40_2G:
1037 		return WLAN_PHYMODE_11BEG_EHT40;
1038 	}
1039 	return WLAN_PHYMODE_AUTO;
1040 }
1041 
1042 static inline bool wma_is_phymode_eht(enum wlan_phymode phymode)
1043 {
1044 	return IS_WLAN_PHYMODE_EHT(phymode);
1045 }
1046 #else
1047 static enum wlan_phymode
1048 wma_fw_to_host_phymode_11be(WMI_HOST_WLAN_PHY_MODE phymode)
1049 {
1050 	return WLAN_PHYMODE_AUTO;
1051 }
1052 
1053 static inline bool wma_is_phymode_eht(enum wlan_phymode phymode)
1054 {
1055 	return false;
1056 }
1057 #endif
1058 
1059 #ifdef CONFIG_160MHZ_SUPPORT
1060 /**
1061  * wma_fw_to_host_phymode_160() - convert fw to host phymode for 160 mhz
1062  * phymodes
1063  * @phymode: phymode to convert
1064  *
1065  * Return: one of the 160 mhz values defined in enum wlan_phymode;
1066  *         or WLAN_PHYMODE_AUTO if the input is not a 160 mhz phymode
1067  */
1068 static enum wlan_phymode
1069 wma_fw_to_host_phymode_160(WMI_HOST_WLAN_PHY_MODE phymode)
1070 {
1071 	switch (phymode) {
1072 	default:
1073 		return WLAN_PHYMODE_AUTO;
1074 	case WMI_HOST_MODE_11AC_VHT80_80:
1075 		return WLAN_PHYMODE_11AC_VHT80_80;
1076 	case WMI_HOST_MODE_11AC_VHT160:
1077 		return WLAN_PHYMODE_11AC_VHT160;
1078 	}
1079 }
1080 #else
1081 static enum wlan_phymode
1082 wma_fw_to_host_phymode_160(WMI_HOST_WLAN_PHY_MODE phymode)
1083 {
1084 	return WLAN_PHYMODE_AUTO;
1085 }
1086 #endif
1087 
1088 enum wlan_phymode wma_fw_to_host_phymode(WMI_HOST_WLAN_PHY_MODE phymode)
1089 {
1090 	enum wlan_phymode host_phymode;
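
	/*
	 * Modes not listed below are resolved by the feature-specific
	 * helpers: each helper returns WLAN_PHYMODE_AUTO for modes outside
	 * its set, so the default case falls through the 160 MHz, 11ax and
	 * 11be converters in turn.
	 */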
1091 	switch (phymode) {
1092 	default:
1093 		host_phymode = wma_fw_to_host_phymode_160(phymode);
1094 		if (host_phymode != WLAN_PHYMODE_AUTO)
1095 			return host_phymode;
1096 		host_phymode = wma_fw_to_host_phymode_11ax(phymode);
1097 		if (host_phymode != WLAN_PHYMODE_AUTO)
1098 			return host_phymode;
1099 		return wma_fw_to_host_phymode_11be(phymode);
1100 	case WMI_HOST_MODE_11A:
1101 		return WLAN_PHYMODE_11A;
1102 	case WMI_HOST_MODE_11G:
1103 		return WLAN_PHYMODE_11G;
1104 	case WMI_HOST_MODE_11B:
1105 		return WLAN_PHYMODE_11B;
1106 	case WMI_HOST_MODE_11GONLY:
1107 		return WLAN_PHYMODE_11G_ONLY;
1108 	case WMI_HOST_MODE_11NA_HT20:
1109 		return WLAN_PHYMODE_11NA_HT20;
1110 	case WMI_HOST_MODE_11NG_HT20:
1111 		return WLAN_PHYMODE_11NG_HT20;
1112 	case WMI_HOST_MODE_11NA_HT40:
1113 		return WLAN_PHYMODE_11NA_HT40;
1114 	case WMI_HOST_MODE_11NG_HT40:
1115 		return WLAN_PHYMODE_11NG_HT40;
1116 	case WMI_HOST_MODE_11AC_VHT20:
1117 		return WLAN_PHYMODE_11AC_VHT20;
1118 	case WMI_HOST_MODE_11AC_VHT40:
1119 		return WLAN_PHYMODE_11AC_VHT40;
1120 	case WMI_HOST_MODE_11AC_VHT80:
1121 		return WLAN_PHYMODE_11AC_VHT80;
1122 	case WMI_HOST_MODE_11AC_VHT20_2G:
1123 		return WLAN_PHYMODE_11AC_VHT20_2G;
1124 	case WMI_HOST_MODE_11AC_VHT40_2G:
1125 		return WLAN_PHYMODE_11AC_VHT40_2G;
1126 	case WMI_HOST_MODE_11AC_VHT80_2G:
1127 		return WLAN_PHYMODE_11AC_VHT80_2G;
1128 	}
1129 }
1130 
1131 #ifdef WLAN_FEATURE_11BE
1132 static void wma_populate_peer_puncture(struct peer_assoc_params *peer,
1133 				       struct wlan_channel *des_chan)
1134 {
1135 	peer->puncture_bitmap = des_chan->puncture_bitmap;
1136 	wma_debug("Peer EHT puncture bitmap %d", peer->puncture_bitmap);
1137 }
1138 
1139 static void wma_populate_peer_mlo_cap(struct peer_assoc_params *peer,
1140 				      tpAddStaParams params)
1141 {
1142 	struct peer_assoc_ml_partner_links *ml_links;
1143 	struct peer_assoc_mlo_params *mlo_params;
1144 	struct peer_ml_info *ml_info;
1145 	uint8_t i;
1146 
1147 	ml_info = &params->ml_info;
1148 	mlo_params = &peer->mlo_params;
1149 	ml_links = &peer->ml_links;
1150 
1151 	/* Assoc link info */
1152 	mlo_params->vdev_id = ml_info->vdev_id;
1153 	mlo_params->ieee_link_id = ml_info->link_id;
1154 	qdf_mem_copy(&mlo_params->chan, &ml_info->channel_info,
1155 		     sizeof(struct wlan_channel));
1156 	qdf_mem_copy(&mlo_params->bssid, &ml_info->link_addr,
1157 		     QDF_MAC_ADDR_SIZE);
1158 	qdf_mem_copy(&mlo_params->mac_addr, &ml_info->self_mac_addr,
1159 		     QDF_MAC_ADDR_SIZE);
1160 
1161 	mlo_params->rec_max_simultaneous_links =
1162 		ml_info->rec_max_simultaneous_links;
1163 
1164 	/* Fill partner link info */
1165 	ml_links->num_links = ml_info->num_links;
1166 	for (i = 0; i < ml_links->num_links; i++) {
1167 		ml_links->partner_info[i].vdev_id =
1168 					ml_info->partner_info[i].vdev_id;
1169 		ml_links->partner_info[i].link_id =
1170 					ml_info->partner_info[i].link_id;
1171 		qdf_mem_copy(&ml_links->partner_info[i].chan,
1172 			     &ml_info->partner_info[i].channel_info,
1173 			     sizeof(struct wlan_channel));
1174 		qdf_mem_copy(&ml_links->partner_info[i].bssid,
1175 			     &ml_info->partner_info[i].link_addr,
1176 			     QDF_MAC_ADDR_SIZE);
1177 		qdf_mem_copy(&ml_links->partner_info[i].mac_addr,
1178 			     &ml_info->partner_info[i].self_mac_addr,
1179 			     QDF_MAC_ADDR_SIZE);
1180 	}
1181 }
1182 #else
1183 static void wma_populate_peer_puncture(struct peer_assoc_params *peer,
1184 				       struct wlan_channel *des_chan)
1185 {
1186 }
1187 
1188 static void wma_populate_peer_mlo_cap(struct peer_assoc_params *peer,
1189 				      tpAddStaParams params)
1190 {
1191 }
1192 #endif
1193 
1194 void wma_objmgr_set_peer_mlme_nss(tp_wma_handle wma, uint8_t *mac_addr,
1195 				  uint8_t nss)
1196 {
1197 	uint8_t pdev_id;
1198 	struct wlan_objmgr_peer *peer;
1199 	struct peer_mlme_priv_obj *peer_priv;
1200 	struct wlan_objmgr_psoc *psoc = wma->psoc;
1201 
1202 	pdev_id = wlan_objmgr_pdev_get_pdev_id(wma->pdev);
1203 	peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr,
1204 				    WLAN_LEGACY_WMA_ID);
1205 	if (!peer)
1206 		return;
1207 
1208 	peer_priv = wlan_objmgr_peer_get_comp_private_obj(peer,
1209 							  WLAN_UMAC_COMP_MLME);
1210 	if (!peer_priv) {
1211 		wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
1212 		return;
1213 	}
1214 
1215 	peer_priv->nss = nss;
1216 	wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
1217 }
1218 
1219 uint8_t wma_objmgr_get_peer_mlme_nss(tp_wma_handle wma, uint8_t *mac_addr)
1220 {
1221 	uint8_t pdev_id;
1222 	struct wlan_objmgr_peer *peer;
1223 	struct peer_mlme_priv_obj *peer_priv;
1224 	struct wlan_objmgr_psoc *psoc = wma->psoc;
1225 	uint8_t nss;
1226 
1227 	pdev_id = wlan_objmgr_pdev_get_pdev_id(wma->pdev);
1228 	peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr,
1229 				    WLAN_LEGACY_WMA_ID);
1230 	if (!peer)
1231 		return 0;
1232 
1233 	peer_priv = wlan_objmgr_peer_get_comp_private_obj(peer,
1234 							  WLAN_UMAC_COMP_MLME);
1235 	if (!peer_priv) {
1236 		wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
1237 		return 0;
1238 	}
1239 
1240 	nss = peer_priv->nss;
1241 	wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
1242 	return nss;
1243 }
1244 
1245 void wma_objmgr_set_peer_mlme_phymode(tp_wma_handle wma, uint8_t *mac_addr,
1246 				      enum wlan_phymode phymode)
1247 {
1248 	uint8_t pdev_id;
1249 	struct wlan_objmgr_peer *peer;
1250 	struct wlan_objmgr_psoc *psoc = wma->psoc;
1251 
1252 	pdev_id = wlan_objmgr_pdev_get_pdev_id(wma->pdev);
1253 	peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr,
1254 				    WLAN_LEGACY_WMA_ID);
1255 	if (!peer)
1256 		return;
1257 
1258 	wlan_peer_obj_lock(peer);
1259 	wlan_peer_set_phymode(peer, phymode);
1260 	wlan_peer_obj_unlock(peer);
1261 	wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
1262 }
1263 
1264 /**
1265  * wma_objmgr_set_peer_mlme_type() - set peer type to peer object
1266  * @wma:      wma handle
1267  * @mac_addr: mac addr of peer
1268  * @peer_type:  peer type value to set
1269  *
1270  * Return: None
1271  */
1272 static void wma_objmgr_set_peer_mlme_type(tp_wma_handle wma,
1273 					  uint8_t *mac_addr,
1274 					  enum wlan_peer_type peer_type)
1275 {
1276 	uint8_t pdev_id;
1277 	struct wlan_objmgr_peer *peer;
1278 	struct wlan_objmgr_psoc *psoc = wma->psoc;
1279 
1280 	pdev_id = wlan_objmgr_pdev_get_pdev_id(wma->pdev);
1281 	peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr,
1282 				    WLAN_LEGACY_WMA_ID);
1283 	if (!peer)
1284 		return;
1285 
1286 	wlan_peer_obj_lock(peer);
1287 	wlan_peer_set_peer_type(peer, peer_type);
1288 	wlan_peer_obj_unlock(peer);
1289 	wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
1290 }
1291 
1292 #ifdef WLAN_FEATURE_11BE_MLO
1293 
1294 #define MIN_TIMEOUT_VAL 0
1295 #define MAX_TIMEOUT_VAL 11
1296 
1297 #define TIMEOUT_TO_US 6
1298 
1299 /**
1300  * wma_convert_trans_timeout_us() - convert the EMLSR transition timeout
1301  * to microseconds. Refer to Table 9-401h of the IEEE 802.11be
1302  * specification.
1303  * @timeout: EMLSR transition timeout
1304  *
1305  * Return: Timeout value in microseconds
1306  */
1307 static inline uint32_t
1308 wma_convert_trans_timeout_us(uint16_t timeout)
1309 {
1310 	uint32_t us = 0;
1311 
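	/*
	 * Encoded values 1..10 map to 128 us .. 65536 us
	 * (us = 1 << (timeout + 6)); 0 and out-of-range values yield 0 us.
	 */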
1312 	if (timeout > MIN_TIMEOUT_VAL && timeout < MAX_TIMEOUT_VAL) {
1313 		/* timeout = 1 is for 128 us */
1314 		us = (1 << (timeout + TIMEOUT_TO_US));
1315 	}
1316 
1317 	return us;
1318 }
1319 
1320 /**
1321  * wma_set_mlo_capability() - set MLO caps to the peer assoc request
1322  * @wma: wma handle
1323  * @vdev: vdev object
1324  * @params: Add sta params
1325  * @req: peer assoc request parameters
1326  *
1327  * Return: None
1328  */
1329 static void wma_set_mlo_capability(tp_wma_handle wma,
1330 				   struct wlan_objmgr_vdev *vdev,
1331 				   tpAddStaParams params,
1332 				   struct peer_assoc_params *req)
1333 {
1334 	uint8_t pdev_id;
1335 	struct wlan_objmgr_peer *peer;
1336 	struct wlan_objmgr_psoc *psoc = wma->psoc;
1337 	uint16_t link_id_bitmap;
1338 
1339 	pdev_id = wlan_objmgr_pdev_get_pdev_id(wma->pdev);
1340 	peer = wlan_objmgr_get_peer(psoc, pdev_id, req->peer_mac,
1341 				    WLAN_LEGACY_WMA_ID);
1342 
1343 	if (!peer) {
1344 		wma_err("peer not valid");
1345 		return;
1346 	}
1347 
1348 	if (!qdf_is_macaddr_zero((struct qdf_mac_addr *)peer->mldaddr)) {
1349 		req->mlo_params.mlo_enabled = true;
1350 		req->mlo_params.mlo_assoc_link =
1351 					wlan_peer_mlme_is_assoc_peer(peer);
1352 		WLAN_ADDR_COPY(req->mlo_params.mld_mac, peer->mldaddr);
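		/*
		 * Force this link inactive at peer assoc time when policy
		 * manager requires the ML link vdev to be disabled or when
		 * eMLSR STA concurrency is already present, and record it in
		 * the force-inactive link bitmap.
		 */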
1353 		if (policy_mgr_ml_link_vdev_need_to_be_disabled(psoc, vdev,
1354 								true) ||
1355 		    policy_mgr_is_emlsr_sta_concurrency_present(psoc)) {
1356 			req->mlo_params.mlo_force_link_inactive = 1;
1357 			link_id_bitmap = 1 << params->link_id;
1358 			ml_nlink_set_curr_force_inactive_state(
1359 					psoc, vdev, link_id_bitmap, LINK_ADD);
1360 		}
1361 		wma_debug("assoc_link %d" QDF_MAC_ADDR_FMT ", force inactive %d link id %d",
1362 			  req->mlo_params.mlo_assoc_link,
1363 			  QDF_MAC_ADDR_REF(peer->mldaddr),
1364 			  req->mlo_params.mlo_force_link_inactive,
1365 			  params->link_id);
1366 
1367 		req->mlo_params.emlsr_support = params->emlsr_support;
1368 		req->mlo_params.ieee_link_id = params->link_id;
1369 		if (req->mlo_params.emlsr_support) {
1370 			req->mlo_params.trans_timeout_us =
1371 			wma_convert_trans_timeout_us(params->emlsr_trans_timeout);
1372 		}
1373 		req->mlo_params.msd_cap_support = params->msd_caps_present;
1374 		req->mlo_params.medium_sync_duration =
1375 				params->msd_caps.med_sync_duration;
1376 		req->mlo_params.medium_sync_ofdm_ed_thresh =
1377 				params->msd_caps.med_sync_ofdm_ed_thresh;
1378 		req->mlo_params.medium_sync_max_txop_num =
1379 				params->msd_caps.med_sync_max_txop_num;
1380 		req->mlo_params.link_switch_in_progress =
1381 			wlan_vdev_mlme_is_mlo_link_switch_in_progress(vdev);
1382 	} else {
1383 		wma_debug("Peer MLO context is NULL");
1384 		req->mlo_params.mlo_enabled = false;
1385 		req->mlo_params.emlsr_support = false;
1386 	}
1387 	wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
1388 }
1389 
1390 static void wma_set_mlo_assoc_vdev(struct wlan_objmgr_vdev *vdev,
1391 				   struct peer_assoc_params *req)
1392 {
1393 	if (wlan_vdev_mlme_is_mlo_vdev(vdev) &&
1394 	    !wlan_vdev_mlme_is_mlo_link_vdev(vdev))
1395 		req->is_assoc_vdev = true;
1396 }
1397 #else
1398 static inline void wma_set_mlo_capability(tp_wma_handle wma,
1399 					  struct wlan_objmgr_vdev *vdev,
1400 					  tpAddStaParams params,
1401 					  struct peer_assoc_params *req)
1402 {
1403 }
1404 
1405 static inline void wma_set_mlo_assoc_vdev(struct wlan_objmgr_vdev *vdev,
1406 					  struct peer_assoc_params *req)
1407 {
1408 }
1409 #endif
1410 
1411 /**
1412  * wma_send_peer_assoc() - send peer assoc command to fw
1413  * @wma: wma handle
1414  * @nw_type: nw type
1415  * @params: add sta params
1416  *
1417  * This function sends the peer assoc command to firmware with the
1418  * appropriate parameters.
1419  *
1420  * Return: QDF_STATUS
1421  */
1422 QDF_STATUS wma_send_peer_assoc(tp_wma_handle wma,
1423 				    tSirNwType nw_type,
1424 				    tpAddStaParams params)
1425 {
1426 	struct peer_assoc_params *cmd;
1427 	int32_t ret, max_rates, i;
1428 	uint8_t *rate_pos;
1429 	wmi_rate_set peer_legacy_rates, peer_ht_rates;
1430 	uint32_t num_peer_11b_rates = 0;
1431 	uint32_t num_peer_11a_rates = 0;
1432 	enum wlan_phymode phymode, vdev_phymode;
1433 	uint32_t peer_nss = 1;
1434 	struct wma_txrx_node *intr = NULL;
1435 	bool is_he;
1436 	bool is_eht;
1437 	QDF_STATUS status;
1438 	struct mac_context *mac = wma->mac_context;
1439 	struct wlan_channel *des_chan;
1440 	int32_t keymgmt, uccipher, authmode;
1441 
1442 	cmd = qdf_mem_malloc(sizeof(struct peer_assoc_params));
1443 	if (!cmd) {
1444 		wma_err("Failed to allocate peer_assoc_params param");
1445 		return QDF_STATUS_E_NOMEM;
1446 	}
1447 
1448 	intr = &wma->interfaces[params->smesessionId];
1449 
1450 	wma_mask_tx_ht_rate(wma, params->supportedRates.supportedMCSSet);
1451 
1452 	qdf_mem_zero(&peer_legacy_rates, sizeof(wmi_rate_set));
1453 	qdf_mem_zero(&peer_ht_rates, sizeof(wmi_rate_set));
1454 	qdf_mem_zero(cmd, sizeof(struct peer_assoc_params));
1455 
1456 	is_he = wma_is_peer_he_capable(params);
1457 	is_eht = wma_is_peer_eht_capable(params);
1458 	if ((params->ch_width > CH_WIDTH_40MHZ) &&
1459 	    ((nw_type == eSIR_11G_NW_TYPE) ||
1460 	     (nw_type == eSIR_11B_NW_TYPE))) {
1461 		wma_err("ch_width %d sent in 11G, configure to 40MHz",
1462 			params->ch_width);
1463 		params->ch_width = CH_WIDTH_40MHZ;
1464 	}
1465 	phymode = wma_peer_phymode(nw_type, params->staType,
1466 				   params->htCapable, params->ch_width,
1467 				   params->vhtCapable, is_he, is_eht);
1468 
1469 	des_chan = wlan_vdev_mlme_get_des_chan(intr->vdev);
1470 	vdev_phymode = des_chan->ch_phymode;
1471 	if ((intr->type == WMI_VDEV_TYPE_AP) && (phymode > vdev_phymode)) {
1472 		wma_nofl_debug("Peer phymode %d is not allowed. Set it equal to sap/go phymode %d",
1473 			       phymode, vdev_phymode);
1474 		phymode = vdev_phymode;
1475 	}
1476 
1477 	if (!mac->mlme_cfg->rates.disable_abg_rate_txdata &&
1478 	    !WLAN_REG_IS_6GHZ_CHAN_FREQ(des_chan->ch_freq)) {
1479 		/* Legacy Rateset */
1480 		rate_pos = (uint8_t *) peer_legacy_rates.rates;
1481 		for (i = 0; i < SIR_NUM_11B_RATES; i++) {
1482 			if (!params->supportedRates.llbRates[i])
1483 				continue;
1484 			rate_pos[peer_legacy_rates.num_rates++] =
1485 				params->supportedRates.llbRates[i];
1486 			num_peer_11b_rates++;
1487 		}
1488 		for (i = 0; i < SIR_NUM_11A_RATES; i++) {
1489 			if (!params->supportedRates.llaRates[i])
1490 				continue;
1491 			rate_pos[peer_legacy_rates.num_rates++] =
1492 				params->supportedRates.llaRates[i];
1493 			num_peer_11a_rates++;
1494 		}
1495 	}
1496 
1497 	if ((phymode == WLAN_PHYMODE_11A && num_peer_11a_rates == 0) ||
1498 	    (phymode == WLAN_PHYMODE_11B && num_peer_11b_rates == 0)) {
1499 		wma_warn("Invalid phy rates. phymode 0x%x, 11b_rates %d, 11a_rates %d",
1500 			phymode, num_peer_11b_rates,
1501 			num_peer_11a_rates);
1502 		qdf_mem_free(cmd);
1503 		return QDF_STATUS_E_INVAL;
1504 	}
1505 
1506 	/* HT Rateset */
1507 	max_rates = sizeof(peer_ht_rates.rates) /
1508 		    sizeof(peer_ht_rates.rates[0]);
1509 	rate_pos = (uint8_t *) peer_ht_rates.rates;
1510 	for (i = 0; i < MAX_SUPPORTED_RATES; i++) {
1511 		if (params->supportedRates.supportedMCSSet[i / 8] &
1512 		    (1 << (i % 8))) {
1513 			rate_pos[peer_ht_rates.num_rates++] = i;
1514 			if (i >= 8) {
1515 				/* MCS8 or higher rate is present, must be 2x2 */
1516 				peer_nss = 2;
1517 			}
1518 		}
1519 		if (peer_ht_rates.num_rates == max_rates)
1520 			break;
1521 	}
1522 
1523 	if (params->htCapable && !peer_ht_rates.num_rates) {
1524 		uint8_t temp_ni_rates[8] = { 0x0, 0x1, 0x2, 0x3,
1525 					     0x4, 0x5, 0x6, 0x7};
1526 		/*
1527 		 * Workaround for EV 116382: The peer is marked HT capable but
1528 		 * its supported rx mcs set is 0. The 11n spec mandates MCS0-7
1529 		 * for a HT STA. So forcing the supported rx mcs rate to
1530 		 * MCS 0-7. This workaround will be removed once we get
1531 		 * clarification from WFA regarding this STA behavior.
1532 		 */
1533 
1534 		/* TODO: Do we really need this? */
1535 		wma_warn("Peer is marked as HT capable but supported mcs rate is 0");
1536 		peer_ht_rates.num_rates = sizeof(temp_ni_rates);
1537 		qdf_mem_copy((uint8_t *) peer_ht_rates.rates, temp_ni_rates,
1538 			     peer_ht_rates.num_rates);
1539 	}
1540 
1541 	/* In AP mode and for a TDLS peer, use the MAC address of the remote
1542 	 * peer as the new peer address; in STA mode, use the BSSID as the
1543 	 * new peer address
1544 	 */
1545 	if ((wma_is_vdev_in_ap_mode(wma, params->smesessionId))
1546 #ifdef FEATURE_WLAN_TDLS
1547 	    || (STA_ENTRY_TDLS_PEER == params->staType)
1548 #endif /* FEATURE_WLAN_TDLS */
1549 	    ) {
1550 		qdf_mem_copy(cmd->peer_mac, params->staMac,
1551 						sizeof(cmd->peer_mac));
1552 	} else {
1553 		qdf_mem_copy(cmd->peer_mac, params->bssId,
1554 						sizeof(cmd->peer_mac));
1555 	}
1556 	wma_objmgr_set_peer_mlme_phymode(wma, cmd->peer_mac, phymode);
1557 
1558 	cmd->vdev_id = params->smesessionId;
1559 	cmd->peer_new_assoc = 1;
1560 	cmd->peer_associd = params->assocId;
1561 
1562 	cmd->is_wme_set = 1;
1563 
1564 	if (params->wmmEnabled)
1565 		cmd->qos_flag = 1;
1566 
1567 	if (params->uAPSD) {
1568 		cmd->apsd_flag = 1;
1569 		wma_nofl_debug("Set WMI_PEER_APSD: uapsd Mask %d",
1570 			       params->uAPSD);
1571 	}
1572 
1573 	if (params->htCapable) {
1574 		cmd->ht_flag = 1;
1575 		cmd->qos_flag = 1;
1576 		cmd->peer_rate_caps |= WMI_RC_HT_FLAG;
1577 	}
1578 
1579 	if (params->vhtCapable) {
1580 		cmd->ht_flag = 1;
1581 		cmd->qos_flag = 1;
1582 		cmd->vht_flag = 1;
1583 		cmd->peer_rate_caps |= WMI_RC_HT_FLAG;
1584 	}
1585 
1586 	if (params->ch_width) {
1587 		cmd->peer_rate_caps |= WMI_RC_CW40_FLAG;
1588 		if (params->fShortGI40Mhz)
1589 			cmd->peer_rate_caps |= WMI_RC_SGI_FLAG;
1590 	} else if (params->fShortGI20Mhz) {
1591 		cmd->peer_rate_caps |= WMI_RC_SGI_FLAG;
1592 	}
1593 
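	/*
	 * The deliberate fallthroughs below make each wider channel width
	 * also set the flags of all narrower widths.
	 */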
1594 	switch (params->ch_width) {
1595 	case CH_WIDTH_320MHZ:
1596 		wma_set_peer_assoc_params_bw_320(cmd, params->ch_width);
1597 		fallthrough;
1598 	case CH_WIDTH_80P80MHZ:
1599 	case CH_WIDTH_160MHZ:
1600 		cmd->bw_160 = 1;
1601 		fallthrough;
1602 	case CH_WIDTH_80MHZ:
1603 		cmd->bw_80 = 1;
1604 		fallthrough;
1605 	case CH_WIDTH_40MHZ:
1606 		cmd->bw_40 = 1;
1607 		fallthrough;
1608 	default:
1609 		break;
1610 	}
1611 
1612 	cmd->peer_vht_caps = params->vht_caps;
1613 	if (params->p2pCapableSta) {
1614 		cmd->p2p_capable_sta = 1;
1615 		wma_objmgr_set_peer_mlme_type(wma, params->staMac,
1616 					      WLAN_PEER_P2P_CLI);
1617 	}
1618 
1619 	if (params->rmfEnabled)
1620 		cmd->is_pmf_enabled = 1;
1621 
1622 	if (params->stbc_capable)
1623 		cmd->stbc_flag = 1;
1624 
1625 	if (params->htLdpcCapable || params->vhtLdpcCapable)
1626 		cmd->ldpc_flag = 1;
1627 
1628 	switch (params->mimoPS) {
1629 	case eSIR_HT_MIMO_PS_STATIC:
1630 		cmd->static_mimops_flag = 1;
1631 		break;
1632 	case eSIR_HT_MIMO_PS_DYNAMIC:
1633 		cmd->dynamic_mimops_flag = 1;
1634 		break;
1635 	case eSIR_HT_MIMO_PS_NO_LIMIT:
1636 		cmd->spatial_mux_flag = 1;
1637 		break;
1638 	default:
1639 		break;
1640 	}
1641 
1642 	wma_set_twt_peer_caps(params, cmd);
1643 #ifdef FEATURE_WLAN_TDLS
1644 	if (STA_ENTRY_TDLS_PEER == params->staType)
1645 		cmd->auth_flag = 1;
1646 #endif /* FEATURE_WLAN_TDLS */
1647 
1648 	if (params->wpa_rsn
1649 #ifdef FEATURE_WLAN_WAPI
1650 	    || params->encryptType == eSIR_ED_WPI
1651 #endif /* FEATURE_WLAN_WAPI */
1652 	    ) {
1653 		if (!params->no_ptk_4_way) {
1654 			cmd->need_ptk_4_way = 1;
1655 			wlan_acquire_peer_key_wakelock(wma->pdev,
1656 						       cmd->peer_mac);
1657 		}
1658 	}
1659 
1660 	if (params->wpa_rsn >> 1)
1661 		cmd->need_gtk_2_way = 1;
1662 
1663 #ifdef FEATURE_WLAN_WAPI
1664 	if (params->encryptType == eSIR_ED_WPI) {
1665 		ret = wma_vdev_set_param(wma->wmi_handle, params->smesessionId,
1666 				      wmi_vdev_param_drop_unencry, false);
1667 		if (ret) {
1668 			wma_err("Set wmi_vdev_param_drop_unencry Param status:%d",
1669 				ret);
1670 			qdf_mem_free(cmd);
1671 			return ret;
1672 		}
1673 	}
1674 #endif /* FEATURE_WLAN_WAPI */
1675 
1676 	cmd->peer_caps = params->capab_info;
1677 	cmd->peer_listen_intval = params->listenInterval;
1678 	cmd->peer_ht_caps = params->ht_caps;
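	/*
	 * Max RX A-MPDU length in octets, derived from the peer's A-MPDU
	 * length exponent: 2^(IEEE80211_HTCAP_MAXRXAMPDU_FACTOR + exp) - 1.
	 */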
1679 	cmd->peer_max_mpdu = (1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR +
1680 				    params->maxAmpduSize)) - 1;
1681 	cmd->peer_mpdu_density = wma_parse_mpdudensity(params->maxAmpduDensity);
1682 
1683 	if (params->supportedRates.supportedMCSSet[1] &&
1684 	    params->supportedRates.supportedMCSSet[2])
1685 		cmd->peer_rate_caps |= WMI_RC_TS_FLAG;
1686 	else if (params->supportedRates.supportedMCSSet[1])
1687 		cmd->peer_rate_caps |= WMI_RC_DS_FLAG;
1688 
1689 	/* Update peer legacy rate information */
1690 	cmd->peer_legacy_rates.num_rates = peer_legacy_rates.num_rates;
1691 	qdf_mem_copy(cmd->peer_legacy_rates.rates, peer_legacy_rates.rates,
1692 		     peer_legacy_rates.num_rates);
1693 
1694 	/* Update peer HT rate information */
1695 	cmd->peer_ht_rates.num_rates = peer_ht_rates.num_rates;
1696 	qdf_mem_copy(cmd->peer_ht_rates.rates, peer_ht_rates.rates,
1697 				 peer_ht_rates.num_rates);
1698 
1699 	/* VHT Rates */
1700 
1701 	cmd->peer_nss = peer_nss;
1702 	/*
1703 	 * Because of DBS, a vdev may come up on either of the two MACs, each
1704 	 * with different capabilities. The STBC capability should be fetched
1705 	 * for the given hw_mode->MAC_id combination. Firmware is expected to
1706 	 * provide these device capabilities eventually; for now the number of
1707 	 * tx streams is used to decide whether Tx STBC needs to be disabled.
1708 	 */
1709 	if (intr->tx_streams < 2) {
1710 		cmd->peer_vht_caps &= ~(1 << SIR_MAC_VHT_CAP_TXSTBC);
1711 		wma_nofl_debug("Num tx_streams: %d, Disabled txSTBC",
1712 			       intr->tx_streams);
1713 	}
1714 
1715 	cmd->vht_capable = params->vhtCapable;
1716 	if (params->vhtCapable) {
1717 #define VHT2x2MCSMASK 0xc
1718 		cmd->rx_max_rate = params->supportedRates.vhtRxHighestDataRate;
1719 		cmd->rx_mcs_set = params->supportedRates.vhtRxMCSMap;
1720 		cmd->tx_max_rate = params->supportedRates.vhtTxHighestDataRate;
1721 		cmd->tx_mcs_set = params->supportedRates.vhtTxMCSMap;
1722 		/*
1723 		 * tx_mcs_set is the intersection of the self tx NSS and the peer rx MCS map
1724 		 */
1725 		if (params->vhtSupportedRxNss)
1726 			cmd->peer_nss = params->vhtSupportedRxNss;
1727 		else
1728 			cmd->peer_nss = ((cmd->tx_mcs_set & VHT2x2MCSMASK)
1729 					== VHT2x2MCSMASK) ? 1 : 2;
1730 
1731 		if (params->vht_mcs_10_11_supp) {
1732 			WMI_SET_BITS(cmd->tx_mcs_set, 16, cmd->peer_nss,
1733 				     ((1 << cmd->peer_nss) - 1));
1734 			WMI_VHT_MCS_NOTIFY_EXT_SS_SET(cmd->tx_mcs_set, 1);
1735 		}
1736 		if (params->vht_extended_nss_bw_cap &&
1737 		    (params->vht_160mhz_nss || params->vht_80p80mhz_nss)) {
1738 			/*
1739 			 * bit[2:0] : Represents value of Rx NSS for 160 MHz
1740 			 * bit[5:3] : Represents value of Rx NSS for 80_80 MHz
1741 			 *             Extended NSS support
1742 			 * bit[30:6]: Reserved
1743 			 * bit[31]  : MSB(0/1): 1 in case of valid data
1744 			 */
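			/*
			 * For example, a peer with 2 Rx NSS at 160 MHz and no
			 * 80+80 support would encode (2 - 1) in bits [2:0] and
			 * set bit 31, i.e. peer_bw_rxnss_override = 0x80000001.
			 */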
1745 			cmd->peer_bw_rxnss_override |= (1 << 31);
1746 			if (params->vht_160mhz_nss)
1747 				cmd->peer_bw_rxnss_override |=
1748 					(params->vht_160mhz_nss - 1);
1749 			if (params->vht_80p80mhz_nss)
1750 				cmd->peer_bw_rxnss_override |=
1751 					((params->vht_80p80mhz_nss - 1) << 3);
1752 			wma_debug("peer_bw_rxnss_override %0X",
1753 				  cmd->peer_bw_rxnss_override);
1754 		}
1755 	}
1756 
1757 	wma_set_mlo_capability(wma, intr->vdev, params, cmd);
1758 
1759 	wma_set_mlo_assoc_vdev(intr->vdev, cmd);
1760 
1761 	wma_debug("rx_max_rate %d, rx_mcs %x, tx_max_rate %d, tx_mcs: %x num rates %d need 4 way %d",
1762 		  cmd->rx_max_rate, cmd->rx_mcs_set, cmd->tx_max_rate,
1763 		  cmd->tx_mcs_set, peer_ht_rates.num_rates,
1764 		  cmd->need_ptk_4_way);
1765 
1766 	/*
1767 	 * Limit nss to max number of rf chain supported by target
1768 	 * Otherwise Fw will crash
1769 	 */
1770 	if (cmd->peer_nss > WMA_MAX_NSS) {
1771 		wma_err("peer Nss %d is more than supported", cmd->peer_nss);
1772 		cmd->peer_nss = WMA_MAX_NSS;
1773 	}
1774 
1775 	wma_populate_peer_he_cap(cmd, params);
1776 	wma_populate_peer_eht_cap(cmd, params);
1777 	wma_populate_peer_puncture(cmd, des_chan);
1778 	wma_populate_peer_mlo_cap(cmd, params);
1779 	if (!wma_is_vdev_in_ap_mode(wma, params->smesessionId))
1780 		intr->nss = cmd->peer_nss;
1781 	wma_objmgr_set_peer_mlme_nss(wma, cmd->peer_mac, cmd->peer_nss);
1782 
1783 	/* Until the conversion is done in WMI, we need to fill the fw phy mode */
1784 	cmd->peer_phymode = wmi_host_to_fw_phymode(phymode);
1785 
1786 	keymgmt = wlan_crypto_get_param(intr->vdev, WLAN_CRYPTO_PARAM_KEY_MGMT);
1787 	authmode = wlan_crypto_get_param(intr->vdev,
1788 					 WLAN_CRYPTO_PARAM_AUTH_MODE);
1789 	uccipher = wlan_crypto_get_param(intr->vdev,
1790 					 WLAN_CRYPTO_PARAM_UCAST_CIPHER);
1791 
1792 	cmd->akm = cm_crypto_authmode_to_wmi_authmode(authmode,
1793 						      keymgmt,
1794 						      uccipher);
1795 
1796 	status = wmi_unified_peer_assoc_send(wma->wmi_handle,
1797 					 cmd);
1798 	if (QDF_IS_STATUS_ERROR(status))
1799 		wma_alert("Failed to send peer assoc command status = %d",
1800 			 status);
1801 	qdf_mem_free(cmd);
1802 
1803 	return status;
1804 }
1805 
1806 /**
1807  * wmi_unified_vdev_set_gtx_cfg_send() - set GTX params
1808  * @wmi_handle: wmi handle
1809  * @if_id: vdev id
1810  * @gtx_info: GTX config params
1811  *
1812  * This function sets GTX-related parameters in firmware.
1813  *
1814  * Return: 0 for success or error code
1815  */
1816 QDF_STATUS wmi_unified_vdev_set_gtx_cfg_send(wmi_unified_t wmi_handle,
1817 				  uint32_t if_id,
1818 				  gtx_config_t *gtx_info)
1819 {
1820 	struct wmi_gtx_config params;
1821 
1822 	params.gtx_rt_mask[0] = gtx_info->gtxRTMask[0];
1823 	params.gtx_rt_mask[1] = gtx_info->gtxRTMask[1];
1824 	params.gtx_usrcfg = gtx_info->gtxUsrcfg;
1825 	params.gtx_threshold = gtx_info->gtxPERThreshold;
1826 	params.gtx_margin = gtx_info->gtxPERMargin;
1827 	params.gtx_tpcstep = gtx_info->gtxTPCstep;
1828 	params.gtx_tpcmin = gtx_info->gtxTPCMin;
1829 	params.gtx_bwmask = gtx_info->gtxBWMask;
1830 
1831 	return wmi_unified_vdev_set_gtx_cfg_cmd(wmi_handle,
1832 						if_id, &params);
1833 
1834 }
1835 
1836 /**
1837  * wma_update_protection_mode() - update protection mode
1838  * @wma: wma handle
1839  * @vdev_id: vdev id
1840  * @llbcoexist: protection mode info
1841  *
1842  * This function sets the protection mode (RTS/CTS) in firmware for the given vdev id.
1843  *
1844  * Return: none
1845  */
1846 void wma_update_protection_mode(tp_wma_handle wma, uint8_t vdev_id,
1847 			   uint8_t llbcoexist)
1848 {
1849 	QDF_STATUS ret;
1850 	enum ieee80211_protmode prot_mode;
1851 
1852 	prot_mode = llbcoexist ? IEEE80211_PROT_CTSONLY : IEEE80211_PROT_NONE;
1853 
1854 	ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
1855 					      wmi_vdev_param_protection_mode,
1856 					      prot_mode);
1857 
1858 	if (QDF_IS_STATUS_ERROR(ret))
1859 		wma_err("Failed to send wmi protection mode cmd");
1860 	else
1861 		wma_nofl_debug("Updated protection mode %d to target",
1862 			       prot_mode);
1863 }
1864 
1865 void
1866 wma_update_beacon_interval(tp_wma_handle wma, uint8_t vdev_id,
1867 			   uint16_t beaconInterval)
1868 {
1869 	QDF_STATUS ret;
1870 
1871 	ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
1872 					      wmi_vdev_param_beacon_interval,
1873 					      beaconInterval);
1874 
1875 	if (QDF_IS_STATUS_ERROR(ret))
1876 		wma_err("Failed to update beacon interval");
1877 	else
1878 		wma_info("Updated beacon interval %d for vdev %d",
1879 			 beaconInterval, vdev_id);
1880 }
1881 
1882 #ifdef WLAN_FEATURE_11AX_BSS_COLOR
1883 /**
1884  * wma_update_bss_color() - update beacon bss color in fw
1885  * @wma: wma handle
1886  * @vdev_id: vdev id
1887  * @bcn_params: beacon parameters; only the bss_color and
1888  * bss_color_disabled fields are used
1889  *
1890  * Return: none
1891  */
1892 static void
1893 wma_update_bss_color(tp_wma_handle wma, uint8_t vdev_id,
1894 		     tUpdateBeaconParams *bcn_params)
1895 {
1896 	QDF_STATUS ret;
1897 	uint32_t dword_he_ops = 0;
1898 
1899 	WMI_HEOPS_COLOR_SET(dword_he_ops, bcn_params->bss_color);
1900 	WMI_HEOPS_BSSCOLORDISABLE_SET(dword_he_ops,
1901 				bcn_params->bss_color_disabled);
1902 	wma_nofl_debug("vdev: %d, update bss color, HE_OPS: 0x%x",
1903 		       vdev_id, dword_he_ops);
1904 	ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
1905 			      wmi_vdev_param_he_bss_color, dword_he_ops);
1906 	if (QDF_IS_STATUS_ERROR(ret))
1907 		wma_err("Failed to update HE operations");
1908 }
1909 #else
1910 static void wma_update_bss_color(tp_wma_handle wma, uint8_t vdev_id,
1911 			   tUpdateBeaconParams *bcn_params)
1912 {
1913 }
1914 #endif
1915 
1916 /**
1917  * wma_process_update_beacon_params() - update beacon parameters to target
1918  * @wma: wma handle
1919  * @bcn_params: beacon parameters
1920  *
1921  * Return: none
1922  */
1923 void
1924 wma_process_update_beacon_params(tp_wma_handle wma,
1925 				 tUpdateBeaconParams *bcn_params)
1926 {
1927 	if (!bcn_params) {
1928 		wma_err("bcn_params NULL");
1929 		return;
1930 	}
1931 
1932 	if (bcn_params->vdev_id >= wma->max_bssid) {
1933 		wma_err("Invalid vdev id %d", bcn_params->vdev_id);
1934 		return;
1935 	}
1936 
1937 	if (bcn_params->paramChangeBitmap & PARAM_BCN_INTERVAL_CHANGED) {
1938 		wma_update_beacon_interval(wma, bcn_params->vdev_id,
1939 					   bcn_params->beaconInterval);
1940 	}
1941 
1942 	if (bcn_params->paramChangeBitmap & PARAM_llBCOEXIST_CHANGED)
1943 		wma_update_protection_mode(wma, bcn_params->vdev_id,
1944 					   bcn_params->llbCoexist);
1945 
1946 	if (bcn_params->paramChangeBitmap & PARAM_BSS_COLOR_CHANGED)
1947 		wma_update_bss_color(wma, bcn_params->vdev_id,
1948 				     bcn_params);
1949 }
1950 
1951 void wma_update_rts_params(tp_wma_handle wma, uint32_t value)
1952 {
1953 	uint8_t vdev_id;
1954 	QDF_STATUS ret;
1955 	struct wlan_objmgr_vdev *vdev;
1956 
1957 	for (vdev_id = 0; vdev_id < wma->max_bssid; vdev_id++) {
1958 		vdev = wma->interfaces[vdev_id].vdev;
1959 		if (!vdev)
1960 			continue;
1961 		ret = wma_vdev_set_param(wma->wmi_handle,
1962 					 vdev_id,
1963 					 wmi_vdev_param_rts_threshold,
1964 					 value);
1965 		if (QDF_IS_STATUS_ERROR(ret))
1966 			wma_err("Update cfg param fail for vdevId %d", vdev_id);
1967 	}
1968 }
1969 
1970 void wma_update_frag_params(tp_wma_handle wma, uint32_t value)
1971 {
1972 	uint8_t vdev_id;
1973 	QDF_STATUS ret;
1974 	struct wlan_objmgr_vdev *vdev;
1975 
1976 	for (vdev_id = 0; vdev_id < wma->max_bssid; vdev_id++) {
1977 		vdev = wma->interfaces[vdev_id].vdev;
1978 		if (!vdev)
1979 			continue;
1980 		ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
1981 					 wmi_vdev_param_fragmentation_threshold,
1982 					 value);
1983 		if (QDF_IS_STATUS_ERROR(ret))
1984 			wma_err("Update cfg params failed for vdevId %d",
1985 				 vdev_id);
1986 	}
1987 }
1988 
1989 /**
1990  * wma_process_update_edca_param_req() - update EDCA params
1991  * @handle: wma handle
1992  * @edca_params: edca parameters
1993  *
1994  * This function updates EDCA parameters to the target
1995  *
1996  * Return: QDF Status
1997  */
1998 QDF_STATUS wma_process_update_edca_param_req(WMA_HANDLE handle,
1999 					     tEdcaParams *edca_params)
2000 {
2001 	tp_wma_handle wma_handle = (tp_wma_handle) handle;
2002 	struct wmi_host_wme_vparams wmm_param[QCA_WLAN_AC_ALL];
2003 	tSirMacEdcaParamRecord *edca_record;
2004 	int ac;
2005 	struct ol_tx_wmm_param_t ol_tx_wmm_param;
2006 	uint8_t vdev_id;
2007 	QDF_STATUS status;
2008 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
2009 	uint8_t *debug_str;
2010 	uint32_t len = 0;
2011 
2012 	vdev_id = edca_params->vdev_id;
2013 	if (!wma_is_vdev_valid(vdev_id)) {
2014 		wma_err("vdev id:%d is not active ", vdev_id);
2015 		goto fail;
2016 	}
2017 
2018 	debug_str = qdf_mem_malloc(WMA_WMM_DEBUG_STRING_SIZE);
2019 	if (!debug_str)
2020 		goto fail;
2021 
2022 	for (ac = 0; ac < QCA_WLAN_AC_ALL; ac++) {
2023 		switch (ac) {
2024 		case QCA_WLAN_AC_BE:
2025 			edca_record = &edca_params->acbe;
2026 			break;
2027 		case QCA_WLAN_AC_BK:
2028 			edca_record = &edca_params->acbk;
2029 			break;
2030 		case QCA_WLAN_AC_VI:
2031 			edca_record = &edca_params->acvi;
2032 			break;
2033 		case QCA_WLAN_AC_VO:
2034 			edca_record = &edca_params->acvo;
2035 			break;
2036 		default:
2037 			qdf_mem_free(debug_str);
2038 			goto fail;
2039 		}
2040 
2041 		wma_update_edca_params_for_ac(edca_record, &wmm_param[ac], ac,
2042 					      edca_params->mu_edca_params,
2043 					      debug_str,
2044 					      WMA_WMM_DEBUG_STRING_SIZE, &len);
2045 
2046 		ol_tx_wmm_param.ac[ac].aifs = wmm_param[ac].aifs;
2047 		ol_tx_wmm_param.ac[ac].cwmin = wmm_param[ac].cwmin;
2048 		ol_tx_wmm_param.ac[ac].cwmax = wmm_param[ac].cwmax;
2049 	}
2050 
2051 	wma_nofl_debug("WMM params: %s", debug_str);
2052 	qdf_mem_free(debug_str);
2053 
2054 	status = wmi_unified_process_update_edca_param(wma_handle->wmi_handle,
2055 						vdev_id,
2056 						edca_params->mu_edca_params,
2057 						wmm_param);
2058 	if (status == QDF_STATUS_E_NOMEM)
2059 		return status;
2060 	else if (status == QDF_STATUS_E_FAILURE)
2061 		goto fail;
2062 
2063 	cdp_set_wmm_param(soc, WMI_PDEV_ID_SOC, ol_tx_wmm_param);
2064 
2065 	return QDF_STATUS_SUCCESS;
2066 
2067 fail:
2068 	wma_err("Failed to set WMM Parameters");
2069 	return QDF_STATUS_E_FAILURE;
2070 }
2071 
2072 /**
2073  * wmi_unified_probe_rsp_tmpl_send() - send probe response template to fw
2074  * @wma: wma handle
2075  * @vdev_id: vdev id
2076  * @probe_rsp_info: probe response info
2077  *
2078  * Return: 0 for success or error code
2079  */
2080 static int wmi_unified_probe_rsp_tmpl_send(tp_wma_handle wma,
2081 				   uint8_t vdev_id,
2082 				   tpSendProbeRespParams probe_rsp_info)
2083 {
2084 	uint64_t adjusted_tsf_le;
2085 	struct ieee80211_frame *wh;
2086 	struct wmi_probe_resp_params params;
2087 
2088 	wma_debug("Send probe response template for vdev %d", vdev_id);
2089 
2090 	/*
2091 	 * Make the TSF offset negative so probe responses in the same
2092 	 * staggered batch have the same TSF.
2093 	 */
2094 	adjusted_tsf_le = cpu_to_le64(0ULL -
2095 				      wma->interfaces[vdev_id].tsfadjust);
2096 	/* Update the timestamp in the probe response buffer with adjusted TSF */
2097 	wh = (struct ieee80211_frame *)probe_rsp_info->probeRespTemplate;
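	/*
	 * &wh[1] points just past the 802.11 MAC header, which is where the
	 * 8-byte Timestamp field of the probe response body starts.
	 */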
2098 	A_MEMCPY(&wh[1], &adjusted_tsf_le, sizeof(adjusted_tsf_le));
2099 
2100 	params.prb_rsp_template_len = probe_rsp_info->probeRespTemplateLen;
2101 	params.prb_rsp_template_frm = probe_rsp_info->probeRespTemplate;
2102 
2103 	return wmi_unified_probe_rsp_tmpl_send_cmd(wma->wmi_handle, vdev_id,
2104 						   &params);
2105 }
2106 
2107 #ifdef WLAN_FEATURE_11BE_MLO
2108 /**
2109  * wma_upt_mlo_partner_info() - update mlo info in beacon template
2110  * @params: beacon template params
2111  * @bcn_param: beacon params
2112  * @bytes_to_strip: bytes to strip
2113  *
2114  * Return: Void
2115  */
2116 static void wma_upt_mlo_partner_info(struct beacon_tmpl_params *params,
2117 				     const tpSendbeaconParams bcn_param,
2118 				     uint8_t bytes_to_strip)
2119 {
2120 	struct ml_bcn_partner_info *bcn_info;
2121 	struct ml_bcn_partner_info *info;
2122 	int link;
2123 
2124 	params->mlo_partner.num_links = bcn_param->mlo_partner.num_links;
2125 	for (link = 0; link < params->mlo_partner.num_links; link++) {
2126 		bcn_info = &bcn_param->mlo_partner.partner_info[link];
2127 		info = &params->mlo_partner.partner_info[link];
2128 		info->vdev_id = bcn_info->vdev_id;
2129 		info->beacon_interval = bcn_info->beacon_interval;
2130 		if (bcn_info->csa_switch_count_offset &&
2131 		    bcn_info->csa_switch_count_offset > bytes_to_strip)
2132 			info->csa_switch_count_offset =
2133 				bcn_info->csa_switch_count_offset -
2134 					bytes_to_strip;
2135 		if (bcn_info->ext_csa_switch_count_offset &&
2136 		    bcn_info->ext_csa_switch_count_offset > bytes_to_strip)
2137 			info->ext_csa_switch_count_offset =
2138 				bcn_info->ext_csa_switch_count_offset -
2139 					bytes_to_strip;
2140 	}
2141 }
2142 #else
2143 static void wma_upt_mlo_partner_info(struct beacon_tmpl_params *params,
2144 				     const tpSendbeaconParams bcn_param,
2145 				     uint8_t bytes_to_strip)
2146 {
2147 }
2148 #endif
2149 
2150 /**
2151  * wma_unified_bcn_tmpl_send() - send beacon template to fw
2152  * @wma:wma handle
2153  * @vdev_id: vdev id
2154  * @bcn_info: beacon info
2155  * @bytes_to_strip: bytes to strip
2156  *
2157  * Return: QDF_STATUS_SUCCESS for success or error code
2158  */
2159 static QDF_STATUS wma_unified_bcn_tmpl_send(tp_wma_handle wma,
2160 				     uint8_t vdev_id,
2161 				     const tpSendbeaconParams bcn_info,
2162 				     uint8_t bytes_to_strip)
2163 {
2164 	struct beacon_tmpl_params params = {0};
2165 	uint32_t tmpl_len, tmpl_len_aligned;
2166 	uint8_t *frm;
2167 	QDF_STATUS ret;
2168 	uint8_t *p2p_ie;
2169 	uint16_t p2p_ie_len = 0;
2170 	uint64_t adjusted_tsf_le;
2171 	struct ieee80211_frame *wh;
2172 
2173 	if (!wma_is_vdev_valid(vdev_id)) {
2174 		wma_err("vdev id:%d is not active ", vdev_id);
2175 		return QDF_STATUS_E_INVAL;
2176 	}
2177 
2178 	wma_nofl_debug("Send beacon template for vdev %d", vdev_id);
2179 
2180 	if (bcn_info->p2pIeOffset) {
2181 		p2p_ie = bcn_info->beacon + bcn_info->p2pIeOffset;
2182 		p2p_ie_len = (uint16_t) p2p_ie[1] + 2;
2183 	}
2184 
2185 	/*
2186 	 * XXX: The first 4 bytes of the beacon buffer contain the beacon
2187 	 * length only when UMAC is sending the beacon template. In other
2188 	 * cases (e.g. from a TBTT update) the beacon length is read from
2189 	 * the beacon information.
2190 	 */
2191 	if (bytes_to_strip)
2192 		tmpl_len = *(uint32_t *) &bcn_info->beacon[0];
2193 	else
2194 		tmpl_len = bcn_info->beaconLength;
2195 
2196 	if (tmpl_len > WMI_BEACON_TX_BUFFER_SIZE) {
2197 		wma_err("tmpl_len: %d > %d. Invalid tmpl len", tmpl_len,
2198 			WMI_BEACON_TX_BUFFER_SIZE);
2199 		return QDF_STATUS_E_INVAL;
2200 	}
2201 
2202 	if (p2p_ie_len) {
2203 		if (tmpl_len <= p2p_ie_len) {
2204 			wma_err("tmpl_len %d <= p2p_ie_len %d, Invalid",
2205 				tmpl_len, p2p_ie_len);
2206 			return QDF_STATUS_E_INVAL;
2207 		}
2208 		tmpl_len -= (uint32_t) p2p_ie_len;
2209 	}
2210 
2211 	frm = bcn_info->beacon + bytes_to_strip;
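	/*
	 * Round the template length up to a multiple of sizeof(A_UINT32) so
	 * the template can be carried in the 32-bit aligned WMI TLV buffer.
	 */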
2212 	tmpl_len_aligned = roundup(tmpl_len, sizeof(A_UINT32));
2213 	/*
2214 	 * Make the TSF offset negative so beacons in the same
2215 	 * staggered batch have the same TSF.
2216 	 */
2217 	adjusted_tsf_le = cpu_to_le64(0ULL -
2218 				      wma->interfaces[vdev_id].tsfadjust);
2219 	/* Update the timestamp in the beacon buffer with adjusted TSF */
2220 	wh = (struct ieee80211_frame *)frm;
2221 	A_MEMCPY(&wh[1], &adjusted_tsf_le, sizeof(adjusted_tsf_le));
2222 
2225 	params.vdev_id = vdev_id;
2226 	params.tim_ie_offset = bcn_info->timIeOffset - bytes_to_strip;
2227 	params.tmpl_len = tmpl_len;
2228 	params.frm = frm;
2229 	params.tmpl_len_aligned = tmpl_len_aligned;
2230 	params.enable_bigtk =
2231 		mlme_get_bigtk_support(wma->interfaces[vdev_id].vdev);
2232 	if (bcn_info->csa_count_offset &&
2233 	    (bcn_info->csa_count_offset > bytes_to_strip))
2234 		params.csa_switch_count_offset =
2235 			bcn_info->csa_count_offset - bytes_to_strip;
2236 	if (bcn_info->ecsa_count_offset &&
2237 	    (bcn_info->ecsa_count_offset > bytes_to_strip))
2238 		params.ext_csa_switch_count_offset =
2239 			bcn_info->ecsa_count_offset - bytes_to_strip;
2240 
2241 	wma_upt_mlo_partner_info(&params, bcn_info, bytes_to_strip);
2242 
2243 	ret = wmi_unified_beacon_tmpl_send_cmd(wma->wmi_handle,
2244 				 &params);
2245 	if (QDF_IS_STATUS_ERROR(ret))
2246 		wma_err("Failed to send bcn tmpl: %d", ret);
2247 
2248 	return ret;
2249 }
2250 
2251 /**
2252  * wma_store_bcn_tmpl() - store beacon template
2253  * @wma: wma handle
2254  * @vdev_id: vdev id
2255  * @bcn_info: beacon params
2256  *
2257  * This function stores beacon template locally.
2258  * It will be sent to the target on reception of the
2259  * SWBA event.
2260  *
2261  * Return: QDF status
2262  */
2263 static QDF_STATUS wma_store_bcn_tmpl(tp_wma_handle wma, uint8_t vdev_id,
2264 				     tpSendbeaconParams bcn_info)
2265 {
2266 	struct beacon_info *bcn;
2267 	uint32_t len;
2268 	uint8_t *bcn_payload;
2269 	struct beacon_tim_ie *tim_ie;
2270 
2271 	bcn = wma->interfaces[vdev_id].beacon;
2272 	if (!bcn || !bcn->buf) {
2273 		wma_err("Memory is not allocated to hold bcn template");
2274 		return QDF_STATUS_E_INVAL;
2275 	}
2276 
2277 	len = *(uint32_t *) &bcn_info->beacon[0];
2278 	if (len > SIR_MAX_BEACON_SIZE - sizeof(uint32_t)) {
2279 		wma_err("Received beacon len %u exceeding max limit %lu",
2280 			len, (unsigned long)(
2281 			 SIR_MAX_BEACON_SIZE - sizeof(uint32_t)));
2282 		return QDF_STATUS_E_INVAL;
2283 	}
2284 	qdf_spin_lock_bh(&bcn->lock);
2285 
2286 	/*
2287 	 * Copy the received beacon template content into the local buffer.
2288 	 * It will be sent to the target on reception of the SWBA
2289 	 * event from the target.
2290 	 */
2291 	qdf_nbuf_trim_tail(bcn->buf, qdf_nbuf_len(bcn->buf));
2292 	memcpy(qdf_nbuf_data(bcn->buf),
2293 	       bcn_info->beacon + 4 /* Exclude beacon length field */,
2294 	       len);
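	/*
	 * The offsets reported by UMAC are relative to the buffer that still
	 * includes the 4-byte length field stripped above, so adjust them by
	 * 4 whenever they point past it.
	 */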
2295 	if (bcn_info->timIeOffset > 3)
2296 		bcn->tim_ie_offset = bcn_info->timIeOffset - 4;
2297 	else
2298 		bcn->tim_ie_offset = bcn_info->timIeOffset;
2299 
2300 	if (bcn_info->p2pIeOffset > 3)
2301 		bcn->p2p_ie_offset = bcn_info->p2pIeOffset - 4;
2302 	else
2303 		bcn->p2p_ie_offset = bcn_info->p2pIeOffset;
2304 
2305 	if (bcn_info->csa_count_offset > 3)
2306 		bcn->csa_count_offset = bcn_info->csa_count_offset - 4;
2307 	else
2308 		bcn->csa_count_offset = bcn_info->csa_count_offset;
2309 
2310 	if (bcn_info->ecsa_count_offset > 3)
2311 		bcn->ecsa_count_offset = bcn_info->ecsa_count_offset - 4;
2312 	else
2313 		bcn->ecsa_count_offset = bcn_info->ecsa_count_offset;
2314 
2315 	bcn_payload = qdf_nbuf_data(bcn->buf);
2316 	if (bcn->tim_ie_offset) {
2317 		tim_ie = (struct beacon_tim_ie *)
2318 				(&bcn_payload[bcn->tim_ie_offset]);
2319 		/*
2320 		 * The initial value of bcn->dtim_count is 0, but if
2321 		 * the beacon gets updated then the current dtim
2322 		 * count is restored.
2323 		 */
2324 		tim_ie->dtim_count = bcn->dtim_count;
2325 		tim_ie->tim_bitctl = 0;
2326 	}
2327 
2328 	qdf_nbuf_put_tail(bcn->buf, len);
2329 	bcn->len = len;
2330 
2331 	qdf_spin_unlock_bh(&bcn->lock);
2332 
2333 	return QDF_STATUS_SUCCESS;
2334 }
2335 
2336 int wma_tbttoffset_update_event_handler(void *handle, uint8_t *event,
2337 					       uint32_t len)
2338 {
2339 	tp_wma_handle wma = (tp_wma_handle) handle;
2340 	WMI_TBTTOFFSET_UPDATE_EVENTID_param_tlvs *param_buf;
2341 	wmi_tbtt_offset_event_fixed_param *tbtt_offset_event;
2342 	struct wma_txrx_node *intf;
2343 	struct beacon_info *bcn;
2344 	tSendbeaconParams bcn_info;
2345 	uint32_t *adjusted_tsf = NULL;
2346 	uint32_t if_id = 0, vdev_map;
2347 
2348 	if (wma_validate_handle(wma))
2349 		return -EINVAL;
2350 
2351 	param_buf = (WMI_TBTTOFFSET_UPDATE_EVENTID_param_tlvs *) event;
2352 	if (!param_buf) {
2353 		wma_err("Invalid tbtt update event buffer");
2354 		return -EINVAL;
2355 	}
2356 
2357 	tbtt_offset_event = param_buf->fixed_param;
2358 	intf = wma->interfaces;
2359 	vdev_map = tbtt_offset_event->vdev_map;
2360 	adjusted_tsf = param_buf->tbttoffset_list;
2361 	if (!adjusted_tsf) {
2362 		wma_err("Invalid adjusted_tsf");
2363 		return -EINVAL;
2364 	}
2365 
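	/*
	 * vdev_map is a bitmap with one bit per vdev id (bit 0 for vdev 0);
	 * walk it LSB first, shifting it right on every iteration. The
	 * tbttoffset_list entry at the same index holds that vdev's
	 * adjusted TSF.
	 */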
2366 	for (; (if_id < wma->max_bssid && vdev_map); vdev_map >>= 1, if_id++) {
2367 		if (!intf[if_id].vdev)
2368 			continue;
2369 
2370 		if (!(vdev_map & 0x1))
2371 			continue;
2372 
2373 		bcn = intf[if_id].beacon;
2374 		if (!bcn) {
2375 			wma_err("Invalid beacon");
2376 			return -EINVAL;
2377 		}
2378 		if (!bcn->buf) {
2379 			wma_err("Invalid beacon buffer");
2380 			return -EINVAL;
2381 		}
2382 		/* Save the adjusted TSF */
2383 		intf[if_id].tsfadjust = adjusted_tsf[if_id];
2384 
2385 		qdf_spin_lock_bh(&bcn->lock);
2386 		qdf_mem_zero(&bcn_info, sizeof(bcn_info));
2387 		qdf_mem_copy(bcn_info.beacon,
2388 			     qdf_nbuf_data(bcn->buf), bcn->len);
2389 		bcn_info.p2pIeOffset = bcn->p2p_ie_offset;
2390 		bcn_info.beaconLength = bcn->len;
2391 		bcn_info.timIeOffset = bcn->tim_ie_offset;
2392 		bcn_info.csa_count_offset = bcn->csa_count_offset;
2393 		bcn_info.ecsa_count_offset = bcn->ecsa_count_offset;
2394 		qdf_spin_unlock_bh(&bcn->lock);
2395 
2396 		wma_err_rl("Update beacon template for vdev %d due to TBTT offset update",
2397 			   if_id);
2398 		/* Update beacon template in firmware */
2399 		wma_unified_bcn_tmpl_send(wma, if_id, &bcn_info, 0);
2400 	}
2401 	return 0;
2402 }
2403 
2404 /**
2405  * wma_p2p_go_set_beacon_ie() - set beacon IE for p2p go
2406  * @wma_handle: wma handle
2407  * @vdev_id: vdev id
2408  * @p2pIe: p2p IE
2409  *
2410  * Return: 0 for success or error code
2411  */
2412 static int wma_p2p_go_set_beacon_ie(t_wma_handle *wma_handle,
2413 				    A_UINT32 vdev_id, uint8_t *p2pIe)
2414 {
2415 	if (wma_validate_handle(wma_handle))
2416 		return QDF_STATUS_E_FAILURE;
2417 
2418 	return wmi_unified_p2p_go_set_beacon_ie_cmd(wma_handle->wmi_handle,
2419 							vdev_id, p2pIe);
2420 }
2421 
2422 /**
2423  * wma_send_probe_rsp_tmpl() - send probe resp template
2424  * @wma: wma handle
2425  * @probe_rsp_info: probe response info
2426  *
2427  * This function sends the probe response template to firmware,
2428  * which firmware uses when probe response offload is enabled.
2429  *
2430  * Return: none
2431  */
2432 void wma_send_probe_rsp_tmpl(tp_wma_handle wma,
2433 				    tpSendProbeRespParams probe_rsp_info)
2434 {
2435 	uint8_t vdev_id;
2436 	struct sAniProbeRspStruct *probe_rsp;
2437 
2438 	if (!probe_rsp_info) {
2439 		wma_err("probe_rsp_info is NULL");
2440 		return;
2441 	}
2442 
2443 	probe_rsp = (struct sAniProbeRspStruct *)
2444 			(probe_rsp_info->probeRespTemplate);
2445 	if (!probe_rsp) {
2446 		wma_err("probe_rsp is NULL");
2447 		return;
2448 	}
2449 
2450 	if (wma_find_vdev_id_by_addr(wma, probe_rsp->macHdr.sa, &vdev_id)) {
2451 		wma_err("failed to get vdev id");
2452 		return;
2453 	}
2454 
2455 	if (wmi_service_enabled(wma->wmi_handle,
2456 				   wmi_service_beacon_offload)) {
2457 		wma_nofl_debug("Beacon Offload Enabled Sending Unified command");
2458 		if (wmi_unified_probe_rsp_tmpl_send(wma, vdev_id,
2459 						    probe_rsp_info) < 0) {
2460 			wma_err("wmi_unified_probe_rsp_tmpl_send Failed");
2461 			return;
2462 		}
2463 	}
2464 }
2465 
2466 QDF_STATUS wma_set_ap_vdev_up(tp_wma_handle wma, uint8_t vdev_id)
2467 {
2468 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2469 	struct vdev_mlme_obj *mlme_obj;
2470 	struct wlan_objmgr_vdev *vdev;
2471 	struct wma_txrx_node *iface;
2472 
2473 	iface = &wma->interfaces[vdev_id];
2474 	vdev = iface->vdev;
2475 	mlme_obj = wlan_vdev_mlme_get_cmpt_obj(vdev);
2476 	if (!mlme_obj) {
2477 		wma_err("failed to get mlme_obj");
2478 		return QDF_STATUS_E_INVAL;
2479 	}
2480 	mlme_obj->proto.sta.assoc_id = 0;
2481 
2482 	status = vdev_mgr_up_send(mlme_obj);
2483 	if (QDF_IS_STATUS_ERROR(status)) {
2484 		wma_err("failed to send vdev up");
2485 		return status;
2486 	}
2487 	wma_set_sap_keepalive(wma, vdev_id);
2488 	wma_set_vdev_mgmt_rate(wma, vdev_id);
2489 	wma_vdev_set_he_bss_params(wma, vdev_id, &mlme_obj->proto.he_ops_info);
2490 	mlme_sr_update(vdev, true);
2491 
2492 	return status;
2493 }
2494 
2495 /**
2496  * wma_send_beacon() - send beacon template
2497  * @wma: wma handle
2498  * @bcn_info: beacon info
2499  *
2500  * This function stores the beacon template locally and
2501  * updates keep alive parameters.
2502  *
2503  * Return: none
2504  */
2505 void wma_send_beacon(tp_wma_handle wma, tpSendbeaconParams bcn_info)
2506 {
2507 	uint8_t vdev_id;
2508 	QDF_STATUS status;
2509 	uint8_t *p2p_ie;
2510 	struct sAniBeaconStruct *beacon;
2511 
2512 	wma_nofl_debug("Beacon update reason %d", bcn_info->reason);
2513 	beacon = (struct sAniBeaconStruct *) (bcn_info->beacon);
2514 	if (wma_find_vdev_id_by_addr(wma, beacon->macHdr.sa, &vdev_id)) {
2515 		wma_err("failed to get vdev id");
2516 		status = QDF_STATUS_E_INVAL;
2517 		goto send_rsp;
2518 	}
2519 
2520 	if (wmi_service_enabled(wma->wmi_handle,
2521 				   wmi_service_beacon_offload)) {
2522 		status = wma_unified_bcn_tmpl_send(wma, vdev_id, bcn_info, 4);
2523 		if (QDF_IS_STATUS_ERROR(status)) {
2524 			wma_err("wmi_unified_bcn_tmpl_send Failed");
2525 			goto send_rsp;
2526 		}
2527 
2528 		if (bcn_info->p2pIeOffset) {
2529 			p2p_ie = bcn_info->beacon + bcn_info->p2pIeOffset;
2530 			wma_debug("p2pIe is present - vdev_id %hu, p2p_ie = %pK, p2p ie len = %hu",
2531 				  vdev_id, p2p_ie, p2p_ie[1]);
2532 			if (wma_p2p_go_set_beacon_ie(wma, vdev_id,
2533 							 p2p_ie) < 0) {
2534 				wma_err("wma_p2p_go_set_beacon_ie Failed");
2535 				status = QDF_STATUS_E_INVAL;
2536 				goto send_rsp;
2537 			}
2538 		}
2539 	}
2540 	status = wma_store_bcn_tmpl(wma, vdev_id, bcn_info);
2541 	if (status != QDF_STATUS_SUCCESS) {
2542 		wma_err("wma_store_bcn_tmpl Failed");
2543 		goto send_rsp;
2544 	}
2545 
2546 send_rsp:
2547 	bcn_info->status = status;
2548 	wma_send_msg(wma, WMA_SEND_BCN_RSP, (void *)bcn_info, 0);
2549 }
2550 
2551 /**
2552  * wma_set_keepalive_req() - send keep alive request to fw
2553  * @wma: wma handle
2554  * @keepalive: keep alive parameters
2555  *
2556  * Return: none
2557  */
2558 void wma_set_keepalive_req(tp_wma_handle wma,
2559 			   struct keep_alive_req *keepalive)
2560 {
2561 	wma_nofl_debug("KEEPALIVE:PacketType:%d", keepalive->packetType);
2562 	wma_set_sta_keep_alive(wma, keepalive->sessionId,
2563 			       keepalive->packetType,
2564 			       keepalive->timePeriod,
2565 			       keepalive->hostIpv4Addr,
2566 			       keepalive->destIpv4Addr,
2567 			       keepalive->dest_macaddr.bytes);
2568 
2569 	qdf_mem_free(keepalive);
2570 }
2571 
2572 /**
2573  * wma_beacon_miss_handler() - beacon miss event handler
2574  * @wma: wma handle
2575  * @vdev_id: vdev id
2576  * @rssi: rssi value
2577  *
2578  * This function sends a beacon miss indication to upper layers.
2579  *
2580  * Return: none
2581  */
2582 void wma_beacon_miss_handler(tp_wma_handle wma, uint32_t vdev_id, int32_t rssi)
2583 {
2584 	struct missed_beacon_ind *beacon_miss_ind;
2585 	struct mac_context *mac = cds_get_context(QDF_MODULE_ID_PE);
2586 
2587 	beacon_miss_ind = qdf_mem_malloc(sizeof(*beacon_miss_ind));
2588 	if (!beacon_miss_ind)
2589 		return;
2590 
2591 	if (mac && mac->sme.tx_queue_cb)
2592 		mac->sme.tx_queue_cb(mac->hdd_handle, vdev_id,
2593 				     WLAN_STOP_ALL_NETIF_QUEUE,
2594 				     WLAN_CONTROL_PATH);
2595 	beacon_miss_ind->messageType = WMA_MISSED_BEACON_IND;
2596 	beacon_miss_ind->length = sizeof(*beacon_miss_ind);
2597 	beacon_miss_ind->bss_idx = vdev_id;
2598 	beacon_miss_ind->rssi = rssi;
2599 
2600 	wma_send_msg(wma, WMA_MISSED_BEACON_IND, beacon_miss_ind, 0);
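	/*
	 * Without hw db2dbm support the firmware reports RSSI as SNR relative
	 * to the noise floor, so add the nominal noise floor
	 * (WMA_TGT_NOISE_FLOOR_DBM) to convert it to dBm before reporting.
	 */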
2601 	if (!wmi_service_enabled(wma->wmi_handle,
2602 				 wmi_service_hw_db2dbm_support))
2603 		rssi += WMA_TGT_NOISE_FLOOR_DBM;
2604 	wma_lost_link_info_handler(wma, vdev_id, rssi);
2605 }
2606 
2607 void wlan_cm_send_beacon_miss(uint8_t vdev_id, int32_t rssi)
2608 {
2609 	tp_wma_handle wma;
2610 
2611 	wma = cds_get_context(QDF_MODULE_ID_WMA);
2612 	if (!wma)
2613 		return;
2614 
2615 	wma_beacon_miss_handler(wma, vdev_id, rssi);
2616 }
2617 
2618 /**
2619  * wma_get_status_str() - get string of tx status from firmware
2620  * @status: tx status
2621  *
2622  * Return: converted string of tx status
2623  */
2624 static const char *wma_get_status_str(uint32_t status)
2625 {
2626 	switch (status) {
2627 	default:
2628 		return "unknown";
2629 	CASE_RETURN_STRING(WMI_MGMT_TX_COMP_TYPE_COMPLETE_OK);
2630 	CASE_RETURN_STRING(WMI_MGMT_TX_COMP_TYPE_DISCARD);
2631 	CASE_RETURN_STRING(WMI_MGMT_TX_COMP_TYPE_INSPECT);
2632 	CASE_RETURN_STRING(WMI_MGMT_TX_COMP_TYPE_COMPLETE_NO_ACK);
2633 	CASE_RETURN_STRING(WMI_MGMT_TX_COMP_TYPE_MAX);
2634 	}
2635 }
2636 
2637 #ifdef CONFIG_HL_SUPPORT
2638 static inline void wma_mgmt_unmap_buf(tp_wma_handle wma_handle, qdf_nbuf_t buf)
2639 {
2640 }
2641 #else
2642 static inline void wma_mgmt_unmap_buf(tp_wma_handle wma_handle, qdf_nbuf_t buf)
2643 {
2644 	qdf_nbuf_unmap_single(wma_handle->qdf_dev, buf, QDF_DMA_TO_DEVICE);
2645 }
2646 #endif
2647 
2648 #if defined(CONNECTIVITY_PKTLOG) || !defined(REMOVE_PKT_LOG)
2649 /**
2650  * wma_mgmt_qdf_status_map() - map MGMT Tx completion status to
2651  * packet dump Tx status
2652  * @status: MGMT Tx completion status
2653  *
2654  * Return: packet dump tx_status enum
2655  */
2656 static inline enum qdf_dp_tx_rx_status
2657 wma_mgmt_qdf_status_map(WMI_MGMT_TX_COMP_STATUS_TYPE status)
2658 {
2659 	enum qdf_dp_tx_rx_status pktdump_status;
2660 
2661 	switch (status) {
2662 	case WMI_MGMT_TX_COMP_TYPE_COMPLETE_OK:
2663 		pktdump_status = QDF_TX_RX_STATUS_OK;
2664 		break;
2665 	case WMI_MGMT_TX_COMP_TYPE_DISCARD:
2666 		pktdump_status = QDF_TX_RX_STATUS_DROP;
2667 		break;
2668 	case WMI_MGMT_TX_COMP_TYPE_COMPLETE_NO_ACK:
2669 		pktdump_status = QDF_TX_RX_STATUS_NO_ACK;
2670 		break;
2671 	default:
2672 		pktdump_status = QDF_TX_RX_STATUS_DROP;
2673 		break;
2674 	}
2675 	return pktdump_status;
2676 }
2677 
2678 /**
2679  * wma_mgmt_pktdump_tx_handler() - calls tx cb if CONNECTIVITY_PKTLOG
2680  * feature is enabled
2681  * @wma_handle: wma handle
2682  * @buf: nbuf
2683  * @vdev_id : vdev id
2684  * @status : status
2685  *
2686  * Return: none
2687  */
2688 static inline void wma_mgmt_pktdump_tx_handler(tp_wma_handle wma_handle,
2689 					       qdf_nbuf_t buf, uint8_t vdev_id,
2690 					       uint32_t status)
2691 {
2692 	ol_txrx_pktdump_cb packetdump_cb;
2693 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
2694 	enum qdf_dp_tx_rx_status pktdump_status;
2695 
2696 	packetdump_cb = wma_handle->wma_mgmt_tx_packetdump_cb;
2697 	pktdump_status = wma_mgmt_qdf_status_map(status);
2698 	if (packetdump_cb)
2699 		packetdump_cb(soc, WMI_PDEV_ID_SOC, vdev_id,
2700 			      buf, pktdump_status, QDF_TX_MGMT_PKT);
2701 }
2702 
2703 /**
2704  * wma_mgmt_pktdump_rx_handler() - calls rx cb if CONNECTIVITY_PKTLOG
2705  * feature is enabled
2706  * @mgmt_rx_params: mgmt rx params
2707  * @rx_pkt: cds packet
2708  * @wma_handle: wma handle
2709  * @mgt_type: management type
2710  * @mgt_subtype: management subtype
2711  *
2712  * Return: none
2713  */
2714 static inline void wma_mgmt_pktdump_rx_handler(
2715 			struct mgmt_rx_event_params *mgmt_rx_params,
2716 			cds_pkt_t *rx_pkt, tp_wma_handle wma_handle,
2717 			uint8_t mgt_type, uint8_t mgt_subtype)
2718 {
2719 	ol_txrx_pktdump_cb packetdump_cb;
2720 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
2721 
2722 	packetdump_cb = wma_handle->wma_mgmt_rx_packetdump_cb;
2723 	if ((mgt_type == IEEE80211_FC0_TYPE_MGT &&
2724 	     mgt_subtype != MGMT_SUBTYPE_BEACON) &&
2725 	     packetdump_cb)
2726 		packetdump_cb(soc, mgmt_rx_params->pdev_id,
2727 			      rx_pkt->pkt_meta.session_id, rx_pkt->pkt_buf,
2728 			      QDF_TX_RX_STATUS_OK, QDF_RX_MGMT_PKT);
2729 }
2730 
2731 #else
2732 static inline void wma_mgmt_pktdump_tx_handler(tp_wma_handle wma_handle,
2733 					       qdf_nbuf_t buf, uint8_t vdev_id,
2734 					       uint32_t status)
2735 {
2736 }
2737 
2738 static inline void wma_mgmt_pktdump_rx_handler(
2739 			struct mgmt_rx_event_params *mgmt_rx_params,
2740 			cds_pkt_t *rx_pkt, tp_wma_handle wma_handle,
2741 			uint8_t mgt_type, uint8_t mgt_subtype)
2742 {
2743 }
2744 #endif
2745 
2746 /**
2747  * wma_process_mgmt_tx_completion() - process mgmt completion
2748  * @wma_handle: wma handle
2749  * @desc_id: descriptor id
2750  * @status: status
2751  *
2752  * Return: 0 for success or error code
2753  */
2754 static int wma_process_mgmt_tx_completion(tp_wma_handle wma_handle,
2755 					  uint32_t desc_id, uint32_t status)
2756 {
2757 	struct wlan_objmgr_pdev *pdev;
2758 	qdf_nbuf_t buf = NULL;
2759 	QDF_STATUS ret;
2760 	uint8_t vdev_id = 0;
2761 	struct wmi_mgmt_params mgmt_params = {};
2762 
2763 	if (wma_validate_handle(wma_handle))
2764 		return -EINVAL;
2765 
2766 	wma_debug("status: %s wmi_desc_id: %d",
2767 		  wma_get_status_str(status), desc_id);
2768 
2769 	pdev = wma_handle->pdev;
2770 	if (!pdev) {
2771 		wma_err("pdev ptr is NULL");
2772 		return -EINVAL;
2773 	}
2774 
2775 	buf = mgmt_txrx_get_nbuf(pdev, desc_id);
2776 
2777 
2779 		wma_mgmt_unmap_buf(wma_handle, buf);
2780 
2781 	vdev_id = mgmt_txrx_get_vdev_id(pdev, desc_id);
2782 	mgmt_params.vdev_id = vdev_id;
2783 
2784 	wma_mgmt_pktdump_tx_handler(wma_handle, buf, vdev_id, status);
2785 	ret = mgmt_txrx_tx_completion_handler(pdev, desc_id, status,
2786 					      &mgmt_params);
2787 
2788 	if (ret != QDF_STATUS_SUCCESS) {
2789 		wma_err("Failed to process mgmt tx completion");
2790 		return -EINVAL;
2791 	}
2792 
2793 	return 0;
2794 }
2795 
2796 /**
2797  * wma_extract_mgmt_offload_event_params() - Extract mgmt event params
2798  * @params: Management offload event params
2799  * @hdr: Management header to extract
2800  *
2801  * Return: None
2802  */
2803 static void wma_extract_mgmt_offload_event_params(
2804 				struct mgmt_offload_event_params *params,
2805 				wmi_mgmt_hdr *hdr)
2806 {
2807 	params->tsf_l32 = hdr->tsf_l32;
2808 	params->chan_freq = hdr->chan_freq;
2809 	params->rate_kbps = hdr->rate_kbps;
2810 	params->rssi = hdr->rssi;
2811 	params->buf_len = hdr->buf_len;
2812 	params->tx_status = hdr->tx_status;
2813 	params->tx_retry_cnt = hdr->tx_retry_cnt;
2814 }
2815 
2816 /**
2817  * wma_mgmt_tx_completion_handler() - wma mgmt Tx completion event handler
2818  * @handle: wma handle
2819  * @cmpl_event_params: completion event handler data
2820  * @len: length of @cmpl_event_params
2821  *
2822  * Return: 0 on success; error number otherwise
2823  */
2825 int wma_mgmt_tx_completion_handler(void *handle, uint8_t *cmpl_event_params,
2826 				   uint32_t len)
2827 {
2828 	tp_wma_handle wma_handle = (tp_wma_handle)handle;
2829 	WMI_MGMT_TX_COMPLETION_EVENTID_param_tlvs *param_buf;
2830 	wmi_mgmt_tx_compl_event_fixed_param *cmpl_params;
2831 
2832 	param_buf = (WMI_MGMT_TX_COMPLETION_EVENTID_param_tlvs *)
2833 		cmpl_event_params;
2834 	if (!param_buf || !wma_handle) {
2835 		wma_err("Invalid mgmt Tx completion event");
2836 		return -EINVAL;
2837 	}
2838 	cmpl_params = param_buf->fixed_param;
2839 
2840 	if ((ucfg_pkt_capture_get_pktcap_mode(wma_handle->psoc) &
2841 	    PKT_CAPTURE_MODE_MGMT_ONLY) && param_buf->mgmt_hdr) {
2842 		struct mgmt_offload_event_params params = {0};
2843 
2844 		wma_extract_mgmt_offload_event_params(
2845 					&params,
2846 					(wmi_mgmt_hdr *)param_buf->mgmt_hdr);
2847 		ucfg_pkt_capture_mgmt_tx_completion(wma_handle->pdev,
2848 						    cmpl_params->desc_id,
2849 						    cmpl_params->status,
2850 						    &params);
2851 	}
2852 
2853 	wma_process_mgmt_tx_completion(wma_handle, cmpl_params->desc_id,
2854 				       cmpl_params->status);
2855 
2856 	return 0;
2857 }
2858 
2859 /**
2860  * wma_mgmt_tx_bundle_completion_handler() - mgmt bundle comp handler
2861  * @handle: wma handle
2862  * @buf: buffer
2863  * @len: length
2864  *
2865  * Return: 0 for success or error code
2866  */
2867 int wma_mgmt_tx_bundle_completion_handler(void *handle, uint8_t *buf,
2868 				   uint32_t len)
2869 {
2870 	tp_wma_handle wma_handle = (tp_wma_handle)handle;
2871 	WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID_param_tlvs *param_buf;
2872 	wmi_mgmt_tx_compl_bundle_event_fixed_param	*cmpl_params;
2873 	uint32_t num_reports;
2874 	uint32_t *desc_ids;
2875 	uint32_t *status;
2876 	uint32_t i, buf_len;
2877 	bool excess_data = false;
2878 
2879 	param_buf = (WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID_param_tlvs *)buf;
2880 	if (!param_buf || !wma_handle) {
2881 		wma_err("Invalid mgmt Tx completion event");
2882 		return -EINVAL;
2883 	}
2884 	cmpl_params = param_buf->fixed_param;
2885 	num_reports = cmpl_params->num_reports;
2886 	desc_ids = (uint32_t *)(param_buf->desc_ids);
2887 	status = (uint32_t *)(param_buf->status);
2888 
2889 	/* buf contains num_reports * sizeof(uint32) bytes of desc_ids and
2890 	 * num_reports * sizeof(uint32) bytes of status, so
2891 	 * 2 * (num_reports * sizeof(uint32)) must not exceed WMI_SVC_MSG_MAX_SIZE
2892 	 */
2893 	if (cmpl_params->num_reports > (WMI_SVC_MSG_MAX_SIZE /
2894 	    (2 * sizeof(uint32_t))))
2895 		excess_data = true;
2896 	else
2897 		buf_len = cmpl_params->num_reports * (2 * sizeof(uint32_t));
2898 
2899 	if (excess_data || (sizeof(*cmpl_params) > (WMI_SVC_MSG_MAX_SIZE -
2900 	    buf_len))) {
2901 		wma_err("excess wmi buffer: num_reports %d",
2902 			cmpl_params->num_reports);
2903 		return -EINVAL;
2904 	}
2905 
2906 	if ((cmpl_params->num_reports > param_buf->num_desc_ids) ||
2907 	    (cmpl_params->num_reports > param_buf->num_status)) {
2908 		wma_err("Invalid num_reports %d, num_desc_ids %d, num_status %d",
2909 			 cmpl_params->num_reports, param_buf->num_desc_ids,
2910 			 param_buf->num_status);
2911 		return -EINVAL;
2912 	}
2913 
2914 	for (i = 0; i < num_reports; i++) {
2915 		if ((ucfg_pkt_capture_get_pktcap_mode(wma_handle->psoc) &
2916 		    PKT_CAPTURE_MODE_MGMT_ONLY) && param_buf->mgmt_hdr) {
2917 			struct mgmt_offload_event_params params = {0};
2918 
2919 			wma_extract_mgmt_offload_event_params(
2920 				&params,
2921 				&((wmi_mgmt_hdr *)param_buf->mgmt_hdr)[i]);
2922 			ucfg_pkt_capture_mgmt_tx_completion(
2923 				wma_handle->pdev, desc_ids[i],
2924 				status[i], &params);
2925 		}
2926 
2927 		wma_process_mgmt_tx_completion(wma_handle,
2928 					       desc_ids[i], status[i]);
2929 	}
2930 	return 0;
2931 }
2932 
2933 /**
2934  * wma_process_update_opmode() - process update VHT opmode cmd from UMAC
2935  * @wma_handle: wma handle
2936  * @update_vht_opmode: vht opmode
2937  *
2938  * Return: none
2939  */
2940 void wma_process_update_opmode(tp_wma_handle wma_handle,
2941 			       tUpdateVHTOpMode *update_vht_opmode)
2942 {
2943 	wmi_host_channel_width ch_width;
2944 	uint8_t pdev_id;
2945 	struct wlan_objmgr_peer *peer;
2946 	struct wlan_objmgr_psoc *psoc = wma_handle->psoc;
2947 	enum wlan_phymode peer_phymode;
2948 	uint32_t fw_phymode;
2949 	enum wlan_peer_type peer_type;
2950 
2951 	pdev_id = wlan_objmgr_pdev_get_pdev_id(wma_handle->pdev);
2952 	peer = wlan_objmgr_get_peer(psoc, pdev_id,
2953 				    update_vht_opmode->peer_mac,
2954 				    WLAN_LEGACY_WMA_ID);
2955 	if (!peer) {
2956 		wma_err("peer object invalid");
2957 		return;
2958 	}
2959 
2960 	peer_type = wlan_peer_get_peer_type(peer);
2961 	if (peer_type == WLAN_PEER_SELF) {
2962 		wma_err("self peer wrongly used");
2963 		wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
2964 		return;
2965 	}
2966 
2967 	wlan_peer_obj_lock(peer);
2968 	peer_phymode = wlan_peer_get_phymode(peer);
2969 	wlan_peer_obj_unlock(peer);
2970 	wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
2971 
2972 	fw_phymode = wmi_host_to_fw_phymode(peer_phymode);
2973 
2974 	ch_width = wmi_get_ch_width_from_phy_mode(wma_handle->wmi_handle,
2975 						  fw_phymode);
2976 	wma_debug("ch_width: %d, fw phymode: %d peer_phymode %d",
2977 		  ch_width, fw_phymode, peer_phymode);
2978 	if (ch_width < update_vht_opmode->opMode) {
2979 		wma_err("Invalid peer bw update %d, self bw %d",
2980 			update_vht_opmode->opMode, ch_width);
2981 		return;
2982 	}
2983 
2984 	wma_debug("opMode = %d", update_vht_opmode->opMode);
2985 	wma_set_peer_param(wma_handle, update_vht_opmode->peer_mac,
2986 			   WMI_HOST_PEER_CHWIDTH, update_vht_opmode->opMode,
2987 			   update_vht_opmode->smesessionId);
2988 }
2989 
2990 /**
2991  * wma_process_update_rx_nss() - process update RX NSS cmd from UMAC
2992  * @wma_handle: wma handle
2993  * @update_rx_nss: rx nss value
2994  *
2995  * Return: none
2996  */
2997 void wma_process_update_rx_nss(tp_wma_handle wma_handle,
2998 			       tUpdateRxNss *update_rx_nss)
2999 {
3000 	struct target_psoc_info *tgt_hdl;
3001 	struct wma_txrx_node *intr =
3002 		&wma_handle->interfaces[update_rx_nss->smesessionId];
3003 	int rx_nss = update_rx_nss->rxNss;
3004 	int num_rf_chains;
3005 
3006 	tgt_hdl = wlan_psoc_get_tgt_if_handle(wma_handle->psoc);
3007 	if (!tgt_hdl) {
3008 		wma_err("target psoc info is NULL");
3009 		return;
3010 	}
3011 
3012 	num_rf_chains = target_if_get_num_rf_chains(tgt_hdl);
3013 	if (rx_nss > num_rf_chains || rx_nss > WMA_MAX_NSS)
3014 		rx_nss = QDF_MIN(num_rf_chains, WMA_MAX_NSS);
3015 
3016 	intr->nss = (uint8_t)rx_nss;
3017 	update_rx_nss->rxNss = (uint32_t)rx_nss;
3018 
3019 	wma_debug("Rx Nss = %d", update_rx_nss->rxNss);
3020 
3021 	wma_set_peer_param(wma_handle, update_rx_nss->peer_mac,
3022 			   WMI_HOST_PEER_NSS, update_rx_nss->rxNss,
3023 			   update_rx_nss->smesessionId);
3024 }
3025 
3026 /**
3027  * wma_process_update_membership() - process update group membership cmd
3028  * @wma_handle: wma handle
3029  * @membership: group membership info
3030  *
3031  * Return: none
3032  */
3033 void wma_process_update_membership(tp_wma_handle wma_handle,
3034 				   tUpdateMembership *membership)
3035 {
3036 	wma_debug("membership = %x ", membership->membership);
3037 
3038 	wma_set_peer_param(wma_handle, membership->peer_mac,
3039 			   WMI_HOST_PEER_MEMBERSHIP, membership->membership,
3040 			   membership->smesessionId);
3041 }
3042 
3043 /**
3044  * wma_process_update_userpos() - process update user pos cmd from UMAC
3045  * @wma_handle: wma handle
3046  * @userpos: user pos value
3047  *
3048  * Return: none
3049  */
3050 void wma_process_update_userpos(tp_wma_handle wma_handle,
3051 				tUpdateUserPos *userpos)
3052 {
3053 	wma_debug("userPos = %x ", userpos->userPos);
3054 
3055 	wma_set_peer_param(wma_handle, userpos->peer_mac,
3056 			   WMI_HOST_PEER_USERPOS, userpos->userPos,
3057 			   userpos->smesessionId);
3058 
3059 	/* Now that membership/userpos is updated in fw,
3060 	 * enable GID PPS.
3061 	 */
3062 	wma_set_ppsconfig(userpos->smesessionId, WMA_VHT_PPS_GID_MATCH, 1);
3063 
3064 }
3065 
3066 QDF_STATUS wma_set_cts2self_for_p2p_go(void *wma_handle,
3067 				    uint32_t cts2self_for_p2p_go)
3068 {
3069 	int32_t ret;
3070 	tp_wma_handle wma = (tp_wma_handle)wma_handle;
3071 	struct pdev_params pdevparam = {};
3072 
3073 	pdevparam.param_id = wmi_pdev_param_cts2self_for_p2p_go_config;
3074 	pdevparam.param_value = cts2self_for_p2p_go;
3075 
3076 	ret = wmi_unified_pdev_param_send(wma->wmi_handle,
3077 			&pdevparam,
3078 			WMA_WILDCARD_PDEV_ID);
3079 	if (ret) {
3080 		wma_err("Fail to Set CTS2SELF for p2p GO %d",
3081 			cts2self_for_p2p_go);
3082 		return QDF_STATUS_E_FAILURE;
3083 	}
3084 
3085 	wma_nofl_debug("Successfully Set CTS2SELF for p2p GO %d",
3086 		       cts2self_for_p2p_go);
3087 
3088 	return QDF_STATUS_SUCCESS;
3089 }
3090 
3091 
3093  * wma_set_htconfig() - set ht config parameters to target
3094  * @vdev_id: vdev id
3095  * @ht_capab: ht capability
3096  * @value: value of ht param
3097  *
3098  * Return: QDF status
3099  */
3100 QDF_STATUS wma_set_htconfig(uint8_t vdev_id, uint16_t ht_capab, int value)
3101 {
3102 	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
3103 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
3104 
3105 	if (!wma)
3106 		return QDF_STATUS_E_INVAL;
3107 
3108 	switch (ht_capab) {
3109 	case WNI_CFG_HT_CAP_INFO_ADVANCE_CODING:
3110 		ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
3111 						      wmi_vdev_param_ldpc,
3112 						      value);
3113 		break;
3114 	case WNI_CFG_HT_CAP_INFO_TX_STBC:
3115 		ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
3116 						      wmi_vdev_param_tx_stbc,
3117 						      value);
3118 		break;
3119 	case WNI_CFG_HT_CAP_INFO_RX_STBC:
3120 		ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
3121 						      wmi_vdev_param_rx_stbc,
3122 						      value);
3123 		break;
3124 	case WNI_CFG_HT_CAP_INFO_SHORT_GI_20MHZ:
3125 	case WNI_CFG_HT_CAP_INFO_SHORT_GI_40MHZ:
3126 		wma_err("ht_capab = %d, value = %d", ht_capab,
3127 			 value);
3128 		ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
3129 						wmi_vdev_param_sgi, value);
3130 		if (ret == QDF_STATUS_SUCCESS)
3131 			wma->interfaces[vdev_id].config.shortgi = value;
3132 		break;
3133 	default:
3134 		wma_err("INVALID HT CONFIG");
3135 	}
3136 
3137 	return ret;
3138 }
3139 
3140 /**
3141  * wma_extract_ccmp_pn() - extract 6 byte PN from the CCMP header
3142  * @ccmp_ptr: CCMP header
3143  *
3144  * Return: PN extracted from header.
3145  */
3146 static uint64_t wma_extract_ccmp_pn(uint8_t *ccmp_ptr)
3147 {
3148 	uint8_t rsvd, key, pn[6];
3149 	uint64_t new_pn;
3150 
3151 	/*
3152 	 *   +-----+-----+------+----------+-----+-----+-----+-----+
3153 	 *   | PN0 | PN1 | rsvd | rsvd/key | PN2 | PN3 | PN4 | PN5 |
3154 	 *   +-----+-----+------+----------+-----+-----+-----+-----+
3155 	 *                   CCMP Header Format
3156 	 */
3157 
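	/*
	 * For example, the CCMP header bytes 01 00 00 20 02 00 00 00 decode
	 * to PN0 = 0x01, PN2 = 0x02, i.e. a PN of 0x20001.
	 */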
3158 	/* Extract individual bytes */
3159 	pn[0] = (uint8_t) *ccmp_ptr;
3160 	pn[1] = (uint8_t) *(ccmp_ptr + 1);
3161 	rsvd = (uint8_t) *(ccmp_ptr + 2);
3162 	key = (uint8_t) *(ccmp_ptr + 3);
3163 	pn[2] = (uint8_t) *(ccmp_ptr + 4);
3164 	pn[3] = (uint8_t) *(ccmp_ptr + 5);
3165 	pn[4] = (uint8_t) *(ccmp_ptr + 6);
3166 	pn[5] = (uint8_t) *(ccmp_ptr + 7);
3167 
3168 	/* Form 6 byte PN with 6 individual bytes of PN */
3169 	new_pn = ((uint64_t) pn[5] << 40) |
3170 		 ((uint64_t) pn[4] << 32) |
3171 		 ((uint64_t) pn[3] << 24) |
3172 		 ((uint64_t) pn[2] << 16) |
3173 		 ((uint64_t) pn[1] << 8) | ((uint64_t) pn[0] << 0);
3174 
3175 	return new_pn;
3176 }
3177 
3178 /**
3179  * wma_is_ccmp_pn_replay_attack() - detect replay attacking using PN in CCMP
3180  * @wma: wma context
3181  * @wh: 802.11 frame header
3182  * @ccmp_ptr: CCMP frame header
3183  *
3184  * Return: true/false
3185  */
3186 static bool
3187 wma_is_ccmp_pn_replay_attack(tp_wma_handle wma, struct ieee80211_frame *wh,
3188 			     uint8_t *ccmp_ptr)
3189 {
3190 	uint64_t new_pn;
3191 	bool ret = false;
3192 	struct peer_mlme_priv_obj *peer_priv;
3193 	struct wlan_objmgr_peer *peer;
3194 
3195 	new_pn = wma_extract_ccmp_pn(ccmp_ptr);
3196 
3197 	peer = wlan_objmgr_get_peer_by_mac(wma->psoc, wh->i_addr2,
3198 					   WLAN_LEGACY_WMA_ID);
3199 	if (!peer)
3200 		return ret;
3201 
3202 	peer_priv = wlan_objmgr_peer_get_comp_private_obj(peer,
3203 							  WLAN_UMAC_COMP_MLME);
3204 	if (!peer_priv) {
3205 		wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
3206 		return ret;
3207 	}
3208 
3209 	if (peer_priv->last_pn_valid) {
3210 		if (new_pn > peer_priv->last_pn) {
3211 			peer_priv->last_pn = new_pn;
3212 		} else {
3213 			wma_err_rl("PN Replay attack detected");
3214 			/* per 11W amendment, keeping track of replay attacks */
3215 			peer_priv->rmf_pn_replays += 1;
3216 			ret = true;
3217 		}
3218 	} else {
3219 		peer_priv->last_pn_valid = 1;
3220 		peer_priv->last_pn = new_pn;
3221 	}
3222 
3223 	wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
3224 
3225 	return ret;
3226 }
3227 
3228 /**
3229  * wma_process_bip() - process mmie in rmf frame
3230  * @wma_handle: wma handle
3231  * @iface: txrx node
3232  * @wh: 80211 frame
3233  * @wbuf: Buffer
3234  *
3235  * Return: 0 for success or error code
3236  */
3237 static
3238 int wma_process_bip(tp_wma_handle wma_handle, struct wma_txrx_node *iface,
3239 		    struct ieee80211_frame *wh, qdf_nbuf_t wbuf)
3240 {
3241 	uint16_t mmie_size;
3242 	uint8_t *efrm;
3243 	int32_t mgmtcipherset;
3244 	enum wlan_crypto_cipher_type key_cipher;
3245 
3246 	efrm = qdf_nbuf_data(wbuf) + qdf_nbuf_len(wbuf);
3247 
3248 	mgmtcipherset = wlan_crypto_get_param(iface->vdev,
3249 					      WLAN_CRYPTO_PARAM_MGMT_CIPHER);
3250 	if (mgmtcipherset <= 0) {
3251 		wma_err("Invalid key cipher %d", mgmtcipherset);
3252 		return -EINVAL;
3253 	}
3254 
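	/*
	 * Pick the MMIE size from the configured BIP cipher: BIP-CMAC-128
	 * carries an 8-octet MIC, while the GMAC variants carry a 16-octet
	 * MIC, so their MMIEs differ in length.
	 */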
3255 	if (mgmtcipherset & (1 << WLAN_CRYPTO_CIPHER_AES_CMAC)) {
3256 		key_cipher = WLAN_CRYPTO_CIPHER_AES_CMAC;
3257 		mmie_size = cds_get_mmie_size();
3258 	} else if (mgmtcipherset & (1 << WLAN_CRYPTO_CIPHER_AES_GMAC)) {
3259 		key_cipher = WLAN_CRYPTO_CIPHER_AES_GMAC;
3260 		mmie_size = cds_get_gmac_mmie_size();
3261 	} else if (mgmtcipherset & (1 << WLAN_CRYPTO_CIPHER_AES_GMAC_256)) {
3262 		key_cipher = WLAN_CRYPTO_CIPHER_AES_GMAC_256;
3263 		mmie_size = cds_get_gmac_mmie_size();
3264 	} else {
3265 		wma_err("Invalid key cipher %d", mgmtcipherset);
3266 		return -EINVAL;
3267 	}
3268 
3269 	/* Drop the frame if it is too short to hold the header and the MMIE */
3270 	if (efrm - (uint8_t *)wh < sizeof(*wh) + mmie_size) {
3271 		wma_err("Invalid frame length");
3272 		return -EINVAL;
3273 	}
3274 
3275 	switch (key_cipher) {
3276 	case WLAN_CRYPTO_CIPHER_AES_CMAC:
3277 		if (!wmi_service_enabled(wma_handle->wmi_handle,
3278 					 wmi_service_sta_pmf_offload)) {
3279 			if (!wlan_crypto_is_mmie_valid(iface->vdev,
3280 						       (uint8_t *)wh, efrm)) {
3281 				wma_debug("BC/MC MIC error or MMIE not present, dropping the frame");
3282 				return -EINVAL;
3283 			}
3284 		}
3285 		break;
3286 	case WLAN_CRYPTO_CIPHER_AES_GMAC:
3287 	case WLAN_CRYPTO_CIPHER_AES_GMAC_256:
3288 		if (!wmi_service_enabled(wma_handle->wmi_handle,
3289 					 wmi_service_gmac_offload_support)) {
3290 			if (!wlan_crypto_is_mmie_valid(iface->vdev,
3291 						       (uint8_t *)wh, efrm)) {
3292 				wma_debug("BC/MC GMAC MIC error or MMIE not present, dropping the frame");
3293 				return -EINVAL;
3294 			}
3295 		}
3296 		break;
3297 	default:
3298 		wma_err("Invalid key_type %d", key_cipher);
3299 		return -EINVAL;
3300 	}
3301 
3302 	qdf_nbuf_trim_tail(wbuf, mmie_size);
3303 
3304 	return 0;
3305 }
3306 
3307 /**
3308  * wma_process_rmf_frame() - process rmf frame
3309  * @wma_handle: wma handle
3310  * @iface: txrx node
3311  * @wh: 80211 frame
3312  * @rx_pkt: rx packet
3313  * @wbuf: Buffer
3314  *
3315  * Return: 0 for success or error code
3316  */
3317 static
3318 int wma_process_rmf_frame(tp_wma_handle wma_handle,
3319 	struct wma_txrx_node *iface,
3320 	struct ieee80211_frame *wh,
3321 	cds_pkt_t *rx_pkt,
3322 	qdf_nbuf_t wbuf)
3323 {
3324 	uint8_t *orig_hdr;
3325 	uint8_t *ccmp;
3326 	uint8_t mic_len, hdr_len, pdev_id;
3327 	QDF_STATUS status;
3328 
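	/*
	 * The Protected Frame (formerly WEP) bit set on a robust management
	 * frame means it arrived encrypted; unicast robust frames use the
	 * pairwise cipher (e.g. CCMP), while group-addressed ones are
	 * protected with BIP and carry an MMIE instead.
	 */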
3329 	if ((wh)->i_fc[1] & IEEE80211_FC1_WEP) {
3330 		if (QDF_IS_ADDR_BROADCAST(wh->i_addr1) ||
3331 		    IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3332 			wma_err("Encrypted BC/MC frame dropping the frame");
3333 			cds_pkt_return_packet(rx_pkt);
3334 			return -EINVAL;
3335 		}
3336 
3337 		if (iface->type == WMI_VDEV_TYPE_NDI) {
3338 			hdr_len = IEEE80211_CCMP_HEADERLEN;
3339 			mic_len = IEEE80211_CCMP_MICLEN;
3340 		} else {
3341 			pdev_id =
3342 				wlan_objmgr_pdev_get_pdev_id(wma_handle->pdev);
3343 			status = mlme_get_peer_mic_len(wma_handle->psoc,
3344 						       pdev_id, wh->i_addr2,
3345 						       &mic_len, &hdr_len);
3346 			if (QDF_IS_STATUS_ERROR(status)) {
3347 				wma_err("Failed to get mic hdr and length");
3348 				cds_pkt_return_packet(rx_pkt);
3349 				return -EINVAL;
3350 			}
3351 		}
3352 
3353 		if (qdf_nbuf_len(wbuf) < (sizeof(*wh) + hdr_len + mic_len)) {
3354 			wma_err("Buffer length less than expected %d",
3355 				 (int)qdf_nbuf_len(wbuf));
3356 			cds_pkt_return_packet(rx_pkt);
3357 			return -EINVAL;
3358 		}
3359 
3360 		orig_hdr = (uint8_t *) qdf_nbuf_data(wbuf);
3361 		/* Pointer to head of CCMP header */
3362 		ccmp = orig_hdr + sizeof(*wh);
3363 		if (wma_is_ccmp_pn_replay_attack(wma_handle, wh, ccmp)) {
3364 			wma_err_rl("Dropping the frame");
3365 			cds_pkt_return_packet(rx_pkt);
3366 			return -EINVAL;
3367 		}
3368 
3369 		/* Strip privacy headers (and trailer)
3370 		 * for a received frame
3371 		 */
3372 		qdf_mem_move(orig_hdr +
3373 			hdr_len, wh,
3374 			sizeof(*wh));
3375 		qdf_nbuf_pull_head(wbuf,
3376 			hdr_len);
3377 		qdf_nbuf_trim_tail(wbuf, mic_len);
3378 		/*
3379 		 * The CCMP header has been pulled off; reinitialize the
3380 		 * start pointer of the mac header to avoid accessing an
3381 		 * incorrect address
3382 		 */
3383 		wh = (struct ieee80211_frame *) qdf_nbuf_data(wbuf);
3384 		rx_pkt->pkt_meta.mpdu_hdr_ptr =
3385 				qdf_nbuf_data(wbuf);
3386 		rx_pkt->pkt_meta.mpdu_len = qdf_nbuf_len(wbuf);
3387 		rx_pkt->pkt_buf = wbuf;
3388 		if (rx_pkt->pkt_meta.mpdu_len >=
3389 			rx_pkt->pkt_meta.mpdu_hdr_len) {
3390 			rx_pkt->pkt_meta.mpdu_data_len =
3391 				rx_pkt->pkt_meta.mpdu_len -
3392 				rx_pkt->pkt_meta.mpdu_hdr_len;
3393 		} else {
3394 			wma_err("mpdu len %d less than hdr %d, dropping frame",
3395 				rx_pkt->pkt_meta.mpdu_len,
3396 				rx_pkt->pkt_meta.mpdu_hdr_len);
3397 			cds_pkt_return_packet(rx_pkt);
3398 			return -EINVAL;
3399 		}
3400 
3401 		if (rx_pkt->pkt_meta.mpdu_data_len > MAX_MGMT_MPDU_LEN) {
3402 			wma_err("Data Len %d greater than max, dropping frame",
3403 				rx_pkt->pkt_meta.mpdu_data_len);
3404 			cds_pkt_return_packet(rx_pkt);
3405 			return -EINVAL;
3406 		}
3407 		rx_pkt->pkt_meta.mpdu_data_ptr =
3408 		rx_pkt->pkt_meta.mpdu_hdr_ptr +
3409 		rx_pkt->pkt_meta.mpdu_hdr_len;
3410 		wma_debug("BSSID: "QDF_MAC_ADDR_FMT" tsf_delta: %u",
3411 			  QDF_MAC_ADDR_REF(wh->i_addr3),
3412 			  rx_pkt->pkt_meta.tsf_delta);
3413 	} else {
3414 		if (QDF_IS_ADDR_BROADCAST(wh->i_addr1) ||
3415 		    IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3416 			if (0 != wma_process_bip(wma_handle, iface, wh, wbuf)) {
3417 				cds_pkt_return_packet(rx_pkt);
3418 				return -EINVAL;
3419 			}
3420 		} else {
3421 			wma_err_rl("Rx unprotected unicast mgmt frame");
3422 			rx_pkt->pkt_meta.dpuFeedback =
3423 				DPU_FEEDBACK_UNPROTECTED_ERROR;
3424 		}
3425 	}
3426 	return 0;
3427 }
3428 
3429 /**
3430  * wma_get_peer_pmf_status() - Get the PMF capability of peer
3431  * @wma: wma handle
3432  * @peer_mac: peer mac addr
3433  *
3434  * Return: True if PMF is enabled, false otherwise.
3435  */
3436 static bool
3437 wma_get_peer_pmf_status(tp_wma_handle wma, uint8_t *peer_mac)
3438 {
3439 	struct wlan_objmgr_peer *peer;
3440 	bool is_pmf_enabled;
3441 
3442 	if (!peer_mac) {
3443 		wma_err("peer_mac is NULL");
3444 		return false;
3445 	}
3446 
3447 	peer = wlan_objmgr_get_peer(wma->psoc,
3448 				    wlan_objmgr_pdev_get_pdev_id(wma->pdev),
3449 				    peer_mac, WLAN_LEGACY_WMA_ID);
3450 	if (!peer) {
3451 		wma_debug("Peer of peer_mac " QDF_MAC_ADDR_FMT " not found",
3452 			  QDF_MAC_ADDR_REF(peer_mac));
3453 		return false;
3454 	}
3455 	is_pmf_enabled = mlme_get_peer_pmf_status(peer);
3456 	wlan_objmgr_peer_release_ref(peer, WLAN_LEGACY_WMA_ID);
3457 	wma_nofl_debug("get is_pmf_enabled %d for "QDF_MAC_ADDR_FMT,
3458 		       is_pmf_enabled, QDF_MAC_ADDR_REF(peer_mac));
3459 
3460 	return is_pmf_enabled;
3461 }
3462 
3463 /**
3464  * wma_check_and_process_rmf_frame() - Process the frame if it is of rmf type
3465  * @wma_handle: wma handle
3466  * @vdev_id: vdev id
3467  * @wh: double pointer to 802.11 frame header which will be updated if the
3468  *	frame is of rmf type.
3469  * @rx_pkt: rx packet
3470  * @buf: Buffer
3471  *
3472  * Process the frame as an rmf frame only if both the DUT and the peer are PMF capable
3473  *
3474  * Return: 0 for success or error code
3475  */
3476 static int
3477 wma_check_and_process_rmf_frame(tp_wma_handle wma_handle,
3478 				uint8_t vdev_id,
3479 				struct ieee80211_frame **wh,
3480 				cds_pkt_t *rx_pkt,
3481 				qdf_nbuf_t buf)
3482 {
3483 	int status;
3484 	struct wma_txrx_node *iface;
3485 	struct ieee80211_frame *hdr = *wh;
3486 
3487 	iface = &(wma_handle->interfaces[vdev_id]);
3488 	if (iface->type != WMI_VDEV_TYPE_NDI && !iface->rmfEnabled)
3489 		return 0;
3490 
3491 	if (qdf_is_macaddr_group((struct qdf_mac_addr *)(hdr->i_addr1)) ||
3492 	    qdf_is_macaddr_broadcast((struct qdf_mac_addr *)(hdr->i_addr1)) ||
3493 	    wma_get_peer_pmf_status(wma_handle, hdr->i_addr2) ||
3494 	    (iface->type == WMI_VDEV_TYPE_NDI &&
3495 	    (hdr->i_fc[1] & IEEE80211_FC1_WEP))) {
3496 		status = wma_process_rmf_frame(wma_handle, iface, hdr,
3497 					       rx_pkt, buf);
3498 		if (status)
3499 			return status;
3500 		/*
3501 		 * The CCMP header might have been pulled off; reinitialize
3502 		 * the start pointer of the mac header
3503 		 */
3504 		*wh = (struct ieee80211_frame *)qdf_nbuf_data(buf);
3505 	}
3506 
3507 	return 0;
3508 }
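/*
 * Since the helper above may strip the CCMP header in place, callers must
 * pass the 802.11 header by double pointer and keep using the possibly
 * updated value afterwards, as wma_form_rx_packet() does. A minimal
 * caller-side sketch, assuming vdev_id, rx_pkt and buf are already set up:
 *
 *	status = wma_check_and_process_rmf_frame(wma_handle, vdev_id,
 *						 &wh, rx_pkt, buf);
 *	if (status)
 *		return status;
 *	// wh now points at qdf_nbuf_data(buf); read address fields such as
 *	// wh->i_addr3 only through this refreshed pointer
 */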
3509 
3510 /**
3511  * wma_is_pkt_drop_candidate() - check if the mgmt frame should be dropped
3512  * @wma_handle: wma handle
3513  * @peer_addr: peer MAC address
3514  * @bssid: BSSID Address
3515  * @subtype: Management frame subtype
3516  *
3517  * This function is used to decide if a particular management frame should be
3518  * dropped to prevent a DoS attack, e.g. beacons received from a NAN cluster.
3519  *
3520  * Return: true if the packet should be dropped and false otherwise
3521  */
3522 static bool wma_is_pkt_drop_candidate(tp_wma_handle wma_handle,
3523 				      uint8_t *peer_addr, uint8_t *bssid,
3524 				      uint8_t subtype)
3525 {
3526 	bool should_drop = false;
3527 	uint8_t nan_addr[] = {0x50, 0x6F, 0x9A, 0x01, 0x00, 0x00};
3528 
3529 	/* Drop the beacons from NAN device */
3530 	if ((subtype == MGMT_SUBTYPE_BEACON) &&
3531 	    (!qdf_mem_cmp(nan_addr, bssid, NAN_CLUSTER_ID_BYTES))) {
3532 		should_drop = true;
3533 		goto end;
3534 	}
3535 end:
3536 	return should_drop;
3537 }
3538 
3539 #define RATE_LIMIT 16
3540 
3541 int wma_form_rx_packet(qdf_nbuf_t buf,
3542 			struct mgmt_rx_event_params *mgmt_rx_params,
3543 			cds_pkt_t *rx_pkt)
3544 {
3545 	uint8_t vdev_id = WMA_INVALID_VDEV_ID;
3546 	struct ieee80211_frame *wh;
3547 	uint8_t mgt_type, mgt_subtype;
3548 	int status;
3549 	tp_wma_handle wma_handle = (tp_wma_handle)
3550 				cds_get_context(QDF_MODULE_ID_WMA);
3551 	static uint8_t limit_prints_invalid_len = RATE_LIMIT - 1;
3552 	static uint8_t limit_prints_load_unload = RATE_LIMIT - 1;
3553 	static uint8_t limit_prints_recovery = RATE_LIMIT - 1;
3554 
3555 	if (!wma_handle) {
3556 		qdf_nbuf_free(buf);
3557 		qdf_mem_free(rx_pkt);
3558 		return -EINVAL;
3559 	}
3560 
3561 	if (!mgmt_rx_params) {
3562 		limit_prints_invalid_len++;
3563 		if (limit_prints_invalid_len == RATE_LIMIT) {
3564 			wma_debug("mgmt rx params is NULL");
3565 			limit_prints_invalid_len = 0;
3566 		}
3567 		qdf_nbuf_free(buf);
3568 		qdf_mem_free(rx_pkt);
3569 		return -EINVAL;
3570 	}
3571 
3572 	if (cds_is_load_or_unload_in_progress()) {
3573 		limit_prints_load_unload++;
3574 		if (limit_prints_load_unload == RATE_LIMIT) {
3575 			wma_debug("Load/Unload in progress");
3576 			limit_prints_load_unload = 0;
3577 		}
3578 		qdf_nbuf_free(buf);
3579 		qdf_mem_free(rx_pkt);
3580 		return -EINVAL;
3581 	}
3582 
3583 	if (cds_is_driver_recovering()) {
3584 		limit_prints_recovery++;
3585 		if (limit_prints_recovery == RATE_LIMIT) {
3586 			wma_debug("Recovery in progress");
3587 			limit_prints_recovery = 0;
3588 		}
3589 		qdf_nbuf_free(buf);
3590 		qdf_mem_free(rx_pkt);
3591 		return -EINVAL;
3592 	}
3593 
3594 	if (cds_is_driver_in_bad_state()) {
3595 		limit_prints_recovery++;
3596 		if (limit_prints_recovery == RATE_LIMIT) {
3597 			wma_debug("Driver in bad state");
3598 			limit_prints_recovery = 0;
3599 		}
3600 		qdf_nbuf_free(buf);
3601 		qdf_mem_free(rx_pkt);
3602 		return -EINVAL;
3603 	}
3604 
3605 	/*
3606 	 * Fill in meta information needed by pe/lim
3607 	 * TODO: Try to maintain rx metainfo as part of skb->data.
3608 	 */
3609 	rx_pkt->pkt_meta.frequency = mgmt_rx_params->chan_freq;
3610 	rx_pkt->pkt_meta.scan_src = mgmt_rx_params->flags;
3611 
3612 	/*
3613 	 * Derive the rssi value from the reported snr
3614 	 * using the standard noise floor of -96 dBm.
3615 	 */
3616 	rx_pkt->pkt_meta.rssi = mgmt_rx_params->snr +
3617 				WMA_NOISE_FLOOR_DBM_DEFAULT;
3618 	rx_pkt->pkt_meta.snr = mgmt_rx_params->snr;
3619 
3620 	/* If absolute rssi is available from firmware, use it */
3621 	if (mgmt_rx_params->rssi != 0)
3622 		rx_pkt->pkt_meta.rssi_raw = mgmt_rx_params->rssi;
3623 	else
3624 		rx_pkt->pkt_meta.rssi_raw = rx_pkt->pkt_meta.rssi;
3625 
3626 
3627 	/*
3628 	 * FIXME: Assigning the local timestamp as hw timestamp is not
3629 	 * FIXME: Assigning the local timestamp since the hw timestamp is
3630 	 * not available. Need to check whether pe/lim really uses this data.
3631 	rx_pkt->pkt_meta.timestamp = (uint32_t) jiffies;
3632 	rx_pkt->pkt_meta.mpdu_hdr_len = sizeof(struct ieee80211_frame);
3633 	rx_pkt->pkt_meta.mpdu_len = mgmt_rx_params->buf_len;
3634 
3635 	/*
3636 	 * The buf_len must be at least the 802.11 header length
3637 	 */
3638 	if (mgmt_rx_params->buf_len < rx_pkt->pkt_meta.mpdu_hdr_len) {
3639 		wma_err("MPDU len %d less than header len %d",
3640 			 mgmt_rx_params->buf_len,
3641 			 rx_pkt->pkt_meta.mpdu_hdr_len);
3642 		qdf_nbuf_free(buf);
3643 		qdf_mem_free(rx_pkt);
3644 		return -EINVAL;
3645 	}
3646 
3647 	rx_pkt->pkt_meta.mpdu_data_len = mgmt_rx_params->buf_len -
3648 					 rx_pkt->pkt_meta.mpdu_hdr_len;
3649 
3650 	rx_pkt->pkt_meta.roamCandidateInd = 0;
3651 
3652 	wh = (struct ieee80211_frame *)qdf_nbuf_data(buf);
3653 
3654 	/*
3655 	 * If the mpdu_data_len is greater than Max (2k), drop the frame
3656 	 */
3657 	if (rx_pkt->pkt_meta.mpdu_data_len > MAX_MGMT_MPDU_LEN) {
3658 		wma_err("Data Len %d greater than max, dropping frame from "QDF_MAC_ADDR_FMT,
3659 			 rx_pkt->pkt_meta.mpdu_data_len,
3660 			 QDF_MAC_ADDR_REF(wh->i_addr3));
3661 		qdf_nbuf_free(buf);
3662 		qdf_mem_free(rx_pkt);
3663 		return -EINVAL;
3664 	}
3665 
3666 	rx_pkt->pkt_meta.mpdu_hdr_ptr = qdf_nbuf_data(buf);
3667 	rx_pkt->pkt_meta.mpdu_data_ptr = rx_pkt->pkt_meta.mpdu_hdr_ptr +
3668 					 rx_pkt->pkt_meta.mpdu_hdr_len;
3669 	rx_pkt->pkt_meta.tsf_delta = mgmt_rx_params->tsf_delta;
3670 	rx_pkt->pkt_buf = buf;
3671 	rx_pkt->pkt_meta.pkt_qdf_buf = buf;
3672 
3673 	/* If it is a beacon/probe response, save it for future use */
3674 	mgt_type = (wh)->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3675 	mgt_subtype = (wh)->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3676 
3677 	if (mgt_type == IEEE80211_FC0_TYPE_MGT &&
3678 	    (mgt_subtype == MGMT_SUBTYPE_DISASSOC ||
3679 	     mgt_subtype == MGMT_SUBTYPE_DEAUTH ||
3680 	     mgt_subtype == MGMT_SUBTYPE_ACTION)) {
3681 		if (wma_find_vdev_id_by_bssid(wma_handle, wh->i_addr3,
3682 					      &vdev_id) == QDF_STATUS_SUCCESS) {
3683 			status = wma_check_and_process_rmf_frame(wma_handle,
3684 								 vdev_id,
3685 								 &wh,
3686 								 rx_pkt,
3687 								 buf);
3688 			if (status)
3689 				return status;
3690 		} else if (wma_find_vdev_id_by_addr(wma_handle, wh->i_addr1,
3691 					      &vdev_id) == QDF_STATUS_SUCCESS) {
3692 			status = wma_check_and_process_rmf_frame(wma_handle,
3693 								 vdev_id,
3694 								 &wh,
3695 								 rx_pkt,
3696 								 buf);
3697 			if (status)
3698 				return status;
3699 		}
3700 	}
3701 
3702 	rx_pkt->pkt_meta.session_id =
3703 		(vdev_id == WMA_INVALID_VDEV_ID ? 0 : vdev_id);
3704 
3705 	if (mgt_type == IEEE80211_FC0_TYPE_MGT &&
3706 	    (mgt_subtype == MGMT_SUBTYPE_BEACON ||
3707 	     mgt_subtype == MGMT_SUBTYPE_PROBE_RESP)) {
3708 		if (mgmt_rx_params->buf_len <=
3709 			(sizeof(struct ieee80211_frame) +
3710 			offsetof(struct wlan_bcn_frame, ie))) {
3711 			wma_debug("Dropping frame from "QDF_MAC_ADDR_FMT,
3712 				 QDF_MAC_ADDR_REF(wh->i_addr3));
3713 			cds_pkt_return_packet(rx_pkt);
3714 			return -EINVAL;
3715 		}
3716 	}
3717 
3718 	if (wma_is_pkt_drop_candidate(wma_handle, wh->i_addr2, wh->i_addr3,
3719 					mgt_subtype)) {
3720 		cds_pkt_return_packet(rx_pkt);
3721 		return -EINVAL;
3722 	}
3723 	wma_mgmt_pktdump_rx_handler(mgmt_rx_params, rx_pkt,
3724 				    wma_handle, mgt_type, mgt_subtype);
3725 
3726 	return 0;
3727 }
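/*
 * A worked example of the meta fields computed above, using made-up input
 * values. With the standard noise floor of -96 dBm, a reported snr of 35
 * gives:
 *
 *	rssi     = snr + WMA_NOISE_FLOOR_DBM_DEFAULT = 35 + (-96) = -61 dBm
 *	rssi_raw = firmware rssi if non-zero, otherwise the derived -61 dBm
 *
 * and for a frame with buf_len of 230 bytes and a 24-byte 802.11 header:
 *
 *	mpdu_hdr_len  = sizeof(struct ieee80211_frame) = 24
 *	mpdu_data_len = 230 - 24 = 206
 *	mpdu_data_ptr = mpdu_hdr_ptr + 24
 */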
3728 
3729 /**
3730  * wma_mem_endianness_based_copy() - does memory copy from src to dst
3731  * @dst: destination address
3732  * @src: source address
3733  * @size: size to be copied
3734  *
3735  * This function copies the memory of size passed from source
3736  * address to destination address.
3737  *
3738  * Return: Nothing
3739  */
3740 #ifdef BIG_ENDIAN_HOST
3741 static void wma_mem_endianness_based_copy(
3742 			uint8_t *dst, uint8_t *src, uint32_t size)
3743 {
3744 	/*
3745 	 * For a big endian host, copy engine byte_swap is enabled,
3746 	 * but the rx mgmt frame buffer content is in network byte order.
3747 	 * Byte swap the mgmt frame buffer content here so that, when the
3748 	 * copy engine does its byte_swap, the host gets the buffer content
3749 	 * in the correct byte order.
3750 	 */
3751 
3752 	uint32_t i;
3753 	uint32_t *destp, *srcp;
3754 
3755 	destp = (uint32_t *) dst;
3756 	srcp = (uint32_t *) src;
3757 	for (i = 0; i < (roundup(size, sizeof(uint32_t)) / 4); i++) {
3758 		*destp = cpu_to_le32(*srcp);
3759 		destp++;
3760 		srcp++;
3761 	}
3762 }
3763 #else
3764 static void wma_mem_endianness_based_copy(
3765 			uint8_t *dst, uint8_t *src, uint32_t size)
3766 {
3767 	qdf_mem_copy(dst, src, size);
3768 }
3769 #endif
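/*
 * On a big endian host the copy above walks the buffer in 32-bit words so
 * that the copy engine's own byte_swap restores network byte order. A
 * worked example with an assumed size of 10 bytes:
 *
 *	roundup(10, sizeof(uint32_t)) / 4 = 12 / 4 = 3 iterations
 *
 * i.e. three 32-bit words (bytes 0..11) are swapped with cpu_to_le32(),
 * so the destination must be padded to a 4-byte multiple; the rx path in
 * wma_mgmt_rx_process() rounds its allocation up for this reason.
 */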
3770 
3771 #define RESERVE_BYTES                   100
3772 /**
3773  * wma_mgmt_rx_process() - process management rx frame.
3774  * @handle: wma handle
3775  * @data: rx data
3776  * @data_len: data length
3777  *
3778  * Return: 0 for success or error code
3779  */
3780 static int wma_mgmt_rx_process(void *handle, uint8_t *data,
3781 				  uint32_t data_len)
3782 {
3783 	tp_wma_handle wma_handle = (tp_wma_handle) handle;
3784 	struct mgmt_rx_event_params *mgmt_rx_params;
3785 	struct wlan_objmgr_psoc *psoc;
3786 	uint8_t *bufp;
3787 	qdf_nbuf_t wbuf;
3788 	QDF_STATUS status;
3789 
3790 	if (wma_validate_handle(wma_handle))
3791 		return -EINVAL;
3792 
3793 	mgmt_rx_params = qdf_mem_malloc(sizeof(*mgmt_rx_params));
3794 	if (!mgmt_rx_params)
3795 		return -ENOMEM;
3797 
3798 	if (wmi_extract_mgmt_rx_params(wma_handle->wmi_handle,
3799 			data, mgmt_rx_params, &bufp) != QDF_STATUS_SUCCESS) {
3800 		wma_err_rl("Extraction of mgmt rx params failed");
3801 		qdf_mem_free(mgmt_rx_params);
3802 		return -EINVAL;
3803 	}
3804 
3805 	if (mgmt_rx_params->buf_len > data_len ||
3806 	    !mgmt_rx_params->buf_len ||
3807 	    !bufp) {
3808 		wma_err_rl("Invalid data_len %u, buf_len %u bufp %pK",
3809 			   data_len, mgmt_rx_params->buf_len, bufp);
3810 		qdf_mem_free(mgmt_rx_params);
3811 		return -EINVAL;
3812 	}
3813 
3814 	if (!mgmt_rx_params->chan_freq) {
3815 		/*
3816 		 * A zero chan_freq indicates a legacy FW operating on channel
3817 		 * numbers rather than frequencies. It also implies BAND_6G is
3818 		 * not supported, since BAND_6G works only with frequencies, so
3819 		 * channel numbers can be treated as unique here.
3820 		 */
3821 		mgmt_rx_params->chan_freq = wlan_reg_legacy_chan_to_freq(
3822 					    wma_handle->pdev,
3823 					    mgmt_rx_params->channel);
3824 	}
3825 
3826 	mgmt_rx_params->pdev_id = 0;
3827 	mgmt_rx_params->rx_params = NULL;
3828 
3829 	/*
3830 	 * Allocate the memory for this rx packet and add an extra 100 bytes:
3831 	 *
3832 	 * 1.  Some APs advertise the RSN IE length with 2 extra bytes for the
3833 	 *     RSN capabilities but don't fill in the capability data, which
3834 	 *     makes the unpack core fail due to a length mismatch. See
3835 	 *     sir_validate_and_rectify_ies for more info.
3836 	 *
3837 	 * 2.  In wma_process_rmf_frame(), the driver trims the CCMP header by
3838 	 *     overwriting the IEEE header onto the memory occupied by the CCMP
3839 	 *     header, but an overflow is possible if the memory allocated to
3840 	 *     the frame is less than sizeof(struct ieee80211_frame) + the CCMP
3841 	 *     header length; allocating 100 extra bytes avoids this too.
3842 	 *
3843 	 * 3.  The CCMP header pointer is set to orig_hdr +
3844 	 *     sizeof(struct ieee80211_frame), which could also result in OOB
3845 	 *     access if the data length is less than
3846 	 *     sizeof(struct ieee80211_frame); the extra bytes prevent
3847 	 *     that as well.
3848 	 */
3849 	wbuf = qdf_nbuf_alloc(NULL, roundup(mgmt_rx_params->buf_len +
3850 							RESERVE_BYTES,
3851 							4), 0, 4, false);
3852 	if (!wbuf) {
3853 		qdf_mem_free(mgmt_rx_params);
3854 		return -ENOMEM;
3855 	}
3856 
3857 	qdf_nbuf_put_tail(wbuf, mgmt_rx_params->buf_len);
3858 	qdf_nbuf_set_protocol(wbuf, ETH_P_CONTROL);
3859 
3860 	qdf_mem_zero(((uint8_t *)qdf_nbuf_data(wbuf) + mgmt_rx_params->buf_len),
3861 		     (roundup(mgmt_rx_params->buf_len + RESERVE_BYTES, 4) -
3862 		     mgmt_rx_params->buf_len));
3863 
3864 	wma_mem_endianness_based_copy(qdf_nbuf_data(wbuf),
3865 			bufp, mgmt_rx_params->buf_len);
3866 
3867 	psoc = wma_handle->psoc;
3869 	if (!psoc) {
3870 		wma_err("psoc ctx is NULL");
3871 		qdf_nbuf_free(wbuf);
3872 		qdf_mem_free(mgmt_rx_params);
3873 		return -EINVAL;
3874 	}
3875 
3876 	status = mgmt_txrx_rx_handler(psoc, wbuf, mgmt_rx_params);
3877 	if (status != QDF_STATUS_SUCCESS) {
3878 		qdf_mem_free(mgmt_rx_params);
3879 		return -EINVAL;
3880 	}
3881 
3882 	qdf_mem_free(mgmt_rx_params);
3883 	return 0;
3884 }
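/*
 * A worked example of the allocation sizing above, assuming a mgmt frame
 * with buf_len of 230 bytes:
 *
 *	alloc size = roundup(230 + RESERVE_BYTES, 4)
 *		   = roundup(330, 4) = 332 bytes
 *
 * qdf_nbuf_put_tail() then exposes only the 230 valid bytes, and the
 * remaining 102 bytes are zeroed so that the CCMP/RSN-IE over-reads
 * described in the comment above land in zeroed padding rather than in
 * out-of-bounds memory.
 */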
3885 
3886 /**
3887  * wma_de_register_mgmt_frm_client() - deregister the management frame rx handler
3888  *
3889  * This function deregisters the event handler registered for
3890  * WMI_MGMT_RX_EVENTID.
3891  *
3892  * Return: QDF status
3893  */
3894 QDF_STATUS wma_de_register_mgmt_frm_client(void)
3895 {
3896 	tp_wma_handle wma_handle = (tp_wma_handle)
3897 				cds_get_context(QDF_MODULE_ID_WMA);
3898 
3899 	if (!wma_handle)
3900 		return QDF_STATUS_E_NULL_VALUE;
3901 
3902 #ifdef QCA_WIFI_FTM
3903 	if (cds_get_conparam() == QDF_GLOBAL_FTM_MODE)
3904 		return QDF_STATUS_SUCCESS;
3905 #endif
3906 
3907 	if (wmi_unified_unregister_event_handler(wma_handle->wmi_handle,
3908 						 wmi_mgmt_rx_event_id) != 0) {
3909 		wma_err("Failed to Unregister rx mgmt handler with wmi");
3910 		return QDF_STATUS_E_FAILURE;
3911 	}
3912 	return QDF_STATUS_SUCCESS;
3913 }
3914 
3915 #ifdef WLAN_FEATURE_ROAM_OFFLOAD
3916 /**
3917  * wma_register_roaming_callbacks() - Register roaming callbacks
3918  * @csr_roam_auth_event_handle_cb: CSR callback routine pointer
3919  * @pe_roam_synch_cb: PE roam synch callback routine pointer
 * @pe_disconnect_cb: PE disconnect indication callback routine pointer
3920  *
3921  * Register the SME and PE callback routines with WMA for
3922  * handling roaming
3923  *
3924  * Return: Success or Failure Status
3925  */
3926 QDF_STATUS wma_register_roaming_callbacks(
3927 	QDF_STATUS (*csr_roam_auth_event_handle_cb)(struct mac_context *mac,
3928 						    uint8_t vdev_id,
3929 						    struct qdf_mac_addr bssid,
3930 						    uint32_t akm),
3931 	pe_roam_synch_fn_t pe_roam_synch_cb,
3932 	QDF_STATUS (*pe_disconnect_cb) (struct mac_context *mac,
3933 					uint8_t vdev_id,
3934 					uint8_t *deauth_disassoc_frame,
3935 					uint16_t deauth_disassoc_frame_len,
3936 					uint16_t reason_code))
3937 {
3938 
3939 	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
3940 
3941 	if (!wma)
3942 		return QDF_STATUS_E_FAILURE;
3943 
3944 	wma->csr_roam_auth_event_handle_cb = csr_roam_auth_event_handle_cb;
3945 	wma->pe_roam_synch_cb = pe_roam_synch_cb;
3946 	wma->pe_disconnect_cb = pe_disconnect_cb;
3947 	wma_debug("Registered roam synch callbacks with WMA successfully");
3948 
3949 	return QDF_STATUS_SUCCESS;
3950 }
3951 #endif
3952 
3953 /**
3954  * wma_register_mgmt_frm_client() - register management frame callback
3955  *
3956  * This function registers event handler for WMI_MGMT_RX_EVENTID.
3957  *
3958  * Return: QDF status
3959  */
3960 QDF_STATUS wma_register_mgmt_frm_client(void)
3961 {
3962 	tp_wma_handle wma_handle = (tp_wma_handle)
3963 				cds_get_context(QDF_MODULE_ID_WMA);
3964 
3965 	if (!wma_handle)
3966 		return QDF_STATUS_E_NULL_VALUE;
3967 
3968 	if (wmi_unified_register_event_handler(wma_handle->wmi_handle,
3969 					       wmi_mgmt_rx_event_id,
3970 					       wma_mgmt_rx_process,
3971 					       WMA_RX_WORK_CTX) != 0) {
3972 		wma_err("Failed to register rx mgmt handler with wmi");
3973 		return QDF_STATUS_E_FAILURE;
3974 	}
3975 
3976 	return QDF_STATUS_SUCCESS;
3977 }
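/*
 * wma_register_mgmt_frm_client() and wma_de_register_mgmt_frm_client()
 * are expected to be used as a pair around the lifetime of the WMA rx
 * path. The call sites below are hypothetical and only illustrate the
 * intended ordering:
 *
 *	// during WMA bring-up
 *	if (wma_register_mgmt_frm_client() != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAILURE;
 *	...
 *	// during teardown
 *	wma_de_register_mgmt_frm_client();
 */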
3978 
3979 /**
3980  * wma_register_packetdump_callback() - stores tx and rx mgmt packet dump
3981  *   callback handler
3982  * @tx_cb: tx mgmt packetdump cb
3983  * @rx_cb: rx mgmt packetdump cb
3984  *
3985  * This function is used to store tx and rx mgmt. packet dump callback
3986  *
3987  * Return: None
3988  *
3989  */
3990 void wma_register_packetdump_callback(
3991 	ol_txrx_pktdump_cb tx_cb,
3992 	ol_txrx_pktdump_cb rx_cb)
3993 {
3994 	tp_wma_handle wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
3995 
3996 	if (!wma_handle)
3997 		return;
3998 
3999 	wma_handle->wma_mgmt_tx_packetdump_cb = tx_cb;
4000 	wma_handle->wma_mgmt_rx_packetdump_cb = rx_cb;
4001 }
4002 
4003 /**
4004  * wma_deregister_packetdump_callback() - removes tx and rx mgmt packet dump
4005  *   callback handler
4006  *
4007  * This function is used to remove tx and rx mgmt. packet dump callback
4008  *
4009  * Return: None
4010  *
4011  */
4012 void wma_deregister_packetdump_callback(void)
4013 {
4014 	tp_wma_handle wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
4015 
4016 	if (!wma_handle)
4017 		return;
4018 
4019 	wma_handle->wma_mgmt_tx_packetdump_cb = NULL;
4020 	wma_handle->wma_mgmt_rx_packetdump_cb = NULL;
4021 }
4022 
4023 QDF_STATUS wma_mgmt_unified_cmd_send(struct wlan_objmgr_vdev *vdev,
4024 				qdf_nbuf_t buf, uint32_t desc_id,
4025 				void *mgmt_tx_params)
4026 {
4027 	tp_wma_handle wma_handle;
4028 	int ret;
4029 	QDF_STATUS status = QDF_STATUS_E_INVAL;
4030 	struct wmi_mgmt_params *mgmt_params =
4031 			(struct wmi_mgmt_params *)mgmt_tx_params;
4032 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
4033 
4034 	if (!mgmt_params) {
4035 		wma_err("mgmt_params ptr passed is NULL");
4036 		return QDF_STATUS_E_INVAL;
4037 	}
4038 	mgmt_params->desc_id = desc_id;
4039 
4040 	if (!vdev) {
4041 		wma_err("vdev ptr passed is NULL");
4042 		return QDF_STATUS_E_INVAL;
4043 	}
4044 
4045 	wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
4046 	if (!wma_handle)
4047 		return QDF_STATUS_E_INVAL;
4048 
4049 	if (wmi_service_enabled(wma_handle->wmi_handle,
4050 				   wmi_service_mgmt_tx_wmi)) {
4051 		status = wmi_mgmt_unified_cmd_send(wma_handle->wmi_handle,
4052 						   mgmt_params);
4053 	} else {
4054 		QDF_NBUF_CB_MGMT_TXRX_DESC_ID(buf)
4055 						= mgmt_params->desc_id;
4056 
4057 		ret = cdp_mgmt_send_ext(soc, mgmt_params->vdev_id, buf,
4058 					mgmt_params->tx_type,
4059 					mgmt_params->use_6mbps,
4060 					mgmt_params->chanfreq);
4061 		status = qdf_status_from_os_return(ret);
4062 	}
4063 
4064 	if (status != QDF_STATUS_SUCCESS) {
4065 		wma_err("mgmt tx failed");
4066 		return status;
4067 	}
4068 
4069 	return QDF_STATUS_SUCCESS;
4070 }
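/*
 * A minimal caller-side sketch of the unified mgmt tx entry point above;
 * the values and surrounding setup are hypothetical, and only the fields
 * actually referenced in this function (vdev_id, tx_type, use_6mbps,
 * chanfreq, plus desc_id via the desc_id argument) are shown:
 *
 *	struct wmi_mgmt_params params = {0};
 *
 *	params.vdev_id = vdev_id;
 *	params.chanfreq = chan_freq;	// channel to transmit the frame on
 *	params.use_6mbps = 1;
 *	params.tx_type = 0;
 *	status = wma_mgmt_unified_cmd_send(vdev, buf, desc_id, &params);
 */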
4071 
4072 #ifndef CONFIG_HL_SUPPORT
4073 void wma_mgmt_nbuf_unmap_cb(struct wlan_objmgr_pdev *pdev,
4074 			    qdf_nbuf_t buf)
4075 {
4076 	struct wlan_objmgr_psoc *psoc;
4077 	qdf_device_t dev;
4078 
4079 	if (!buf)
4080 		return;
4081 
4082 	psoc = wlan_pdev_get_psoc(pdev);
4083 	if (!psoc) {
4084 		wma_err("Psoc handle NULL");
4085 		return;
4086 	}
4087 
4088 	dev = wlan_psoc_get_qdf_dev(psoc);
4089 	qdf_nbuf_unmap_single(dev, buf, QDF_DMA_TO_DEVICE);
4090 }
4091 
4092 QDF_STATUS wma_mgmt_frame_fill_peer_cb(struct wlan_objmgr_peer *peer,
4093 				       qdf_nbuf_t buf)
4094 {
4095 	struct wlan_objmgr_psoc *psoc;
4096 	struct wlan_objmgr_pdev *pdev;
4097 
4098 	psoc = wlan_peer_get_psoc(peer);
4099 	if (!psoc) {
4100 		wma_err("Psoc handle NULL");
4101 		return QDF_STATUS_E_INVAL;
4102 	}
4103 
4104 	pdev = wlan_objmgr_get_pdev_by_id((struct wlan_objmgr_psoc *)psoc,
4105 					  wlan_peer_get_pdev_id(peer),
4106 					  WLAN_LEGACY_WMA_ID);
4107 	if (!pdev) {
4108 		wma_err("Pdev handle NULL");
4109 		return QDF_STATUS_E_INVAL;
4110 	}
4111 	wma_mgmt_nbuf_unmap_cb(pdev, buf);
4112 	wlan_objmgr_pdev_release_ref(pdev, WLAN_LEGACY_WMA_ID);
4113 
4114 	return QDF_STATUS_SUCCESS;
4115 }
4116 
4117 QDF_STATUS
4118 wma_update_edca_pifs_param(WMA_HANDLE handle,
4119 			   struct edca_pifs_vparam *edca_pifs_param)
4120 {
4121 	tp_wma_handle wma_handle = (tp_wma_handle) handle;
4122 	QDF_STATUS status;
4123 
4124 	status = wmi_unified_update_edca_pifs_param(wma_handle->wmi_handle,
4125 						    edca_pifs_param);
4126 
4127 	if (QDF_IS_STATUS_ERROR(status))
4128 		wma_err("Failed to set EDCA/PIFS Parameters");
4129 
4130 	return status;
4131 }
4132 #endif
4133 
4134 QDF_STATUS
4135 wma_update_bss_peer_phy_mode(struct wlan_channel *des_chan,
4136 			     struct wlan_objmgr_vdev *vdev)
4137 {
4138 	struct wlan_objmgr_peer *bss_peer;
4139 	enum wlan_phymode old_peer_phymode, new_phymode;
4140 	tSirNwType nw_type;
4141 	struct vdev_mlme_obj *mlme_obj;
4142 
4143 	bss_peer = wlan_objmgr_vdev_try_get_bsspeer(vdev, WLAN_LEGACY_WMA_ID);
4144 	if (!bss_peer) {
4145 		wma_err("not able to find bss peer for vdev %d",
4146 			wlan_vdev_get_id(vdev));
4147 		return QDF_STATUS_E_INVAL;
4148 	}
4149 
4150 	old_peer_phymode = wlan_peer_get_phymode(bss_peer);
4151 
4152 	if (WLAN_REG_IS_24GHZ_CH_FREQ(des_chan->ch_freq)) {
4153 		if (des_chan->ch_phymode == WLAN_PHYMODE_11B ||
4154 		    old_peer_phymode == WLAN_PHYMODE_11B)
4155 			nw_type = eSIR_11B_NW_TYPE;
4156 		else
4157 			nw_type = eSIR_11G_NW_TYPE;
4158 	} else {
4159 		nw_type = eSIR_11A_NW_TYPE;
4160 	}
4161 
4162 	new_phymode = wma_peer_phymode(nw_type, STA_ENTRY_PEER,
4163 				       IS_WLAN_PHYMODE_HT(old_peer_phymode),
4164 				       des_chan->ch_width,
4165 				       IS_WLAN_PHYMODE_VHT(old_peer_phymode),
4166 				       IS_WLAN_PHYMODE_HE(old_peer_phymode),
4167 				       wma_is_phymode_eht(old_peer_phymode));
4168 
4169 	if (new_phymode == old_peer_phymode) {
4170 		wma_debug("Ignore update, old %d and new %d phymode are same, vdev_id : %d",
4171 			  old_peer_phymode, new_phymode,
4172 			  wlan_vdev_get_id(vdev));
4173 		wlan_objmgr_peer_release_ref(bss_peer, WLAN_LEGACY_WMA_ID);
4174 		return QDF_STATUS_SUCCESS;
4175 	}
4176 
4177 	mlme_obj = wlan_vdev_mlme_get_cmpt_obj(vdev);
4178 	if (!mlme_obj) {
4179 		wma_err("not able to get mlme_obj");
4180 		wlan_objmgr_peer_release_ref(bss_peer, WLAN_LEGACY_WMA_ID);
4181 		return QDF_STATUS_E_INVAL;
4182 	}
4183 
4184 	wlan_peer_obj_lock(bss_peer);
4185 	wlan_peer_set_phymode(bss_peer, new_phymode);
4186 	wlan_peer_obj_unlock(bss_peer);
4187 
4188 	wlan_objmgr_peer_release_ref(bss_peer, WLAN_LEGACY_WMA_ID);
4189 
4190 	mlme_obj->mgmt.generic.phy_mode = wmi_host_to_fw_phymode(new_phymode);
4191 	des_chan->ch_phymode = new_phymode;
4192 
4193 	return QDF_STATUS_SUCCESS;
4194 }
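/*
 * A worked example of the nw_type selection above, with assumed inputs.
 * For a 2.4 GHz channel (e.g. ch_freq 2437):
 *
 *	ch_phymode or old peer phymode is WLAN_PHYMODE_11B -> eSIR_11B_NW_TYPE
 *	otherwise                                          -> eSIR_11G_NW_TYPE
 *
 * while any non-2.4 GHz frequency (e.g. 5180) maps to eSIR_11A_NW_TYPE.
 * The old peer phymode then only contributes its HT/VHT/HE/EHT capability
 * bits to wma_peer_phymode(), the intent being that a channel width change
 * keeps the peer on the same wireless standard at the new width.
 */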
4195