xref: /wlan-dirver/qcacld-3.0/core/wma/src/wma_data.c (revision 283bc5d0a54ddd1cd54de553e3c2b14b84b3b957)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  *  DOC:    wma_data.c
22  *  This file contains tx/rx and data path related functions.
23  */
24 
25 /* Header files */
26 
27 #include "wma.h"
28 #include "enet.h"
29 #include "wma_api.h"
30 #include "cds_api.h"
31 #include "wmi_unified_api.h"
32 #include "wlan_qct_sys.h"
33 #include "wni_api.h"
34 #include "ani_global.h"
35 #include "wmi_unified.h"
36 #include "wni_cfg.h"
37 #include <cdp_txrx_tx_throttle.h>
38 #if defined(CONFIG_HL_SUPPORT)
39 #include "wlan_tgt_def_config_hl.h"
40 #else
41 #include "wlan_tgt_def_config.h"
42 #endif
43 #include "qdf_nbuf.h"
44 #include "qdf_types.h"
45 #include "qdf_mem.h"
46 #include "qdf_util.h"
47 
48 #include "wma_types.h"
49 #include "lim_api.h"
50 #include "lim_session_utils.h"
51 
52 #include "cds_utils.h"
53 
54 #if !defined(REMOVE_PKT_LOG)
55 #include "pktlog_ac.h"
56 #endif /* REMOVE_PKT_LOG */
57 
58 #include "dbglog_host.h"
59 #include "csr_api.h"
60 #include "ol_fw.h"
61 
62 #include "wma_internal.h"
63 #include "cdp_txrx_flow_ctrl_legacy.h"
64 #include "cdp_txrx_cmn.h"
65 #include "cdp_txrx_misc.h"
66 #include <cdp_txrx_peer_ops.h>
67 #include <cdp_txrx_cfg.h>
68 #include "cdp_txrx_stats.h"
69 #include <cdp_txrx_misc.h>
70 #include "wlan_mgmt_txrx_utils_api.h"
71 #include "wlan_objmgr_psoc_obj.h"
72 #include "wlan_objmgr_pdev_obj.h"
73 #include "wlan_objmgr_vdev_obj.h"
74 #include "wlan_objmgr_peer_obj.h"
75 #include <cdp_txrx_handle.h>
76 #include "cfg_ucfg_api.h"
77 #include "wlan_policy_mgr_ucfg.h"
78 #include <wlan_pmo_ucfg_api.h>
79 #include "wlan_lmac_if_api.h"
80 #include <wlan_cp_stats_mc_ucfg_api.h>
81 #include <wlan_crypto_global_api.h>
82 #include <wlan_mlme_main.h>
83 #include <wlan_cm_api.h>
84 #include "wlan_pkt_capture_ucfg_api.h"
85 #include "wma_eht.h"
86 #include "wlan_mlo_mgr_sta.h"
87 #include "wlan_fw_offload_main.h"
88 #include "target_if_fwol.h"
89 
/* One entry of a rate-search table: rates are expressed in units of
 * mbps x 10 (e.g. 540 == 54 Mbps) and tables are sorted descending.
 */
struct wma_search_rate {
	int32_t rate;   /* PHY rate in mbps x 10 units */
	uint8_t flag;   /* table-specific encoded rate/MCS index */
};
94 
#define WMA_MAX_OFDM_CCK_RATE_TBL_SIZE 12
/* Legacy rates, descending, in mbps x 10 units.
 * In ofdm_cck_rate_tbl->flag, if bit 7 is 1 the entry is CCK, otherwise
 * it is OFDM. The lower bits carry the OFDM/CCK index used for encoding
 * the rate.
 */
static struct wma_search_rate ofdm_cck_rate_tbl[WMA_MAX_OFDM_CCK_RATE_TBL_SIZE] = {
	{540, 4},               /* 4: OFDM 54 Mbps */
	{480, 0},               /* 0: OFDM 48 Mbps */
	{360, 5},               /* 5: OFDM 36 Mbps */
	{240, 1},               /* 1: OFDM 24 Mbps */
	{180, 6},               /* 6: OFDM 18 Mbps */
	{120, 2},               /* 2: OFDM 12 Mbps */
	{110, (1 << 7)},        /* 0: CCK 11 Mbps Long */
	{90, 7},                /* 7: OFDM 9 Mbps  */
	{60, 3},                /* 3: OFDM 6 Mbps  */
	{55, ((1 << 7) | 1)},   /* 1: CCK 5.5 Mbps Long */
	{20, ((1 << 7) | 2)},   /* 2: CCK 2 Mbps Long   */
	{10, ((1 << 7) | 3)} /* 3: CCK 1 Mbps Long   */
};
113 
#define WMA_MAX_VHT20_RATE_TBL_SIZE 9
/* VHT 20 MHz, short GI (400 ns): flag carries the MCS index used for
 * encoding the rate; rates in mbps x 10 units, descending.
 */
static struct wma_search_rate vht20_400ns_rate_tbl[WMA_MAX_VHT20_RATE_TBL_SIZE] = {
	{867, 8},               /* MCS8 1SS short GI */
	{722, 7},               /* MCS7 1SS short GI */
	{650, 6},               /* MCS6 1SS short GI */
	{578, 5},               /* MCS5 1SS short GI */
	{433, 4},               /* MCS4 1SS short GI */
	{289, 3},               /* MCS3 1SS short GI */
	{217, 2},               /* MCS2 1SS short GI */
	{144, 1},               /* MCS1 1SS short GI */
	{72, 0} /* MCS0 1SS short GI */
};

/* VHT 20 MHz, long GI (800 ns): flag carries the MCS index used for
 * encoding the rate.
 */
static struct wma_search_rate vht20_800ns_rate_tbl[WMA_MAX_VHT20_RATE_TBL_SIZE] = {
	{780, 8},               /* MCS8 1SS long GI */
	{650, 7},               /* MCS7 1SS long GI */
	{585, 6},               /* MCS6 1SS long GI */
	{520, 5},               /* MCS5 1SS long GI */
	{390, 4},               /* MCS4 1SS long GI */
	{260, 3},               /* MCS3 1SS long GI */
	{195, 2},               /* MCS2 1SS long GI */
	{130, 1},               /* MCS1 1SS long GI */
	{65, 0} /* MCS0 1SS long GI */
};
140 
#define WMA_MAX_VHT40_RATE_TBL_SIZE 10
/* VHT 40 MHz, short GI (400 ns): flag carries the MCS index used for
 * encoding the rate; rates in mbps x 10 units, descending.
 */
static struct wma_search_rate vht40_400ns_rate_tbl[WMA_MAX_VHT40_RATE_TBL_SIZE] = {
	{2000, 9},              /* MCS9 1SS short GI */
	{1800, 8},              /* MCS8 1SS short GI */
	{1500, 7},              /* MCS7 1SS short GI */
	{1350, 6},              /* MCS6 1SS short GI */
	{1200, 5},              /* MCS5 1SS short GI */
	{900, 4},               /* MCS4 1SS short GI */
	{600, 3},               /* MCS3 1SS short GI */
	{450, 2},               /* MCS2 1SS short GI */
	{300, 1},               /* MCS1 1SS short GI */
	{150, 0},               /* MCS0 1SS short GI */
};

/* VHT 40 MHz, long GI (800 ns): flag carries the MCS index used for
 * encoding the rate.
 */
static struct wma_search_rate vht40_800ns_rate_tbl[WMA_MAX_VHT40_RATE_TBL_SIZE] = {
	{1800, 9},              /* MCS9 1SS long GI */
	{1620, 8},              /* MCS8 1SS long GI */
	{1350, 7},              /* MCS7 1SS long GI */
	{1215, 6},              /* MCS6 1SS long GI */
	{1080, 5},              /* MCS5 1SS long GI */
	{810, 4},               /* MCS4 1SS long GI */
	{540, 3},               /* MCS3 1SS long GI */
	{405, 2},               /* MCS2 1SS long GI */
	{270, 1},               /* MCS1 1SS long GI */
	{135, 0} /* MCS0 1SS long GI */
};
168 
#define WMA_MAX_VHT80_RATE_TBL_SIZE 10
/* VHT 80 MHz, short GI (400 ns): flag carries the MCS index used for
 * encoding the rate; rates in mbps x 10 units, descending.
 */
static struct wma_search_rate vht80_400ns_rate_tbl[WMA_MAX_VHT80_RATE_TBL_SIZE] = {
	{4333, 9},              /* MCS9 1SS short GI */
	{3900, 8},              /* MCS8 1SS short GI */
	{3250, 7},              /* MCS7 1SS short GI */
	{2925, 6},              /* MCS6 1SS short GI */
	{2600, 5},              /* MCS5 1SS short GI */
	{1950, 4},              /* MCS4 1SS short GI */
	{1300, 3},              /* MCS3 1SS short GI */
	{975, 2},               /* MCS2 1SS short GI */
	{650, 1},               /* MCS1 1SS short GI */
	{325, 0} /* MCS0 1SS short GI */
};

/* VHT 80 MHz, long GI (800 ns): flag carries the MCS index used for
 * encoding the rate.
 */
static struct wma_search_rate vht80_800ns_rate_tbl[WMA_MAX_VHT80_RATE_TBL_SIZE] = {
	{3900, 9},              /* MCS9 1SS long GI */
	{3510, 8},              /* MCS8 1SS long GI */
	{2925, 7},              /* MCS7 1SS long GI */
	{2633, 6},              /* MCS6 1SS long GI */
	{2340, 5},              /* MCS5 1SS long GI */
	{1755, 4},              /* MCS4 1SS long GI */
	{1170, 3},              /* MCS3 1SS long GI */
	{878, 2},               /* MCS2 1SS long GI */
	{585, 1},               /* MCS1 1SS long GI */
	{293, 0} /* MCS0 1SS long GI */
};
195 
#define WMA_MAX_HT20_RATE_TBL_SIZE 8
/* HT 20 MHz, short GI (400 ns): flag carries the MCS index used for
 * encoding the rate; rates in mbps x 10 units, descending.
 */
static struct wma_search_rate ht20_400ns_rate_tbl[WMA_MAX_HT20_RATE_TBL_SIZE] = {
	{722, 7},               /* MCS7 1SS short GI */
	{650, 6},               /* MCS6 1SS short GI */
	{578, 5},               /* MCS5 1SS short GI */
	{433, 4},               /* MCS4 1SS short GI */
	{289, 3},               /* MCS3 1SS short GI */
	{217, 2},               /* MCS2 1SS short GI */
	{144, 1},               /* MCS1 1SS short GI */
	{72, 0} /* MCS0 1SS short GI */
};

/* HT 20 MHz, long GI (800 ns): flag carries the MCS index used for
 * encoding the rate.
 */
static struct wma_search_rate ht20_800ns_rate_tbl[WMA_MAX_HT20_RATE_TBL_SIZE] = {
	{650, 7},               /* MCS7 1SS long GI */
	{585, 6},               /* MCS6 1SS long GI */
	{520, 5},               /* MCS5 1SS long GI */
	{390, 4},               /* MCS4 1SS long GI */
	{260, 3},               /* MCS3 1SS long GI */
	{195, 2},               /* MCS2 1SS long GI */
	{130, 1},               /* MCS1 1SS long GI */
	{65, 0} /* MCS0 1SS long GI */
};
218 
#define WMA_MAX_HT40_RATE_TBL_SIZE 8
/* HT 40 MHz, short GI (400 ns): flag carries the MCS index used for
 * encoding the rate; rates in mbps x 10 units, descending.
 */
static struct wma_search_rate ht40_400ns_rate_tbl[WMA_MAX_HT40_RATE_TBL_SIZE] = {
	{1500, 7},              /* MCS7 1SS short GI */
	{1350, 6},              /* MCS6 1SS short GI */
	{1200, 5},              /* MCS5 1SS short GI */
	{900, 4},               /* MCS4 1SS short GI */
	{600, 3},               /* MCS3 1SS short GI */
	{450, 2},               /* MCS2 1SS short GI */
	{300, 1},               /* MCS1 1SS short GI */
	{150, 0} /* MCS0 1SS short GI */
};

/* HT 40 MHz, long GI (800 ns): flag carries the MCS index used for
 * encoding the rate.
 */
static struct wma_search_rate ht40_800ns_rate_tbl[WMA_MAX_HT40_RATE_TBL_SIZE] = {
	{1350, 7},              /* MCS7 1SS long GI */
	{1215, 6},              /* MCS6 1SS long GI */
	{1080, 5},              /* MCS5 1SS long GI */
	{810, 4},               /* MCS4 1SS long GI */
	{540, 3},               /* MCS3 1SS long GI */
	{405, 2},               /* MCS2 1SS long GI */
	{270, 1},               /* MCS1 1SS long GI */
	{135, 0} /* MCS0 1SS long GI */
};
241 
242 /**
243  * wma_bin_search_rate() - binary search function to find rate
244  * @tbl: rate table
245  * @tbl_size: table size
246  * @mbpsx10_rate: return mbps rate
247  * @ret_flag: return flag
248  *
249  * Return: none
250  */
251 static void wma_bin_search_rate(struct wma_search_rate *tbl, int32_t tbl_size,
252 				int32_t *mbpsx10_rate, uint8_t *ret_flag)
253 {
254 	int32_t upper, lower, mid;
255 
256 	/* the table is descenting. index holds the largest value and the
257 	 * bottom index holds the smallest value
258 	 */
259 
260 	upper = 0;              /* index 0 */
261 	lower = tbl_size - 1;   /* last index */
262 
263 	if (*mbpsx10_rate >= tbl[upper].rate) {
264 		/* use the largest rate */
265 		*mbpsx10_rate = tbl[upper].rate;
266 		*ret_flag = tbl[upper].flag;
267 		return;
268 	} else if (*mbpsx10_rate <= tbl[lower].rate) {
269 		/* use the smallest rate */
270 		*mbpsx10_rate = tbl[lower].rate;
271 		*ret_flag = tbl[lower].flag;
272 		return;
273 	}
274 	/* now we do binery search to get the floor value */
275 	while (lower - upper > 1) {
276 		mid = (upper + lower) >> 1;
277 		if (*mbpsx10_rate == tbl[mid].rate) {
278 			/* found the exact match */
279 			*mbpsx10_rate = tbl[mid].rate;
280 			*ret_flag = tbl[mid].flag;
281 			return;
282 		}
283 		/* not found. if mid's rate is larger than input move
284 		 * upper to mid. If mid's rate is larger than input
285 		 * move lower to mid.
286 		 */
287 		if (*mbpsx10_rate > tbl[mid].rate)
288 			lower = mid;
289 		else
290 			upper = mid;
291 	}
292 	/* after the bin search the index is the ceiling of rate */
293 	*mbpsx10_rate = tbl[upper].rate;
294 	*ret_flag = tbl[upper].flag;
295 	return;
296 }
297 
298 /**
299  * wma_fill_ofdm_cck_mcast_rate() - fill ofdm cck mcast rate
300  * @mbpsx10_rate: mbps rates
301  * @nss: nss
302  * @rate: rate
303  *
304  * Return: QDF status
305  */
306 static QDF_STATUS wma_fill_ofdm_cck_mcast_rate(int32_t mbpsx10_rate,
307 					       uint8_t nss, uint8_t *rate)
308 {
309 	uint8_t idx = 0;
310 
311 	wma_bin_search_rate(ofdm_cck_rate_tbl, WMA_MAX_OFDM_CCK_RATE_TBL_SIZE,
312 			    &mbpsx10_rate, &idx);
313 
314 	/* if bit 7 is set it uses CCK */
315 	if (idx & 0x80)
316 		*rate |= (1 << 6) | (idx & 0xF); /* set bit 6 to 1 for CCK */
317 	else
318 		*rate |= (idx & 0xF);
319 	return QDF_STATUS_SUCCESS;
320 }
321 
/**
 * wma_set_ht_vht_mcast_rate() - pick the short- or long-GI candidate
 * @shortgi: non-zero selects the short-GI candidate, zero the long-GI one
 * @mbpsx10_rate: requested rate in mbps x 10 units (currently unused)
 * @sgi_idx: MCS index found in the short-GI table
 * @sgi_rate: rate found in the short-GI table
 * @lgi_idx: MCS index found in the long-GI table
 * @lgi_rate: rate found in the long-GI table
 * @premable: preamble value placed in bits 6-7 of the encoded rate
 * @rate: out: encoded rate (preamble in bits 6-7, MCS in the low nibble)
 * @streaming_rate: out: rate of the selected candidate
 *
 * Return: none
 */
static void wma_set_ht_vht_mcast_rate(uint32_t shortgi, int32_t mbpsx10_rate,
				      uint8_t sgi_idx, int32_t sgi_rate,
				      uint8_t lgi_idx, int32_t lgi_rate,
				      uint8_t premable, uint8_t *rate,
				      int32_t *streaming_rate)
{
	uint8_t mcs_idx = shortgi ? sgi_idx : lgi_idx;

	*rate |= (premable << 6) | (mcs_idx & 0xF);
	*streaming_rate = shortgi ? sgi_rate : lgi_rate;
}
350 
351 /**
352  * wma_fill_ht20_mcast_rate() - fill ht20 mcast rate
353  * @shortgi: short guard interval
354  * @mbpsx10_rate: mbps rates
355  * @nss: nss
356  * @rate: rate
357  * @streaming_rate: streaming rate
358  *
359  * Return: QDF status
360  */
361 static QDF_STATUS wma_fill_ht20_mcast_rate(uint32_t shortgi,
362 					   int32_t mbpsx10_rate, uint8_t nss,
363 					   uint8_t *rate,
364 					   int32_t *streaming_rate)
365 {
366 	uint8_t sgi_idx = 0, lgi_idx = 0;
367 	int32_t sgi_rate, lgi_rate;
368 
369 	if (nss == 1)
370 		mbpsx10_rate = mbpsx10_rate >> 1;
371 
372 	sgi_rate = mbpsx10_rate;
373 	lgi_rate = mbpsx10_rate;
374 	if (shortgi)
375 		wma_bin_search_rate(ht20_400ns_rate_tbl,
376 				    WMA_MAX_HT20_RATE_TBL_SIZE, &sgi_rate,
377 				    &sgi_idx);
378 	else
379 		wma_bin_search_rate(ht20_800ns_rate_tbl,
380 				    WMA_MAX_HT20_RATE_TBL_SIZE, &lgi_rate,
381 				    &lgi_idx);
382 
383 	wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate, sgi_idx, sgi_rate,
384 				  lgi_idx, lgi_rate, 2, rate, streaming_rate);
385 	if (nss == 1)
386 		*streaming_rate = *streaming_rate << 1;
387 	return QDF_STATUS_SUCCESS;
388 }
389 
390 /**
391  * wma_fill_ht40_mcast_rate() - fill ht40 mcast rate
392  * @shortgi: short guard interval
393  * @mbpsx10_rate: mbps rates
394  * @nss: nss
395  * @rate: rate
396  * @streaming_rate: streaming rate
397  *
398  * Return: QDF status
399  */
400 static QDF_STATUS wma_fill_ht40_mcast_rate(uint32_t shortgi,
401 					   int32_t mbpsx10_rate, uint8_t nss,
402 					   uint8_t *rate,
403 					   int32_t *streaming_rate)
404 {
405 	uint8_t sgi_idx = 0, lgi_idx = 0;
406 	int32_t sgi_rate, lgi_rate;
407 
408 	/* for 2x2 divide the rate by 2 */
409 	if (nss == 1)
410 		mbpsx10_rate = mbpsx10_rate >> 1;
411 
412 	sgi_rate = mbpsx10_rate;
413 	lgi_rate = mbpsx10_rate;
414 	if (shortgi)
415 		wma_bin_search_rate(ht40_400ns_rate_tbl,
416 				    WMA_MAX_HT40_RATE_TBL_SIZE, &sgi_rate,
417 				    &sgi_idx);
418 	else
419 		wma_bin_search_rate(ht40_800ns_rate_tbl,
420 				    WMA_MAX_HT40_RATE_TBL_SIZE, &lgi_rate,
421 				    &lgi_idx);
422 
423 	wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate, sgi_idx, sgi_rate,
424 				  lgi_idx, lgi_rate, 2, rate, streaming_rate);
425 
426 	return QDF_STATUS_SUCCESS;
427 }
428 
429 /**
430  * wma_fill_vht20_mcast_rate() - fill vht20 mcast rate
431  * @shortgi: short guard interval
432  * @mbpsx10_rate: mbps rates
433  * @nss: nss
434  * @rate: rate
435  * @streaming_rate: streaming rate
436  *
437  * Return: QDF status
438  */
439 static QDF_STATUS wma_fill_vht20_mcast_rate(uint32_t shortgi,
440 					    int32_t mbpsx10_rate, uint8_t nss,
441 					    uint8_t *rate,
442 					    int32_t *streaming_rate)
443 {
444 	uint8_t sgi_idx = 0, lgi_idx = 0;
445 	int32_t sgi_rate, lgi_rate;
446 
447 	/* for 2x2 divide the rate by 2 */
448 	if (nss == 1)
449 		mbpsx10_rate = mbpsx10_rate >> 1;
450 
451 	sgi_rate = mbpsx10_rate;
452 	lgi_rate = mbpsx10_rate;
453 	if (shortgi)
454 		wma_bin_search_rate(vht20_400ns_rate_tbl,
455 				    WMA_MAX_VHT20_RATE_TBL_SIZE, &sgi_rate,
456 				    &sgi_idx);
457 	else
458 		wma_bin_search_rate(vht20_800ns_rate_tbl,
459 				    WMA_MAX_VHT20_RATE_TBL_SIZE, &lgi_rate,
460 				    &lgi_idx);
461 
462 	wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate, sgi_idx, sgi_rate,
463 				  lgi_idx, lgi_rate, 3, rate, streaming_rate);
464 	if (nss == 1)
465 		*streaming_rate = *streaming_rate << 1;
466 	return QDF_STATUS_SUCCESS;
467 }
468 
469 /**
470  * wma_fill_vht40_mcast_rate() - fill vht40 mcast rate
471  * @shortgi: short guard interval
472  * @mbpsx10_rate: mbps rates
473  * @nss: nss
474  * @rate: rate
475  * @streaming_rate: streaming rate
476  *
477  * Return: QDF status
478  */
479 static QDF_STATUS wma_fill_vht40_mcast_rate(uint32_t shortgi,
480 					    int32_t mbpsx10_rate, uint8_t nss,
481 					    uint8_t *rate,
482 					    int32_t *streaming_rate)
483 {
484 	uint8_t sgi_idx = 0, lgi_idx = 0;
485 	int32_t sgi_rate, lgi_rate;
486 
487 	/* for 2x2 divide the rate by 2 */
488 	if (nss == 1)
489 		mbpsx10_rate = mbpsx10_rate >> 1;
490 
491 	sgi_rate = mbpsx10_rate;
492 	lgi_rate = mbpsx10_rate;
493 	if (shortgi)
494 		wma_bin_search_rate(vht40_400ns_rate_tbl,
495 				    WMA_MAX_VHT40_RATE_TBL_SIZE, &sgi_rate,
496 				    &sgi_idx);
497 	else
498 		wma_bin_search_rate(vht40_800ns_rate_tbl,
499 				    WMA_MAX_VHT40_RATE_TBL_SIZE, &lgi_rate,
500 				    &lgi_idx);
501 
502 	wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate,
503 				  sgi_idx, sgi_rate, lgi_idx, lgi_rate,
504 				  3, rate, streaming_rate);
505 	if (nss == 1)
506 		*streaming_rate = *streaming_rate << 1;
507 	return QDF_STATUS_SUCCESS;
508 }
509 
510 /**
511  * wma_fill_vht80_mcast_rate() - fill vht80 mcast rate
512  * @shortgi: short guard interval
513  * @mbpsx10_rate: mbps rates
514  * @nss: nss
515  * @rate: rate
516  * @streaming_rate: streaming rate
517  *
518  * Return: QDF status
519  */
520 static QDF_STATUS wma_fill_vht80_mcast_rate(uint32_t shortgi,
521 					    int32_t mbpsx10_rate, uint8_t nss,
522 					    uint8_t *rate,
523 					    int32_t *streaming_rate)
524 {
525 	uint8_t sgi_idx = 0, lgi_idx = 0;
526 	int32_t sgi_rate, lgi_rate;
527 
528 	/* for 2x2 divide the rate by 2 */
529 	if (nss == 1)
530 		mbpsx10_rate = mbpsx10_rate >> 1;
531 
532 	sgi_rate = mbpsx10_rate;
533 	lgi_rate = mbpsx10_rate;
534 	if (shortgi)
535 		wma_bin_search_rate(vht80_400ns_rate_tbl,
536 				    WMA_MAX_VHT80_RATE_TBL_SIZE, &sgi_rate,
537 				    &sgi_idx);
538 	else
539 		wma_bin_search_rate(vht80_800ns_rate_tbl,
540 				    WMA_MAX_VHT80_RATE_TBL_SIZE, &lgi_rate,
541 				    &lgi_idx);
542 
543 	wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate, sgi_idx, sgi_rate,
544 				  lgi_idx, lgi_rate, 3, rate, streaming_rate);
545 	if (nss == 1)
546 		*streaming_rate = *streaming_rate << 1;
547 	return QDF_STATUS_SUCCESS;
548 }
549 
550 /**
551  * wma_fill_ht_mcast_rate() - fill ht mcast rate
552  * @shortgi: short guard interval
553  * @chwidth: channel width
554  * @chanmode: channel mode
555  * @mhz: frequency
556  * @mbpsx10_rate: mbps rates
557  * @nss: nss
558  * @rate: rate
559  * @streaming_rate: streaming rate
560  *
561  * Return: QDF status
562  */
563 static QDF_STATUS wma_fill_ht_mcast_rate(uint32_t shortgi,
564 					 uint32_t chwidth, int32_t mbpsx10_rate,
565 					 uint8_t nss, uint8_t *rate,
566 					 int32_t *streaming_rate)
567 {
568 	int32_t ret = 0;
569 
570 	*streaming_rate = 0;
571 	if (chwidth == 0)
572 		ret = wma_fill_ht20_mcast_rate(shortgi, mbpsx10_rate,
573 					       nss, rate, streaming_rate);
574 	else if (chwidth == 1)
575 		ret = wma_fill_ht40_mcast_rate(shortgi, mbpsx10_rate,
576 					       nss, rate, streaming_rate);
577 	else
578 		wma_err("Error, Invalid chwidth enum %d", chwidth);
579 	return (*streaming_rate != 0) ? QDF_STATUS_SUCCESS : QDF_STATUS_E_INVAL;
580 }
581 
582 /**
583  * wma_fill_vht_mcast_rate() - fill vht mcast rate
584  * @shortgi: short guard interval
585  * @chwidth: channel width
586  * @chanmode: channel mode
587  * @mhz: frequency
588  * @mbpsx10_rate: mbps rates
589  * @nss: nss
590  * @rate: rate
591  * @streaming_rate: streaming rate
592  *
593  * Return: QDF status
594  */
595 static QDF_STATUS wma_fill_vht_mcast_rate(uint32_t shortgi,
596 					  uint32_t chwidth,
597 					  int32_t mbpsx10_rate, uint8_t nss,
598 					  uint8_t *rate,
599 					  int32_t *streaming_rate)
600 {
601 	int32_t ret = 0;
602 
603 	*streaming_rate = 0;
604 	if (chwidth == 0)
605 		ret = wma_fill_vht20_mcast_rate(shortgi, mbpsx10_rate, nss,
606 						rate, streaming_rate);
607 	else if (chwidth == 1)
608 		ret = wma_fill_vht40_mcast_rate(shortgi, mbpsx10_rate, nss,
609 						rate, streaming_rate);
610 	else if (chwidth == 2)
611 		ret = wma_fill_vht80_mcast_rate(shortgi, mbpsx10_rate, nss,
612 						rate, streaming_rate);
613 	else
614 		wma_err("chwidth enum %d not supported", chwidth);
615 	return (*streaming_rate != 0) ? QDF_STATUS_SUCCESS : QDF_STATUS_E_INVAL;
616 }
617 
#define WMA_MCAST_1X1_CUT_OFF_RATE 2000
/**
 * wma_encode_mc_rate() - fill mc rates
 * @shortgi: short guard interval
 * @chwidth: channel width
 * @mhz: frequency
 * @mbpsx10_rate: mbps rates (may carry an nss request in bits 28-30)
 * @nss: nss
 * @rate: out: encoded rate
 *
 * Return: QDF status
 */
static QDF_STATUS wma_encode_mc_rate(uint32_t shortgi, uint32_t chwidth,
			     A_UINT32 mhz, int32_t mbpsx10_rate, uint8_t nss,
			     uint8_t *rate)
{
	int32_t ret = 0;

	/* nss input value: 0 - 1x1; 1 - 2x2; 2 - 3x3
	 * the phymode selection is based on following assumption:
	 * (1) if the app specifically requested 1x1 or 2x2 we honor it
	 * (2) if mbpsx10_rate <= 540: always use BG
	 * (3) 540 < mbpsx10_rate <= 2000: use 1x1 HT/VHT
	 * (4) 2000 < mbpsx10_rate: use 2x2 HT/VHT
	 */
	wma_debug("Input: nss = %d, mbpsx10 = 0x%x, chwidth = %d, shortgi = %d",
		  nss, mbpsx10_rate, chwidth, shortgi);
	if ((mbpsx10_rate & 0x40000000) && nss > 0) {
		/* bit 30 indicates user inputted nss,
		 * bit 28 and 29 used to encode nss
		 */
		uint8_t user_nss = (mbpsx10_rate & 0x30000000) >> 28;

		/* never exceed the caller-supplied nss capability */
		nss = (user_nss < nss) ? user_nss : nss;
		/* zero out bits 28 - 30 to recover the actual rate */
		mbpsx10_rate &= ~0x70000000;
	} else if (mbpsx10_rate <= WMA_MCAST_1X1_CUT_OFF_RATE) {
		/* if the input rate is less or equal to the
		 * 1x1 cutoff rate we use 1x1 only
		 */
		nss = 0;
	}
	/* encode NSS bits (bit 4, bit 5) */
	*rate = (nss & 0x3) << 4;
	/* if mcast input rate exceeds the ofdm/cck max rate 54mpbs
	 * we try to choose best ht/vht mcs rate
	 */
	if (540 < mbpsx10_rate) {
		/* cannot use ofdm/cck, choose closest ht/vht mcs rate */
		uint8_t rate_ht = *rate;
		uint8_t rate_vht = *rate;
		int32_t stream_rate_ht = 0;
		int32_t stream_rate_vht = 0;
		int32_t stream_rate = 0;

		ret = wma_fill_ht_mcast_rate(shortgi, chwidth, mbpsx10_rate,
					     nss, &rate_ht,
					     &stream_rate_ht);
		if (ret != QDF_STATUS_SUCCESS)
			stream_rate_ht = 0;
		if (mhz < WMA_2_4_GHZ_MAX_FREQ) {
			/* 2.4 GHz band: HT only, skip the VHT lookup */
			*rate = rate_ht;
			stream_rate = stream_rate_ht;
			goto ht_vht_done;
		}
		/* capable doing 11AC mcast so that search vht tables */
		ret = wma_fill_vht_mcast_rate(shortgi, chwidth, mbpsx10_rate,
					      nss, &rate_vht,
					      &stream_rate_vht);
		if (ret != QDF_STATUS_SUCCESS) {
			/* no VHT match: fall back to the HT result if any */
			if (stream_rate_ht != 0)
				ret = QDF_STATUS_SUCCESS;
			*rate = rate_ht;
			stream_rate = stream_rate_ht;
			goto ht_vht_done;
		}
		if (stream_rate_ht == 0) {
			/* only vht rate available */
			*rate = rate_vht;
			stream_rate = stream_rate_vht;
		} else {
			/* set ht as default first */
			*rate = rate_ht;
			stream_rate = stream_rate_ht;
			/* prefer whichever candidate lands closer to (and
			 * preferably at or above) the requested rate
			 */
			if (stream_rate < mbpsx10_rate) {
				if (mbpsx10_rate <= stream_rate_vht ||
				    stream_rate < stream_rate_vht) {
					*rate = rate_vht;
					stream_rate = stream_rate_vht;
				}
			} else {
				if (stream_rate_vht >= mbpsx10_rate &&
				    stream_rate_vht < stream_rate) {
					*rate = rate_vht;
					stream_rate = stream_rate_vht;
				}
			}
		}
ht_vht_done:
		wma_debug("NSS = %d, freq = %d", nss, mhz);
		wma_debug("input_rate = %d, chwidth = %d rate = 0x%x, streaming_rate = %d",
			 mbpsx10_rate, chwidth, *rate, stream_rate);
	} else {
		if (mbpsx10_rate > 0)
			ret = wma_fill_ofdm_cck_mcast_rate(mbpsx10_rate,
							   nss, rate);
		else
			*rate = 0xFF; /* no valid request: disable fixed rate */

		wma_debug("NSS = %d, input_rate = %d, rate = 0x%x",
			  nss, mbpsx10_rate, *rate);
	}
	return ret;
}
734 
735 /**
736  * wma_cp_stats_set_rate_flag() - set rate flags within cp_stats priv object
737  * @wma: wma handle
738  * @vdev_id: vdev id
739  *
740  * Return: none
741  */
742 static void wma_cp_stats_set_rate_flag(tp_wma_handle wma, uint8_t vdev_id)
743 {
744 	struct wlan_objmgr_vdev *vdev;
745 	struct wlan_objmgr_psoc *psoc = wma->psoc;
746 	struct wma_txrx_node *iface = &wma->interfaces[vdev_id];
747 	uint32_t rate_flag;
748 	QDF_STATUS status;
749 
750 	vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, vdev_id,
751 						    WLAN_LEGACY_WMA_ID);
752 	if (!vdev) {
753 		wma_err("vdev not found for id: %d", vdev_id);
754 		return;
755 	}
756 
757 	status = wma_get_vdev_rate_flag(iface->vdev, &rate_flag);
758 	if (QDF_IS_STATUS_ERROR(status)) {
759 		wma_err("vdev not found for id: %d", vdev_id);
760 		return;
761 	}
762 	ucfg_mc_cp_stats_set_rate_flags(vdev, rate_flag);
763 	wlan_objmgr_vdev_release_ref(vdev, WLAN_LEGACY_WMA_ID);
764 }
765 
#ifdef WLAN_FEATURE_11AX
/**
 * wma_set_bss_rate_flags_he() - set rate flags based on BSS capability
 * @rate_flags: rate_flags pointer
 * @add_bss: add_bss params
 *
 * Return: QDF_STATUS_SUCCESS when HE flags were applied,
 *         QDF_STATUS_E_NOSUPPORT when the BSS is not HE capable
 */
static QDF_STATUS wma_set_bss_rate_flags_he(enum tx_rate_info *rate_flags,
					    struct bss_params *add_bss)
{
	if (!add_bss->he_capable)
		return QDF_STATUS_E_NOSUPPORT;

	*rate_flags |= wma_get_he_rate_flags(add_bss->ch_width);

	wma_debug("he_capable %d rate_flags 0x%x", add_bss->he_capable,
		  *rate_flags);
	return QDF_STATUS_SUCCESS;
}

/**
 * wma_get_bss_he_capable() - report whether the BSS is HE capable
 * @add_bss: add_bss params
 *
 * Return: true when HE capable
 */
static bool wma_get_bss_he_capable(struct bss_params *add_bss)
{
	return add_bss->he_capable;
}
#else
/* Stub used when 11ax support is compiled out: never reports HE */
static QDF_STATUS wma_set_bss_rate_flags_he(enum tx_rate_info *rate_flags,
					    struct bss_params *add_bss)
{
	return QDF_STATUS_E_NOSUPPORT;
}

/* Stub used when 11ax support is compiled out */
static bool wma_get_bss_he_capable(struct bss_params *add_bss)
{
	return false;
}
#endif
803 
804 enum tx_rate_info wma_get_vht_rate_flags(enum phy_ch_width ch_width)
805 {
806 	enum tx_rate_info rate_flags = 0;
807 
808 	if (ch_width == CH_WIDTH_80P80MHZ)
809 		rate_flags |= TX_RATE_VHT160 | TX_RATE_VHT80 | TX_RATE_VHT40 |
810 				TX_RATE_VHT20;
811 	if (ch_width == CH_WIDTH_160MHZ)
812 		rate_flags |= TX_RATE_VHT160 | TX_RATE_VHT80 | TX_RATE_VHT40 |
813 				TX_RATE_VHT20;
814 	if (ch_width == CH_WIDTH_80MHZ)
815 		rate_flags |= TX_RATE_VHT80 | TX_RATE_VHT40 | TX_RATE_VHT20;
816 	else if (ch_width)
817 		rate_flags |= TX_RATE_VHT40 | TX_RATE_VHT20;
818 	else
819 		rate_flags |= TX_RATE_VHT20;
820 	return rate_flags;
821 }
822 
823 enum tx_rate_info wma_get_ht_rate_flags(enum phy_ch_width ch_width)
824 {
825 	enum tx_rate_info rate_flags = 0;
826 
827 	if (ch_width)
828 		rate_flags |= TX_RATE_HT40 | TX_RATE_HT20;
829 	else
830 		rate_flags |= TX_RATE_HT20;
831 
832 	return rate_flags;
833 }
834 
835 enum tx_rate_info wma_get_he_rate_flags(enum phy_ch_width ch_width)
836 {
837 	enum tx_rate_info rate_flags = 0;
838 
839 	if (ch_width == CH_WIDTH_160MHZ ||
840 	    ch_width == CH_WIDTH_80P80MHZ)
841 		rate_flags |= TX_RATE_HE160 | TX_RATE_HE80 | TX_RATE_HE40 |
842 				TX_RATE_HE20;
843 	else if (ch_width == CH_WIDTH_80MHZ)
844 		rate_flags |= TX_RATE_HE80 | TX_RATE_HE40 | TX_RATE_HE20;
845 	else if (ch_width)
846 		rate_flags |= TX_RATE_HE40 | TX_RATE_HE20;
847 	else
848 		rate_flags |= TX_RATE_HE20;
849 
850 	return rate_flags;
851 }
852 
/**
 * wma_set_bss_rate_flags() - derive and cache tx rate flags for a BSS
 * @wma: wma handle
 * @vdev_id: vdev id
 * @add_bss: BSS parameters (capabilities and channel width)
 *
 * Computes the vdev's rate flags from the highest supported PHY
 * generation (EHT, then HE, then VHT/HT), stores them in the vdev mlme
 * object and mirrors them into cp_stats.
 *
 * Return: none
 */
void wma_set_bss_rate_flags(tp_wma_handle wma, uint8_t vdev_id,
			    struct bss_params *add_bss)
{
	struct wma_txrx_node *iface = &wma->interfaces[vdev_id];
	struct vdev_mlme_obj *vdev_mlme;
	enum tx_rate_info *rate_flags;
	QDF_STATUS qdf_status;

	vdev_mlme = wlan_vdev_mlme_get_cmpt_obj(iface->vdev);
	if (!vdev_mlme) {
		wma_err("Failed to get mlme obj for vdev_%d", vdev_id);
		return;
	}
	rate_flags = &vdev_mlme->mgmt.rate_info.rate_flags;
	*rate_flags = 0;

	/* highest generation wins: try EHT first, then HE, then VHT/HT */
	qdf_status = wma_set_bss_rate_flags_eht(rate_flags, add_bss);
	if (QDF_IS_STATUS_ERROR(qdf_status)) {
		if (QDF_STATUS_SUCCESS !=
			wma_set_bss_rate_flags_he(rate_flags, add_bss)) {
			if (add_bss->vhtCapable)
				*rate_flags = wma_get_vht_rate_flags(add_bss->ch_width);
			/* avoid to conflict with htCapable flag */
			else if (add_bss->htCapable)
				*rate_flags |= wma_get_ht_rate_flags(add_bss->ch_width);
		}
	}

	if (add_bss->staContext.fShortGI20Mhz ||
	    add_bss->staContext.fShortGI40Mhz)
		*rate_flags |= TX_RATE_SGI;

	/* no HT/VHT/HE/EHT capability at all: plain legacy rates */
	if (!add_bss->htCapable && !add_bss->vhtCapable &&
	    !wma_get_bss_he_capable(add_bss) &&
	    !wma_get_bss_eht_capable(add_bss))
		*rate_flags = TX_RATE_LEGACY;

	wma_debug("capable: vht %u, ht %u, rate_flags %x, ch_width %d",
		  add_bss->vhtCapable, add_bss->htCapable,
		  *rate_flags, add_bss->ch_width);

	wma_cp_stats_set_rate_flag(wma, vdev_id);
}
896 
/**
 * wma_set_vht_txbf_cfg() - push VHT TxBF configuration to firmware
 * @mac: mac context holding the mlme VHT capability config
 * @vdev_id: vdev id
 *
 * Reads the SU/MU beamformee and SU beamformer settings from
 * mlme_cfg->vht_caps and programs them via wmi_vdev_param_txbf.
 *
 * Return: none
 */
void wma_set_vht_txbf_cfg(struct mac_context *mac, uint8_t vdev_id)
{
	wmi_vdev_txbf_en txbf_en = {0};
	QDF_STATUS status;
	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);

	if (!wma)
		return;

	txbf_en.sutxbfee = mac->mlme_cfg->vht_caps.vht_cap_info.su_bformee;
	txbf_en.mutxbfee =
		mac->mlme_cfg->vht_caps.vht_cap_info.enable_mu_bformee;
	txbf_en.sutxbfer = mac->mlme_cfg->vht_caps.vht_cap_info.su_bformer;

	/* NOTE(review): only the first byte of txbf_en is passed down;
	 * assumes all enable bits fit in that byte - confirm against the
	 * wmi_vdev_txbf_en layout.
	 */
	status = wma_vdev_set_param(wma->wmi_handle, vdev_id,
				    wmi_vdev_param_txbf,
				    *((A_UINT8 *)&txbf_en));
	if (QDF_IS_STATUS_ERROR(status))
		wma_err("failed to set VHT TXBF(status = %d)", status);
}
917 
918 /**
919  * wmi_unified_send_txbf() - set txbf parameter to fw
920  * @wma: wma handle
921  * @params: txbf parameters
922  *
923  * Return: 0 for success or error code
924  */
925 int32_t wmi_unified_send_txbf(tp_wma_handle wma, tpAddStaParams params)
926 {
927 	wmi_vdev_txbf_en txbf_en = {0};
928 
929 	/* This is set when Other partner is Bformer
930 	 * and we are capable bformee(enabled both in ini and fw)
931 	 */
932 	txbf_en.sutxbfee = params->vhtTxBFCapable;
933 	txbf_en.mutxbfee = params->vhtTxMUBformeeCapable;
934 	txbf_en.sutxbfer = params->enable_su_tx_bformer;
935 
936 	/* When MU TxBfee is set, SU TxBfee must be set by default */
937 	if (txbf_en.mutxbfee)
938 		txbf_en.sutxbfee = txbf_en.mutxbfee;
939 
940 	wma_debug("txbf_en.sutxbfee %d txbf_en.mutxbfee %d, sutxbfer %d",
941 		 txbf_en.sutxbfee, txbf_en.mutxbfee, txbf_en.sutxbfer);
942 
943 	return wma_vdev_set_param(wma->wmi_handle,
944 						params->smesessionId,
945 						wmi_vdev_param_txbf,
946 						*((A_UINT8 *) &txbf_en));
947 }
948 
949 /**
950  * wma_data_tx_ack_work_handler() - process data tx ack
951  * @ack_work: work structure
952  *
953  * Return: none
954  */
955 static void wma_data_tx_ack_work_handler(void *ack_work)
956 {
957 	struct wma_tx_ack_work_ctx *work;
958 	tp_wma_handle wma_handle;
959 	wma_tx_ota_comp_callback ack_cb;
960 
961 	work = (struct wma_tx_ack_work_ctx *)ack_work;
962 
963 	wma_handle = work->wma_handle;
964 	if (!wma_handle || cds_is_load_or_unload_in_progress()) {
965 		wma_err("Driver load/unload in progress");
966 		goto end;
967 	}
968 	ack_cb = wma_handle->umac_data_ota_ack_cb;
969 
970 	if (work->status)
971 		wma_debug("Data Tx Ack Cb Status %d", work->status);
972 	else
973 		wma_debug("Data Tx Ack Cb Status %d", work->status);
974 
975 	/* Call the Ack Cb registered by UMAC */
976 	if (ack_cb)
977 		ack_cb(wma_handle->mac_context, NULL, work->status, NULL);
978 	else
979 		wma_err("Data Tx Ack Cb is NULL");
980 
981 end:
982 	qdf_mem_free(work);
983 	if (wma_handle) {
984 		wma_handle->umac_data_ota_ack_cb = NULL;
985 		wma_handle->last_umac_data_nbuf = NULL;
986 		wma_handle->ack_work_ctx = NULL;
987 	}
988 }
989 
990 /**
991  * wma_data_tx_ack_comp_hdlr() - handles tx data ack completion
992  * @context: context with which the handler is registered
993  * @netbuf: tx data nbuf
994  * @err: status of tx completion
995  *
996  * This is the cb registered with TxRx for
997  * Ack Complete
998  *
999  * Return: none
1000  */
1001 void
1002 wma_data_tx_ack_comp_hdlr(void *wma_context, qdf_nbuf_t netbuf, int32_t status)
1003 {
1004 	tp_wma_handle wma_handle = (tp_wma_handle) wma_context;
1005 
1006 	if (wma_validate_handle(wma_handle))
1007 		return;
1008 
1009 	/*
1010 	 * if netBuf does not match with pending nbuf then just free the
1011 	 * netbuf and do not call ack cb
1012 	 */
1013 	if (wma_handle->last_umac_data_nbuf != netbuf) {
1014 		if (wma_handle->umac_data_ota_ack_cb) {
1015 			wma_err("nbuf does not match but umac_data_ota_ack_cb is not null");
1016 		} else {
1017 			wma_err("nbuf does not match and umac_data_ota_ack_cb is also null");
1018 		}
1019 		goto free_nbuf;
1020 	}
1021 
1022 	if (wma_handle->umac_data_ota_ack_cb) {
1023 		struct wma_tx_ack_work_ctx *ack_work;
1024 
1025 		ack_work = qdf_mem_malloc(sizeof(struct wma_tx_ack_work_ctx));
1026 		wma_handle->ack_work_ctx = ack_work;
1027 		if (ack_work) {
1028 			ack_work->wma_handle = wma_handle;
1029 			ack_work->sub_type = 0;
1030 			ack_work->status = status;
1031 
1032 			qdf_create_work(0, &ack_work->ack_cmp_work,
1033 					wma_data_tx_ack_work_handler,
1034 					ack_work);
1035 			qdf_sched_work(0, &ack_work->ack_cmp_work);
1036 		}
1037 	}
1038 
1039 free_nbuf:
1040 	/* unmap and freeing the tx buf as txrx is not taking care */
1041 	qdf_nbuf_unmap_single(wma_handle->qdf_dev, netbuf, QDF_DMA_TO_DEVICE);
1042 	qdf_nbuf_free(netbuf);
1043 }
1044 
1045 /**
1046  * wma_check_txrx_chainmask() - check txrx chainmask
1047  * @num_rf_chains: number of rf chains
1048  * @cmd_value: command value
1049  *
1050  * Return: QDF_STATUS_SUCCESS for success or error code
1051  */
1052 QDF_STATUS wma_check_txrx_chainmask(int num_rf_chains, int cmd_value)
1053 {
1054 	if ((cmd_value > WMA_MAX_RF_CHAINS(num_rf_chains)) ||
1055 	    (cmd_value < WMA_MIN_RF_CHAINS)) {
1056 		wma_err("Requested value %d over the range", cmd_value);
1057 		return QDF_STATUS_E_INVAL;
1058 	}
1059 	return QDF_STATUS_SUCCESS;
1060 }
1061 
1062 /**
1063  * wma_set_enable_disable_mcc_adaptive_scheduler() -enable/disable mcc scheduler
1064  * @mcc_adaptive_scheduler: enable/disable
1065  *
1066  * This function enable/disable mcc adaptive scheduler in fw.
1067  *
1068  * Return: QDF_STATUS_SUCCESS for success or error code
1069  */
1070 QDF_STATUS wma_set_enable_disable_mcc_adaptive_scheduler(uint32_t
1071 							 mcc_adaptive_scheduler)
1072 {
1073 	tp_wma_handle wma = NULL;
1074 	uint32_t pdev_id;
1075 
1076 	wma = cds_get_context(QDF_MODULE_ID_WMA);
1077 	if (!wma)
1078 		return QDF_STATUS_E_FAULT;
1079 
1080 	/*
1081 	 * Since there could be up to two instances of OCS in FW (one per MAC),
1082 	 * FW provides the option of enabling and disabling MAS on a per MAC
1083 	 * basis. But, Host does not have enable/disable option for individual
1084 	 * MACs. So, FW agreed for the Host to send down a 'pdev id' of 0.
1085 	 * When 'pdev id' of 0 is used, FW treats this as a SOC level command
1086 	 * and applies the same value to both MACs. Irrespective of the value
1087 	 * of 'WMI_SERVICE_DEPRECATED_REPLACE', the pdev id needs to be '0'
1088 	 * (SOC level) for WMI_RESMGR_ADAPTIVE_OCS_ENABLE_DISABLE_CMDID
1089 	 */
1090 	pdev_id = WMI_PDEV_ID_SOC;
1091 
1092 	return wmi_unified_set_enable_disable_mcc_adaptive_scheduler_cmd(
1093 			wma->wmi_handle, mcc_adaptive_scheduler, pdev_id);
1094 }
1095 
1096 /**
1097  * wma_set_mcc_channel_time_latency() -set MCC channel time latency
1098  * @wma: wma handle
1099  * @mcc_channel: mcc channel
1100  * @mcc_channel_time_latency: MCC channel time latency.
1101  *
1102  * Currently used to set time latency for an MCC vdev/adapter using operating
1103  * channel of it and channel number. The info is provided run time using
1104  * iwpriv command: iwpriv <wlan0 | p2p0> setMccLatency <latency in ms>.
1105  *
1106  * Return: QDF status
1107  */
1108 QDF_STATUS wma_set_mcc_channel_time_latency(tp_wma_handle wma,
1109 	uint32_t mcc_channel, uint32_t mcc_channel_time_latency)
1110 {
1111 	bool mcc_adapt_sch = false;
1112 	struct mac_context *mac = NULL;
1113 	uint32_t channel1 = mcc_channel;
1114 	uint32_t chan1_freq = cds_chan_to_freq(channel1);
1115 
1116 	if (!wma) {
1117 		wma_err("NULL wma ptr. Exiting");
1118 		QDF_ASSERT(0);
1119 		return QDF_STATUS_E_FAILURE;
1120 	}
1121 	mac = cds_get_context(QDF_MODULE_ID_PE);
1122 	if (!mac) {
1123 		QDF_ASSERT(0);
1124 		return QDF_STATUS_E_FAILURE;
1125 	}
1126 
1127 	/* First step is to confirm if MCC is active */
1128 	if (!lim_is_in_mcc(mac)) {
1129 		wma_err("MCC is not active. Exiting");
1130 		QDF_ASSERT(0);
1131 		return QDF_STATUS_E_FAILURE;
1132 	}
1133 	/* Confirm MCC adaptive scheduler feature is disabled */
1134 	if (policy_mgr_get_dynamic_mcc_adaptive_sch(mac->psoc,
1135 						    &mcc_adapt_sch) ==
1136 	    QDF_STATUS_SUCCESS) {
1137 		if (mcc_adapt_sch) {
1138 			wma_debug("Can't set channel latency while MCC ADAPTIVE SCHED is enabled. Exit");
1139 			return QDF_STATUS_SUCCESS;
1140 		}
1141 	} else {
1142 		wma_err("Failed to get value for MCC_ADAPTIVE_SCHED, "
1143 			 "Exit w/o setting latency");
1144 		QDF_ASSERT(0);
1145 		return QDF_STATUS_E_FAILURE;
1146 	}
1147 
1148 	return wmi_unified_set_mcc_channel_time_latency_cmd(wma->wmi_handle,
1149 						chan1_freq,
1150 						mcc_channel_time_latency);
1151 }
1152 
1153 /**
1154  * wma_set_mcc_channel_time_quota() -set MCC channel time quota
1155  * @wma: wma handle
1156  * @adapter_1_chan_number: adapter 1 channel number
1157  * @adapter_1_quota: adapter 1 quota
1158  * @adapter_2_chan_number: adapter 2 channel number
1159  *
1160  * Currently used to set time quota for 2 MCC vdevs/adapters using (operating
1161  * channel, quota) for each mode . The info is provided run time using
1162  * iwpriv command: iwpriv <wlan0 | p2p0> setMccQuota <quota in ms>.
1163  * Note: the quota provided in command is for the same mode in cmd. HDD
1164  * checks if MCC mode is active, gets the second mode and its operating chan.
1165  * Quota for the 2nd role is calculated as 100 - quota of first mode.
1166  *
1167  * Return: QDF status
1168  */
1169 QDF_STATUS wma_set_mcc_channel_time_quota(tp_wma_handle wma,
1170 		uint32_t adapter_1_chan_number,	uint32_t adapter_1_quota,
1171 		uint32_t adapter_2_chan_number)
1172 {
1173 	bool mcc_adapt_sch = false;
1174 	struct mac_context *mac = NULL;
1175 	uint32_t chan1_freq = cds_chan_to_freq(adapter_1_chan_number);
1176 	uint32_t chan2_freq = cds_chan_to_freq(adapter_2_chan_number);
1177 
1178 	if (!wma) {
1179 		wma_err("NULL wma ptr. Exiting");
1180 		QDF_ASSERT(0);
1181 		return QDF_STATUS_E_FAILURE;
1182 	}
1183 	mac = cds_get_context(QDF_MODULE_ID_PE);
1184 	if (!mac) {
1185 		QDF_ASSERT(0);
1186 		return QDF_STATUS_E_FAILURE;
1187 	}
1188 
1189 	/* First step is to confirm if MCC is active */
1190 	if (!lim_is_in_mcc(mac)) {
1191 		wma_debug("MCC is not active. Exiting");
1192 		QDF_ASSERT(0);
1193 		return QDF_STATUS_E_FAILURE;
1194 	}
1195 
1196 	/* Confirm MCC adaptive scheduler feature is disabled */
1197 	if (policy_mgr_get_dynamic_mcc_adaptive_sch(mac->psoc,
1198 						    &mcc_adapt_sch) ==
1199 	    QDF_STATUS_SUCCESS) {
1200 		if (mcc_adapt_sch) {
1201 			wma_debug("Can't set channel quota while MCC_ADAPTIVE_SCHED is enabled. Exit");
1202 			return QDF_STATUS_SUCCESS;
1203 		}
1204 	} else {
1205 		wma_err("Failed to retrieve WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED. Exit");
1206 		QDF_ASSERT(0);
1207 		return QDF_STATUS_E_FAILURE;
1208 	}
1209 
1210 	return wmi_unified_set_mcc_channel_time_quota_cmd(wma->wmi_handle,
1211 						chan1_freq,
1212 						adapter_1_quota,
1213 						chan2_freq);
1214 }
1215 
1216 #define MAX_VDEV_PROCESS_RATE_PARAMS 2
1217 /* params being sent:
1218  * wmi_vdev_param_sgi
1219  * wmi_vdev_param_mcast_data_rate
1220  */
1221 QDF_STATUS wma_process_rate_update_indicate(tp_wma_handle wma,
1222 					    tSirRateUpdateInd *
1223 					    pRateUpdateParams)
1224 {
1225 	int32_t ret = 0;
1226 	uint8_t vdev_id = 0;
1227 	int32_t mbpsx10_rate = -1;
1228 	uint32_t paramid;
1229 	uint8_t rate = 0;
1230 	uint32_t short_gi, rate_flag;
1231 	struct wma_txrx_node *intr = wma->interfaces;
1232 	QDF_STATUS status;
1233 	struct dev_set_param setparam[MAX_VDEV_PROCESS_RATE_PARAMS] = {};
1234 	uint8_t index = 0;
1235 
1236 	/* Get the vdev id */
1237 	if (wma_find_vdev_id_by_addr(wma, pRateUpdateParams->bssid.bytes,
1238 				     &vdev_id)) {
1239 		wma_err("vdev handle is invalid for "QDF_MAC_ADDR_FMT,
1240 			 QDF_MAC_ADDR_REF(pRateUpdateParams->bssid.bytes));
1241 		qdf_mem_free(pRateUpdateParams);
1242 		return QDF_STATUS_E_INVAL;
1243 	}
1244 	short_gi = intr[vdev_id].config.shortgi;
1245 
1246 	status = wma_get_vdev_rate_flag(intr[vdev_id].vdev, &rate_flag);
1247 	if (QDF_IS_STATUS_ERROR(status)) {
1248 		wma_err("Failed to get rate_flag for VDEV_%d", vdev_id);
1249 		qdf_mem_free(pRateUpdateParams);
1250 		return QDF_STATUS_E_INVAL;
1251 	}
1252 
1253 	if (short_gi == 0)
1254 		short_gi = (rate_flag & TX_RATE_SGI) ? true : false;
1255 	/* first check if reliable TX mcast rate is used. If not check the bcast
1256 	 * Then is mcast. Mcast rate is saved in mcastDataRate24GHz
1257 	 */
1258 	if (pRateUpdateParams->reliableMcastDataRateTxFlag > 0) {
1259 		mbpsx10_rate = pRateUpdateParams->reliableMcastDataRate;
1260 		paramid = wmi_vdev_param_mcast_data_rate;
1261 		if (pRateUpdateParams->
1262 		    reliableMcastDataRateTxFlag & TX_RATE_SGI)
1263 			short_gi = 1;   /* upper layer specified short GI */
1264 	} else if (pRateUpdateParams->bcastDataRate > -1) {
1265 		mbpsx10_rate = pRateUpdateParams->bcastDataRate;
1266 		paramid = wmi_vdev_param_bcast_data_rate;
1267 	} else {
1268 		mbpsx10_rate = pRateUpdateParams->mcastDataRate24GHz;
1269 		paramid = wmi_vdev_param_mcast_data_rate;
1270 		if (pRateUpdateParams->
1271 		    mcastDataRate24GHzTxFlag & TX_RATE_SGI)
1272 			short_gi = 1;   /* upper layer specified short GI */
1273 	}
1274 	wma_debug("dev_id = %d, dev_type = %d, dev_mode = %d,",
1275 		 vdev_id, intr[vdev_id].type,
1276 		 pRateUpdateParams->dev_mode);
1277 	wma_debug("mac = "QDF_MAC_ADDR_FMT", config.shortgi = %d, rate_flags = 0x%x",
1278 		 QDF_MAC_ADDR_REF(pRateUpdateParams->bssid.bytes),
1279 		 intr[vdev_id].config.shortgi, rate_flag);
1280 	ret = wma_encode_mc_rate(short_gi, intr[vdev_id].config.chwidth,
1281 				 intr[vdev_id].ch_freq, mbpsx10_rate,
1282 				 pRateUpdateParams->nss, &rate);
1283 	if (ret != QDF_STATUS_SUCCESS) {
1284 		wma_err("Error, Invalid input rate value");
1285 		qdf_mem_free(pRateUpdateParams);
1286 		return ret;
1287 	}
1288 
1289 	ret = mlme_check_index_setparam(setparam, wmi_vdev_param_sgi, short_gi,
1290 					index++, MAX_VDEV_PROCESS_RATE_PARAMS);
1291 	if (QDF_IS_STATUS_ERROR(status)) {
1292 		wma_debug("failed at wmi_vdev_param_sgi");
1293 		goto error;
1294 	}
1295 
1296 	ret = mlme_check_index_setparam(setparam, paramid, rate, index++,
1297 					MAX_VDEV_PROCESS_RATE_PARAMS);
1298 	if (QDF_IS_STATUS_ERROR(status)) {
1299 		wma_debug("failed at paramid:%d", paramid);
1300 		goto error;
1301 	}
1302 
1303 	ret = wma_send_multi_pdev_vdev_set_params(MLME_VDEV_SETPARAM,
1304 						  vdev_id, setparam, index);
1305 	if (QDF_IS_STATUS_ERROR(ret))
1306 		wma_debug("failed to send vdev set params");
1307 error:
1308 	qdf_mem_free(pRateUpdateParams);
1309 	return ret;
1310 }
1311 
1312 /**
1313  * wma_mgmt_tx_ack_comp_hdlr() - handles tx ack mgmt completion
1314  * @context: context with which the handler is registered
1315  * @netbuf: tx mgmt nbuf
1316  * @status: status of tx completion
1317  *
1318  * This is callback registered with TxRx for
1319  * Ack Complete.
1320  *
1321  * Return: none
1322  */
1323 static void
1324 wma_mgmt_tx_ack_comp_hdlr(void *wma_context, qdf_nbuf_t netbuf, int32_t status)
1325 {
1326 	tp_wma_handle wma_handle = (tp_wma_handle) wma_context;
1327 	struct wlan_objmgr_pdev *pdev = (struct wlan_objmgr_pdev *)
1328 					wma_handle->pdev;
1329 	struct wmi_mgmt_params mgmt_params = {};
1330 	uint16_t desc_id;
1331 	uint8_t vdev_id;
1332 
1333 	desc_id = QDF_NBUF_CB_MGMT_TXRX_DESC_ID(netbuf);
1334 	vdev_id = mgmt_txrx_get_vdev_id(pdev, desc_id);
1335 
1336 	mgmt_params.vdev_id = vdev_id;
1337 	mgmt_txrx_tx_completion_handler(pdev, desc_id, status, &mgmt_params);
1338 }
1339 
1340 /**
1341  * wma_mgmt_tx_dload_comp_hldr() - handles tx mgmt completion
1342  * @context: context with which the handler is registered
1343  * @netbuf: tx mgmt nbuf
1344  * @status: status of tx completion
1345  *
1346  * This function calls registered download callback while sending mgmt packet.
1347  *
1348  * Return: none
1349  */
1350 static void
1351 wma_mgmt_tx_dload_comp_hldr(void *wma_context, qdf_nbuf_t netbuf,
1352 			    int32_t status)
1353 {
1354 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
1355 
1356 	tp_wma_handle wma_handle = (tp_wma_handle) wma_context;
1357 	void *mac_context = wma_handle->mac_context;
1358 
1359 	wma_debug("Tx Complete Status %d", status);
1360 
1361 	if (!wma_handle->tx_frm_download_comp_cb) {
1362 		wma_err("Tx Complete Cb not registered by umac");
1363 		return;
1364 	}
1365 
1366 	/* Call Tx Mgmt Complete Callback registered by umac */
1367 	wma_handle->tx_frm_download_comp_cb(mac_context, netbuf, 0);
1368 
1369 	/* Reset Callback */
1370 	wma_handle->tx_frm_download_comp_cb = NULL;
1371 
1372 	/* Set the Tx Mgmt Complete Event */
1373 	qdf_status = qdf_event_set(&wma_handle->tx_frm_download_comp_event);
1374 	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
1375 		wma_alert("Event Set failed - tx_frm_comp_event");
1376 }
1377 
1378 /**
1379  * wma_tx_attach() - attach tx related callbacks
1380  * @pwmaCtx: wma context
1381  *
1382  * attaches tx fn with underlying layer.
1383  *
1384  * Return: QDF status
1385  */
1386 QDF_STATUS wma_tx_attach(tp_wma_handle wma_handle)
1387 {
1388 	/* Get the Vos Context */
1389 	struct cds_context *cds_handle =
1390 		(struct cds_context *) (wma_handle->cds_context);
1391 
1392 	/* Get the txRx Pdev ID */
1393 	uint8_t pdev_id = WMI_PDEV_ID_SOC;
1394 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
1395 
1396 	/* Register for Tx Management Frames */
1397 	cdp_mgmt_tx_cb_set(soc, pdev_id, 0,
1398 			   wma_mgmt_tx_dload_comp_hldr,
1399 			   wma_mgmt_tx_ack_comp_hdlr, wma_handle);
1400 
1401 	/* Register callback to send PEER_UNMAP_RESPONSE cmd*/
1402 	if (cdp_cfg_get_peer_unmap_conf_support(soc))
1403 		cdp_peer_unmap_sync_cb_set(soc, pdev_id,
1404 					   wma_peer_unmap_conf_cb);
1405 
1406 	/* Store the Mac Context */
1407 	wma_handle->mac_context = cds_handle->mac_context;
1408 
1409 	return QDF_STATUS_SUCCESS;
1410 }
1411 
1412 /**
1413  * wma_tx_detach() - detach tx related callbacks
1414  * @tp_wma_handle: wma context
1415  *
1416  * Deregister with TxRx for Tx Mgmt Download and Ack completion.
1417  *
1418  * Return: QDF status
1419  */
1420 QDF_STATUS wma_tx_detach(tp_wma_handle wma_handle)
1421 {
1422 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
1423 
1424 	/* Get the txRx Pdev ID */
1425 	uint8_t pdev_id = WMI_PDEV_ID_SOC;
1426 
1427 	if (!soc)
1428 		return QDF_STATUS_E_FAILURE;
1429 
1430 	if (pdev_id != OL_TXRX_INVALID_PDEV_ID) {
1431 		/* Deregister with TxRx for Tx Mgmt completion call back */
1432 		cdp_mgmt_tx_cb_set(soc, pdev_id, 0, NULL, NULL, NULL);
1433 	}
1434 
1435 	/* Reset Tx Frm Callbacks */
1436 	wma_handle->tx_frm_download_comp_cb = NULL;
1437 
1438 	/* Reset Tx Data Frame Ack Cb */
1439 	wma_handle->umac_data_ota_ack_cb = NULL;
1440 
1441 	/* Reset last Tx Data Frame nbuf ptr */
1442 	wma_handle->last_umac_data_nbuf = NULL;
1443 
1444 	return QDF_STATUS_SUCCESS;
1445 }
1446 
1447 #if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || \
1448 	defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(CONFIG_HL_SUPPORT)
/**
 * wma_process_vdev_tx_pause_evt() - apply a FW tx pause/unpause to one vdev
 * @soc: DP SOC handle passed through to the cdp flow-control APIs
 * @wma: wma handle (holds the per-vdev pause bitmaps)
 * @event: tx pause event from firmware (carries action and pause_type)
 * @vdev_id: vdev the action applies to
 *
 * For ACTION_PAUSE, the pause-type bit is set in the vdev's pause bitmap
 * and the vdev queues are paused only when the bitmap was previously
 * empty. For ACTION_UNPAUSE, the bit is cleared and the queues are
 * unpaused only once the bitmap becomes empty again, so overlapping
 * pause reasons keep the vdev paused. PAUSE_TYPE_CHOP_TDLS_OFFCHAN
 * bypasses the bitmap bookkeeping entirely in both directions.
 *
 * Return: none
 */
static void wma_process_vdev_tx_pause_evt(void *soc,
					  tp_wma_handle wma,
					  wmi_tx_pause_event_fixed_param *event,
					  uint8_t vdev_id)
{
	/* PAUSE action, add bitmap */
	if (event->action == ACTION_PAUSE) {
		/* Exclude TDLS_OFFCHAN_CHOP from vdev based pauses */
		if (event->pause_type == PAUSE_TYPE_CHOP_TDLS_OFFCHAN) {
			cdp_fc_vdev_pause(soc, vdev_id,
					  OL_TXQ_PAUSE_REASON_FW,
					  event->pause_type);
		} else {
			/*
			 * Now only support per-dev pause so it is not
			 * necessary to pause a paused queue again.
			 */
			if (!wma_vdev_get_pause_bitmap(vdev_id))
				cdp_fc_vdev_pause(soc, vdev_id,
						  OL_TXQ_PAUSE_REASON_FW,
						  event->pause_type);

			wma_vdev_set_pause_bit(vdev_id,
					       event->pause_type);
		}
	}
	/* UNPAUSE action, clean bitmap */
	else if (event->action == ACTION_UNPAUSE) {
		/* Exclude TDLS_OFFCHAN_CHOP from vdev based pauses */
		if (event->pause_type == PAUSE_TYPE_CHOP_TDLS_OFFCHAN) {
			cdp_fc_vdev_unpause(soc, vdev_id,
					    OL_TXQ_PAUSE_REASON_FW,
					    event->pause_type);
		} else {
		/* Handle unpause only if already paused */
			if (wma_vdev_get_pause_bitmap(vdev_id)) {
				wma_vdev_clear_pause_bit(vdev_id,
							 event->pause_type);

				/* Other pause reasons still set: stay paused */
				if (wma->interfaces[vdev_id].pause_bitmap)
					return;

				/* PAUSE BIT MAP is cleared
				 * UNPAUSE VDEV
				 */
				cdp_fc_vdev_unpause(soc, vdev_id,
						    OL_TXQ_PAUSE_REASON_FW,
						    event->pause_type);
			}
		}
	} else {
		wma_err("Not Valid Action Type %d", event->action);
	}
}
1503 
1504 int wma_mcc_vdev_tx_pause_evt_handler(void *handle, uint8_t *event,
1505 				      uint32_t len)
1506 {
1507 	tp_wma_handle wma = (tp_wma_handle) handle;
1508 	WMI_TX_PAUSE_EVENTID_param_tlvs *param_buf;
1509 	wmi_tx_pause_event_fixed_param *wmi_event;
1510 	uint8_t vdev_id;
1511 	A_UINT32 vdev_map;
1512 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
1513 
1514 	param_buf = (WMI_TX_PAUSE_EVENTID_param_tlvs *) event;
1515 	if (!param_buf) {
1516 		wma_err("Invalid roam event buffer");
1517 		return -EINVAL;
1518 	}
1519 
1520 	if (ucfg_pmo_get_wow_bus_suspend(wma->psoc)) {
1521 		wma_debug("Suspend is in progress: Pause/Unpause Tx is NoOp");
1522 		return 0;
1523 	}
1524 
1525 	if (!soc)
1526 		return -EINVAL;
1527 
1528 	wmi_event = param_buf->fixed_param;
1529 	vdev_map = wmi_event->vdev_map;
1530 	/* FW mapped vdev from ID
1531 	 * vdev_map = (1 << vdev_id)
1532 	 * So, host should unmap to ID
1533 	 */
1534 	for (vdev_id = 0; vdev_map != 0 && vdev_id < wma->max_bssid;
1535 	     vdev_id++) {
1536 		if (!(vdev_map & 0x1)) {
1537 			/* No Vdev */
1538 		} else {
1539 			if (!wma->interfaces[vdev_id].vdev) {
1540 				wma_err("vdev is NULL for %d", vdev_id);
1541 				/* Test Next VDEV */
1542 				vdev_map >>= 1;
1543 				continue;
1544 			}
1545 
1546 			wma_process_vdev_tx_pause_evt(soc, wma,
1547 						      wmi_event,
1548 						      vdev_id);
1549 
1550 			wma_debug
1551 				("vdev_id %d, pause_map 0x%x, pause type %d, action %d",
1552 				vdev_id, wma_vdev_get_pause_bitmap(vdev_id),
1553 				wmi_event->pause_type, wmi_event->action);
1554 		}
1555 		/* Test Next VDEV */
1556 		vdev_map >>= 1;
1557 	}
1558 
1559 	return 0;
1560 }
1561 
1562 #endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
1563 
1564 #if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
1565 
1566 /**
1567  * wma_set_peer_rate_report_condition -
1568  *                    this function set peer rate report
1569  *                    condition info to firmware.
1570  * @handle:	Handle of WMA
1571  * @config:	Bad peer configuration from SIR module
1572  *
1573  * It is a wrapper function to sent WMI_PEER_SET_RATE_REPORT_CONDITION_CMDID
1574  * to the firmware\target. If the command sent to firmware failed, free the
1575  * buffer that allocated.
1576  *
1577  * Return: QDF_STATUS based on values sent to firmware
1578  */
1579 static
1580 QDF_STATUS wma_set_peer_rate_report_condition(WMA_HANDLE handle,
1581 			struct t_bad_peer_txtcl_config *config)
1582 {
1583 	tp_wma_handle wma_handle = (tp_wma_handle)handle;
1584 	struct wmi_peer_rate_report_params rate_report_params = {0};
1585 	u_int32_t i, j;
1586 
1587 	rate_report_params.rate_report_enable = config->enable;
1588 	rate_report_params.backoff_time = config->tgt_backoff;
1589 	rate_report_params.timer_period = config->tgt_report_prd;
1590 	for (i = 0; i < WMI_PEER_RATE_REPORT_COND_MAX_NUM; i++) {
1591 		rate_report_params.report_per_phy[i].cond_flags =
1592 			config->threshold[i].cond;
1593 		rate_report_params.report_per_phy[i].delta.delta_min  =
1594 			config->threshold[i].delta;
1595 		rate_report_params.report_per_phy[i].delta.percent =
1596 			config->threshold[i].percentage;
1597 		for (j = 0; j < WMI_MAX_NUM_OF_RATE_THRESH; j++) {
1598 			rate_report_params.report_per_phy[i].
1599 				report_rate_threshold[j] =
1600 					config->threshold[i].thresh[j];
1601 		}
1602 	}
1603 
1604 	return wmi_unified_peer_rate_report_cmd(wma_handle->wmi_handle,
1605 						&rate_report_params);
1606 }
1607 
1608 /**
1609  * wma_process_init_bad_peer_tx_ctl_info -
1610  *                this function to initialize peer rate report config info.
1611  * @handle:	Handle of WMA
1612  * @config:	Bad peer configuration from SIR module
1613  *
1614  * This function initializes the bad peer tx control data structure in WMA,
1615  * sends down the initial configuration to the firmware and configures
1616  * the peer status update setting in the tx_rx module.
1617  *
1618  * Return: QDF_STATUS based on procedure status
1619  */
1620 
1621 QDF_STATUS wma_process_init_bad_peer_tx_ctl_info(tp_wma_handle wma,
1622 					struct t_bad_peer_txtcl_config *config)
1623 {
1624 	/* Parameter sanity check */
1625 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
1626 
1627 	if (!wma || !config) {
1628 		wma_err("Invalid input");
1629 		return QDF_STATUS_E_FAILURE;
1630 	}
1631 
1632 	wma_debug("enable %d period %d txq limit %d\n",
1633 		 config->enable,
1634 		 config->period,
1635 		 config->txq_limit);
1636 
1637 	/* Only need to initialize the setting
1638 	 * when the feature is enabled
1639 	 */
1640 	if (config->enable) {
1641 		int i = 0;
1642 
1643 		cdp_bad_peer_txctl_set_setting(soc,
1644 					WMI_PDEV_ID_SOC,
1645 					config->enable,
1646 					config->period,
1647 					config->txq_limit);
1648 
1649 		for (i = 0; i < WLAN_WMA_IEEE80211_MAX_LEVEL; i++) {
1650 			u_int32_t threshold, limit;
1651 
1652 			threshold = config->threshold[i].thresh[0];
1653 			limit =	config->threshold[i].txlimit;
1654 			cdp_bad_peer_txctl_update_threshold(soc,
1655 						WMI_PDEV_ID_SOC,
1656 						i,
1657 						threshold,
1658 						limit);
1659 		}
1660 	}
1661 
1662 	return wma_set_peer_rate_report_condition(wma, config);
1663 }
1664 #endif /* defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL) */
1665 
1666 #ifdef FW_THERMAL_THROTTLE_SUPPORT
1667 /**
1668  * wma_update_thermal_mitigation_to_fw - update thermal mitigation to fw
1669  * @wma: wma handle
1670  * @thermal_level: thermal level
1671  *
1672  * This function sends down thermal mitigation params to the fw
1673  *
1674  * Returns: QDF_STATUS_SUCCESS for success otherwise failure
1675  */
1676 static QDF_STATUS wma_update_thermal_mitigation_to_fw(tp_wma_handle wma,
1677 						      u_int8_t thermal_level)
1678 {
1679 	struct thermal_mitigation_params therm_data = {0};
1680 
1681 	/* Check if vdev is in mcc, if in mcc set dc value as 10, else 100 */
1682 	therm_data.dc = 100;
1683 	therm_data.enable = 1;
1684 	therm_data.levelconf[0].dcoffpercent =
1685 		wma->thermal_mgmt_info.throttle_duty_cycle_tbl[thermal_level];
1686 	therm_data.levelconf[0].priority = 0;
1687 	therm_data.num_thermal_conf = 1;
1688 
1689 	return wmi_unified_thermal_mitigation_param_cmd_send(wma->wmi_handle,
1690 							     &therm_data);
1691 }
1692 #else /* FW_THERMAL_THROTTLE_SUPPORT */
1693 /**
1694  * wma_update_thermal_mitigation_to_fw - update thermal mitigation to fw
1695  * @wma: wma handle
1696  * @thermal_level: thermal level
1697  *
1698  * This function sends down thermal mitigation params to the fw
1699  *
1700  * Returns: QDF_STATUS_SUCCESS for success otherwise failure
1701  */
static QDF_STATUS wma_update_thermal_mitigation_to_fw(tp_wma_handle wma,
						      u_int8_t thermal_level)
{
	/* Stub: FW_THERMAL_THROTTLE_SUPPORT is disabled, nothing to send */
	return QDF_STATUS_SUCCESS;
}
1707 #endif
1708 
1709 /**
1710  * wma_update_thermal_cfg_to_fw() - update thermal configuration to FW
1711  * @wma: Pointer to WMA handle
1712  *
1713  * This function update thermal configuration to FW
1714  *
1715  * Returns: QDF_STATUS_SUCCESS for success otherwise failure
1716  */
1717 static QDF_STATUS wma_update_thermal_cfg_to_fw(tp_wma_handle wma)
1718 {
1719 	t_thermal_cmd_params thermal_params = {0};
1720 
1721 	/* Get the temperature thresholds to set in firmware */
1722 	thermal_params.minTemp =
1723 		wma->thermal_mgmt_info.thermalLevels[WLAN_WMA_THERMAL_LEVEL_0].
1724 		minTempThreshold;
1725 	thermal_params.maxTemp =
1726 		wma->thermal_mgmt_info.thermalLevels[WLAN_WMA_THERMAL_LEVEL_0].
1727 		maxTempThreshold;
1728 	thermal_params.thermalEnable =
1729 		wma->thermal_mgmt_info.thermalMgmtEnabled;
1730 	thermal_params.thermal_action = wma->thermal_mgmt_info.thermal_action;
1731 
1732 	wma_debug("TM sending to fw: min_temp %d max_temp %d enable %d act %d",
1733 		  thermal_params.minTemp, thermal_params.maxTemp,
1734 		  thermal_params.thermalEnable, thermal_params.thermal_action);
1735 
1736 	return wma_set_thermal_mgmt(wma, thermal_params);
1737 }
1738 
1739 /**
1740  * wma_process_init_thermal_info() - initialize thermal info
1741  * @wma: Pointer to WMA handle
1742  * @pThermalParams: Pointer to thermal mitigation parameters
1743  *
1744  * This function initializes the thermal management table in WMA,
1745  * sends down the initial temperature thresholds to the firmware
1746  * and configures the throttle period in the tx rx module
1747  *
1748  * Returns: QDF_STATUS_SUCCESS for success otherwise failure
1749  */
1750 QDF_STATUS wma_process_init_thermal_info(tp_wma_handle wma,
1751 					 t_thermal_mgmt *pThermalParams)
1752 {
1753 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
1754 #ifdef FW_THERMAL_THROTTLE_SUPPORT
1755 	int i = 0;
1756 #endif /* FW_THERMAL_THROTTLE_SUPPORT */
1757 
1758 	if (!wma || !pThermalParams) {
1759 		wma_err("TM Invalid input");
1760 		return QDF_STATUS_E_FAILURE;
1761 	}
1762 
1763 	wma_debug("TM enable %d period %d action %d",
1764 		  pThermalParams->thermalMgmtEnabled,
1765 		  pThermalParams->throttlePeriod,
1766 		  pThermalParams->thermal_action);
1767 
1768 	wma_nofl_debug("Throttle Duty Cycle Level in percentage:\n"
1769 		 "0 %d\n"
1770 		 "1 %d\n"
1771 		 "2 %d\n"
1772 		 "3 %d\n"
1773 		 "4 %d\n"
1774 		 "5 %d",
1775 		 pThermalParams->throttle_duty_cycle_tbl[0],
1776 		 pThermalParams->throttle_duty_cycle_tbl[1],
1777 		 pThermalParams->throttle_duty_cycle_tbl[2],
1778 		 pThermalParams->throttle_duty_cycle_tbl[3],
1779 		 pThermalParams->throttle_duty_cycle_tbl[4],
1780 		 pThermalParams->throttle_duty_cycle_tbl[5]);
1781 
1782 	wma->thermal_mgmt_info.thermalMgmtEnabled =
1783 		pThermalParams->thermalMgmtEnabled;
1784 	wma->thermal_mgmt_info.thermalLevels[0].minTempThreshold =
1785 		pThermalParams->thermalLevels[0].minTempThreshold;
1786 	wma->thermal_mgmt_info.thermalLevels[0].maxTempThreshold =
1787 		pThermalParams->thermalLevels[0].maxTempThreshold;
1788 	wma->thermal_mgmt_info.thermalLevels[1].minTempThreshold =
1789 		pThermalParams->thermalLevels[1].minTempThreshold;
1790 	wma->thermal_mgmt_info.thermalLevels[1].maxTempThreshold =
1791 		pThermalParams->thermalLevels[1].maxTempThreshold;
1792 	wma->thermal_mgmt_info.thermalLevels[2].minTempThreshold =
1793 		pThermalParams->thermalLevels[2].minTempThreshold;
1794 	wma->thermal_mgmt_info.thermalLevels[2].maxTempThreshold =
1795 		pThermalParams->thermalLevels[2].maxTempThreshold;
1796 	wma->thermal_mgmt_info.thermalLevels[3].minTempThreshold =
1797 		pThermalParams->thermalLevels[3].minTempThreshold;
1798 	wma->thermal_mgmt_info.thermalLevels[3].maxTempThreshold =
1799 		pThermalParams->thermalLevels[3].maxTempThreshold;
1800 	wma->thermal_mgmt_info.thermalLevels[4].minTempThreshold =
1801 		pThermalParams->thermalLevels[4].minTempThreshold;
1802 	wma->thermal_mgmt_info.thermalLevels[4].maxTempThreshold =
1803 		pThermalParams->thermalLevels[4].maxTempThreshold;
1804 	wma->thermal_mgmt_info.thermalLevels[5].minTempThreshold =
1805 		pThermalParams->thermalLevels[5].minTempThreshold;
1806 	wma->thermal_mgmt_info.thermalLevels[5].maxTempThreshold =
1807 		pThermalParams->thermalLevels[5].maxTempThreshold;
1808 	wma->thermal_mgmt_info.thermalCurrLevel = WLAN_WMA_THERMAL_LEVEL_0;
1809 	wma->thermal_mgmt_info.thermal_action = pThermalParams->thermal_action;
1810 	wma_nofl_debug("TM level min max:\n"
1811 		 "0 %d   %d\n"
1812 		 "1 %d   %d\n"
1813 		 "2 %d   %d\n"
1814 		 "3 %d   %d\n"
1815 		 "4 %d   %d\n"
1816 		 "5 %d   %d",
1817 		 wma->thermal_mgmt_info.thermalLevels[0].minTempThreshold,
1818 		 wma->thermal_mgmt_info.thermalLevels[0].maxTempThreshold,
1819 		 wma->thermal_mgmt_info.thermalLevels[1].minTempThreshold,
1820 		 wma->thermal_mgmt_info.thermalLevels[1].maxTempThreshold,
1821 		 wma->thermal_mgmt_info.thermalLevels[2].minTempThreshold,
1822 		 wma->thermal_mgmt_info.thermalLevels[2].maxTempThreshold,
1823 		 wma->thermal_mgmt_info.thermalLevels[3].minTempThreshold,
1824 		 wma->thermal_mgmt_info.thermalLevels[3].maxTempThreshold,
1825 		 wma->thermal_mgmt_info.thermalLevels[4].minTempThreshold,
1826 		 wma->thermal_mgmt_info.thermalLevels[4].maxTempThreshold,
1827 		 wma->thermal_mgmt_info.thermalLevels[5].minTempThreshold,
1828 		 wma->thermal_mgmt_info.thermalLevels[5].maxTempThreshold);
1829 
1830 #ifdef FW_THERMAL_THROTTLE_SUPPORT
1831 	for (i = 0; i < THROTTLE_LEVEL_MAX; i++)
1832 		wma->thermal_mgmt_info.throttle_duty_cycle_tbl[i] =
1833 				pThermalParams->throttle_duty_cycle_tbl[i];
1834 #endif /* FW_THERMAL_THROTTLE_SUPPORT */
1835 
1836 	if (wma->thermal_mgmt_info.thermalMgmtEnabled) {
1837 		if (!wma->fw_therm_throt_support) {
1838 			cdp_throttle_init_period(
1839 				cds_get_context(QDF_MODULE_ID_SOC),
1840 				WMI_PDEV_ID_SOC, pThermalParams->throttlePeriod,
1841 				&pThermalParams->throttle_duty_cycle_tbl[0]);
1842 		} else {
1843 			qdf_status = wma_update_thermal_mitigation_to_fw(
1844 					wma, WLAN_WMA_THERMAL_LEVEL_0);
1845 			if (QDF_STATUS_SUCCESS != qdf_status)
1846 				return qdf_status;
1847 		}
1848 		qdf_status = wma_update_thermal_cfg_to_fw(wma);
1849 	}
1850 	return qdf_status;
1851 }
1852 
1853 /**
1854  * wma_set_thermal_level_ind() - send SME set thermal level indication message
1855  * @level:  thermal level
1856  *
1857  * Send SME SET_THERMAL_LEVEL_IND message
1858  *
1859  * Returns: none
1860  */
1861 static void wma_set_thermal_level_ind(u_int8_t level)
1862 {
1863 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
1864 	struct scheduler_msg sme_msg = {0};
1865 
1866 	wma_info("Thermal level: %d", level);
1867 
1868 	sme_msg.type = eWNI_SME_SET_THERMAL_LEVEL_IND;
1869 	sme_msg.bodyptr = NULL;
1870 	sme_msg.bodyval = level;
1871 
1872 	qdf_status = scheduler_post_message(QDF_MODULE_ID_WMA,
1873 					    QDF_MODULE_ID_SME,
1874 					    QDF_MODULE_ID_SME, &sme_msg);
1875 	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
1876 		wma_err("Fail to post set thermal level ind msg");
1877 }
1878 
1879 /**
1880  * wma_process_set_thermal_level() - sets thermal level
1881  * @wma: Pointer to WMA handle
1882  * @thermal_level : Thermal level
1883  *
1884  * This function sets the new thermal throttle level in the
1885  * txrx module and sends down the corresponding temperature
1886  * thresholds to the firmware
1887  *
1888  * Returns: QDF_STATUS_SUCCESS for success otherwise failure
1889  */
1890 QDF_STATUS wma_process_set_thermal_level(tp_wma_handle wma,
1891 					 uint8_t thermal_level)
1892 {
1893 	if (!wma) {
1894 		wma_err("TM Invalid input");
1895 		return QDF_STATUS_E_FAILURE;
1896 	}
1897 
1898 	wma_debug("TM set level %d", thermal_level);
1899 
1900 	/* Check if thermal mitigation is enabled */
1901 	if (!wma->thermal_mgmt_info.thermalMgmtEnabled) {
1902 		wma_err("Thermal mgmt is not enabled, ignoring set level command");
1903 		return QDF_STATUS_E_FAILURE;
1904 	}
1905 
1906 	if (thermal_level >= WLAN_WMA_MAX_THERMAL_LEVELS) {
1907 		wma_err("Invalid thermal level set %d", thermal_level);
1908 		return QDF_STATUS_E_FAILURE;
1909 	}
1910 
1911 	if (thermal_level == wma->thermal_mgmt_info.thermalCurrLevel) {
1912 		wma_debug("Current level %d is same as the set level, ignoring",
1913 			 wma->thermal_mgmt_info.thermalCurrLevel);
1914 		return QDF_STATUS_SUCCESS;
1915 	}
1916 
1917 	wma->thermal_mgmt_info.thermalCurrLevel = thermal_level;
1918 
1919 	cdp_throttle_set_level(cds_get_context(QDF_MODULE_ID_SOC),
1920 			       WMI_PDEV_ID_SOC, thermal_level);
1921 
1922 	/* Send SME SET_THERMAL_LEVEL_IND message */
1923 	wma_set_thermal_level_ind(thermal_level);
1924 
1925 	return QDF_STATUS_SUCCESS;
1926 }
1927 
1928 
1929 /**
1930  * wma_set_thermal_mgmt() - set thermal mgmt command to fw
1931  * @wma_handle: Pointer to WMA handle
1932  * @thermal_info: Thermal command information
1933  *
1934  * This function sends the thermal management command
1935  * to the firmware
1936  *
1937  * Return: QDF_STATUS_SUCCESS for success otherwise failure
1938  */
1939 QDF_STATUS wma_set_thermal_mgmt(tp_wma_handle wma_handle,
1940 				t_thermal_cmd_params thermal_info)
1941 {
1942 	struct thermal_cmd_params mgmt_thermal_info = {0};
1943 
1944 	if (!wma_handle) {
1945 		wma_err("Invalid input");
1946 		QDF_ASSERT(0);
1947 		return QDF_STATUS_E_FAILURE;
1948 	}
1949 
1950 	mgmt_thermal_info.min_temp = thermal_info.minTemp;
1951 	mgmt_thermal_info.max_temp = thermal_info.maxTemp;
1952 	mgmt_thermal_info.thermal_enable = thermal_info.thermalEnable;
1953 	mgmt_thermal_info.thermal_action = thermal_info.thermal_action;
1954 
1955 	return wmi_unified_set_thermal_mgmt_cmd(wma_handle->wmi_handle,
1956 						&mgmt_thermal_info);
1957 }
1958 
1959 /**
1960  * wma_thermal_mgmt_get_level() - returns throttle level
1961  * @handle: Pointer to WMA handle
1962  * @temp: temperature
1963  *
1964  * This function returns the thermal(throttle) level
1965  * given the temperature
1966  *
1967  * Return: thermal (throttle) level
1968  */
1969 static uint8_t wma_thermal_mgmt_get_level(void *handle, uint32_t temp)
1970 {
1971 	tp_wma_handle wma = (tp_wma_handle) handle;
1972 	int i;
1973 	uint8_t level;
1974 
1975 	level = i = wma->thermal_mgmt_info.thermalCurrLevel;
1976 	while (temp < wma->thermal_mgmt_info.thermalLevels[i].minTempThreshold
1977 	       && i > 0) {
1978 		i--;
1979 		level = i;
1980 	}
1981 
1982 	i = wma->thermal_mgmt_info.thermalCurrLevel;
1983 	while (temp > wma->thermal_mgmt_info.thermalLevels[i].maxTempThreshold
1984 	       && i < (WLAN_WMA_MAX_THERMAL_LEVELS - 1)) {
1985 		i++;
1986 		level = i;
1987 	}
1988 
1989 	wma_warn("Change thermal level from %d -> %d",
1990 		 wma->thermal_mgmt_info.thermalCurrLevel, level);
1991 
1992 	return level;
1993 }
1994 
1995 /**
1996  * wms_thermal_level_to_host() - Convert wma thermal level to host enum
1997  * @level: current thermal throttle level
1998  *
1999  * Return: host thermal throttle level
2000  */
2001 static enum thermal_throttle_level
2002 wma_thermal_level_to_host(uint8_t level)
2003 {
2004 	switch (level) {
2005 	case WLAN_WMA_THERMAL_LEVEL_0:
2006 		return THERMAL_FULLPERF;
2007 	case WLAN_WMA_THERMAL_LEVEL_1:
2008 	case WLAN_WMA_THERMAL_LEVEL_2:
2009 	case WLAN_WMA_THERMAL_LEVEL_3:
2010 		return THERMAL_MITIGATION;
2011 	case WLAN_WMA_THERMAL_LEVEL_4:
2012 		return THERMAL_SHUTOFF;
2013 	case WLAN_WMA_THERMAL_LEVEL_5:
2014 		return THERMAL_SHUTDOWN_TARGET;
2015 	default:
2016 		return THERMAL_UNKNOWN;
2017 	}
2018 }
2019 
2020 /**
2021  * wma_thermal_mgmt_evt_handler() - thermal mgmt event handler
2022  * @wma_handle: Pointer to WMA handle
2023  * @event: Thermal event information
2024  * @len: length of the event
2025  *
2026  * This function handles the thermal mgmt event from the firmware
2027  *
2028  * Return: 0 for success otherwise failure
2029  */
2030 int wma_thermal_mgmt_evt_handler(void *handle, uint8_t *event, uint32_t len)
2031 {
2032 	tp_wma_handle wma;
2033 	wmi_thermal_mgmt_event_fixed_param *tm_event;
2034 	uint8_t thermal_level;
2035 	t_thermal_cmd_params thermal_params = {0};
2036 	WMI_THERMAL_MGMT_EVENTID_param_tlvs *param_buf;
2037 	struct wlan_objmgr_psoc *psoc;
2038 	struct thermal_throttle_info info = {0};
2039 
2040 	if (!event || !handle) {
2041 		wma_err("Invalid thermal mitigation event buffer");
2042 		return -EINVAL;
2043 	}
2044 
2045 	wma = (tp_wma_handle) handle;
2046 
2047 	if (wma_validate_handle(wma))
2048 		return -EINVAL;
2049 
2050 	psoc = wma->psoc;
2051 	if (!psoc) {
2052 		wma_err("NULL psoc");
2053 		return -EINVAL;
2054 	}
2055 
2056 	param_buf = (WMI_THERMAL_MGMT_EVENTID_param_tlvs *) event;
2057 
2058 	/* Check if thermal mitigation is enabled */
2059 	if (!wma->thermal_mgmt_info.thermalMgmtEnabled) {
2060 		wma_err("Thermal mgmt is not enabled, ignoring event");
2061 		return -EINVAL;
2062 	}
2063 
2064 	tm_event = param_buf->fixed_param;
2065 	wma_debug("Thermal mgmt event received with temperature %d",
2066 		 tm_event->temperature_degreeC);
2067 
2068 	/* Get the thermal mitigation level for the reported temperature */
2069 	thermal_level = wma_thermal_mgmt_get_level(handle,
2070 					tm_event->temperature_degreeC);
2071 	wma_debug("Thermal mgmt level  %d", thermal_level);
2072 
2073 	if (thermal_level == wma->thermal_mgmt_info.thermalCurrLevel) {
2074 		wma_debug("Current level %d is same as the set level, ignoring",
2075 			 wma->thermal_mgmt_info.thermalCurrLevel);
2076 		return 0;
2077 	}
2078 
2079 	wma->thermal_mgmt_info.thermalCurrLevel = thermal_level;
2080 	info.level = wma_thermal_level_to_host(thermal_level);
2081 	target_if_fwol_notify_thermal_throttle(psoc, &info);
2082 
2083 	if (!wma->fw_therm_throt_support) {
2084 		/* Inform txrx */
2085 		cdp_throttle_set_level(cds_get_context(QDF_MODULE_ID_SOC),
2086 				       WMI_PDEV_ID_SOC, thermal_level);
2087 	}
2088 
2089 	/* Send SME SET_THERMAL_LEVEL_IND message */
2090 	wma_set_thermal_level_ind(thermal_level);
2091 
2092 	if (wma->fw_therm_throt_support) {
2093 		/* Send duty cycle info to firmware for fw to throttle */
2094 		if (QDF_STATUS_SUCCESS !=
2095 			wma_update_thermal_mitigation_to_fw(wma, thermal_level))
2096 			return QDF_STATUS_E_FAILURE;
2097 	}
2098 
2099 	/* Get the temperature thresholds to set in firmware */
2100 	thermal_params.minTemp =
2101 		wma->thermal_mgmt_info.thermalLevels[thermal_level].
2102 		minTempThreshold;
2103 	thermal_params.maxTemp =
2104 		wma->thermal_mgmt_info.thermalLevels[thermal_level].
2105 		maxTempThreshold;
2106 	thermal_params.thermalEnable =
2107 		wma->thermal_mgmt_info.thermalMgmtEnabled;
2108 	thermal_params.thermal_action = wma->thermal_mgmt_info.thermal_action;
2109 
2110 	if (QDF_STATUS_SUCCESS != wma_set_thermal_mgmt(wma, thermal_params)) {
2111 		wma_err("Could not send thermal mgmt command to the firmware!");
2112 		return -EINVAL;
2113 	}
2114 
2115 	return 0;
2116 }
2117 
2118 /**
2119  * wma_decap_to_8023() - Decapsulate to 802.3 format
2120  * @msdu: skb buffer
2121  * @info: decapsulate info
2122  *
2123  * Return: none
2124  */
2125 static void wma_decap_to_8023(qdf_nbuf_t msdu, struct wma_decap_info_t *info)
2126 {
2127 	struct llc_snap_hdr_t *llc_hdr;
2128 	uint16_t ether_type;
2129 	uint16_t l2_hdr_space;
2130 	struct ieee80211_qosframe_addr4 *wh;
2131 	uint8_t local_buf[ETHERNET_HDR_LEN];
2132 	uint8_t *buf;
2133 	struct ethernet_hdr_t *ethr_hdr;
2134 
2135 	buf = (uint8_t *) qdf_nbuf_data(msdu);
2136 	llc_hdr = (struct llc_snap_hdr_t *)buf;
2137 	ether_type = (llc_hdr->ethertype[0] << 8) | llc_hdr->ethertype[1];
2138 	/* do llc remove if needed */
2139 	l2_hdr_space = 0;
2140 	if (IS_SNAP(llc_hdr)) {
2141 		if (IS_BTEP(llc_hdr)) {
2142 			/* remove llc */
2143 			l2_hdr_space += sizeof(struct llc_snap_hdr_t);
2144 			llc_hdr = NULL;
2145 		} else if (IS_RFC1042(llc_hdr)) {
2146 			if (!(ether_type == ETHERTYPE_AARP ||
2147 			      ether_type == ETHERTYPE_IPX)) {
2148 				/* remove llc */
2149 				l2_hdr_space += sizeof(struct llc_snap_hdr_t);
2150 				llc_hdr = NULL;
2151 			}
2152 		}
2153 	}
2154 	if (l2_hdr_space > ETHERNET_HDR_LEN)
2155 		buf = qdf_nbuf_pull_head(msdu, l2_hdr_space - ETHERNET_HDR_LEN);
2156 	else if (l2_hdr_space < ETHERNET_HDR_LEN)
2157 		buf = qdf_nbuf_push_head(msdu, ETHERNET_HDR_LEN - l2_hdr_space);
2158 
2159 	/* mpdu hdr should be present in info,re-create ethr_hdr based on
2160 	 * mpdu hdr
2161 	 */
2162 	wh = (struct ieee80211_qosframe_addr4 *)info->hdr;
2163 	ethr_hdr = (struct ethernet_hdr_t *)local_buf;
2164 	switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
2165 	case IEEE80211_FC1_DIR_NODS:
2166 		qdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr1,
2167 			     QDF_MAC_ADDR_SIZE);
2168 		qdf_mem_copy(ethr_hdr->src_addr, wh->i_addr2,
2169 			     QDF_MAC_ADDR_SIZE);
2170 		break;
2171 	case IEEE80211_FC1_DIR_TODS:
2172 		qdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr3,
2173 			     QDF_MAC_ADDR_SIZE);
2174 		qdf_mem_copy(ethr_hdr->src_addr, wh->i_addr2,
2175 			     QDF_MAC_ADDR_SIZE);
2176 		break;
2177 	case IEEE80211_FC1_DIR_FROMDS:
2178 		qdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr1,
2179 			     QDF_MAC_ADDR_SIZE);
2180 		qdf_mem_copy(ethr_hdr->src_addr, wh->i_addr3,
2181 			     QDF_MAC_ADDR_SIZE);
2182 		break;
2183 	case IEEE80211_FC1_DIR_DSTODS:
2184 		qdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr3,
2185 			     QDF_MAC_ADDR_SIZE);
2186 		qdf_mem_copy(ethr_hdr->src_addr, wh->i_addr4,
2187 			     QDF_MAC_ADDR_SIZE);
2188 		break;
2189 	}
2190 
2191 	if (!llc_hdr) {
2192 		ethr_hdr->ethertype[0] = (ether_type >> 8) & 0xff;
2193 		ethr_hdr->ethertype[1] = (ether_type) & 0xff;
2194 	} else {
2195 		uint32_t pktlen =
2196 			qdf_nbuf_len(msdu) - sizeof(ethr_hdr->ethertype);
2197 		ether_type = (uint16_t) pktlen;
2198 		ether_type = qdf_nbuf_len(msdu) - sizeof(struct ethernet_hdr_t);
2199 		ethr_hdr->ethertype[0] = (ether_type >> 8) & 0xff;
2200 		ethr_hdr->ethertype[1] = (ether_type) & 0xff;
2201 	}
2202 	qdf_mem_copy(buf, ethr_hdr, ETHERNET_HDR_LEN);
2203 }
2204 
2205 /**
2206  * wma_ieee80211_hdrsize() - get 802.11 header size
2207  * @data: 80211 frame
2208  *
2209  * Return: size of header
2210  */
2211 static int32_t wma_ieee80211_hdrsize(const void *data)
2212 {
2213 	const struct ieee80211_frame *wh = (const struct ieee80211_frame *)data;
2214 	int32_t size = sizeof(struct ieee80211_frame);
2215 
2216 	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
2217 		size += QDF_MAC_ADDR_SIZE;
2218 	if (IEEE80211_QOS_HAS_SEQ(wh))
2219 		size += sizeof(uint16_t);
2220 	return size;
2221 }
2222 
2223 /**
2224  * rate_pream: Mapping from data rates to preamble.
2225  */
2226 static uint32_t rate_pream[] = {WMI_RATE_PREAMBLE_CCK, WMI_RATE_PREAMBLE_CCK,
2227 				WMI_RATE_PREAMBLE_CCK, WMI_RATE_PREAMBLE_CCK,
2228 				WMI_RATE_PREAMBLE_OFDM, WMI_RATE_PREAMBLE_OFDM,
2229 				WMI_RATE_PREAMBLE_OFDM, WMI_RATE_PREAMBLE_OFDM,
2230 				WMI_RATE_PREAMBLE_OFDM, WMI_RATE_PREAMBLE_OFDM,
2231 				WMI_RATE_PREAMBLE_OFDM, WMI_RATE_PREAMBLE_OFDM};
2232 
2233 /**
2234  * rate_mcs: Mapping from data rates to MCS (+4 for OFDM to keep the sequence).
2235  */
2236 static uint32_t rate_mcs[] = {WMI_MAX_CCK_TX_RATE_1M, WMI_MAX_CCK_TX_RATE_2M,
2237 			      WMI_MAX_CCK_TX_RATE_5_5M, WMI_MAX_CCK_TX_RATE_11M,
2238 			      WMI_MAX_OFDM_TX_RATE_6M + 4,
2239 			      WMI_MAX_OFDM_TX_RATE_9M + 4,
2240 			      WMI_MAX_OFDM_TX_RATE_12M + 4,
2241 			      WMI_MAX_OFDM_TX_RATE_18M + 4,
2242 			      WMI_MAX_OFDM_TX_RATE_24M + 4,
2243 			      WMI_MAX_OFDM_TX_RATE_36M + 4,
2244 			      WMI_MAX_OFDM_TX_RATE_48M + 4,
2245 			      WMI_MAX_OFDM_TX_RATE_54M + 4};
2246 
2247 #define WMA_TX_SEND_MGMT_TYPE 0
2248 #define WMA_TX_SEND_DATA_TYPE 1
2249 
2250 /**
2251  * wma_update_tx_send_params() - Update tx_send_params TLV info
2252  * @tx_param: Pointer to tx_send_params
2253  * @rid: rate ID passed by PE
2254  *
2255  * Return: None
2256  */
2257 static void wma_update_tx_send_params(struct tx_send_params *tx_param,
2258 				      enum rateid rid)
2259 {
2260 	uint8_t  preamble = 0, nss = 0, rix = 0;
2261 
2262 	preamble = rate_pream[rid];
2263 	rix = rate_mcs[rid];
2264 
2265 	tx_param->mcs_mask = (1 << rix);
2266 	tx_param->nss_mask = (1 << nss);
2267 	tx_param->preamble_type = (1 << preamble);
2268 	tx_param->frame_type = WMA_TX_SEND_MGMT_TYPE;
2269 
2270 	wma_debug("rate_id: %d, mcs: %0x, nss: %0x, preamble: %0x",
2271 		 rid, tx_param->mcs_mask, tx_param->nss_mask,
2272 		 tx_param->preamble_type);
2273 }
2274 
2275 QDF_STATUS wma_tx_packet(void *wma_context, void *tx_frame, uint16_t frmLen,
2276 			 eFrameType frmType, eFrameTxDir txDir, uint8_t tid,
2277 			 wma_tx_dwnld_comp_callback tx_frm_download_comp_cb,
2278 			 void *pData,
2279 			 wma_tx_ota_comp_callback tx_frm_ota_comp_cb,
2280 			 uint8_t tx_flag, uint8_t vdev_id, bool tdls_flag,
2281 			 uint16_t channel_freq, enum rateid rid,
2282 			 int8_t peer_rssi, uint16_t action)
2283 {
2284 	tp_wma_handle wma_handle = (tp_wma_handle) (wma_context);
2285 	int32_t status;
2286 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
2287 	int32_t is_high_latency;
2288 	bool is_wmi_mgmt_tx = false;
2289 	enum frame_index tx_frm_index = GENERIC_NODOWNLD_NOACK_COMP_INDEX;
2290 	tpSirMacFrameCtl pFc = (tpSirMacFrameCtl) (qdf_nbuf_data(tx_frame));
2291 	uint8_t use_6mbps = 0;
2292 	uint8_t downld_comp_required = 0;
2293 	uint16_t chanfreq;
2294 	uint8_t *pFrame = NULL;
2295 	void *pPacket = NULL;
2296 	uint16_t newFrmLen = 0;
2297 	struct wma_txrx_node *iface;
2298 	struct mac_context *mac;
2299 	tpSirMacMgmtHdr mHdr;
2300 	struct wmi_mgmt_params mgmt_param = {0};
2301 	struct cdp_cfg *ctrl_pdev;
2302 	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
2303 	struct ieee80211_frame *wh;
2304 	struct wlan_objmgr_peer *peer = NULL;
2305 	struct wlan_objmgr_psoc *psoc;
2306 	struct wlan_objmgr_vdev *vdev = NULL;
2307 	void *mac_addr;
2308 	uint8_t *mld_addr = NULL;
2309 	bool is_5g = false;
2310 	uint8_t pdev_id;
2311 
2312 	if (wma_validate_handle(wma_handle)) {
2313 		cds_packet_free((void *)tx_frame);
2314 		return QDF_STATUS_E_FAILURE;
2315 	}
2316 
2317 	iface = &wma_handle->interfaces[vdev_id];
2318 
2319 	if (!soc) {
2320 		cds_packet_free((void *)tx_frame);
2321 		return QDF_STATUS_E_FAILURE;
2322 	}
2323 
2324 	cdp_hl_tdls_flag_reset(soc, vdev_id, false);
2325 
2326 	if (frmType >= TXRX_FRM_MAX) {
2327 		wma_err("Invalid Frame Type Fail to send Frame");
2328 		cds_packet_free((void *)tx_frame);
2329 		return QDF_STATUS_E_FAILURE;
2330 	}
2331 
2332 	mac = cds_get_context(QDF_MODULE_ID_PE);
2333 	if (!mac) {
2334 		cds_packet_free((void *)tx_frame);
2335 		return QDF_STATUS_E_FAILURE;
2336 	}
2337 	/*
2338 	 * Currently only support to
2339 	 * send 80211 Mgmt and 80211 Data are added.
2340 	 */
2341 	if (!((frmType == TXRX_FRM_802_11_MGMT) ||
2342 	      (frmType == TXRX_FRM_802_11_DATA))) {
2343 		wma_err("No Support to send other frames except 802.11 Mgmt/Data");
2344 		cds_packet_free((void *)tx_frame);
2345 		return QDF_STATUS_E_FAILURE;
2346 	}
2347 
2348 	if ((iface && (iface->rmfEnabled || tx_flag & HAL_USE_PMF)) &&
2349 	    (frmType == TXRX_FRM_802_11_MGMT) &&
2350 	    (pFc->subType == SIR_MAC_MGMT_DISASSOC ||
2351 	     pFc->subType == SIR_MAC_MGMT_DEAUTH ||
2352 	     pFc->subType == SIR_MAC_MGMT_ACTION)) {
2353 		struct ieee80211_frame *wh =
2354 			(struct ieee80211_frame *)qdf_nbuf_data(tx_frame);
2355 		if (!QDF_IS_ADDR_BROADCAST(wh->i_addr1) &&
2356 		    !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2357 			if (pFc->wep) {
2358 				uint8_t mic_len, hdr_len, pdev_id;
2359 
2360 				/* Allocate extra bytes for privacy header and
2361 				 * trailer
2362 				 */
2363 				if (iface->type == WMI_VDEV_TYPE_NDI &&
2364 				    (tx_flag & HAL_USE_PMF)) {
2365 					hdr_len = IEEE80211_CCMP_HEADERLEN;
2366 					mic_len = IEEE80211_CCMP_MICLEN;
2367 				} else {
2368 					pdev_id = wlan_objmgr_pdev_get_pdev_id(
2369 							wma_handle->pdev);
2370 					qdf_status = mlme_get_peer_mic_len(
2371 							wma_handle->psoc,
2372 							pdev_id, wh->i_addr1,
2373 							&mic_len, &hdr_len);
2374 
2375 					if (QDF_IS_STATUS_ERROR(qdf_status)) {
2376 						cds_packet_free(
2377 							(void *)tx_frame);
2378 						goto error;
2379 					}
2380 				}
2381 
2382 				newFrmLen = frmLen + hdr_len + mic_len;
2383 				qdf_status =
2384 					cds_packet_alloc((uint16_t) newFrmLen,
2385 							 (void **)&pFrame,
2386 							 (void **)&pPacket);
2387 
2388 				if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
2389 					wma_alert("Failed to allocate %d bytes for RMF status code (%x)",
2390 						newFrmLen,
2391 						qdf_status);
2392 					/* Free the original packet memory */
2393 					cds_packet_free((void *)tx_frame);
2394 					goto error;
2395 				}
2396 
2397 				/*
2398 				 * Initialize the frame with 0's and only fill
2399 				 * MAC header and data, Keep the CCMP header and
2400 				 * trailer as 0's, firmware shall fill this
2401 				 */
2402 				qdf_mem_zero(pFrame, newFrmLen);
2403 				qdf_mem_copy(pFrame, wh, sizeof(*wh));
2404 				qdf_mem_copy(pFrame + sizeof(*wh) +
2405 					     hdr_len,
2406 					     pData + sizeof(*wh),
2407 					     frmLen - sizeof(*wh));
2408 
2409 				cds_packet_free((void *)tx_frame);
2410 				tx_frame = pPacket;
2411 				pData = pFrame;
2412 				frmLen = newFrmLen;
2413 				pFc = (tpSirMacFrameCtl)
2414 						(qdf_nbuf_data(tx_frame));
2415 			}
2416 		} else {
2417 			uint16_t mmie_size;
2418 			int32_t mgmtcipherset;
2419 
2420 			mgmtcipherset = wlan_crypto_get_param(iface->vdev,
2421 						WLAN_CRYPTO_PARAM_MGMT_CIPHER);
2422 			if (mgmtcipherset <= 0) {
2423 				wma_err("Invalid key cipher %d", mgmtcipherset);
2424 				cds_packet_free((void *)tx_frame);
2425 				return -EINVAL;
2426 			}
2427 
2428 			if (mgmtcipherset & (1 << WLAN_CRYPTO_CIPHER_AES_CMAC))
2429 				mmie_size = cds_get_mmie_size();
2430 			else
2431 				mmie_size = cds_get_gmac_mmie_size();
2432 
2433 			/* Allocate extra bytes for MMIE */
2434 			newFrmLen = frmLen + mmie_size;
2435 			qdf_status = cds_packet_alloc((uint16_t) newFrmLen,
2436 						      (void **)&pFrame,
2437 						      (void **)&pPacket);
2438 
2439 			if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
2440 				wma_alert("Failed to allocate %d bytes for RMF status code (%x)",
2441 					newFrmLen,
2442 					qdf_status);
2443 				/* Free the original packet memory */
2444 				cds_packet_free((void *)tx_frame);
2445 				goto error;
2446 			}
2447 			/*
2448 			 * Initialize the frame with 0's and only fill
2449 			 * MAC header and data. MMIE field will be
2450 			 * filled by wlan_crypto_add_mmie API
2451 			 */
2452 			qdf_mem_zero(pFrame, newFrmLen);
2453 			qdf_mem_copy(pFrame, wh, sizeof(*wh));
2454 			qdf_mem_copy(pFrame + sizeof(*wh),
2455 				     pData + sizeof(*wh), frmLen - sizeof(*wh));
2456 
2457 			/* The API expect length without the mmie size */
2458 			if (!wlan_crypto_add_mmie(iface->vdev, pFrame,
2459 						  frmLen)) {
2460 				wma_alert("Failed to attach MMIE");
2461 				/* Free the original packet memory */
2462 				cds_packet_free((void *)tx_frame);
2463 				cds_packet_free((void *)pPacket);
2464 				goto error;
2465 			}
2466 			cds_packet_free((void *)tx_frame);
2467 			tx_frame = pPacket;
2468 			pData = pFrame;
2469 			frmLen = newFrmLen;
2470 			pFc = (tpSirMacFrameCtl) (qdf_nbuf_data(tx_frame));
2471 		}
2472 		/*
2473 		 * Some target which support sending mgmt frame based on htt
2474 		 * would DMA write this PMF tx frame buffer, it may cause smmu
2475 		 * check permission fault, set a flag to do bi-direction DMA
2476 		 * map, normal tx unmap is enough for this case.
2477 		 */
2478 		QDF_NBUF_CB_TX_DMA_BI_MAP((qdf_nbuf_t)tx_frame) = 1;
2479 	}
2480 	mHdr = (tpSirMacMgmtHdr)qdf_nbuf_data(tx_frame);
2481 	if ((frmType == TXRX_FRM_802_11_MGMT) &&
2482 	    (pFc->subType == SIR_MAC_MGMT_PROBE_RSP)) {
2483 		uint64_t adjusted_tsf_le;
2484 		struct ieee80211_frame *wh =
2485 			(struct ieee80211_frame *)qdf_nbuf_data(tx_frame);
2486 
2487 		/* Make the TSF offset negative to match TSF in beacons */
2488 		adjusted_tsf_le = cpu_to_le64(0ULL -
2489 					      wma_handle->interfaces[vdev_id].
2490 					      tsfadjust);
2491 		A_MEMCPY(&wh[1], &adjusted_tsf_le, sizeof(adjusted_tsf_le));
2492 	}
2493 	if (frmType == TXRX_FRM_802_11_DATA) {
2494 		qdf_nbuf_t ret;
2495 		qdf_nbuf_t skb = (qdf_nbuf_t) tx_frame;
2496 
2497 		struct wma_decap_info_t decap_info;
2498 		struct ieee80211_frame *wh =
2499 			(struct ieee80211_frame *)qdf_nbuf_data(skb);
2500 		unsigned long curr_timestamp = qdf_mc_timer_get_system_ticks();
2501 
2502 		/*
2503 		 * 1) TxRx Module expects data input to be 802.3 format
2504 		 * So Decapsulation has to be done.
2505 		 * 2) Only one Outstanding Data pending for Ack is allowed
2506 		 */
2507 		if (tx_frm_ota_comp_cb) {
2508 			if (wma_handle->umac_data_ota_ack_cb) {
2509 				/*
2510 				 * If last data frame was sent more than 2 secs
2511 				 * ago and still we didn't receive ack/nack from
2512 				 * fw then allow Tx of this data frame
2513 				 */
2514 				if (curr_timestamp >=
2515 				    wma_handle->last_umac_data_ota_timestamp +
2516 				    200) {
2517 					wma_err("No Tx Ack for last data frame for more than 2 secs, allow Tx of current data frame");
2518 				} else {
2519 					wma_err("Already one Data pending for Ack, reject Tx of data frame");
2520 					cds_packet_free((void *)tx_frame);
2521 					return QDF_STATUS_E_FAILURE;
2522 				}
2523 			}
2524 		} else {
2525 			/*
2526 			 * Data Frames are sent through TxRx Non Standard Data
2527 			 * path so Ack Complete Cb is must
2528 			 */
2529 			wma_err("No Ack Complete Cb. Don't Allow");
2530 			cds_packet_free((void *)tx_frame);
2531 			return QDF_STATUS_E_FAILURE;
2532 		}
2533 
2534 		/* Take out 802.11 header from skb */
2535 		decap_info.hdr_len = wma_ieee80211_hdrsize(wh);
2536 		qdf_mem_copy(decap_info.hdr, wh, decap_info.hdr_len);
2537 		qdf_nbuf_pull_head(skb, decap_info.hdr_len);
2538 
2539 		/*  Decapsulate to 802.3 format */
2540 		wma_decap_to_8023(skb, &decap_info);
2541 
2542 		/* Zero out skb's context buffer for the driver to use */
2543 		qdf_mem_zero(skb->cb, sizeof(skb->cb));
2544 
2545 		/* Terminate the (single-element) list of tx frames */
2546 		skb->next = NULL;
2547 
2548 		/* Store the Ack Complete Cb */
2549 		wma_handle->umac_data_ota_ack_cb = tx_frm_ota_comp_cb;
2550 
2551 		/* Store the timestamp and nbuf for this data Tx */
2552 		wma_handle->last_umac_data_ota_timestamp = curr_timestamp;
2553 		wma_handle->last_umac_data_nbuf = skb;
2554 
2555 		/* Send the Data frame to TxRx in Non Standard Path */
2556 		cdp_hl_tdls_flag_reset(soc,
2557 			vdev_id, tdls_flag);
2558 
2559 		ret = cdp_tx_non_std(soc,
2560 			vdev_id,
2561 			OL_TX_SPEC_NO_FREE, skb);
2562 
2563 		cdp_hl_tdls_flag_reset(soc,
2564 			vdev_id, false);
2565 
2566 		if (ret) {
2567 			wma_err("TxRx Rejected. Fail to do Tx");
2568 			/* Call Download Cb so that umac can free the buffer */
2569 			if (tx_frm_download_comp_cb)
2570 				tx_frm_download_comp_cb(wma_handle->mac_context,
2571 						tx_frame,
2572 						WMA_TX_FRAME_BUFFER_FREE);
2573 			wma_handle->umac_data_ota_ack_cb = NULL;
2574 			wma_handle->last_umac_data_nbuf = NULL;
2575 			return QDF_STATUS_E_FAILURE;
2576 		}
2577 
2578 		/* Call Download Callback if passed */
2579 		if (tx_frm_download_comp_cb)
2580 			tx_frm_download_comp_cb(wma_handle->mac_context,
2581 						tx_frame,
2582 						WMA_TX_FRAME_BUFFER_NO_FREE);
2583 
2584 		return QDF_STATUS_SUCCESS;
2585 	}
2586 
2587 	ctrl_pdev = cdp_get_ctrl_pdev_from_vdev(soc, vdev_id);
2588 	if (!ctrl_pdev) {
2589 		wma_err("ol_pdev_handle is NULL");
2590 		cds_packet_free((void *)tx_frame);
2591 		return QDF_STATUS_E_FAILURE;
2592 	}
2593 	is_high_latency = cdp_cfg_is_high_latency(soc, ctrl_pdev);
2594 	is_wmi_mgmt_tx = wmi_service_enabled(wma_handle->wmi_handle,
2595 					     wmi_service_mgmt_tx_wmi);
2596 
2597 	downld_comp_required = tx_frm_download_comp_cb && is_high_latency &&
2598 				(!is_wmi_mgmt_tx) && tx_frm_ota_comp_cb;
2599 
2600 	/* Fill the frame index to send */
2601 	if (pFc->type == SIR_MAC_MGMT_FRAME) {
2602 		if (tx_frm_ota_comp_cb) {
2603 			if (downld_comp_required)
2604 				tx_frm_index =
2605 					GENERIC_DOWNLD_COMP_ACK_COMP_INDEX;
2606 			else
2607 				tx_frm_index = GENERIC_NODOWLOAD_ACK_COMP_INDEX;
2608 
2609 		} else {
2610 			tx_frm_index =
2611 				GENERIC_NODOWNLD_NOACK_COMP_INDEX;
2612 		}
2613 
2614 	}
2615 
2616 	/*
2617 	 * If Download Complete is required
2618 	 * Wait for download complete
2619 	 */
2620 	if (downld_comp_required) {
2621 		/* Store Tx Comp Cb */
2622 		wma_handle->tx_frm_download_comp_cb = tx_frm_download_comp_cb;
2623 
2624 		/* Reset the Tx Frame Complete Event */
2625 		qdf_status = qdf_event_reset(
2626 				&wma_handle->tx_frm_download_comp_event);
2627 
2628 		if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
2629 			wma_alert("Event Reset failed tx comp event %x",
2630 				 qdf_status);
2631 			cds_packet_free((void *)tx_frame);
2632 			goto error;
2633 		}
2634 	}
2635 
2636 	/* If the frame has to be sent at BD Rate2 inform TxRx */
2637 	if (tx_flag & HAL_USE_BD_RATE2_FOR_MANAGEMENT_FRAME)
2638 		use_6mbps = 1;
2639 
2640 	if (pFc->subType == SIR_MAC_MGMT_PROBE_RSP) {
2641 		if (wma_is_vdev_in_ap_mode(wma_handle, vdev_id) &&
2642 		    wma_handle->interfaces[vdev_id].ch_freq)
2643 			chanfreq = wma_handle->interfaces[vdev_id].ch_freq;
2644 		else
2645 			chanfreq = channel_freq;
2646 		wma_debug("Probe response frame on channel %d vdev:%d",
2647 			 chanfreq, vdev_id);
2648 		if (wma_is_vdev_in_ap_mode(wma_handle, vdev_id) && !chanfreq)
2649 			wma_err("AP oper chan is zero");
2650 	} else if (pFc->subType == SIR_MAC_MGMT_ACTION ||
2651 			pFc->subType == SIR_MAC_MGMT_AUTH) {
2652 		chanfreq = channel_freq;
2653 	} else {
2654 		chanfreq = 0;
2655 	}
2656 
2657 	if (pFc->type == SIR_MAC_MGMT_FRAME) {
2658 		if ((mac->mlme_cfg->gen.debug_packet_log &
2659 		    DEBUG_PKTLOG_TYPE_MGMT) &&
2660 		    (pFc->subType != SIR_MAC_MGMT_PROBE_REQ) &&
2661 		    (pFc->subType != SIR_MAC_MGMT_PROBE_RSP) &&
2662 		    (pFc->subType != SIR_MAC_MGMT_ACTION)) {
2663 			wma_debug("TX MGMT - Type %hu, SubType %hu seq_num[%d]",
2664 				  pFc->type, pFc->subType,
2665 				  ((mHdr->seqControl.seqNumHi << 4) |
2666 				  mHdr->seqControl.seqNumLo));
2667 			qdf_trace_hex_dump(QDF_MODULE_ID_WMA,
2668 					   QDF_TRACE_LEVEL_DEBUG, pData,
2669 					   frmLen);
2670 		} else if ((mac->mlme_cfg->gen.debug_packet_log &
2671 			   DEBUG_PKTLOG_TYPE_ACTION) &&
2672 			   (pFc->subType == SIR_MAC_MGMT_ACTION)) {
2673 			wma_debug("TX MGMT - Type %hu, SubType %hu seq_num[%d]",
2674 				  pFc->type, pFc->subType,
2675 				 ((mHdr->seqControl.seqNumHi << 4) |
2676 				 mHdr->seqControl.seqNumLo));
2677 			qdf_trace_hex_dump(QDF_MODULE_ID_WMA,
2678 					   QDF_TRACE_LEVEL_DEBUG, pData,
2679 					   frmLen);
2680 		}
2681 	}
2682 
2683 	if (wlan_reg_is_5ghz_ch_freq(wma_handle->interfaces[vdev_id].ch_freq))
2684 		is_5g = true;
2685 
2686 	mgmt_param.tx_frame = tx_frame;
2687 	mgmt_param.frm_len = frmLen;
2688 	mgmt_param.vdev_id = vdev_id;
2689 	mgmt_param.pdata = pData;
2690 	mgmt_param.chanfreq = chanfreq;
2691 	mgmt_param.qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
2692 	mgmt_param.use_6mbps = use_6mbps;
2693 	mgmt_param.tx_type = tx_frm_index;
2694 	mgmt_param.peer_rssi = peer_rssi;
2695 	if (iface && wlan_vdev_mlme_get_opmode(iface->vdev) == QDF_STA_MODE &&
2696 	    wlan_vdev_mlme_is_mlo_vdev(iface->vdev) &&
2697 	    (wlan_vdev_mlme_is_active(iface->vdev) == QDF_STATUS_SUCCESS) &&
2698 	    frmType == TXRX_FRM_802_11_MGMT &&
2699 	    pFc->subType != SIR_MAC_MGMT_PROBE_REQ &&
2700 	    pFc->subType != SIR_MAC_MGMT_AUTH &&
2701 	    action != (ACTION_CATEGORY_PUBLIC << 8 | TDLS_DISCOVERY_RESPONSE) &&
2702 	    action != (ACTION_CATEGORY_BACK << 8 | ADDBA_RESPONSE))
2703 		mgmt_param.mlo_link_agnostic = true;
2704 
2705 	if (tx_flag & HAL_USE_INCORRECT_KEY_PMF)
2706 		mgmt_param.tx_flags |= MGMT_TX_USE_INCORRECT_KEY;
2707 
2708 	/*
2709 	 * Update the tx_params TLV only for rates
2710 	 * other than 1Mbps and 6 Mbps
2711 	 */
2712 	if (rid < RATEID_DEFAULT &&
2713 	    (rid != RATEID_1MBPS && !(rid == RATEID_6MBPS && is_5g))) {
2714 		wma_debug("using rate id: %d for Tx", rid);
2715 		mgmt_param.tx_params_valid = true;
2716 		wma_update_tx_send_params(&mgmt_param.tx_param, rid);
2717 	}
2718 
2719 	psoc = wma_handle->psoc;
2720 	if (!psoc) {
2721 		wma_err("psoc ctx is NULL");
2722 		cds_packet_free((void *)tx_frame);
2723 		goto error;
2724 	}
2725 
2726 	if (!wma_handle->pdev) {
2727 		wma_err("pdev ctx is NULL");
2728 		cds_packet_free((void *)tx_frame);
2729 		goto error;
2730 	}
2731 
2732 	pdev_id = wlan_objmgr_pdev_get_pdev_id(wma_handle->pdev);
2733 	wh = (struct ieee80211_frame *)(qdf_nbuf_data(tx_frame));
2734 	mac_addr = wh->i_addr1;
2735 	peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr, WLAN_MGMT_NB_ID);
2736 	if (!peer) {
2737 		mac_addr = wh->i_addr2;
2738 		peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr,
2739 					WLAN_MGMT_NB_ID);
2740 		if (!peer) {
2741 			vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc,
2742 								    vdev_id,
2743 								    WLAN_MGMT_NB_ID);
2744 			if (!vdev) {
2745 				wma_err("vdev is null");
2746 				cds_packet_free((void *)tx_frame);
2747 				goto error;
2748 			}
2749 			mld_addr = wlan_vdev_mlme_get_mldaddr(vdev);
2750 			wlan_objmgr_vdev_release_ref(vdev, WLAN_MGMT_NB_ID);
2751 			if (!mld_addr) {
2752 				wma_err("mld addr is null");
2753 				cds_packet_free((void *)tx_frame);
2754 				goto error;
2755 			}
2756 			wma_debug("mld mac addr " QDF_MAC_ADDR_FMT,
2757 				  QDF_MAC_ADDR_REF(mld_addr));
2758 			peer = wlan_objmgr_get_peer(psoc, pdev_id,
2759 						    mld_addr,
2760 						    WLAN_MGMT_NB_ID);
2761 			if (!peer) {
2762 				wma_err("peer is null");
2763 				cds_packet_free((void *)tx_frame);
2764 				goto error;
2765 			}
2766 		}
2767 	}
2768 
2769 	if (ucfg_pkt_capture_get_pktcap_mode(psoc) &
2770 	    PKT_CAPTURE_MODE_MGMT_ONLY) {
2771 		ucfg_pkt_capture_mgmt_tx(wma_handle->pdev,
2772 					 tx_frame,
2773 					 wma_handle->interfaces[vdev_id].ch_freq,
2774 					 mgmt_param.tx_param.preamble_type);
2775 	}
2776 
2777 	status = wlan_mgmt_txrx_mgmt_frame_tx(peer, wma_handle->mac_context,
2778 					      (qdf_nbuf_t)tx_frame, NULL,
2779 					      tx_frm_ota_comp_cb,
2780 					      WLAN_UMAC_COMP_MLME,
2781 					      &mgmt_param);
2782 
2783 	wlan_objmgr_peer_release_ref(peer, WLAN_MGMT_NB_ID);
2784 	if (status != QDF_STATUS_SUCCESS) {
2785 		wma_err("mgmt tx failed");
2786 		qdf_nbuf_free((qdf_nbuf_t)tx_frame);
2787 		goto error;
2788 	}
2789 
2790 	/*
2791 	 * Failed to send Tx Mgmt Frame
2792 	 */
2793 	if (status) {
2794 	/* Call Download Cb so that umac can free the buffer */
2795 		uint32_t rem;
2796 
2797 		if (tx_frm_download_comp_cb)
2798 			tx_frm_download_comp_cb(wma_handle->mac_context,
2799 						tx_frame,
2800 						WMA_TX_FRAME_BUFFER_FREE);
2801 		rem = qdf_do_div_rem(wma_handle->tx_fail_cnt,
2802 				     MAX_PRINT_FAILURE_CNT);
2803 		if (!rem)
2804 			wma_err("Failed to send Mgmt Frame");
2805 		else
2806 			wma_debug("Failed to send Mgmt Frame");
2807 		wma_handle->tx_fail_cnt++;
2808 		goto error;
2809 	}
2810 
2811 	if (!tx_frm_download_comp_cb)
2812 		return QDF_STATUS_SUCCESS;
2813 
2814 	/*
2815 	 * Wait for Download Complete
2816 	 * if required
2817 	 */
2818 	if (downld_comp_required) {
2819 		/*
2820 		 * Wait for Download Complete
2821 		 * @ Integrated : Dxe Complete
2822 		 * @ Discrete : Target Download Complete
2823 		 */
2824 		qdf_status =
2825 			qdf_wait_for_event_completion(&wma_handle->
2826 					      tx_frm_download_comp_event,
2827 					      WMA_TX_FRAME_COMPLETE_TIMEOUT);
2828 
2829 		if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
2830 			wma_nofl_alert("Wait Event failed txfrm_comp_event");
2831 			/*
2832 			 * @Integrated: Something Wrong with Dxe
2833 			 *   TODO: Some Debug Code
2834 			 * Here We need to trigger SSR since
2835 			 * since system went into a bad state where
2836 			 * we didn't get Download Complete for almost
2837 			 * WMA_TX_FRAME_COMPLETE_TIMEOUT (1 sec)
2838 			 */
2839 			/* display scheduler stats */
2840 			return cdp_display_stats(soc, CDP_SCHEDULER_STATS,
2841 						QDF_STATS_VERBOSITY_LEVEL_HIGH);
2842 		}
2843 	}
2844 
2845 	return QDF_STATUS_SUCCESS;
2846 
2847 error:
2848 	wma_handle->tx_frm_download_comp_cb = NULL;
2849 	wma_handle->umac_data_ota_ack_cb = NULL;
2850 	return QDF_STATUS_E_FAILURE;
2851 }
2852 
2853 QDF_STATUS wma_ds_peek_rx_packet_info(cds_pkt_t *pkt, void **pkt_meta)
2854 {
2855 	if (!pkt) {
2856 		wma_err("wma:Invalid parameter sent on wma_peek_rx_pkt_info");
2857 		return QDF_STATUS_E_FAULT;
2858 	}
2859 
2860 	*pkt_meta = &(pkt->pkt_meta);
2861 
2862 	return QDF_STATUS_SUCCESS;
2863 }
2864 
#ifdef HL_RX_AGGREGATION_HOLE_DETECTION
/**
 * ol_rx_aggregation_hole() - post an rx aggregation hole indication to SME
 * @hole_info: hole information word placed in the indication
 *
 * Builds a single-entry sir_sme_rx_aggr_hole_ind event and posts it to the
 * SME message queue. On a successful post the receiver owns (and frees)
 * the event buffer; on failure it is freed here.
 *
 * Return: none
 */
void ol_rx_aggregation_hole(uint32_t hole_info)
{
	struct sir_sme_rx_aggr_hole_ind *evt;
	cds_msg_t msg = { 0 };

	/* One hole entry trails the fixed part of the indication */
	evt = qdf_mem_malloc(sizeof(*evt) + sizeof(evt->hole_info_array[0]));
	if (!evt)
		return;

	evt->hole_cnt = 1;
	evt->hole_info_array[0] = hole_info;

	msg.type = eWNI_SME_RX_AGGR_HOLE_IND;
	msg.bodyptr = evt;
	msg.bodyval = 0;

	if (cds_mq_post_message(CDS_MQ_ID_SME, &msg) != QDF_STATUS_SUCCESS)
		qdf_mem_free(evt);
}
#endif
2893 
2894 /**
2895  * ol_rx_err() - ol rx err handler
2896  * @pdev: ol pdev
2897  * @vdev_id: vdev id
2898  * @peer_mac_addr: peer mac address
2899  * @tid: TID
2900  * @tsf32: TSF
2901  * @err_type: error type
2902  * @rx_frame: rx frame
2903  * @pn: PN Number
2904  * @key_id: key id
2905  *
2906  * This function handles rx error and send MIC error failure to LIM
2907  *
2908  * Return: none
2909  */
2910 /*
2911  * Local prototype added to temporarily address warning caused by
2912  * -Wmissing-prototypes. A more correct solution will come later
2913  * as a solution to IR-196435 at which point this prototype will
2914  * be removed.
2915  */
2916 void ol_rx_err(void *pdev, uint8_t vdev_id,
2917 	       uint8_t *peer_mac_addr, int tid, uint32_t tsf32,
2918 	       enum ol_rx_err_type err_type, qdf_nbuf_t rx_frame,
2919 	       uint64_t *pn, uint8_t key_id);
2920 void ol_rx_err(void *pdev, uint8_t vdev_id,
2921 	       uint8_t *peer_mac_addr, int tid, uint32_t tsf32,
2922 	       enum ol_rx_err_type err_type, qdf_nbuf_t rx_frame,
2923 	       uint64_t *pn, uint8_t key_id)
2924 {
2925 	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
2926 	struct mic_failure_ind *mic_err_ind;
2927 	qdf_ether_header_t *eth_hdr;
2928 	uint8_t *bssid;
2929 	struct scheduler_msg cds_msg = {0};
2930 
2931 	if (!wma)
2932 		return;
2933 
2934 	if (err_type != OL_RX_ERR_TKIP_MIC)
2935 		return;
2936 
2937 	if (qdf_nbuf_len(rx_frame) < sizeof(*eth_hdr))
2938 		return;
2939 	eth_hdr = (qdf_ether_header_t *)qdf_nbuf_data(rx_frame);
2940 	mic_err_ind = qdf_mem_malloc(sizeof(*mic_err_ind));
2941 	if (!mic_err_ind)
2942 		return;
2943 
2944 	mic_err_ind->messageType = eWNI_SME_MIC_FAILURE_IND;
2945 	mic_err_ind->length = sizeof(*mic_err_ind);
2946 	mic_err_ind->sessionId = vdev_id;
2947 	bssid = wma_get_vdev_bssid(wma->interfaces[vdev_id].vdev);
2948 	if (!bssid) {
2949 		wma_err("Failed to get bssid for vdev_%d", vdev_id);
2950 		qdf_mem_free((void *)mic_err_ind);
2951 		return;
2952 	}
2953 	qdf_copy_macaddr(&mic_err_ind->bssId,
2954 		     (struct qdf_mac_addr *)bssid);
2955 	qdf_mem_copy(mic_err_ind->info.taMacAddr,
2956 		     (struct qdf_mac_addr *) peer_mac_addr,
2957 			sizeof(tSirMacAddr));
2958 	qdf_mem_copy(mic_err_ind->info.srcMacAddr,
2959 		     (struct qdf_mac_addr *) eth_hdr->ether_shost,
2960 			sizeof(tSirMacAddr));
2961 	qdf_mem_copy(mic_err_ind->info.dstMacAddr,
2962 		     (struct qdf_mac_addr *) eth_hdr->ether_dhost,
2963 			sizeof(tSirMacAddr));
2964 	mic_err_ind->info.keyId = key_id;
2965 	mic_err_ind->info.multicast =
2966 		IEEE80211_IS_MULTICAST(eth_hdr->ether_dhost);
2967 	qdf_mem_copy(mic_err_ind->info.TSC, pn, SIR_CIPHER_SEQ_CTR_SIZE);
2968 
2969 	qdf_mem_zero(&cds_msg, sizeof(struct scheduler_msg));
2970 	cds_msg.type = eWNI_SME_MIC_FAILURE_IND;
2971 	cds_msg.bodyptr = (void *) mic_err_ind;
2972 
2973 	if (QDF_STATUS_SUCCESS !=
2974 		scheduler_post_message(QDF_MODULE_ID_TXRX,
2975 				       QDF_MODULE_ID_SME,
2976 				       QDF_MODULE_ID_SME,
2977 				       &cds_msg)) {
2978 		wma_err("could not post mic failure indication to SME");
2979 		qdf_mem_free((void *)mic_err_ind);
2980 	}
2981 }
2982 
2983 void wma_tx_abort(uint8_t vdev_id)
2984 {
2985 #define PEER_ALL_TID_BITMASK 0xffffffff
2986 	tp_wma_handle wma;
2987 	uint32_t peer_tid_bitmap = PEER_ALL_TID_BITMASK;
2988 	struct wma_txrx_node *iface;
2989 	uint8_t *bssid;
2990 	struct peer_flush_params param = {0};
2991 
2992 	wma = cds_get_context(QDF_MODULE_ID_WMA);
2993 	if (!wma)
2994 		return;
2995 
2996 	iface = &wma->interfaces[vdev_id];
2997 	if (!iface->vdev) {
2998 		wma_err("iface->vdev is NULL");
2999 		return;
3000 	}
3001 
3002 	bssid = wma_get_vdev_bssid(iface->vdev);
3003 	if (!bssid) {
3004 		wma_err("Failed to get bssid for vdev_%d", vdev_id);
3005 		return;
3006 	}
3007 
3008 	wma_debug("vdevid %d bssid "QDF_MAC_ADDR_FMT, vdev_id,
3009 		  QDF_MAC_ADDR_REF(bssid));
3010 	wma_vdev_set_pause_bit(vdev_id, PAUSE_TYPE_HOST);
3011 	cdp_fc_vdev_pause(cds_get_context(QDF_MODULE_ID_SOC), vdev_id,
3012 			  OL_TXQ_PAUSE_REASON_TX_ABORT, 0);
3013 
3014 	/* Flush all TIDs except MGMT TID for this peer in Target */
3015 	peer_tid_bitmap &= ~(0x1 << WMI_MGMT_TID);
3016 	param.peer_tid_bitmap = peer_tid_bitmap;
3017 	param.vdev_id = vdev_id;
3018 	wmi_unified_peer_flush_tids_send(wma->wmi_handle, bssid,
3019 					 &param);
3020 }
3021 
3022 void wma_delete_invalid_peer_entries(uint8_t vdev_id, uint8_t *peer_mac_addr)
3023 {
3024 	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
3025 	uint8_t i;
3026 	struct wma_txrx_node *iface;
3027 
3028 	if (!wma)
3029 		return;
3030 
3031 	iface = &wma->interfaces[vdev_id];
3032 
3033 	if (peer_mac_addr) {
3034 		for (i = 0; i < INVALID_PEER_MAX_NUM; i++) {
3035 			if (qdf_mem_cmp
3036 				      (iface->invalid_peers[i].rx_macaddr,
3037 				      peer_mac_addr,
3038 				      QDF_MAC_ADDR_SIZE) == 0) {
3039 				qdf_mem_zero(iface->invalid_peers[i].rx_macaddr,
3040 					     sizeof(QDF_MAC_ADDR_SIZE));
3041 				break;
3042 			}
3043 		}
3044 		if (i == INVALID_PEER_MAX_NUM)
3045 			wma_debug("peer_mac_addr "QDF_MAC_ADDR_FMT" is not found",
3046 				  QDF_MAC_ADDR_REF(peer_mac_addr));
3047 	} else {
3048 		qdf_mem_zero(iface->invalid_peers,
3049 			     sizeof(iface->invalid_peers));
3050 	}
3051 }
3052 
/**
 * wma_rx_invalid_peer_ind() - handle rx from an invalid (unknown) peer
 * @vdev_id: vdev id on which the frame was received
 * @wh: pointer to the received 802.11 frame header
 *
 * Records the transmitter address (TA) of a frame received from an unknown
 * peer in a small per-vdev cache so each offender is reported only once,
 * and posts SIR_LIM_RX_INVALID_PEER to LIM (which triggers a deauth) for
 * first-time offenders. Ownership of the allocated message passes to
 * wma_send_msg() on the report path; it is freed here on the duplicate path.
 *
 * NOTE(review): the return type is uint8_t but the error paths return
 * -EINVAL/-ENOMEM, which are truncated by the conversion (e.g. -EINVAL
 * becomes 234 on typical ABIs). Callers that compare against 0 still work,
 * but the specific error code is lost — confirm against callers before
 * changing the signature.
 *
 * Return: 0 on the normal paths; truncated negative errno on failure
 */
uint8_t wma_rx_invalid_peer_ind(uint8_t vdev_id, void *wh)
{
	struct ol_rx_inv_peer_params *rx_inv_msg;
	struct ieee80211_frame *wh_l = (struct ieee80211_frame *)wh;
	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
	uint8_t i, index;
	bool invalid_peer_found = false;
	struct wma_txrx_node *iface;

	if (!wma)
		return -EINVAL;

	iface = &wma->interfaces[vdev_id];
	rx_inv_msg = qdf_mem_malloc(sizeof(struct ol_rx_inv_peer_params));
	if (!rx_inv_msg)
		return -ENOMEM;

	/* Next free slot in the circular invalid-peer cache */
	index = iface->invalid_peer_idx;
	rx_inv_msg->vdev_id = vdev_id;
	/* addr1 = receiver address (RA), addr2 = transmitter address (TA) */
	qdf_mem_copy(rx_inv_msg->ra, wh_l->i_addr1, QDF_MAC_ADDR_SIZE);
	qdf_mem_copy(rx_inv_msg->ta, wh_l->i_addr2, QDF_MAC_ADDR_SIZE);


	/* Suppress duplicate indications: scan the cache for this TA */
	for (i = 0; i < INVALID_PEER_MAX_NUM; i++) {
		if (qdf_mem_cmp
			      (iface->invalid_peers[i].rx_macaddr,
			      rx_inv_msg->ta,
			      QDF_MAC_ADDR_SIZE) == 0) {
			invalid_peer_found = true;
			break;
		}
	}

	if (!invalid_peer_found) {
		/* First sighting: remember the TA, then report it to LIM */
		qdf_mem_copy(iface->invalid_peers[index].rx_macaddr,
			     rx_inv_msg->ta,
			    QDF_MAC_ADDR_SIZE);

		/* reset count if reached max */
		iface->invalid_peer_idx =
			(index + 1) % INVALID_PEER_MAX_NUM;

		/* send deauth */
		wma_debug("vdev_id: %d RA: "QDF_MAC_ADDR_FMT" TA: "QDF_MAC_ADDR_FMT,
			  vdev_id, QDF_MAC_ADDR_REF(rx_inv_msg->ra),
			  QDF_MAC_ADDR_REF(rx_inv_msg->ta));

		/* rx_inv_msg ownership transfers to the message handler */
		wma_send_msg(wma,
			     SIR_LIM_RX_INVALID_PEER,
			     (void *)rx_inv_msg, 0);
	} else {
		wma_debug_rl("Ignore invalid peer indication as received more than once "
			QDF_MAC_ADDR_FMT,
			QDF_MAC_ADDR_REF(rx_inv_msg->ta));
		qdf_mem_free(rx_inv_msg);
	}

	return 0;
}
3112 
3113 static bool
3114 wma_drop_delba(tp_wma_handle wma, uint8_t vdev_id,
3115 	       enum cdp_delba_rcode cdp_reason_code)
3116 {
3117 	struct wlan_objmgr_vdev *vdev;
3118 	qdf_time_t last_ts, ts = qdf_mc_timer_get_system_time();
3119 	bool drop = false;
3120 
3121 	vdev = wlan_objmgr_get_vdev_by_id_from_psoc(wma->psoc, vdev_id,
3122 						    WLAN_MLME_CM_ID);
3123 	if (!vdev) {
3124 		wma_err("vdev is NULL");
3125 		return drop;
3126 	}
3127 	if (!wlan_mlme_is_ba_2k_jump_iot_ap(vdev))
3128 		goto done;
3129 
3130 	last_ts = wlan_mlme_get_last_delba_sent_time(vdev);
3131 	if ((last_ts && cdp_reason_code == CDP_DELBA_2K_JUMP) &&
3132 	    (ts - last_ts) < CDP_DELBA_INTERVAL_MS) {
3133 		wma_debug("Drop DELBA, last sent ts: %lu current ts: %lu",
3134 			  last_ts, ts);
3135 		drop = true;
3136 	}
3137 
3138 	wlan_mlme_set_last_delba_sent_time(vdev, ts);
3139 
3140 done:
3141 	wlan_objmgr_vdev_release_ref(vdev, WLAN_MLME_CM_ID);
3142 
3143 	return drop;
3144 }
3145 
3146 int wma_dp_send_delba_ind(uint8_t vdev_id, uint8_t *peer_macaddr,
3147 			  uint8_t tid, uint8_t reason_code,
3148 			  enum cdp_delba_rcode cdp_reason_code)
3149 {
3150 	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
3151 	struct lim_delba_req_info *req;
3152 
3153 	if (!wma || !peer_macaddr) {
3154 		wma_err("wma handle or mac addr is NULL");
3155 		return -EINVAL;
3156 	}
3157 
3158 	if (wma_drop_delba(wma, vdev_id, cdp_reason_code))
3159 		return 0;
3160 
3161 	req = qdf_mem_malloc(sizeof(*req));
3162 	if (!req)
3163 		return -ENOMEM;
3164 	req->vdev_id = vdev_id;
3165 	qdf_mem_copy(req->peer_macaddr, peer_macaddr, QDF_MAC_ADDR_SIZE);
3166 	req->tid = tid;
3167 	req->reason_code = reason_code;
3168 	wma_debug("req delba_ind vdev %d "QDF_MAC_ADDR_FMT" tid %d reason %d",
3169 		 vdev_id, QDF_MAC_ADDR_REF(peer_macaddr), tid, reason_code);
3170 	wma_send_msg_high_priority(wma, SIR_HAL_REQ_SEND_DELBA_REQ_IND,
3171 				   (void *)req, 0);
3172 
3173 	return 0;
3174 }
3175 
3176 bool wma_is_roam_in_progress(uint32_t vdev_id)
3177 {
3178 	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
3179 	enum QDF_OPMODE opmode;
3180 
3181 	if (!wma_is_vdev_valid(vdev_id))
3182 		return false;
3183 
3184 	if (!wma || !wma->interfaces[vdev_id].vdev)
3185 		return false;
3186 
3187 	opmode = wlan_vdev_mlme_get_opmode(wma->interfaces[vdev_id].vdev);
3188 	if (opmode != QDF_STA_MODE && opmode != QDF_P2P_CLIENT_MODE)
3189 		return false;
3190 
3191 	return wlan_cm_is_vdev_roam_started(wma->interfaces[vdev_id].vdev);
3192 }
3193