1  /*
2   * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3   * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4   *
5   * Permission to use, copy, modify, and/or distribute this software for
6   * any purpose with or without fee is hereby granted, provided that the
7   * above copyright notice and this permission notice appear in all
8   * copies.
9   *
10   * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11   * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12   * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13   * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14   * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15   * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16   * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17   * PERFORMANCE OF THIS SOFTWARE.
18   */
19  
20  /**
21   *  DOC:    wma_data.c
22   *  This file contains tx/rx and data path related functions.
23   */
24  
25  /* Header files */
26  
27  #include "wma.h"
28  #include "enet.h"
29  #include "wma_api.h"
30  #include "cds_api.h"
31  #include "wmi_unified_api.h"
32  #include "wlan_qct_sys.h"
33  #include "wni_api.h"
34  #include "ani_global.h"
35  #include "wmi_unified.h"
36  #include "wni_cfg.h"
37  #include <cdp_txrx_tx_throttle.h>
38  #if defined(CONFIG_HL_SUPPORT)
39  #include "wlan_tgt_def_config_hl.h"
40  #else
41  #include "wlan_tgt_def_config.h"
42  #endif
43  #include "qdf_nbuf.h"
44  #include "qdf_types.h"
45  #include "qdf_mem.h"
46  #include "qdf_util.h"
47  
48  #include "wma_types.h"
49  #include "lim_api.h"
50  #include "lim_session_utils.h"
51  
52  #include "cds_utils.h"
53  
54  #if !defined(REMOVE_PKT_LOG)
55  #include "pktlog_ac.h"
56  #endif /* REMOVE_PKT_LOG */
57  
58  #include "dbglog_host.h"
59  #include "csr_api.h"
60  #include "ol_fw.h"
61  
62  #include "wma_internal.h"
63  #include "cdp_txrx_flow_ctrl_legacy.h"
64  #include "cdp_txrx_cmn.h"
65  #include "cdp_txrx_misc.h"
66  #include <cdp_txrx_peer_ops.h>
67  #include <cdp_txrx_cfg.h>
68  #include "cdp_txrx_stats.h"
69  #include <cdp_txrx_misc.h>
70  #include "wlan_mgmt_txrx_utils_api.h"
71  #include "wlan_objmgr_psoc_obj.h"
72  #include "wlan_objmgr_pdev_obj.h"
73  #include "wlan_objmgr_vdev_obj.h"
74  #include "wlan_objmgr_peer_obj.h"
75  #include <cdp_txrx_handle.h>
76  #include "cfg_ucfg_api.h"
77  #include "wlan_policy_mgr_ucfg.h"
78  #include <wlan_pmo_ucfg_api.h>
79  #include "wlan_lmac_if_api.h"
80  #include <wlan_cp_stats_mc_ucfg_api.h>
81  #include <wlan_crypto_global_api.h>
82  #include <wlan_mlme_main.h>
83  #include <wlan_cm_api.h>
84  #include "wlan_pkt_capture_ucfg_api.h"
85  #include "wma_eht.h"
86  #include "wlan_mlo_mgr_sta.h"
87  #include "wlan_fw_offload_main.h"
88  #include "target_if_fwol.h"
89  
/**
 * struct wma_search_rate - one entry of a rate lookup table
 * @rate: rate in Mbps x 10 (e.g. 540 = 54 Mbps)
 * @flag: table-specific encoding for this rate (MCS index for HT/VHT
 *        tables; OFDM/CCK index with bit 7 marking CCK in the legacy table)
 */
struct wma_search_rate {
	int32_t rate;
	uint8_t flag;
};
94  
#define WMA_MAX_OFDM_CCK_RATE_TBL_SIZE 12
/* In ofdm_cck_rate_tbl->flag, if bit 7 is 1 it's CCK, otherwise it is OFDM.
 * Lower bits carry the ofdm/cck index for encoding the rate.
 * Entries are sorted in descending rate order, as required by
 * wma_bin_search_rate().
 */
static struct wma_search_rate ofdm_cck_rate_tbl[WMA_MAX_OFDM_CCK_RATE_TBL_SIZE] = {
	{540, 4},               /* 4: OFDM 54 Mbps */
	{480, 0},               /* 0: OFDM 48 Mbps */
	{360, 5},               /* 5: OFDM 36 Mbps */
	{240, 1},               /* 1: OFDM 24 Mbps */
	{180, 6},               /* 6: OFDM 18 Mbps */
	{120, 2},               /* 2: OFDM 12 Mbps */
	{110, (1 << 7)},        /* 0: CCK 11 Mbps Long */
	{90, 7},                /* 7: OFDM 9 Mbps  */
	{60, 3},                /* 3: OFDM 6 Mbps  */
	{55, ((1 << 7) | 1)},   /* 1: CCK 5.5 Mbps Long */
	{20, ((1 << 7) | 2)},   /* 2: CCK 2 Mbps Long   */
	{10, ((1 << 7) | 3)} /* 3: CCK 1 Mbps Long   */
};
113  
#define WMA_MAX_VHT20_RATE_TBL_SIZE 9
/* In vht20_400ns_rate_tbl flag carries the mcs index for encoding the rate.
 * Descending order is required by wma_bin_search_rate().
 */
static struct wma_search_rate vht20_400ns_rate_tbl[WMA_MAX_VHT20_RATE_TBL_SIZE] = {
	{867, 8},               /* MCS8 1SS short GI */
	{722, 7},               /* MCS7 1SS short GI */
	{650, 6},               /* MCS6 1SS short GI */
	{578, 5},               /* MCS5 1SS short GI */
	{433, 4},               /* MCS4 1SS short GI */
	{289, 3},               /* MCS3 1SS short GI */
	{217, 2},               /* MCS2 1SS short GI */
	{144, 1},               /* MCS1 1SS short GI */
	{72, 0} /* MCS0 1SS short GI */
};

/* In vht20_800ns_rate_tbl flag carries the mcs index for encoding the rate */
static struct wma_search_rate vht20_800ns_rate_tbl[WMA_MAX_VHT20_RATE_TBL_SIZE] = {
	{780, 8},               /* MCS8 1SS long GI */
	{650, 7},               /* MCS7 1SS long GI */
	{585, 6},               /* MCS6 1SS long GI */
	{520, 5},               /* MCS5 1SS long GI */
	{390, 4},               /* MCS4 1SS long GI */
	{260, 3},               /* MCS3 1SS long GI */
	{195, 2},               /* MCS2 1SS long GI */
	{130, 1},               /* MCS1 1SS long GI */
	{65, 0} /* MCS0 1SS long GI */
};
140  
#define WMA_MAX_VHT40_RATE_TBL_SIZE 10
/* In vht40_400ns_rate_tbl flag carries the mcs index for encoding the rate.
 * Descending order is required by wma_bin_search_rate().
 */
static struct wma_search_rate vht40_400ns_rate_tbl[WMA_MAX_VHT40_RATE_TBL_SIZE] = {
	{2000, 9},              /* MCS9 1SS short GI */
	{1800, 8},              /* MCS8 1SS short GI */
	{1500, 7},              /* MCS7 1SS short GI */
	{1350, 6},              /* MCS6 1SS short GI */
	{1200, 5},              /* MCS5 1SS short GI */
	{900, 4},               /* MCS4 1SS short GI */
	{600, 3},               /* MCS3 1SS short GI */
	{450, 2},               /* MCS2 1SS short GI */
	{300, 1},               /* MCS1 1SS short GI */
	{150, 0},               /* MCS0 1SS short GI */
};

/* VHT40 long-GI (800 ns) rates, flag = MCS index */
static struct wma_search_rate vht40_800ns_rate_tbl[WMA_MAX_VHT40_RATE_TBL_SIZE] = {
	{1800, 9},              /* MCS9 1SS long GI */
	{1620, 8},              /* MCS8 1SS long GI */
	{1350, 7},              /* MCS7 1SS long GI */
	{1215, 6},              /* MCS6 1SS long GI */
	{1080, 5},              /* MCS5 1SS long GI */
	{810, 4},               /* MCS4 1SS long GI */
	{540, 3},               /* MCS3 1SS long GI */
	{405, 2},               /* MCS2 1SS long GI */
	{270, 1},               /* MCS1 1SS long GI */
	{135, 0} /* MCS0 1SS long GI */
};
168  
#define WMA_MAX_VHT80_RATE_TBL_SIZE 10
/* VHT80 short-GI (400 ns) rates, flag = MCS index; descending order is
 * required by wma_bin_search_rate().
 */
static struct wma_search_rate vht80_400ns_rate_tbl[WMA_MAX_VHT80_RATE_TBL_SIZE] = {
	{4333, 9},              /* MCS9 1SS short GI */
	{3900, 8},              /* MCS8 1SS short GI */
	{3250, 7},              /* MCS7 1SS short GI */
	{2925, 6},              /* MCS6 1SS short GI */
	{2600, 5},              /* MCS5 1SS short GI */
	{1950, 4},              /* MCS4 1SS short GI */
	{1300, 3},              /* MCS3 1SS short GI */
	{975, 2},               /* MCS2 1SS short GI */
	{650, 1},               /* MCS1 1SS short GI */
	{325, 0} /* MCS0 1SS short GI */
};

/* VHT80 long-GI (800 ns) rates, flag = MCS index */
static struct wma_search_rate vht80_800ns_rate_tbl[WMA_MAX_VHT80_RATE_TBL_SIZE] = {
	{3900, 9},              /* MCS9 1SS long GI */
	{3510, 8},              /* MCS8 1SS long GI */
	{2925, 7},              /* MCS7 1SS long GI */
	{2633, 6},              /* MCS6 1SS long GI */
	{2340, 5},              /* MCS5 1SS long GI */
	{1755, 4},              /* MCS4 1SS long GI */
	{1170, 3},              /* MCS3 1SS long GI */
	{878, 2},               /* MCS2 1SS long GI */
	{585, 1},               /* MCS1 1SS long GI */
	{293, 0} /* MCS0 1SS long GI */
};
195  
#define WMA_MAX_HT20_RATE_TBL_SIZE 8
/* HT20 short-GI (400 ns) rates, flag = MCS index; descending order is
 * required by wma_bin_search_rate().
 */
static struct wma_search_rate ht20_400ns_rate_tbl[WMA_MAX_HT20_RATE_TBL_SIZE] = {
	{722, 7},               /* MCS7 1SS short GI */
	{650, 6},               /* MCS6 1SS short GI */
	{578, 5},               /* MCS5 1SS short GI */
	{433, 4},               /* MCS4 1SS short GI */
	{289, 3},               /* MCS3 1SS short GI */
	{217, 2},               /* MCS2 1SS short GI */
	{144, 1},               /* MCS1 1SS short GI */
	{72, 0} /* MCS0 1SS short GI */
};

/* HT20 long-GI (800 ns) rates, flag = MCS index */
static struct wma_search_rate ht20_800ns_rate_tbl[WMA_MAX_HT20_RATE_TBL_SIZE] = {
	{650, 7},               /* MCS7 1SS long GI */
	{585, 6},               /* MCS6 1SS long GI */
	{520, 5},               /* MCS5 1SS long GI */
	{390, 4},               /* MCS4 1SS long GI */
	{260, 3},               /* MCS3 1SS long GI */
	{195, 2},               /* MCS2 1SS long GI */
	{130, 1},               /* MCS1 1SS long GI */
	{65, 0} /* MCS0 1SS long GI */
};
218  
#define WMA_MAX_HT40_RATE_TBL_SIZE 8
/* HT40 short-GI (400 ns) rates, flag = MCS index; descending order is
 * required by wma_bin_search_rate().
 */
static struct wma_search_rate ht40_400ns_rate_tbl[WMA_MAX_HT40_RATE_TBL_SIZE] = {
	{1500, 7},              /* MCS7 1SS short GI */
	{1350, 6},              /* MCS6 1SS short GI */
	{1200, 5},              /* MCS5 1SS short GI */
	{900, 4},               /* MCS4 1SS short GI */
	{600, 3},               /* MCS3 1SS short GI */
	{450, 2},               /* MCS2 1SS short GI */
	{300, 1},               /* MCS1 1SS short GI */
	{150, 0} /* MCS0 1SS short GI */
};

/* HT40 long-GI (800 ns) rates, flag = MCS index */
static struct wma_search_rate ht40_800ns_rate_tbl[WMA_MAX_HT40_RATE_TBL_SIZE] = {
	{1350, 7},              /* MCS7 1SS long GI */
	{1215, 6},              /* MCS6 1SS long GI */
	{1080, 5},              /* MCS5 1SS long GI */
	{810, 4},               /* MCS4 1SS long GI */
	{540, 3},               /* MCS3 1SS long GI */
	{405, 2},               /* MCS2 1SS long GI */
	{270, 1},               /* MCS1 1SS long GI */
	{135, 0} /* MCS0 1SS long GI */
};
241  
242  /**
243   * wma_bin_search_rate() - binary search function to find rate
244   * @tbl: rate table
245   * @tbl_size: table size
246   * @mbpsx10_rate: return mbps rate
247   * @ret_flag: return flag
248   *
249   * Return: none
250   */
wma_bin_search_rate(struct wma_search_rate * tbl,int32_t tbl_size,int32_t * mbpsx10_rate,uint8_t * ret_flag)251  static void wma_bin_search_rate(struct wma_search_rate *tbl, int32_t tbl_size,
252  				int32_t *mbpsx10_rate, uint8_t *ret_flag)
253  {
254  	int32_t upper, lower, mid;
255  
256  	/* the table is descenting. index holds the largest value and the
257  	 * bottom index holds the smallest value
258  	 */
259  
260  	upper = 0;              /* index 0 */
261  	lower = tbl_size - 1;   /* last index */
262  
263  	if (*mbpsx10_rate >= tbl[upper].rate) {
264  		/* use the largest rate */
265  		*mbpsx10_rate = tbl[upper].rate;
266  		*ret_flag = tbl[upper].flag;
267  		return;
268  	} else if (*mbpsx10_rate <= tbl[lower].rate) {
269  		/* use the smallest rate */
270  		*mbpsx10_rate = tbl[lower].rate;
271  		*ret_flag = tbl[lower].flag;
272  		return;
273  	}
274  	/* now we do binery search to get the floor value */
275  	while (lower - upper > 1) {
276  		mid = (upper + lower) >> 1;
277  		if (*mbpsx10_rate == tbl[mid].rate) {
278  			/* found the exact match */
279  			*mbpsx10_rate = tbl[mid].rate;
280  			*ret_flag = tbl[mid].flag;
281  			return;
282  		}
283  		/* not found. if mid's rate is larger than input move
284  		 * upper to mid. If mid's rate is larger than input
285  		 * move lower to mid.
286  		 */
287  		if (*mbpsx10_rate > tbl[mid].rate)
288  			lower = mid;
289  		else
290  			upper = mid;
291  	}
292  	/* after the bin search the index is the ceiling of rate */
293  	*mbpsx10_rate = tbl[upper].rate;
294  	*ret_flag = tbl[upper].flag;
295  	return;
296  }
297  
298  /**
299   * wma_fill_ofdm_cck_mcast_rate() - fill ofdm cck mcast rate
300   * @mbpsx10_rate: mbps rates
301   * @nss: nss
302   * @rate: rate
303   *
304   * Return: QDF status
305   */
wma_fill_ofdm_cck_mcast_rate(int32_t mbpsx10_rate,uint8_t nss,uint8_t * rate)306  static QDF_STATUS wma_fill_ofdm_cck_mcast_rate(int32_t mbpsx10_rate,
307  					       uint8_t nss, uint8_t *rate)
308  {
309  	uint8_t idx = 0;
310  
311  	wma_bin_search_rate(ofdm_cck_rate_tbl, WMA_MAX_OFDM_CCK_RATE_TBL_SIZE,
312  			    &mbpsx10_rate, &idx);
313  
314  	/* if bit 7 is set it uses CCK */
315  	if (idx & 0x80)
316  		*rate |= (1 << 6) | (idx & 0xF); /* set bit 6 to 1 for CCK */
317  	else
318  		*rate |= (idx & 0xF);
319  	return QDF_STATUS_SUCCESS;
320  }
321  
/**
 * wma_set_ht_vht_mcast_rate() - encode the selected HT/VHT mcast rate
 * @shortgi: non-zero selects the short-GI candidate, zero the long-GI one
 * @mbpsx10_rate: requested rate in Mbps x 10 (unused; kept for interface)
 * @sgi_idx: short-GI MCS index
 * @sgi_rate: short-GI rate in Mbps x 10
 * @lgi_idx: long-GI MCS index
 * @lgi_rate: long-GI rate in Mbps x 10
 * @premable: preamble value placed in bits 6-7 (callers pass 2 for HT,
 *            3 for VHT)
 * @rate: out; preamble and MCS bits OR'ed into the encoded rate
 * @streaming_rate: out; the selected rate in Mbps x 10
 *
 * Return: none
 */
static void wma_set_ht_vht_mcast_rate(uint32_t shortgi, int32_t mbpsx10_rate,
				      uint8_t sgi_idx, int32_t sgi_rate,
				      uint8_t lgi_idx, int32_t lgi_rate,
				      uint8_t premable, uint8_t *rate,
				      int32_t *streaming_rate)
{
	uint8_t mcs = shortgi ? sgi_idx : lgi_idx;

	*rate |= (premable << 6) | (mcs & 0xF);
	*streaming_rate = shortgi ? sgi_rate : lgi_rate;
}
350  
351  /**
352   * wma_fill_ht20_mcast_rate() - fill ht20 mcast rate
353   * @shortgi: short guard interval
354   * @mbpsx10_rate: mbps rates
355   * @nss: nss
356   * @rate: rate
357   * @streaming_rate: streaming rate
358   *
359   * Return: QDF status
360   */
wma_fill_ht20_mcast_rate(uint32_t shortgi,int32_t mbpsx10_rate,uint8_t nss,uint8_t * rate,int32_t * streaming_rate)361  static QDF_STATUS wma_fill_ht20_mcast_rate(uint32_t shortgi,
362  					   int32_t mbpsx10_rate, uint8_t nss,
363  					   uint8_t *rate,
364  					   int32_t *streaming_rate)
365  {
366  	uint8_t sgi_idx = 0, lgi_idx = 0;
367  	int32_t sgi_rate, lgi_rate;
368  
369  	if (nss == 1)
370  		mbpsx10_rate = mbpsx10_rate >> 1;
371  
372  	sgi_rate = mbpsx10_rate;
373  	lgi_rate = mbpsx10_rate;
374  	if (shortgi)
375  		wma_bin_search_rate(ht20_400ns_rate_tbl,
376  				    WMA_MAX_HT20_RATE_TBL_SIZE, &sgi_rate,
377  				    &sgi_idx);
378  	else
379  		wma_bin_search_rate(ht20_800ns_rate_tbl,
380  				    WMA_MAX_HT20_RATE_TBL_SIZE, &lgi_rate,
381  				    &lgi_idx);
382  
383  	wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate, sgi_idx, sgi_rate,
384  				  lgi_idx, lgi_rate, 2, rate, streaming_rate);
385  	if (nss == 1)
386  		*streaming_rate = *streaming_rate << 1;
387  	return QDF_STATUS_SUCCESS;
388  }
389  
390  /**
391   * wma_fill_ht40_mcast_rate() - fill ht40 mcast rate
392   * @shortgi: short guard interval
393   * @mbpsx10_rate: mbps rates
394   * @nss: nss
395   * @rate: rate
396   * @streaming_rate: streaming rate
397   *
398   * Return: QDF status
399   */
wma_fill_ht40_mcast_rate(uint32_t shortgi,int32_t mbpsx10_rate,uint8_t nss,uint8_t * rate,int32_t * streaming_rate)400  static QDF_STATUS wma_fill_ht40_mcast_rate(uint32_t shortgi,
401  					   int32_t mbpsx10_rate, uint8_t nss,
402  					   uint8_t *rate,
403  					   int32_t *streaming_rate)
404  {
405  	uint8_t sgi_idx = 0, lgi_idx = 0;
406  	int32_t sgi_rate, lgi_rate;
407  
408  	/* for 2x2 divide the rate by 2 */
409  	if (nss == 1)
410  		mbpsx10_rate = mbpsx10_rate >> 1;
411  
412  	sgi_rate = mbpsx10_rate;
413  	lgi_rate = mbpsx10_rate;
414  	if (shortgi)
415  		wma_bin_search_rate(ht40_400ns_rate_tbl,
416  				    WMA_MAX_HT40_RATE_TBL_SIZE, &sgi_rate,
417  				    &sgi_idx);
418  	else
419  		wma_bin_search_rate(ht40_800ns_rate_tbl,
420  				    WMA_MAX_HT40_RATE_TBL_SIZE, &lgi_rate,
421  				    &lgi_idx);
422  
423  	wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate, sgi_idx, sgi_rate,
424  				  lgi_idx, lgi_rate, 2, rate, streaming_rate);
425  
426  	return QDF_STATUS_SUCCESS;
427  }
428  
429  /**
430   * wma_fill_vht20_mcast_rate() - fill vht20 mcast rate
431   * @shortgi: short guard interval
432   * @mbpsx10_rate: mbps rates
433   * @nss: nss
434   * @rate: rate
435   * @streaming_rate: streaming rate
436   *
437   * Return: QDF status
438   */
wma_fill_vht20_mcast_rate(uint32_t shortgi,int32_t mbpsx10_rate,uint8_t nss,uint8_t * rate,int32_t * streaming_rate)439  static QDF_STATUS wma_fill_vht20_mcast_rate(uint32_t shortgi,
440  					    int32_t mbpsx10_rate, uint8_t nss,
441  					    uint8_t *rate,
442  					    int32_t *streaming_rate)
443  {
444  	uint8_t sgi_idx = 0, lgi_idx = 0;
445  	int32_t sgi_rate, lgi_rate;
446  
447  	/* for 2x2 divide the rate by 2 */
448  	if (nss == 1)
449  		mbpsx10_rate = mbpsx10_rate >> 1;
450  
451  	sgi_rate = mbpsx10_rate;
452  	lgi_rate = mbpsx10_rate;
453  	if (shortgi)
454  		wma_bin_search_rate(vht20_400ns_rate_tbl,
455  				    WMA_MAX_VHT20_RATE_TBL_SIZE, &sgi_rate,
456  				    &sgi_idx);
457  	else
458  		wma_bin_search_rate(vht20_800ns_rate_tbl,
459  				    WMA_MAX_VHT20_RATE_TBL_SIZE, &lgi_rate,
460  				    &lgi_idx);
461  
462  	wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate, sgi_idx, sgi_rate,
463  				  lgi_idx, lgi_rate, 3, rate, streaming_rate);
464  	if (nss == 1)
465  		*streaming_rate = *streaming_rate << 1;
466  	return QDF_STATUS_SUCCESS;
467  }
468  
469  /**
470   * wma_fill_vht40_mcast_rate() - fill vht40 mcast rate
471   * @shortgi: short guard interval
472   * @mbpsx10_rate: mbps rates
473   * @nss: nss
474   * @rate: rate
475   * @streaming_rate: streaming rate
476   *
477   * Return: QDF status
478   */
wma_fill_vht40_mcast_rate(uint32_t shortgi,int32_t mbpsx10_rate,uint8_t nss,uint8_t * rate,int32_t * streaming_rate)479  static QDF_STATUS wma_fill_vht40_mcast_rate(uint32_t shortgi,
480  					    int32_t mbpsx10_rate, uint8_t nss,
481  					    uint8_t *rate,
482  					    int32_t *streaming_rate)
483  {
484  	uint8_t sgi_idx = 0, lgi_idx = 0;
485  	int32_t sgi_rate, lgi_rate;
486  
487  	/* for 2x2 divide the rate by 2 */
488  	if (nss == 1)
489  		mbpsx10_rate = mbpsx10_rate >> 1;
490  
491  	sgi_rate = mbpsx10_rate;
492  	lgi_rate = mbpsx10_rate;
493  	if (shortgi)
494  		wma_bin_search_rate(vht40_400ns_rate_tbl,
495  				    WMA_MAX_VHT40_RATE_TBL_SIZE, &sgi_rate,
496  				    &sgi_idx);
497  	else
498  		wma_bin_search_rate(vht40_800ns_rate_tbl,
499  				    WMA_MAX_VHT40_RATE_TBL_SIZE, &lgi_rate,
500  				    &lgi_idx);
501  
502  	wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate,
503  				  sgi_idx, sgi_rate, lgi_idx, lgi_rate,
504  				  3, rate, streaming_rate);
505  	if (nss == 1)
506  		*streaming_rate = *streaming_rate << 1;
507  	return QDF_STATUS_SUCCESS;
508  }
509  
510  /**
511   * wma_fill_vht80_mcast_rate() - fill vht80 mcast rate
512   * @shortgi: short guard interval
513   * @mbpsx10_rate: mbps rates
514   * @nss: nss
515   * @rate: rate
516   * @streaming_rate: streaming rate
517   *
518   * Return: QDF status
519   */
wma_fill_vht80_mcast_rate(uint32_t shortgi,int32_t mbpsx10_rate,uint8_t nss,uint8_t * rate,int32_t * streaming_rate)520  static QDF_STATUS wma_fill_vht80_mcast_rate(uint32_t shortgi,
521  					    int32_t mbpsx10_rate, uint8_t nss,
522  					    uint8_t *rate,
523  					    int32_t *streaming_rate)
524  {
525  	uint8_t sgi_idx = 0, lgi_idx = 0;
526  	int32_t sgi_rate, lgi_rate;
527  
528  	/* for 2x2 divide the rate by 2 */
529  	if (nss == 1)
530  		mbpsx10_rate = mbpsx10_rate >> 1;
531  
532  	sgi_rate = mbpsx10_rate;
533  	lgi_rate = mbpsx10_rate;
534  	if (shortgi)
535  		wma_bin_search_rate(vht80_400ns_rate_tbl,
536  				    WMA_MAX_VHT80_RATE_TBL_SIZE, &sgi_rate,
537  				    &sgi_idx);
538  	else
539  		wma_bin_search_rate(vht80_800ns_rate_tbl,
540  				    WMA_MAX_VHT80_RATE_TBL_SIZE, &lgi_rate,
541  				    &lgi_idx);
542  
543  	wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate, sgi_idx, sgi_rate,
544  				  lgi_idx, lgi_rate, 3, rate, streaming_rate);
545  	if (nss == 1)
546  		*streaming_rate = *streaming_rate << 1;
547  	return QDF_STATUS_SUCCESS;
548  }
549  
550  /**
551   * wma_fill_ht_mcast_rate() - fill ht mcast rate
552   * @shortgi: short guard interval
553   * @chwidth: channel width
554   * @chanmode: channel mode
555   * @mhz: frequency
556   * @mbpsx10_rate: mbps rates
557   * @nss: nss
558   * @rate: rate
559   * @streaming_rate: streaming rate
560   *
561   * Return: QDF status
562   */
wma_fill_ht_mcast_rate(uint32_t shortgi,uint32_t chwidth,int32_t mbpsx10_rate,uint8_t nss,uint8_t * rate,int32_t * streaming_rate)563  static QDF_STATUS wma_fill_ht_mcast_rate(uint32_t shortgi,
564  					 uint32_t chwidth, int32_t mbpsx10_rate,
565  					 uint8_t nss, uint8_t *rate,
566  					 int32_t *streaming_rate)
567  {
568  	int32_t ret = 0;
569  
570  	*streaming_rate = 0;
571  	if (chwidth == 0)
572  		ret = wma_fill_ht20_mcast_rate(shortgi, mbpsx10_rate,
573  					       nss, rate, streaming_rate);
574  	else if (chwidth == 1)
575  		ret = wma_fill_ht40_mcast_rate(shortgi, mbpsx10_rate,
576  					       nss, rate, streaming_rate);
577  	else
578  		wma_err("Error, Invalid chwidth enum %d", chwidth);
579  	return (*streaming_rate != 0) ? QDF_STATUS_SUCCESS : QDF_STATUS_E_INVAL;
580  }
581  
582  /**
583   * wma_fill_vht_mcast_rate() - fill vht mcast rate
584   * @shortgi: short guard interval
585   * @chwidth: channel width
586   * @chanmode: channel mode
587   * @mhz: frequency
588   * @mbpsx10_rate: mbps rates
589   * @nss: nss
590   * @rate: rate
591   * @streaming_rate: streaming rate
592   *
593   * Return: QDF status
594   */
wma_fill_vht_mcast_rate(uint32_t shortgi,uint32_t chwidth,int32_t mbpsx10_rate,uint8_t nss,uint8_t * rate,int32_t * streaming_rate)595  static QDF_STATUS wma_fill_vht_mcast_rate(uint32_t shortgi,
596  					  uint32_t chwidth,
597  					  int32_t mbpsx10_rate, uint8_t nss,
598  					  uint8_t *rate,
599  					  int32_t *streaming_rate)
600  {
601  	int32_t ret = 0;
602  
603  	*streaming_rate = 0;
604  	if (chwidth == 0)
605  		ret = wma_fill_vht20_mcast_rate(shortgi, mbpsx10_rate, nss,
606  						rate, streaming_rate);
607  	else if (chwidth == 1)
608  		ret = wma_fill_vht40_mcast_rate(shortgi, mbpsx10_rate, nss,
609  						rate, streaming_rate);
610  	else if (chwidth == 2)
611  		ret = wma_fill_vht80_mcast_rate(shortgi, mbpsx10_rate, nss,
612  						rate, streaming_rate);
613  	else
614  		wma_err("chwidth enum %d not supported", chwidth);
615  	return (*streaming_rate != 0) ? QDF_STATUS_SUCCESS : QDF_STATUS_E_INVAL;
616  }
617  
#define WMA_MCAST_1X1_CUT_OFF_RATE 2000
/**
 * wma_encode_mc_rate() - encode the requested multicast rate
 * @shortgi: short guard interval
 * @chwidth: channel width
 * @mhz: channel frequency in MHz (used to rule out VHT on 2.4 GHz)
 * @mbpsx10_rate: requested rate in Mbps x 10; bits 28-30 may carry a
 *                user-requested nss (bit 30 = present, bits 28-29 = value)
 * @nss: nss upper bound (0: 1x1, 1: 2x2, 2: 3x3)
 * @rate: out; encoded rate (nss in bits 4-5, preamble/MCS in the rest)
 *
 * Return: QDF status
 */
static QDF_STATUS wma_encode_mc_rate(uint32_t shortgi, uint32_t chwidth,
			     A_UINT32 mhz, int32_t mbpsx10_rate, uint8_t nss,
			     uint8_t *rate)
{
	int32_t ret = 0;

	/* nss input value: 0 - 1x1; 1 - 2x2; 2 - 3x3
	 * the phymode selection is based on following assumption:
	 * (1) if the app specifically requested 1x1 or 2x2 we honor it
	 * (2) if mbpsx10_rate <= 540: always use BG
	 * (3) 540 < mbpsx10_rate <= 2000: use 1x1 HT/VHT
	 * (4) 2000 < mbpsx10_rate: use 2x2 HT/VHT
	 */
	wma_debug("Input: nss = %d, mbpsx10 = 0x%x, chwidth = %d, shortgi = %d",
		  nss, mbpsx10_rate, chwidth, shortgi);
	if ((mbpsx10_rate & 0x40000000) && nss > 0) {
		/* bit 30 indicates user inputted nss,
		 * bit 28 and 29 used to encode nss
		 */
		uint8_t user_nss = (mbpsx10_rate & 0x30000000) >> 28;

		/* never exceed the capability passed in via @nss */
		nss = (user_nss < nss) ? user_nss : nss;
		/* clear bits 28 - 30 to recover the actual rate */
		mbpsx10_rate &= ~0x70000000;
	} else if (mbpsx10_rate <= WMA_MCAST_1X1_CUT_OFF_RATE) {
		/* if the input rate is less or equal to the
		 * 1x1 cutoff rate we use 1x1 only
		 */
		nss = 0;
	}
	/* encode NSS bits (bit 4, bit 5) */
	*rate = (nss & 0x3) << 4;
	/* if mcast input rate exceeds the ofdm/cck max rate 54mpbs
	 * we try to choose best ht/vht mcs rate
	 */
	if (540 < mbpsx10_rate) {
		/* cannot use ofdm/cck, choose closest ht/vht mcs rate */
		uint8_t rate_ht = *rate;
		uint8_t rate_vht = *rate;
		int32_t stream_rate_ht = 0;
		int32_t stream_rate_vht = 0;
		int32_t stream_rate = 0;

		ret = wma_fill_ht_mcast_rate(shortgi, chwidth, mbpsx10_rate,
					     nss, &rate_ht,
					     &stream_rate_ht);
		if (ret != QDF_STATUS_SUCCESS)
			stream_rate_ht = 0;
		if (mhz < WMA_2_4_GHZ_MAX_FREQ) {
			/* 2.4 GHz band: no 11ac, so the HT result stands */
			*rate = rate_ht;
			stream_rate = stream_rate_ht;
			goto ht_vht_done;
		}
		/* capable doing 11AC mcast so that search vht tables */
		ret = wma_fill_vht_mcast_rate(shortgi, chwidth, mbpsx10_rate,
					      nss, &rate_vht,
					      &stream_rate_vht);
		if (ret != QDF_STATUS_SUCCESS) {
			/* no vht match: fall back to ht if it found one */
			if (stream_rate_ht != 0)
				ret = QDF_STATUS_SUCCESS;
			*rate = rate_ht;
			stream_rate = stream_rate_ht;
			goto ht_vht_done;
		}
		if (stream_rate_ht == 0) {
			/* only vht rate available */
			*rate = rate_vht;
			stream_rate = stream_rate_vht;
		} else {
			/* both matched: prefer whichever satisfies the
			 * requested rate, or the closer of the two
			 */
			/* set ht as default first */
			*rate = rate_ht;
			stream_rate = stream_rate_ht;
			if (stream_rate < mbpsx10_rate) {
				if (mbpsx10_rate <= stream_rate_vht ||
				    stream_rate < stream_rate_vht) {
					*rate = rate_vht;
					stream_rate = stream_rate_vht;
				}
			} else {
				if (stream_rate_vht >= mbpsx10_rate &&
				    stream_rate_vht < stream_rate) {
					*rate = rate_vht;
					stream_rate = stream_rate_vht;
				}
			}
		}
ht_vht_done:
		wma_debug("NSS = %d, freq = %d", nss, mhz);
		wma_debug("input_rate = %d, chwidth = %d rate = 0x%x, streaming_rate = %d",
			 mbpsx10_rate, chwidth, *rate, stream_rate);
	} else {
		if (mbpsx10_rate > 0)
			ret = wma_fill_ofdm_cck_mcast_rate(mbpsx10_rate,
							   nss, rate);
		else
			*rate = 0xFF;

		wma_debug("NSS = %d, input_rate = %d, rate = 0x%x",
			  nss, mbpsx10_rate, *rate);
	}
	return ret;
}
734  
735  /**
736   * wma_cp_stats_set_rate_flag() - set rate flags within cp_stats priv object
737   * @wma: wma handle
738   * @vdev_id: vdev id
739   *
740   * Return: none
741   */
wma_cp_stats_set_rate_flag(tp_wma_handle wma,uint8_t vdev_id)742  static void wma_cp_stats_set_rate_flag(tp_wma_handle wma, uint8_t vdev_id)
743  {
744  	struct wlan_objmgr_vdev *vdev;
745  	struct wlan_objmgr_psoc *psoc = wma->psoc;
746  	struct wma_txrx_node *iface = &wma->interfaces[vdev_id];
747  	uint32_t rate_flag;
748  	QDF_STATUS status;
749  
750  	vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc, vdev_id,
751  						    WLAN_LEGACY_WMA_ID);
752  	if (!vdev) {
753  		wma_err("vdev not found for id: %d", vdev_id);
754  		return;
755  	}
756  
757  	status = wma_get_vdev_rate_flag(iface->vdev, &rate_flag);
758  	if (QDF_IS_STATUS_ERROR(status)) {
759  		wma_err("vdev not found for id: %d", vdev_id);
760  		return;
761  	}
762  	ucfg_mc_cp_stats_set_rate_flags(vdev, rate_flag);
763  	wlan_objmgr_vdev_release_ref(vdev, WLAN_LEGACY_WMA_ID);
764  }
765  
766  #ifdef WLAN_FEATURE_11AX
/**
 * wma_set_bss_rate_flags_he() - OR in HE rate flags based on BSS capability
 * @rate_flags: rate_flags pointer to update
 * @add_bss: add_bss params carrying he_capable and ch_width
 *
 * Return: QDF_STATUS_SUCCESS if the BSS is HE capable and the flags were
 *         set, QDF_STATUS_E_NOSUPPORT otherwise
 */
static QDF_STATUS wma_set_bss_rate_flags_he(enum tx_rate_info *rate_flags,
					    struct bss_params *add_bss)
{
	if (!add_bss->he_capable)
		return QDF_STATUS_E_NOSUPPORT;

	/* width-dependent HE flags (HE20/40/80/160) */
	*rate_flags |= wma_get_he_rate_flags(add_bss->ch_width);

	wma_debug("he_capable %d rate_flags 0x%x", add_bss->he_capable,
		  *rate_flags);
	return QDF_STATUS_SUCCESS;
}
786  
/* Report whether the BSS being added is HE (11ax) capable */
static bool wma_get_bss_he_capable(struct bss_params *add_bss)
{
	return add_bss->he_capable;
}
791  #else
/* 11ax disabled at build time: HE rate flags are never applicable */
static QDF_STATUS wma_set_bss_rate_flags_he(enum tx_rate_info *rate_flags,
					    struct bss_params *add_bss)
{
	return QDF_STATUS_E_NOSUPPORT;
}
797  
/* 11ax disabled at build time: never HE capable */
static bool wma_get_bss_he_capable(struct bss_params *add_bss)
{
	return false;
}
802  #endif
803  
wma_get_vht_rate_flags(enum phy_ch_width ch_width)804  enum tx_rate_info wma_get_vht_rate_flags(enum phy_ch_width ch_width)
805  {
806  	enum tx_rate_info rate_flags = 0;
807  
808  	if (ch_width == CH_WIDTH_80P80MHZ)
809  		rate_flags |= TX_RATE_VHT160 | TX_RATE_VHT80 | TX_RATE_VHT40 |
810  				TX_RATE_VHT20;
811  	if (ch_width == CH_WIDTH_160MHZ)
812  		rate_flags |= TX_RATE_VHT160 | TX_RATE_VHT80 | TX_RATE_VHT40 |
813  				TX_RATE_VHT20;
814  	if (ch_width == CH_WIDTH_80MHZ)
815  		rate_flags |= TX_RATE_VHT80 | TX_RATE_VHT40 | TX_RATE_VHT20;
816  	else if (ch_width)
817  		rate_flags |= TX_RATE_VHT40 | TX_RATE_VHT20;
818  	else
819  		rate_flags |= TX_RATE_VHT20;
820  	return rate_flags;
821  }
822  
wma_get_ht_rate_flags(enum phy_ch_width ch_width)823  enum tx_rate_info wma_get_ht_rate_flags(enum phy_ch_width ch_width)
824  {
825  	enum tx_rate_info rate_flags = 0;
826  
827  	if (ch_width)
828  		rate_flags |= TX_RATE_HT40 | TX_RATE_HT20;
829  	else
830  		rate_flags |= TX_RATE_HT20;
831  
832  	return rate_flags;
833  }
834  
wma_get_he_rate_flags(enum phy_ch_width ch_width)835  enum tx_rate_info wma_get_he_rate_flags(enum phy_ch_width ch_width)
836  {
837  	enum tx_rate_info rate_flags = 0;
838  
839  	if (ch_width == CH_WIDTH_160MHZ ||
840  	    ch_width == CH_WIDTH_80P80MHZ)
841  		rate_flags |= TX_RATE_HE160 | TX_RATE_HE80 | TX_RATE_HE40 |
842  				TX_RATE_HE20;
843  	else if (ch_width == CH_WIDTH_80MHZ)
844  		rate_flags |= TX_RATE_HE80 | TX_RATE_HE40 | TX_RATE_HE20;
845  	else if (ch_width)
846  		rate_flags |= TX_RATE_HE40 | TX_RATE_HE20;
847  	else
848  		rate_flags |= TX_RATE_HE20;
849  
850  	return rate_flags;
851  }
852  
/**
 * wma_set_bss_rate_flags() - derive and store rate flags for a BSS
 * @wma: wma handle
 * @vdev_id: vdev id
 * @add_bss: add_bss params carrying the BSS capabilities
 *
 * Tries EHT first, then HE, then VHT/HT, storing the resulting flags in
 * the vdev mlme rate_info and mirroring them into cp_stats.
 */
void wma_set_bss_rate_flags(tp_wma_handle wma, uint8_t vdev_id,
			    struct bss_params *add_bss)
{
	struct wma_txrx_node *iface = &wma->interfaces[vdev_id];
	struct vdev_mlme_obj *vdev_mlme;
	enum tx_rate_info *rate_flags;
	QDF_STATUS qdf_status;

	vdev_mlme = wlan_vdev_mlme_get_cmpt_obj(iface->vdev);
	if (!vdev_mlme) {
		wma_err("Failed to get mlme obj for vdev_%d", vdev_id);
		return;
	}
	rate_flags = &vdev_mlme->mgmt.rate_info.rate_flags;
	*rate_flags = 0;

	/* highest-capability path wins: EHT, else HE, else VHT/HT */
	qdf_status = wma_set_bss_rate_flags_eht(rate_flags, add_bss);
	if (QDF_IS_STATUS_ERROR(qdf_status)) {
		if (QDF_STATUS_SUCCESS !=
			wma_set_bss_rate_flags_he(rate_flags, add_bss)) {
			if (add_bss->vhtCapable)
				*rate_flags = wma_get_vht_rate_flags(add_bss->ch_width);
			/* avoid to conflict with htCapable flag */
			else if (add_bss->htCapable)
				*rate_flags |= wma_get_ht_rate_flags(add_bss->ch_width);
		}
	}

	/* short GI in either width implies the SGI flag */
	if (add_bss->staContext.fShortGI20Mhz ||
	    add_bss->staContext.fShortGI40Mhz)
		*rate_flags |= TX_RATE_SGI;

	/* no HT/VHT/HE/EHT at all: plain legacy rates */
	if (!add_bss->htCapable && !add_bss->vhtCapable &&
	    !wma_get_bss_he_capable(add_bss) &&
	    !wma_get_bss_eht_capable(add_bss))
		*rate_flags = TX_RATE_LEGACY;

	wma_debug("capable: vht %u, ht %u, rate_flags %x, ch_width %d",
		  add_bss->vhtCapable, add_bss->htCapable,
		  *rate_flags, add_bss->ch_width);

	wma_cp_stats_set_rate_flag(wma, vdev_id);
}
896  
/**
 * wma_set_vht_txbf_cfg() - push configured VHT beamforming caps to firmware
 * @mac: mac context carrying the VHT capability configuration
 * @vdev_id: vdev to apply the TXBF parameter on
 *
 * Return: none
 */
void wma_set_vht_txbf_cfg(struct mac_context *mac, uint8_t vdev_id)
{
	QDF_STATUS ret;
	wmi_vdev_txbf_en txbf_cfg = {0};
	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);

	if (!wma)
		return;

	/* Mirror the configured SU/MU beamformee and SU beamformer
	 * capabilities into the vdev parameter bitfield.
	 */
	txbf_cfg.sutxbfee = mac->mlme_cfg->vht_caps.vht_cap_info.su_bformee;
	txbf_cfg.mutxbfee =
		mac->mlme_cfg->vht_caps.vht_cap_info.enable_mu_bformee;
	txbf_cfg.sutxbfer = mac->mlme_cfg->vht_caps.vht_cap_info.su_bformer;

	ret = wma_vdev_set_param(wma->wmi_handle, vdev_id,
				 wmi_vdev_param_txbf,
				 *((A_UINT8 *)&txbf_cfg));
	if (QDF_IS_STATUS_ERROR(ret))
		wma_err("failed to set VHT TXBF(status = %d)", ret);
}
917  
918  /**
919   * wmi_unified_send_txbf() - set txbf parameter to fw
920   * @wma: wma handle
921   * @params: txbf parameters
922   *
923   * Return: 0 for success or error code
924   */
wmi_unified_send_txbf(tp_wma_handle wma,tpAddStaParams params)925  int32_t wmi_unified_send_txbf(tp_wma_handle wma, tpAddStaParams params)
926  {
927  	wmi_vdev_txbf_en txbf_en = {0};
928  
929  	/* This is set when Other partner is Bformer
930  	 * and we are capable bformee(enabled both in ini and fw)
931  	 */
932  	txbf_en.sutxbfee = params->vhtTxBFCapable;
933  	txbf_en.mutxbfee = params->vhtTxMUBformeeCapable;
934  	txbf_en.sutxbfer = params->enable_su_tx_bformer;
935  
936  	/* When MU TxBfee is set, SU TxBfee must be set by default */
937  	if (txbf_en.mutxbfee)
938  		txbf_en.sutxbfee = txbf_en.mutxbfee;
939  
940  	wma_debug("txbf_en.sutxbfee %d txbf_en.mutxbfee %d, sutxbfer %d",
941  		 txbf_en.sutxbfee, txbf_en.mutxbfee, txbf_en.sutxbfer);
942  
943  	return wma_vdev_set_param(wma->wmi_handle,
944  						params->smesessionId,
945  						wmi_vdev_param_txbf,
946  						*((A_UINT8 *) &txbf_en));
947  }
948  
949  /**
950   * wma_data_tx_ack_work_handler() - process data tx ack
951   * @ack_work: work structure
952   *
953   * Return: none
954   */
wma_data_tx_ack_work_handler(void * ack_work)955  static void wma_data_tx_ack_work_handler(void *ack_work)
956  {
957  	struct wma_tx_ack_work_ctx *work;
958  	tp_wma_handle wma_handle;
959  	wma_tx_ota_comp_callback ack_cb;
960  
961  	work = (struct wma_tx_ack_work_ctx *)ack_work;
962  
963  	wma_handle = work->wma_handle;
964  	if (!wma_handle || cds_is_load_or_unload_in_progress()) {
965  		wma_err("Driver load/unload in progress");
966  		goto free_frame;
967  	}
968  
969  	wma_debug("Data Tx Ack Cb Status %d", work->status);
970  	ack_cb = wma_handle->umac_data_ota_ack_cb;
971  	if (!ack_cb) {
972  		wma_err("Data Tx Ack Cb is NULL");
973  		goto free_frame;
974  	}
975  
976  	ack_cb(wma_handle->mac_context, work->frame, work->status,
977  	       NULL);
978  	goto end;
979  
980  free_frame:
981  	if (work->frame)
982  		qdf_nbuf_free(work->frame);
983  
984  end:
985  	qdf_mem_free(work);
986  
987  	if (wma_handle) {
988  		wma_handle->umac_data_ota_ack_cb = NULL;
989  		wma_handle->last_umac_data_nbuf = NULL;
990  		wma_handle->ack_work_ctx = NULL;
991  	}
992  }
993  
994  /**
995   * wma_data_tx_ack_comp_hdlr() - handles tx data ack completion
996   * @context: context with which the handler is registered
997   * @netbuf: tx data nbuf
998   * @err: status of tx completion
999   *
1000   * This is the cb registered with TxRx for
1001   * Ack Complete
1002   *
1003   * Return: none
1004   */
1005  void
wma_data_tx_ack_comp_hdlr(void * wma_context,qdf_nbuf_t netbuf,int32_t status)1006  wma_data_tx_ack_comp_hdlr(void *wma_context, qdf_nbuf_t netbuf, int32_t status)
1007  {
1008  	tp_wma_handle wma_handle = (tp_wma_handle) wma_context;
1009  	struct wma_tx_ack_work_ctx *ack_work;
1010  	QDF_STATUS qdf_status;
1011  
1012  	if (wma_validate_handle(wma_handle))
1013  		return;
1014  
1015  	if (!netbuf) {
1016  		wma_debug("netbuf is NULL");
1017  		return;
1018  	}
1019  
1020  	/*
1021  	 * if netBuf does not match with pending nbuf then just free the
1022  	 * netbuf and do not call ack cb
1023  	 */
1024  	if (wma_handle->last_umac_data_nbuf != netbuf) {
1025  		wma_err("nbuf does not match but umac_data_ota_ack_cb is %s null",
1026  			wma_handle->umac_data_ota_ack_cb ? "not" : "");
1027  		goto free_nbuf;
1028  	}
1029  
1030  	if (!wma_handle->umac_data_ota_ack_cb) {
1031  		wma_err_rl("ota_ack cb not registered");
1032  		goto free_nbuf;
1033  	}
1034  
1035  	ack_work = qdf_mem_malloc(sizeof(struct wma_tx_ack_work_ctx));
1036  	if (ack_work) {
1037  		wma_handle->ack_work_ctx = ack_work;
1038  
1039  		ack_work->wma_handle = wma_handle;
1040  		ack_work->sub_type = 0;
1041  		ack_work->status = status;
1042  		ack_work->frame = netbuf;
1043  
1044  		/*
1045  		 * free of the netbuf will be done by the scheduled work so
1046  		 * just do unmap here
1047  		 */
1048  		qdf_nbuf_unmap_single(wma_handle->qdf_dev, netbuf,
1049  				      QDF_DMA_TO_DEVICE);
1050  
1051  		qdf_status = qdf_create_work(0, &ack_work->ack_cmp_work,
1052  					     wma_data_tx_ack_work_handler,
1053  					     ack_work);
1054  		if (QDF_IS_STATUS_ERROR(qdf_status)) {
1055  			qdf_nbuf_free(netbuf);
1056  			wma_err("Failed to create TX ack work");
1057  			return;
1058  		}
1059  
1060  		qdf_sched_work(0, &ack_work->ack_cmp_work);
1061  		return;
1062  	}
1063  
1064  free_nbuf:
1065  	/* unmap and freeing the tx buf as txrx is not taking care */
1066  	qdf_nbuf_unmap_single(wma_handle->qdf_dev, netbuf, QDF_DMA_TO_DEVICE);
1067  	qdf_nbuf_free(netbuf);
1068  }
1069  
1070  /**
1071   * wma_check_txrx_chainmask() - check txrx chainmask
1072   * @num_rf_chains: number of rf chains
1073   * @cmd_value: command value
1074   *
1075   * Return: QDF_STATUS_SUCCESS for success or error code
1076   */
wma_check_txrx_chainmask(int num_rf_chains,int cmd_value)1077  QDF_STATUS wma_check_txrx_chainmask(int num_rf_chains, int cmd_value)
1078  {
1079  	if ((cmd_value > WMA_MAX_RF_CHAINS(num_rf_chains)) ||
1080  	    (cmd_value < WMA_MIN_RF_CHAINS)) {
1081  		wma_err("Requested value %d over the range", cmd_value);
1082  		return QDF_STATUS_E_INVAL;
1083  	}
1084  	return QDF_STATUS_SUCCESS;
1085  }
1086  
1087  /**
1088   * wma_set_enable_disable_mcc_adaptive_scheduler() -enable/disable mcc scheduler
1089   * @mcc_adaptive_scheduler: enable/disable
1090   *
1091   * This function enable/disable mcc adaptive scheduler in fw.
1092   *
1093   * Return: QDF_STATUS_SUCCESS for success or error code
1094   */
wma_set_enable_disable_mcc_adaptive_scheduler(uint32_t mcc_adaptive_scheduler)1095  QDF_STATUS wma_set_enable_disable_mcc_adaptive_scheduler(uint32_t
1096  							 mcc_adaptive_scheduler)
1097  {
1098  	tp_wma_handle wma = NULL;
1099  	uint32_t pdev_id;
1100  
1101  	wma = cds_get_context(QDF_MODULE_ID_WMA);
1102  	if (!wma)
1103  		return QDF_STATUS_E_FAULT;
1104  
1105  	/*
1106  	 * Since there could be up to two instances of OCS in FW (one per MAC),
1107  	 * FW provides the option of enabling and disabling MAS on a per MAC
1108  	 * basis. But, Host does not have enable/disable option for individual
1109  	 * MACs. So, FW agreed for the Host to send down a 'pdev id' of 0.
1110  	 * When 'pdev id' of 0 is used, FW treats this as a SOC level command
1111  	 * and applies the same value to both MACs. Irrespective of the value
1112  	 * of 'WMI_SERVICE_DEPRECATED_REPLACE', the pdev id needs to be '0'
1113  	 * (SOC level) for WMI_RESMGR_ADAPTIVE_OCS_ENABLE_DISABLE_CMDID
1114  	 */
1115  	pdev_id = WMI_PDEV_ID_SOC;
1116  
1117  	return wmi_unified_set_enable_disable_mcc_adaptive_scheduler_cmd(
1118  			wma->wmi_handle, mcc_adaptive_scheduler, pdev_id);
1119  }
1120  
1121  /**
1122   * wma_set_mcc_channel_time_latency() -set MCC channel time latency
1123   * @wma: wma handle
1124   * @mcc_channel: mcc channel
1125   * @mcc_channel_time_latency: MCC channel time latency.
1126   *
1127   * Currently used to set time latency for an MCC vdev/adapter using operating
1128   * channel of it and channel number. The info is provided run time using
1129   * iwpriv command: iwpriv <wlan0 | p2p0> setMccLatency <latency in ms>.
1130   *
1131   * Return: QDF status
1132   */
wma_set_mcc_channel_time_latency(tp_wma_handle wma,uint32_t mcc_channel,uint32_t mcc_channel_time_latency)1133  QDF_STATUS wma_set_mcc_channel_time_latency(tp_wma_handle wma,
1134  	uint32_t mcc_channel, uint32_t mcc_channel_time_latency)
1135  {
1136  	bool mcc_adapt_sch = false;
1137  	struct mac_context *mac = NULL;
1138  	uint32_t channel1 = mcc_channel;
1139  	uint32_t chan1_freq = cds_chan_to_freq(channel1);
1140  
1141  	if (!wma) {
1142  		wma_err("NULL wma ptr. Exiting");
1143  		QDF_ASSERT(0);
1144  		return QDF_STATUS_E_FAILURE;
1145  	}
1146  	mac = cds_get_context(QDF_MODULE_ID_PE);
1147  	if (!mac) {
1148  		QDF_ASSERT(0);
1149  		return QDF_STATUS_E_FAILURE;
1150  	}
1151  
1152  	/* First step is to confirm if MCC is active */
1153  	if (!lim_is_in_mcc(mac)) {
1154  		wma_err("MCC is not active. Exiting");
1155  		QDF_ASSERT(0);
1156  		return QDF_STATUS_E_FAILURE;
1157  	}
1158  	/* Confirm MCC adaptive scheduler feature is disabled */
1159  	if (policy_mgr_get_dynamic_mcc_adaptive_sch(mac->psoc,
1160  						    &mcc_adapt_sch) ==
1161  	    QDF_STATUS_SUCCESS) {
1162  		if (mcc_adapt_sch) {
1163  			wma_debug("Can't set channel latency while MCC ADAPTIVE SCHED is enabled. Exit");
1164  			return QDF_STATUS_SUCCESS;
1165  		}
1166  	} else {
1167  		wma_err("Failed to get value for MCC_ADAPTIVE_SCHED, "
1168  			 "Exit w/o setting latency");
1169  		QDF_ASSERT(0);
1170  		return QDF_STATUS_E_FAILURE;
1171  	}
1172  
1173  	return wmi_unified_set_mcc_channel_time_latency_cmd(wma->wmi_handle,
1174  						chan1_freq,
1175  						mcc_channel_time_latency);
1176  }
1177  
1178  /**
1179   * wma_set_mcc_channel_time_quota() -set MCC channel time quota
1180   * @wma: wma handle
1181   * @adapter_1_chan_number: adapter 1 channel number
1182   * @adapter_1_quota: adapter 1 quota
1183   * @adapter_2_chan_number: adapter 2 channel number
1184   *
1185   * Currently used to set time quota for 2 MCC vdevs/adapters using (operating
1186   * channel, quota) for each mode . The info is provided run time using
1187   * iwpriv command: iwpriv <wlan0 | p2p0> setMccQuota <quota in ms>.
1188   * Note: the quota provided in command is for the same mode in cmd. HDD
1189   * checks if MCC mode is active, gets the second mode and its operating chan.
1190   * Quota for the 2nd role is calculated as 100 - quota of first mode.
1191   *
1192   * Return: QDF status
1193   */
wma_set_mcc_channel_time_quota(tp_wma_handle wma,uint32_t adapter_1_chan_number,uint32_t adapter_1_quota,uint32_t adapter_2_chan_number)1194  QDF_STATUS wma_set_mcc_channel_time_quota(tp_wma_handle wma,
1195  		uint32_t adapter_1_chan_number,	uint32_t adapter_1_quota,
1196  		uint32_t adapter_2_chan_number)
1197  {
1198  	bool mcc_adapt_sch = false;
1199  	struct mac_context *mac = NULL;
1200  	uint32_t chan1_freq = cds_chan_to_freq(adapter_1_chan_number);
1201  	uint32_t chan2_freq = cds_chan_to_freq(adapter_2_chan_number);
1202  
1203  	if (!wma) {
1204  		wma_err("NULL wma ptr. Exiting");
1205  		QDF_ASSERT(0);
1206  		return QDF_STATUS_E_FAILURE;
1207  	}
1208  	mac = cds_get_context(QDF_MODULE_ID_PE);
1209  	if (!mac) {
1210  		QDF_ASSERT(0);
1211  		return QDF_STATUS_E_FAILURE;
1212  	}
1213  
1214  	/* First step is to confirm if MCC is active */
1215  	if (!lim_is_in_mcc(mac)) {
1216  		wma_debug("MCC is not active. Exiting");
1217  		QDF_ASSERT(0);
1218  		return QDF_STATUS_E_FAILURE;
1219  	}
1220  
1221  	/* Confirm MCC adaptive scheduler feature is disabled */
1222  	if (policy_mgr_get_dynamic_mcc_adaptive_sch(mac->psoc,
1223  						    &mcc_adapt_sch) ==
1224  	    QDF_STATUS_SUCCESS) {
1225  		if (mcc_adapt_sch) {
1226  			wma_debug("Can't set channel quota while MCC_ADAPTIVE_SCHED is enabled. Exit");
1227  			return QDF_STATUS_SUCCESS;
1228  		}
1229  	} else {
1230  		wma_err("Failed to retrieve WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED. Exit");
1231  		QDF_ASSERT(0);
1232  		return QDF_STATUS_E_FAILURE;
1233  	}
1234  
1235  	return wmi_unified_set_mcc_channel_time_quota_cmd(wma->wmi_handle,
1236  						chan1_freq,
1237  						adapter_1_quota,
1238  						chan2_freq);
1239  }
1240  
1241  #define MAX_VDEV_PROCESS_RATE_PARAMS 2
1242  /* params being sent:
1243   * wmi_vdev_param_sgi
1244   * wmi_vdev_param_mcast_data_rate
1245   */
wma_process_rate_update_indicate(tp_wma_handle wma,tSirRateUpdateInd * pRateUpdateParams)1246  QDF_STATUS wma_process_rate_update_indicate(tp_wma_handle wma,
1247  					    tSirRateUpdateInd *
1248  					    pRateUpdateParams)
1249  {
1250  	int32_t ret = 0;
1251  	uint8_t vdev_id = 0;
1252  	int32_t mbpsx10_rate = -1;
1253  	uint32_t paramid;
1254  	uint8_t rate = 0;
1255  	uint32_t short_gi, rate_flag;
1256  	struct wma_txrx_node *intr = wma->interfaces;
1257  	QDF_STATUS status;
1258  	struct dev_set_param setparam[MAX_VDEV_PROCESS_RATE_PARAMS] = {};
1259  	uint8_t index = 0;
1260  
1261  	/* Get the vdev id */
1262  	if (wma_find_vdev_id_by_addr(wma, pRateUpdateParams->bssid.bytes,
1263  				     &vdev_id)) {
1264  		wma_err("vdev handle is invalid for "QDF_MAC_ADDR_FMT,
1265  			 QDF_MAC_ADDR_REF(pRateUpdateParams->bssid.bytes));
1266  		qdf_mem_free(pRateUpdateParams);
1267  		return QDF_STATUS_E_INVAL;
1268  	}
1269  	short_gi = intr[vdev_id].config.shortgi;
1270  
1271  	status = wma_get_vdev_rate_flag(intr[vdev_id].vdev, &rate_flag);
1272  	if (QDF_IS_STATUS_ERROR(status)) {
1273  		wma_err("Failed to get rate_flag for VDEV_%d", vdev_id);
1274  		qdf_mem_free(pRateUpdateParams);
1275  		return QDF_STATUS_E_INVAL;
1276  	}
1277  
1278  	if (short_gi == 0)
1279  		short_gi = (rate_flag & TX_RATE_SGI) ? true : false;
1280  	/* first check if reliable TX mcast rate is used. If not check the bcast
1281  	 * Then is mcast. Mcast rate is saved in mcastDataRate24GHz
1282  	 */
1283  	if (pRateUpdateParams->reliableMcastDataRateTxFlag > 0) {
1284  		mbpsx10_rate = pRateUpdateParams->reliableMcastDataRate;
1285  		paramid = wmi_vdev_param_mcast_data_rate;
1286  		if (pRateUpdateParams->
1287  		    reliableMcastDataRateTxFlag & TX_RATE_SGI)
1288  			short_gi = 1;   /* upper layer specified short GI */
1289  	} else if (pRateUpdateParams->bcastDataRate > -1) {
1290  		mbpsx10_rate = pRateUpdateParams->bcastDataRate;
1291  		paramid = wmi_vdev_param_bcast_data_rate;
1292  	} else {
1293  		mbpsx10_rate = pRateUpdateParams->mcastDataRate24GHz;
1294  		paramid = wmi_vdev_param_mcast_data_rate;
1295  		if (pRateUpdateParams->
1296  		    mcastDataRate24GHzTxFlag & TX_RATE_SGI)
1297  			short_gi = 1;   /* upper layer specified short GI */
1298  	}
1299  	wma_debug("dev_id = %d, dev_type = %d, dev_mode = %d,",
1300  		 vdev_id, intr[vdev_id].type,
1301  		 pRateUpdateParams->dev_mode);
1302  	wma_debug("mac = "QDF_MAC_ADDR_FMT", config.shortgi = %d, rate_flags = 0x%x",
1303  		 QDF_MAC_ADDR_REF(pRateUpdateParams->bssid.bytes),
1304  		 intr[vdev_id].config.shortgi, rate_flag);
1305  	ret = wma_encode_mc_rate(short_gi, intr[vdev_id].config.chwidth,
1306  				 intr[vdev_id].ch_freq, mbpsx10_rate,
1307  				 pRateUpdateParams->nss, &rate);
1308  	if (ret != QDF_STATUS_SUCCESS) {
1309  		wma_err("Error, Invalid input rate value");
1310  		qdf_mem_free(pRateUpdateParams);
1311  		return ret;
1312  	}
1313  
1314  	ret = mlme_check_index_setparam(setparam, wmi_vdev_param_sgi, short_gi,
1315  					index++, MAX_VDEV_PROCESS_RATE_PARAMS);
1316  	if (QDF_IS_STATUS_ERROR(status)) {
1317  		wma_debug("failed at wmi_vdev_param_sgi");
1318  		goto error;
1319  	}
1320  
1321  	ret = mlme_check_index_setparam(setparam, paramid, rate, index++,
1322  					MAX_VDEV_PROCESS_RATE_PARAMS);
1323  	if (QDF_IS_STATUS_ERROR(status)) {
1324  		wma_debug("failed at paramid:%d", paramid);
1325  		goto error;
1326  	}
1327  
1328  	ret = wma_send_multi_pdev_vdev_set_params(MLME_VDEV_SETPARAM,
1329  						  vdev_id, setparam, index);
1330  	if (QDF_IS_STATUS_ERROR(ret))
1331  		wma_debug("failed to send vdev set params");
1332  error:
1333  	qdf_mem_free(pRateUpdateParams);
1334  	return ret;
1335  }
1336  
1337  /**
1338   * wma_mgmt_tx_ack_comp_hdlr() - handles tx ack mgmt completion
1339   * @context: context with which the handler is registered
1340   * @netbuf: tx mgmt nbuf
1341   * @status: status of tx completion
1342   *
1343   * This is callback registered with TxRx for
1344   * Ack Complete.
1345   *
1346   * Return: none
1347   */
1348  static void
wma_mgmt_tx_ack_comp_hdlr(void * wma_context,qdf_nbuf_t netbuf,int32_t status)1349  wma_mgmt_tx_ack_comp_hdlr(void *wma_context, qdf_nbuf_t netbuf, int32_t status)
1350  {
1351  	tp_wma_handle wma_handle = (tp_wma_handle) wma_context;
1352  	struct wlan_objmgr_pdev *pdev = (struct wlan_objmgr_pdev *)
1353  					wma_handle->pdev;
1354  	struct wmi_mgmt_params mgmt_params = {};
1355  	uint16_t desc_id;
1356  	uint8_t vdev_id;
1357  
1358  	desc_id = QDF_NBUF_CB_MGMT_TXRX_DESC_ID(netbuf);
1359  	vdev_id = mgmt_txrx_get_vdev_id(pdev, desc_id);
1360  
1361  	mgmt_params.vdev_id = vdev_id;
1362  	mgmt_txrx_tx_completion_handler(pdev, desc_id, status, &mgmt_params);
1363  }
1364  
1365  /**
1366   * wma_mgmt_tx_dload_comp_hldr() - handles tx mgmt completion
1367   * @context: context with which the handler is registered
1368   * @netbuf: tx mgmt nbuf
1369   * @status: status of tx completion
1370   *
1371   * This function calls registered download callback while sending mgmt packet.
1372   *
1373   * Return: none
1374   */
1375  static void
wma_mgmt_tx_dload_comp_hldr(void * wma_context,qdf_nbuf_t netbuf,int32_t status)1376  wma_mgmt_tx_dload_comp_hldr(void *wma_context, qdf_nbuf_t netbuf,
1377  			    int32_t status)
1378  {
1379  	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
1380  
1381  	tp_wma_handle wma_handle = (tp_wma_handle) wma_context;
1382  	void *mac_context = wma_handle->mac_context;
1383  
1384  	wma_debug("Tx Complete Status %d", status);
1385  
1386  	if (!wma_handle->tx_frm_download_comp_cb) {
1387  		wma_err("Tx Complete Cb not registered by umac");
1388  		return;
1389  	}
1390  
1391  	/* Call Tx Mgmt Complete Callback registered by umac */
1392  	wma_handle->tx_frm_download_comp_cb(mac_context, netbuf, 0);
1393  
1394  	/* Reset Callback */
1395  	wma_handle->tx_frm_download_comp_cb = NULL;
1396  
1397  	/* Set the Tx Mgmt Complete Event */
1398  	qdf_status = qdf_event_set(&wma_handle->tx_frm_download_comp_event);
1399  	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
1400  		wma_alert("Event Set failed - tx_frm_comp_event");
1401  }
1402  
1403  /**
1404   * wma_tx_attach() - attach tx related callbacks
1405   * @pwmaCtx: wma context
1406   *
1407   * attaches tx fn with underlying layer.
1408   *
1409   * Return: QDF status
1410   */
wma_tx_attach(tp_wma_handle wma_handle)1411  QDF_STATUS wma_tx_attach(tp_wma_handle wma_handle)
1412  {
1413  	/* Get the Vos Context */
1414  	struct cds_context *cds_handle =
1415  		(struct cds_context *) (wma_handle->cds_context);
1416  
1417  	/* Get the txRx Pdev ID */
1418  	uint8_t pdev_id = WMI_PDEV_ID_SOC;
1419  	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
1420  
1421  	/* Register for Tx Management Frames */
1422  	cdp_mgmt_tx_cb_set(soc, pdev_id, 0,
1423  			   wma_mgmt_tx_dload_comp_hldr,
1424  			   wma_mgmt_tx_ack_comp_hdlr, wma_handle);
1425  
1426  	/* Register callback to send PEER_UNMAP_RESPONSE cmd*/
1427  	if (cdp_cfg_get_peer_unmap_conf_support(soc))
1428  		cdp_peer_unmap_sync_cb_set(soc, pdev_id,
1429  					   wma_peer_unmap_conf_cb);
1430  
1431  	/* Store the Mac Context */
1432  	wma_handle->mac_context = cds_handle->mac_context;
1433  
1434  	return QDF_STATUS_SUCCESS;
1435  }
1436  
1437  /**
1438   * wma_tx_detach() - detach tx related callbacks
1439   * @tp_wma_handle: wma context
1440   *
1441   * Deregister with TxRx for Tx Mgmt Download and Ack completion.
1442   *
1443   * Return: QDF status
1444   */
wma_tx_detach(tp_wma_handle wma_handle)1445  QDF_STATUS wma_tx_detach(tp_wma_handle wma_handle)
1446  {
1447  	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
1448  
1449  	/* Get the txRx Pdev ID */
1450  	uint8_t pdev_id = WMI_PDEV_ID_SOC;
1451  
1452  	if (!soc)
1453  		return QDF_STATUS_E_FAILURE;
1454  
1455  	if (pdev_id != OL_TXRX_INVALID_PDEV_ID) {
1456  		/* Deregister with TxRx for Tx Mgmt completion call back */
1457  		cdp_mgmt_tx_cb_set(soc, pdev_id, 0, NULL, NULL, NULL);
1458  	}
1459  
1460  	/* Reset Tx Frm Callbacks */
1461  	wma_handle->tx_frm_download_comp_cb = NULL;
1462  
1463  	/* Reset Tx Data Frame Ack Cb */
1464  	wma_handle->umac_data_ota_ack_cb = NULL;
1465  
1466  	/* Reset last Tx Data Frame nbuf ptr */
1467  	wma_handle->last_umac_data_nbuf = NULL;
1468  
1469  	return QDF_STATUS_SUCCESS;
1470  }
1471  
1472  #if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || \
1473  	defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(CONFIG_HL_SUPPORT)
/**
 * wma_process_vdev_tx_pause_evt() - apply one fw pause/unpause to a vdev
 * @soc: dp soc handle
 * @wma: wma handle
 * @event: fw tx pause event
 * @vdev_id: vdev the event applies to
 *
 * Maintains the per-vdev pause bitmap: the datapath queue is paused only
 * on the first pause bit set and unpaused only when the bitmap clears.
 * TDLS off-channel chop pauses bypass the bitmap entirely.
 *
 * Return: none
 */
static void wma_process_vdev_tx_pause_evt(void *soc,
					  tp_wma_handle wma,
					  wmi_tx_pause_event_fixed_param *event,
					  uint8_t vdev_id)
{
	bool tdls_offchan =
		(event->pause_type == PAUSE_TYPE_CHOP_TDLS_OFFCHAN);

	switch (event->action) {
	case ACTION_PAUSE:
		if (tdls_offchan) {
			/* Exclude TDLS_OFFCHAN_CHOP from vdev based pauses */
			cdp_fc_vdev_pause(soc, vdev_id,
					  OL_TXQ_PAUSE_REASON_FW,
					  event->pause_type);
			break;
		}
		/*
		 * Only per-vdev pause is supported, so a queue that is
		 * already paused must not be paused again.
		 */
		if (!wma_vdev_get_pause_bitmap(vdev_id))
			cdp_fc_vdev_pause(soc, vdev_id,
					  OL_TXQ_PAUSE_REASON_FW,
					  event->pause_type);
		wma_vdev_set_pause_bit(vdev_id, event->pause_type);
		break;
	case ACTION_UNPAUSE:
		if (tdls_offchan) {
			/* Exclude TDLS_OFFCHAN_CHOP from vdev based pauses */
			cdp_fc_vdev_unpause(soc, vdev_id,
					    OL_TXQ_PAUSE_REASON_FW,
					    event->pause_type);
			break;
		}
		/* Handle unpause only if already paused; the queue itself is
		 * unpaused only once the whole bitmap has cleared.
		 */
		if (wma_vdev_get_pause_bitmap(vdev_id)) {
			wma_vdev_clear_pause_bit(vdev_id, event->pause_type);
			if (!wma->interfaces[vdev_id].pause_bitmap)
				cdp_fc_vdev_unpause(soc, vdev_id,
						    OL_TXQ_PAUSE_REASON_FW,
						    event->pause_type);
		}
		break;
	default:
		wma_err("Not Valid Action Type %d", event->action);
		break;
	}
}
1528  
/**
 * wma_mcc_vdev_tx_pause_evt_handler() - handle fw tx pause event
 * @handle: wma handle
 * @event: event buffer from firmware
 * @len: event buffer length
 *
 * Walks the vdev bitmap reported by firmware and applies the pause or
 * unpause action to every vdev whose bit is set. A no-op while WoW bus
 * suspend is in progress.
 *
 * Return: 0 on success, negative errno on bad input
 */
int wma_mcc_vdev_tx_pause_evt_handler(void *handle, uint8_t *event,
				      uint32_t len)
{
	tp_wma_handle wma = (tp_wma_handle) handle;
	WMI_TX_PAUSE_EVENTID_param_tlvs *param_buf =
		(WMI_TX_PAUSE_EVENTID_param_tlvs *)event;
	wmi_tx_pause_event_fixed_param *wmi_event;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
	A_UINT32 vdev_map;
	uint8_t vdev_id;

	if (!param_buf) {
		wma_err("Invalid roam event buffer");
		return -EINVAL;
	}

	if (ucfg_pmo_get_wow_bus_suspend(wma->psoc)) {
		wma_debug("Suspend is in progress: Pause/Unpause Tx is NoOp");
		return 0;
	}

	if (!soc)
		return -EINVAL;

	wmi_event = param_buf->fixed_param;

	/* FW encodes vdev_map = (1 << vdev_id); walk set bits back to ids */
	for (vdev_id = 0, vdev_map = wmi_event->vdev_map;
	     vdev_map != 0 && vdev_id < wma->max_bssid;
	     vdev_id++, vdev_map >>= 1) {
		if (!(vdev_map & 0x1))
			continue;

		if (!wma->interfaces[vdev_id].vdev) {
			wma_err("vdev is NULL for %d", vdev_id);
			continue;
		}

		wma_process_vdev_tx_pause_evt(soc, wma, wmi_event, vdev_id);

		wma_debug("vdev_id %d, pause_map 0x%x, pause type %d, action %d",
			  vdev_id, wma_vdev_get_pause_bitmap(vdev_id),
			  wmi_event->pause_type, wmi_event->action);
	}

	return 0;
}
1586  
1587  #endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
1588  
1589  #if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
1590  
1591  /**
1592   * wma_set_peer_rate_report_condition -
1593   *                    this function set peer rate report
1594   *                    condition info to firmware.
1595   * @handle:	Handle of WMA
1596   * @config:	Bad peer configuration from SIR module
1597   *
1598   * It is a wrapper function to sent WMI_PEER_SET_RATE_REPORT_CONDITION_CMDID
1599   * to the firmware\target. If the command sent to firmware failed, free the
1600   * buffer that allocated.
1601   *
1602   * Return: QDF_STATUS based on values sent to firmware
1603   */
1604  static
wma_set_peer_rate_report_condition(WMA_HANDLE handle,struct t_bad_peer_txtcl_config * config)1605  QDF_STATUS wma_set_peer_rate_report_condition(WMA_HANDLE handle,
1606  			struct t_bad_peer_txtcl_config *config)
1607  {
1608  	tp_wma_handle wma_handle = (tp_wma_handle)handle;
1609  	struct wmi_peer_rate_report_params rate_report_params = {0};
1610  	u_int32_t i, j;
1611  
1612  	rate_report_params.rate_report_enable = config->enable;
1613  	rate_report_params.backoff_time = config->tgt_backoff;
1614  	rate_report_params.timer_period = config->tgt_report_prd;
1615  	for (i = 0; i < WMI_PEER_RATE_REPORT_COND_MAX_NUM; i++) {
1616  		rate_report_params.report_per_phy[i].cond_flags =
1617  			config->threshold[i].cond;
1618  		rate_report_params.report_per_phy[i].delta.delta_min  =
1619  			config->threshold[i].delta;
1620  		rate_report_params.report_per_phy[i].delta.percent =
1621  			config->threshold[i].percentage;
1622  		for (j = 0; j < WMI_MAX_NUM_OF_RATE_THRESH; j++) {
1623  			rate_report_params.report_per_phy[i].
1624  				report_rate_threshold[j] =
1625  					config->threshold[i].thresh[j];
1626  		}
1627  	}
1628  
1629  	return wmi_unified_peer_rate_report_cmd(wma_handle->wmi_handle,
1630  						&rate_report_params);
1631  }
1632  
1633  /**
1634   * wma_process_init_bad_peer_tx_ctl_info -
1635   *                this function to initialize peer rate report config info.
1636   * @handle:	Handle of WMA
1637   * @config:	Bad peer configuration from SIR module
1638   *
1639   * This function initializes the bad peer tx control data structure in WMA,
1640   * sends down the initial configuration to the firmware and configures
1641   * the peer status update setting in the tx_rx module.
1642   *
1643   * Return: QDF_STATUS based on procedure status
1644   */
1645  
wma_process_init_bad_peer_tx_ctl_info(tp_wma_handle wma,struct t_bad_peer_txtcl_config * config)1646  QDF_STATUS wma_process_init_bad_peer_tx_ctl_info(tp_wma_handle wma,
1647  					struct t_bad_peer_txtcl_config *config)
1648  {
1649  	/* Parameter sanity check */
1650  	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
1651  
1652  	if (!wma || !config) {
1653  		wma_err("Invalid input");
1654  		return QDF_STATUS_E_FAILURE;
1655  	}
1656  
1657  	wma_debug("enable %d period %d txq limit %d\n",
1658  		 config->enable,
1659  		 config->period,
1660  		 config->txq_limit);
1661  
1662  	/* Only need to initialize the setting
1663  	 * when the feature is enabled
1664  	 */
1665  	if (config->enable) {
1666  		int i = 0;
1667  
1668  		cdp_bad_peer_txctl_set_setting(soc,
1669  					WMI_PDEV_ID_SOC,
1670  					config->enable,
1671  					config->period,
1672  					config->txq_limit);
1673  
1674  		for (i = 0; i < WLAN_WMA_IEEE80211_MAX_LEVEL; i++) {
1675  			u_int32_t threshold, limit;
1676  
1677  			threshold = config->threshold[i].thresh[0];
1678  			limit =	config->threshold[i].txlimit;
1679  			cdp_bad_peer_txctl_update_threshold(soc,
1680  						WMI_PDEV_ID_SOC,
1681  						i,
1682  						threshold,
1683  						limit);
1684  		}
1685  	}
1686  
1687  	return wma_set_peer_rate_report_condition(wma, config);
1688  }
1689  #endif /* defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL) */
1690  
1691  #ifdef FW_THERMAL_THROTTLE_SUPPORT
1692  /**
1693   * wma_update_thermal_mitigation_to_fw - update thermal mitigation to fw
1694   * @wma: wma handle
1695   * @thermal_level: thermal level
1696   *
1697   * This function sends down thermal mitigation params to the fw
1698   *
1699   * Returns: QDF_STATUS_SUCCESS for success otherwise failure
1700   */
wma_update_thermal_mitigation_to_fw(tp_wma_handle wma,u_int8_t thermal_level)1701  static QDF_STATUS wma_update_thermal_mitigation_to_fw(tp_wma_handle wma,
1702  						      u_int8_t thermal_level)
1703  {
1704  	struct thermal_mitigation_params therm_data = {0};
1705  
1706  	/* Check if vdev is in mcc, if in mcc set dc value as 10, else 100 */
1707  	therm_data.dc = 100;
1708  	therm_data.enable = 1;
1709  	therm_data.levelconf[0].dcoffpercent =
1710  		wma->thermal_mgmt_info.throttle_duty_cycle_tbl[thermal_level];
1711  	therm_data.levelconf[0].priority = 0;
1712  	therm_data.num_thermal_conf = 1;
1713  
1714  	return wmi_unified_thermal_mitigation_param_cmd_send(wma->wmi_handle,
1715  							     &therm_data);
1716  }
1717  #else /* FW_THERMAL_THROTTLE_SUPPORT */
/**
 * wma_update_thermal_mitigation_to_fw - update thermal mitigation to fw
 * @wma: wma handle (unused in this build)
 * @thermal_level: thermal level (unused in this build)
 *
 * Stub used when FW_THERMAL_THROTTLE_SUPPORT is not defined: reports
 * success without sending anything to firmware.
 *
 * Returns: QDF_STATUS_SUCCESS
 */
static QDF_STATUS wma_update_thermal_mitigation_to_fw(tp_wma_handle wma,
						      u_int8_t thermal_level)
{
	return QDF_STATUS_SUCCESS;
}
1732  #endif
1733  
1734  /**
1735   * wma_update_thermal_cfg_to_fw() - update thermal configuration to FW
1736   * @wma: Pointer to WMA handle
1737   *
1738   * This function update thermal configuration to FW
1739   *
1740   * Returns: QDF_STATUS_SUCCESS for success otherwise failure
1741   */
wma_update_thermal_cfg_to_fw(tp_wma_handle wma)1742  static QDF_STATUS wma_update_thermal_cfg_to_fw(tp_wma_handle wma)
1743  {
1744  	t_thermal_cmd_params thermal_params = {0};
1745  
1746  	/* Get the temperature thresholds to set in firmware */
1747  	thermal_params.minTemp =
1748  		wma->thermal_mgmt_info.thermalLevels[WLAN_WMA_THERMAL_LEVEL_0].
1749  		minTempThreshold;
1750  	thermal_params.maxTemp =
1751  		wma->thermal_mgmt_info.thermalLevels[WLAN_WMA_THERMAL_LEVEL_0].
1752  		maxTempThreshold;
1753  	thermal_params.thermalEnable =
1754  		wma->thermal_mgmt_info.thermalMgmtEnabled;
1755  	thermal_params.thermal_action = wma->thermal_mgmt_info.thermal_action;
1756  
1757  	wma_debug("TM sending to fw: min_temp %d max_temp %d enable %d act %d",
1758  		  thermal_params.minTemp, thermal_params.maxTemp,
1759  		  thermal_params.thermalEnable, thermal_params.thermal_action);
1760  
1761  	return wma_set_thermal_mgmt(wma, thermal_params);
1762  }
1763  
1764  /**
1765   * wma_process_init_thermal_info() - initialize thermal info
1766   * @wma: Pointer to WMA handle
1767   * @pThermalParams: Pointer to thermal mitigation parameters
1768   *
1769   * This function initializes the thermal management table in WMA,
1770   * sends down the initial temperature thresholds to the firmware
1771   * and configures the throttle period in the tx rx module
1772   *
1773   * Returns: QDF_STATUS_SUCCESS for success otherwise failure
1774   */
wma_process_init_thermal_info(tp_wma_handle wma,t_thermal_mgmt * pThermalParams)1775  QDF_STATUS wma_process_init_thermal_info(tp_wma_handle wma,
1776  					 t_thermal_mgmt *pThermalParams)
1777  {
1778  	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
1779  #ifdef FW_THERMAL_THROTTLE_SUPPORT
1780  	int i = 0;
1781  #endif /* FW_THERMAL_THROTTLE_SUPPORT */
1782  
1783  	if (!wma || !pThermalParams) {
1784  		wma_err("TM Invalid input");
1785  		return QDF_STATUS_E_FAILURE;
1786  	}
1787  
1788  	wma_debug("TM enable %d period %d action %d",
1789  		  pThermalParams->thermalMgmtEnabled,
1790  		  pThermalParams->throttlePeriod,
1791  		  pThermalParams->thermal_action);
1792  
1793  	wma_nofl_debug("Throttle Duty Cycle Level in percentage:\n"
1794  		 "0 %d\n"
1795  		 "1 %d\n"
1796  		 "2 %d\n"
1797  		 "3 %d\n"
1798  		 "4 %d\n"
1799  		 "5 %d",
1800  		 pThermalParams->throttle_duty_cycle_tbl[0],
1801  		 pThermalParams->throttle_duty_cycle_tbl[1],
1802  		 pThermalParams->throttle_duty_cycle_tbl[2],
1803  		 pThermalParams->throttle_duty_cycle_tbl[3],
1804  		 pThermalParams->throttle_duty_cycle_tbl[4],
1805  		 pThermalParams->throttle_duty_cycle_tbl[5]);
1806  
1807  	wma->thermal_mgmt_info.thermalMgmtEnabled =
1808  		pThermalParams->thermalMgmtEnabled;
1809  	wma->thermal_mgmt_info.thermalLevels[0].minTempThreshold =
1810  		pThermalParams->thermalLevels[0].minTempThreshold;
1811  	wma->thermal_mgmt_info.thermalLevels[0].maxTempThreshold =
1812  		pThermalParams->thermalLevels[0].maxTempThreshold;
1813  	wma->thermal_mgmt_info.thermalLevels[1].minTempThreshold =
1814  		pThermalParams->thermalLevels[1].minTempThreshold;
1815  	wma->thermal_mgmt_info.thermalLevels[1].maxTempThreshold =
1816  		pThermalParams->thermalLevels[1].maxTempThreshold;
1817  	wma->thermal_mgmt_info.thermalLevels[2].minTempThreshold =
1818  		pThermalParams->thermalLevels[2].minTempThreshold;
1819  	wma->thermal_mgmt_info.thermalLevels[2].maxTempThreshold =
1820  		pThermalParams->thermalLevels[2].maxTempThreshold;
1821  	wma->thermal_mgmt_info.thermalLevels[3].minTempThreshold =
1822  		pThermalParams->thermalLevels[3].minTempThreshold;
1823  	wma->thermal_mgmt_info.thermalLevels[3].maxTempThreshold =
1824  		pThermalParams->thermalLevels[3].maxTempThreshold;
1825  	wma->thermal_mgmt_info.thermalLevels[4].minTempThreshold =
1826  		pThermalParams->thermalLevels[4].minTempThreshold;
1827  	wma->thermal_mgmt_info.thermalLevels[4].maxTempThreshold =
1828  		pThermalParams->thermalLevels[4].maxTempThreshold;
1829  	wma->thermal_mgmt_info.thermalLevels[5].minTempThreshold =
1830  		pThermalParams->thermalLevels[5].minTempThreshold;
1831  	wma->thermal_mgmt_info.thermalLevels[5].maxTempThreshold =
1832  		pThermalParams->thermalLevels[5].maxTempThreshold;
1833  	wma->thermal_mgmt_info.thermalCurrLevel = WLAN_WMA_THERMAL_LEVEL_0;
1834  	wma->thermal_mgmt_info.thermal_action = pThermalParams->thermal_action;
1835  	wma_nofl_debug("TM level min max:\n"
1836  		 "0 %d   %d\n"
1837  		 "1 %d   %d\n"
1838  		 "2 %d   %d\n"
1839  		 "3 %d   %d\n"
1840  		 "4 %d   %d\n"
1841  		 "5 %d   %d",
1842  		 wma->thermal_mgmt_info.thermalLevels[0].minTempThreshold,
1843  		 wma->thermal_mgmt_info.thermalLevels[0].maxTempThreshold,
1844  		 wma->thermal_mgmt_info.thermalLevels[1].minTempThreshold,
1845  		 wma->thermal_mgmt_info.thermalLevels[1].maxTempThreshold,
1846  		 wma->thermal_mgmt_info.thermalLevels[2].minTempThreshold,
1847  		 wma->thermal_mgmt_info.thermalLevels[2].maxTempThreshold,
1848  		 wma->thermal_mgmt_info.thermalLevels[3].minTempThreshold,
1849  		 wma->thermal_mgmt_info.thermalLevels[3].maxTempThreshold,
1850  		 wma->thermal_mgmt_info.thermalLevels[4].minTempThreshold,
1851  		 wma->thermal_mgmt_info.thermalLevels[4].maxTempThreshold,
1852  		 wma->thermal_mgmt_info.thermalLevels[5].minTempThreshold,
1853  		 wma->thermal_mgmt_info.thermalLevels[5].maxTempThreshold);
1854  
1855  #ifdef FW_THERMAL_THROTTLE_SUPPORT
1856  	for (i = 0; i < THROTTLE_LEVEL_MAX; i++)
1857  		wma->thermal_mgmt_info.throttle_duty_cycle_tbl[i] =
1858  				pThermalParams->throttle_duty_cycle_tbl[i];
1859  #endif /* FW_THERMAL_THROTTLE_SUPPORT */
1860  
1861  	if (wma->thermal_mgmt_info.thermalMgmtEnabled) {
1862  		if (!wma->fw_therm_throt_support) {
1863  			cdp_throttle_init_period(
1864  				cds_get_context(QDF_MODULE_ID_SOC),
1865  				WMI_PDEV_ID_SOC, pThermalParams->throttlePeriod,
1866  				&pThermalParams->throttle_duty_cycle_tbl[0]);
1867  		} else {
1868  			qdf_status = wma_update_thermal_mitigation_to_fw(
1869  					wma, WLAN_WMA_THERMAL_LEVEL_0);
1870  			if (QDF_STATUS_SUCCESS != qdf_status)
1871  				return qdf_status;
1872  		}
1873  		qdf_status = wma_update_thermal_cfg_to_fw(wma);
1874  	}
1875  	return qdf_status;
1876  }
1877  
1878  /**
1879   * wma_set_thermal_level_ind() - send SME set thermal level indication message
1880   * @level:  thermal level
1881   *
1882   * Send SME SET_THERMAL_LEVEL_IND message
1883   *
1884   * Returns: none
1885   */
static void wma_set_thermal_level_ind(u_int8_t level)
{
	struct scheduler_msg msg = {0};
	QDF_STATUS status;

	wma_info("Thermal level: %d", level);

	/* Level travels in bodyval; no payload buffer is attached */
	msg.type = eWNI_SME_SET_THERMAL_LEVEL_IND;
	msg.bodyptr = NULL;
	msg.bodyval = level;

	status = scheduler_post_message(QDF_MODULE_ID_WMA,
					QDF_MODULE_ID_SME,
					QDF_MODULE_ID_SME, &msg);
	if (QDF_IS_STATUS_ERROR(status))
		wma_err("Fail to post set thermal level ind msg");
}
1903  
1904  /**
1905   * wma_process_set_thermal_level() - sets thermal level
1906   * @wma: Pointer to WMA handle
1907   * @thermal_level : Thermal level
1908   *
1909   * This function sets the new thermal throttle level in the
1910   * txrx module and sends down the corresponding temperature
1911   * thresholds to the firmware
1912   *
1913   * Returns: QDF_STATUS_SUCCESS for success otherwise failure
1914   */
wma_process_set_thermal_level(tp_wma_handle wma,uint8_t thermal_level)1915  QDF_STATUS wma_process_set_thermal_level(tp_wma_handle wma,
1916  					 uint8_t thermal_level)
1917  {
1918  	if (!wma) {
1919  		wma_err("TM Invalid input");
1920  		return QDF_STATUS_E_FAILURE;
1921  	}
1922  
1923  	wma_debug("TM set level %d", thermal_level);
1924  
1925  	/* Check if thermal mitigation is enabled */
1926  	if (!wma->thermal_mgmt_info.thermalMgmtEnabled) {
1927  		wma_err("Thermal mgmt is not enabled, ignoring set level command");
1928  		return QDF_STATUS_E_FAILURE;
1929  	}
1930  
1931  	if (thermal_level >= WLAN_WMA_MAX_THERMAL_LEVELS) {
1932  		wma_err("Invalid thermal level set %d", thermal_level);
1933  		return QDF_STATUS_E_FAILURE;
1934  	}
1935  
1936  	if (thermal_level == wma->thermal_mgmt_info.thermalCurrLevel) {
1937  		wma_debug("Current level %d is same as the set level, ignoring",
1938  			 wma->thermal_mgmt_info.thermalCurrLevel);
1939  		return QDF_STATUS_SUCCESS;
1940  	}
1941  
1942  	wma->thermal_mgmt_info.thermalCurrLevel = thermal_level;
1943  
1944  	cdp_throttle_set_level(cds_get_context(QDF_MODULE_ID_SOC),
1945  			       WMI_PDEV_ID_SOC, thermal_level);
1946  
1947  	/* Send SME SET_THERMAL_LEVEL_IND message */
1948  	wma_set_thermal_level_ind(thermal_level);
1949  
1950  	return QDF_STATUS_SUCCESS;
1951  }
1952  
1953  
1954  /**
1955   * wma_set_thermal_mgmt() - set thermal mgmt command to fw
1956   * @wma_handle: Pointer to WMA handle
1957   * @thermal_info: Thermal command information
1958   *
1959   * This function sends the thermal management command
1960   * to the firmware
1961   *
1962   * Return: QDF_STATUS_SUCCESS for success otherwise failure
1963   */
wma_set_thermal_mgmt(tp_wma_handle wma_handle,t_thermal_cmd_params thermal_info)1964  QDF_STATUS wma_set_thermal_mgmt(tp_wma_handle wma_handle,
1965  				t_thermal_cmd_params thermal_info)
1966  {
1967  	struct thermal_cmd_params mgmt_thermal_info = {0};
1968  
1969  	if (!wma_handle) {
1970  		wma_err("Invalid input");
1971  		QDF_ASSERT(0);
1972  		return QDF_STATUS_E_FAILURE;
1973  	}
1974  
1975  	mgmt_thermal_info.min_temp = thermal_info.minTemp;
1976  	mgmt_thermal_info.max_temp = thermal_info.maxTemp;
1977  	mgmt_thermal_info.thermal_enable = thermal_info.thermalEnable;
1978  	mgmt_thermal_info.thermal_action = thermal_info.thermal_action;
1979  
1980  	return wmi_unified_set_thermal_mgmt_cmd(wma_handle->wmi_handle,
1981  						&mgmt_thermal_info);
1982  }
1983  
1984  /**
1985   * wma_thermal_mgmt_get_level() - returns throttle level
1986   * @handle: Pointer to WMA handle
1987   * @temp: temperature
1988   *
1989   * This function returns the thermal(throttle) level
1990   * given the temperature
1991   *
1992   * Return: thermal (throttle) level
1993   */
wma_thermal_mgmt_get_level(void * handle,uint32_t temp)1994  static uint8_t wma_thermal_mgmt_get_level(void *handle, uint32_t temp)
1995  {
1996  	tp_wma_handle wma = (tp_wma_handle) handle;
1997  	int i;
1998  	uint8_t level;
1999  
2000  	level = i = wma->thermal_mgmt_info.thermalCurrLevel;
2001  	while (temp < wma->thermal_mgmt_info.thermalLevels[i].minTempThreshold
2002  	       && i > 0) {
2003  		i--;
2004  		level = i;
2005  	}
2006  
2007  	i = wma->thermal_mgmt_info.thermalCurrLevel;
2008  	while (temp > wma->thermal_mgmt_info.thermalLevels[i].maxTempThreshold
2009  	       && i < (WLAN_WMA_MAX_THERMAL_LEVELS - 1)) {
2010  		i++;
2011  		level = i;
2012  	}
2013  
2014  	wma_warn("Change thermal level from %d -> %d",
2015  		 wma->thermal_mgmt_info.thermalCurrLevel, level);
2016  
2017  	return level;
2018  }
2019  
2020  /**
 * wma_thermal_level_to_host() - Convert wma thermal level to host enum
2022   * @level: current thermal throttle level
2023   *
2024   * Return: host thermal throttle level
2025   */
2026  static enum thermal_throttle_level
wma_thermal_level_to_host(uint8_t level)2027  wma_thermal_level_to_host(uint8_t level)
2028  {
2029  	switch (level) {
2030  	case WLAN_WMA_THERMAL_LEVEL_0:
2031  		return THERMAL_FULLPERF;
2032  	case WLAN_WMA_THERMAL_LEVEL_1:
2033  	case WLAN_WMA_THERMAL_LEVEL_2:
2034  	case WLAN_WMA_THERMAL_LEVEL_3:
2035  		return THERMAL_MITIGATION;
2036  	case WLAN_WMA_THERMAL_LEVEL_4:
2037  		return THERMAL_SHUTOFF;
2038  	case WLAN_WMA_THERMAL_LEVEL_5:
2039  		return THERMAL_SHUTDOWN_TARGET;
2040  	default:
2041  		return THERMAL_UNKNOWN;
2042  	}
2043  }
2044  
2045  /**
2046   * wma_thermal_mgmt_evt_handler() - thermal mgmt event handler
 * @handle: Pointer to WMA handle
2048   * @event: Thermal event information
2049   * @len: length of the event
2050   *
2051   * This function handles the thermal mgmt event from the firmware
2052   *
2053   * Return: 0 for success otherwise failure
2054   */
int wma_thermal_mgmt_evt_handler(void *handle, uint8_t *event, uint32_t len)
{
	tp_wma_handle wma;
	wmi_thermal_mgmt_event_fixed_param *tm_event;
	uint8_t thermal_level;
	t_thermal_cmd_params thermal_params = {0};
	WMI_THERMAL_MGMT_EVENTID_param_tlvs *param_buf;
	struct wlan_objmgr_psoc *psoc;
	struct thermal_throttle_info info = {0};

	if (!event || !handle) {
		wma_err("Invalid thermal mitigation event buffer");
		return -EINVAL;
	}

	wma = (tp_wma_handle) handle;

	if (wma_validate_handle(wma))
		return -EINVAL;

	psoc = wma->psoc;
	if (!psoc) {
		wma_err("NULL psoc");
		return -EINVAL;
	}

	param_buf = (WMI_THERMAL_MGMT_EVENTID_param_tlvs *) event;

	/* Check if thermal mitigation is enabled */
	if (!wma->thermal_mgmt_info.thermalMgmtEnabled) {
		wma_err("Thermal mgmt is not enabled, ignoring event");
		return -EINVAL;
	}

	tm_event = param_buf->fixed_param;
	wma_debug("Thermal mgmt event received with temperature %d",
		 tm_event->temperature_degreeC);

	/* Get the thermal mitigation level for the reported temperature */
	thermal_level = wma_thermal_mgmt_get_level(handle,
					tm_event->temperature_degreeC);
	wma_debug("Thermal mgmt level  %d", thermal_level);

	if (thermal_level == wma->thermal_mgmt_info.thermalCurrLevel) {
		wma_debug("Current level %d is same as the set level, ignoring",
			 wma->thermal_mgmt_info.thermalCurrLevel);
		return 0;
	}

	wma->thermal_mgmt_info.thermalCurrLevel = thermal_level;
	info.level = wma_thermal_level_to_host(thermal_level);
	target_if_fwol_notify_thermal_throttle(psoc, &info);

	if (!wma->fw_therm_throt_support) {
		/* Inform txrx */
		cdp_throttle_set_level(cds_get_context(QDF_MODULE_ID_SOC),
				       WMI_PDEV_ID_SOC, thermal_level);
	}

	/* Send SME SET_THERMAL_LEVEL_IND message */
	wma_set_thermal_level_ind(thermal_level);

	if (wma->fw_therm_throt_support) {
		/* Send duty cycle info to firmware for fw to throttle.
		 * Fix: this handler's error convention is 0/-errno; the
		 * previous code leaked a QDF_STATUS value (positive enum)
		 * to the WMI event dispatcher here.
		 */
		if (QDF_STATUS_SUCCESS !=
			wma_update_thermal_mitigation_to_fw(wma, thermal_level))
			return -EINVAL;
	}

	/* Get the temperature thresholds to set in firmware */
	thermal_params.minTemp =
		wma->thermal_mgmt_info.thermalLevels[thermal_level].
		minTempThreshold;
	thermal_params.maxTemp =
		wma->thermal_mgmt_info.thermalLevels[thermal_level].
		maxTempThreshold;
	thermal_params.thermalEnable =
		wma->thermal_mgmt_info.thermalMgmtEnabled;
	thermal_params.thermal_action = wma->thermal_mgmt_info.thermal_action;

	if (QDF_STATUS_SUCCESS != wma_set_thermal_mgmt(wma, thermal_params)) {
		wma_err("Could not send thermal mgmt command to the firmware!");
		return -EINVAL;
	}

	return 0;
}
2142  
2143  /**
2144   * wma_decap_to_8023() - Decapsulate to 802.3 format
2145   * @msdu: skb buffer
2146   * @info: decapsulate info
2147   *
2148   * Return: none
2149   */
static void wma_decap_to_8023(qdf_nbuf_t msdu, struct wma_decap_info_t *info)
{
	struct llc_snap_hdr_t *llc_hdr;
	uint16_t ether_type;
	uint16_t l2_hdr_space;
	struct ieee80211_qosframe_addr4 *wh;
	uint8_t local_buf[ETHERNET_HDR_LEN];
	uint8_t *buf;
	struct ethernet_hdr_t *ethr_hdr;

	buf = (uint8_t *) qdf_nbuf_data(msdu);
	llc_hdr = (struct llc_snap_hdr_t *)buf;
	ether_type = (llc_hdr->ethertype[0] << 8) | llc_hdr->ethertype[1];
	/* do llc remove if needed */
	l2_hdr_space = 0;
	if (IS_SNAP(llc_hdr)) {
		if (IS_BTEP(llc_hdr)) {
			/* remove llc */
			l2_hdr_space += sizeof(struct llc_snap_hdr_t);
			llc_hdr = NULL;
		} else if (IS_RFC1042(llc_hdr)) {
			if (!(ether_type == ETHERTYPE_AARP ||
			      ether_type == ETHERTYPE_IPX)) {
				/* remove llc */
				l2_hdr_space += sizeof(struct llc_snap_hdr_t);
				llc_hdr = NULL;
			}
		}
	}
	/* Adjust the buffer head so exactly ETHERNET_HDR_LEN bytes of
	 * headroom remain for the 802.3 header written at the end.
	 */
	if (l2_hdr_space > ETHERNET_HDR_LEN)
		buf = qdf_nbuf_pull_head(msdu, l2_hdr_space - ETHERNET_HDR_LEN);
	else if (l2_hdr_space < ETHERNET_HDR_LEN)
		buf = qdf_nbuf_push_head(msdu, ETHERNET_HDR_LEN - l2_hdr_space);

	/* mpdu hdr should be present in info,re-create ethr_hdr based on
	 * mpdu hdr
	 */
	wh = (struct ieee80211_qosframe_addr4 *)info->hdr;
	ethr_hdr = (struct ethernet_hdr_t *)local_buf;
	switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
	case IEEE80211_FC1_DIR_NODS:
		qdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr1,
			     QDF_MAC_ADDR_SIZE);
		qdf_mem_copy(ethr_hdr->src_addr, wh->i_addr2,
			     QDF_MAC_ADDR_SIZE);
		break;
	case IEEE80211_FC1_DIR_TODS:
		qdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr3,
			     QDF_MAC_ADDR_SIZE);
		qdf_mem_copy(ethr_hdr->src_addr, wh->i_addr2,
			     QDF_MAC_ADDR_SIZE);
		break;
	case IEEE80211_FC1_DIR_FROMDS:
		qdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr1,
			     QDF_MAC_ADDR_SIZE);
		qdf_mem_copy(ethr_hdr->src_addr, wh->i_addr3,
			     QDF_MAC_ADDR_SIZE);
		break;
	case IEEE80211_FC1_DIR_DSTODS:
		qdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr3,
			     QDF_MAC_ADDR_SIZE);
		qdf_mem_copy(ethr_hdr->src_addr, wh->i_addr4,
			     QDF_MAC_ADDR_SIZE);
		break;
	}

	if (!llc_hdr) {
		/* LLC was stripped: carry the SNAP ethertype forward */
		ethr_hdr->ethertype[0] = (ether_type >> 8) & 0xff;
		ethr_hdr->ethertype[1] = (ether_type) & 0xff;
	} else {
		/* LLC retained: 802.3 length field = payload length.
		 * (Removed a dead pktlen computation that was immediately
		 * overwritten by this assignment.)
		 */
		ether_type = qdf_nbuf_len(msdu) - sizeof(struct ethernet_hdr_t);
		ethr_hdr->ethertype[0] = (ether_type >> 8) & 0xff;
		ethr_hdr->ethertype[1] = (ether_type) & 0xff;
	}
	qdf_mem_copy(buf, ethr_hdr, ETHERNET_HDR_LEN);
}
2229  
2230  /**
2231   * wma_ieee80211_hdrsize() - get 802.11 header size
2232   * @data: 80211 frame
2233   *
2234   * Return: size of header
2235   */
wma_ieee80211_hdrsize(const void * data)2236  static int32_t wma_ieee80211_hdrsize(const void *data)
2237  {
2238  	const struct ieee80211_frame *wh = (const struct ieee80211_frame *)data;
2239  	int32_t size = sizeof(struct ieee80211_frame);
2240  
2241  	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
2242  		size += QDF_MAC_ADDR_SIZE;
2243  	if (IEEE80211_QOS_HAS_SEQ(wh))
2244  		size += sizeof(uint16_t);
2245  	return size;
2246  }
2247  
2248  /**
2249   * rate_pream: Mapping from data rates to preamble.
2250   */
/* Indexed by enum rateid (see wma_update_tx_send_params): entries 0-3 are
 * the CCK rates, entries 4-11 the OFDM rates.
 */
static uint32_t rate_pream[] = {WMI_RATE_PREAMBLE_CCK, WMI_RATE_PREAMBLE_CCK,
				WMI_RATE_PREAMBLE_CCK, WMI_RATE_PREAMBLE_CCK,
				WMI_RATE_PREAMBLE_OFDM, WMI_RATE_PREAMBLE_OFDM,
				WMI_RATE_PREAMBLE_OFDM, WMI_RATE_PREAMBLE_OFDM,
				WMI_RATE_PREAMBLE_OFDM, WMI_RATE_PREAMBLE_OFDM,
				WMI_RATE_PREAMBLE_OFDM, WMI_RATE_PREAMBLE_OFDM};
2257  
2258  /**
2259   * rate_mcs: Mapping from data rates to MCS (+4 for OFDM to keep the sequence).
2260   */
/* Indexed by enum rateid, parallel to rate_pream[]; OFDM entries are offset
 * by +4 so the MCS indices form one continuous sequence after the CCK rates.
 */
static uint32_t rate_mcs[] = {WMI_MAX_CCK_TX_RATE_1M, WMI_MAX_CCK_TX_RATE_2M,
			      WMI_MAX_CCK_TX_RATE_5_5M, WMI_MAX_CCK_TX_RATE_11M,
			      WMI_MAX_OFDM_TX_RATE_6M + 4,
			      WMI_MAX_OFDM_TX_RATE_9M + 4,
			      WMI_MAX_OFDM_TX_RATE_12M + 4,
			      WMI_MAX_OFDM_TX_RATE_18M + 4,
			      WMI_MAX_OFDM_TX_RATE_24M + 4,
			      WMI_MAX_OFDM_TX_RATE_36M + 4,
			      WMI_MAX_OFDM_TX_RATE_48M + 4,
			      WMI_MAX_OFDM_TX_RATE_54M + 4};
2271  
2272  #define WMA_TX_SEND_MGMT_TYPE 0
2273  #define WMA_TX_SEND_DATA_TYPE 1
2274  
2275  /**
2276   * wma_update_tx_send_params() - Update tx_send_params TLV info
2277   * @tx_param: Pointer to tx_send_params
2278   * @rid: rate ID passed by PE
2279   *
2280   * Return: None
2281   */
wma_update_tx_send_params(struct tx_send_params * tx_param,enum rateid rid)2282  static void wma_update_tx_send_params(struct tx_send_params *tx_param,
2283  				      enum rateid rid)
2284  {
2285  	uint8_t  preamble = 0, nss = 0, rix = 0;
2286  
2287  	preamble = rate_pream[rid];
2288  	rix = rate_mcs[rid];
2289  
2290  	tx_param->mcs_mask = (1 << rix);
2291  	tx_param->nss_mask = (1 << nss);
2292  	tx_param->preamble_type = (1 << preamble);
2293  	tx_param->frame_type = WMA_TX_SEND_MGMT_TYPE;
2294  
2295  	wma_debug("rate_id: %d, mcs: %0x, nss: %0x, preamble: %0x",
2296  		 rid, tx_param->mcs_mask, tx_param->nss_mask,
2297  		 tx_param->preamble_type);
2298  }
2299  
wma_tx_packet(void * wma_context,void * tx_frame,uint16_t frmLen,eFrameType frmType,eFrameTxDir txDir,uint8_t tid,wma_tx_dwnld_comp_callback tx_frm_download_comp_cb,void * pData,wma_tx_ota_comp_callback tx_frm_ota_comp_cb,uint8_t tx_flag,uint8_t vdev_id,bool tdls_flag,uint16_t channel_freq,enum rateid rid,int8_t peer_rssi,uint16_t action)2300  QDF_STATUS wma_tx_packet(void *wma_context, void *tx_frame, uint16_t frmLen,
2301  			 eFrameType frmType, eFrameTxDir txDir, uint8_t tid,
2302  			 wma_tx_dwnld_comp_callback tx_frm_download_comp_cb,
2303  			 void *pData,
2304  			 wma_tx_ota_comp_callback tx_frm_ota_comp_cb,
2305  			 uint8_t tx_flag, uint8_t vdev_id, bool tdls_flag,
2306  			 uint16_t channel_freq, enum rateid rid,
2307  			 int8_t peer_rssi, uint16_t action)
2308  {
2309  	tp_wma_handle wma_handle = (tp_wma_handle) (wma_context);
2310  	int32_t status;
2311  	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
2312  	int32_t is_high_latency;
2313  	bool is_wmi_mgmt_tx = false;
2314  	enum frame_index tx_frm_index = GENERIC_NODOWNLD_NOACK_COMP_INDEX;
2315  	tpSirMacFrameCtl pFc = (tpSirMacFrameCtl) (qdf_nbuf_data(tx_frame));
2316  	uint8_t use_6mbps = 0;
2317  	uint8_t downld_comp_required = 0;
2318  	uint16_t chanfreq;
2319  	uint8_t *pFrame = NULL;
2320  	void *pPacket = NULL;
2321  	uint16_t newFrmLen = 0;
2322  	struct wma_txrx_node *iface;
2323  	struct mac_context *mac;
2324  	tpSirMacMgmtHdr mHdr;
2325  	struct wmi_mgmt_params mgmt_param = {0};
2326  	struct cdp_cfg *ctrl_pdev;
2327  	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
2328  	struct ieee80211_frame *wh;
2329  	struct wlan_objmgr_peer *peer = NULL;
2330  	struct wlan_objmgr_psoc *psoc;
2331  	struct wlan_objmgr_vdev *vdev = NULL;
2332  	void *mac_addr;
2333  	uint8_t *mld_addr = NULL;
2334  	bool is_5g = false;
2335  	uint8_t pdev_id;
2336  	bool mlo_link_agnostic;
2337  
2338  	if (wma_validate_handle(wma_handle)) {
2339  		cds_packet_free((void *)tx_frame);
2340  		return QDF_STATUS_E_FAILURE;
2341  	}
2342  
2343  	if (vdev_id >= wma_handle->max_bssid) {
2344  		wma_err("tx packet with invalid vdev_id :%d", vdev_id);
2345  		return QDF_STATUS_E_FAILURE;
2346  	}
2347  
2348  	iface = &wma_handle->interfaces[vdev_id];
2349  
2350  	if (!soc) {
2351  		cds_packet_free((void *)tx_frame);
2352  		return QDF_STATUS_E_FAILURE;
2353  	}
2354  
2355  	cdp_hl_tdls_flag_reset(soc, vdev_id, false);
2356  
2357  	if (frmType >= TXRX_FRM_MAX) {
2358  		wma_err("Invalid Frame Type Fail to send Frame");
2359  		cds_packet_free((void *)tx_frame);
2360  		return QDF_STATUS_E_FAILURE;
2361  	}
2362  
2363  	mac = cds_get_context(QDF_MODULE_ID_PE);
2364  	if (!mac) {
2365  		cds_packet_free((void *)tx_frame);
2366  		return QDF_STATUS_E_FAILURE;
2367  	}
2368  	/*
2369  	 * Currently only support to
2370  	 * send 80211 Mgmt and 80211 Data are added.
2371  	 */
2372  	if (!((frmType == TXRX_FRM_802_11_MGMT) ||
2373  	      (frmType == TXRX_FRM_802_11_DATA))) {
2374  		wma_err("No Support to send other frames except 802.11 Mgmt/Data");
2375  		cds_packet_free((void *)tx_frame);
2376  		return QDF_STATUS_E_FAILURE;
2377  	}
2378  
2379  	if (((iface->rmfEnabled || tx_flag & HAL_USE_PMF)) &&
2380  	    (frmType == TXRX_FRM_802_11_MGMT) &&
2381  	    (pFc->subType == SIR_MAC_MGMT_DISASSOC ||
2382  	     pFc->subType == SIR_MAC_MGMT_DEAUTH ||
2383  	     pFc->subType == SIR_MAC_MGMT_ACTION)) {
2384  		struct ieee80211_frame *wh =
2385  			(struct ieee80211_frame *)qdf_nbuf_data(tx_frame);
2386  		if (!QDF_IS_ADDR_BROADCAST(wh->i_addr1) &&
2387  		    !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2388  			if (pFc->wep) {
2389  				uint8_t mic_len, hdr_len, pdev_id;
2390  
2391  				/* Allocate extra bytes for privacy header and
2392  				 * trailer
2393  				 */
2394  				if (iface->type == WMI_VDEV_TYPE_NDI &&
2395  				    (tx_flag & HAL_USE_PMF)) {
2396  					hdr_len = IEEE80211_CCMP_HEADERLEN;
2397  					mic_len = IEEE80211_CCMP_MICLEN;
2398  				} else {
2399  					pdev_id = wlan_objmgr_pdev_get_pdev_id(
2400  							wma_handle->pdev);
2401  					qdf_status = mlme_get_peer_mic_len(
2402  							wma_handle->psoc,
2403  							pdev_id, wh->i_addr1,
2404  							&mic_len, &hdr_len);
2405  
2406  					if (QDF_IS_STATUS_ERROR(qdf_status)) {
2407  						cds_packet_free(
2408  							(void *)tx_frame);
2409  						goto error;
2410  					}
2411  				}
2412  
2413  				newFrmLen = frmLen + hdr_len + mic_len;
2414  				qdf_status =
2415  					cds_packet_alloc((uint16_t) newFrmLen,
2416  							 (void **)&pFrame,
2417  							 (void **)&pPacket);
2418  
2419  				if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
2420  					wma_alert("Failed to allocate %d bytes for RMF status code (%x)",
2421  						newFrmLen,
2422  						qdf_status);
2423  					/* Free the original packet memory */
2424  					cds_packet_free((void *)tx_frame);
2425  					goto error;
2426  				}
2427  
2428  				/*
2429  				 * Initialize the frame with 0's and only fill
2430  				 * MAC header and data, Keep the CCMP header and
2431  				 * trailer as 0's, firmware shall fill this
2432  				 */
2433  				qdf_mem_zero(pFrame, newFrmLen);
2434  				qdf_mem_copy(pFrame, wh, sizeof(*wh));
2435  				qdf_mem_copy(pFrame + sizeof(*wh) +
2436  					     hdr_len,
2437  					     pData + sizeof(*wh),
2438  					     frmLen - sizeof(*wh));
2439  
2440  				cds_packet_free((void *)tx_frame);
2441  				tx_frame = pPacket;
2442  				pData = pFrame;
2443  				frmLen = newFrmLen;
2444  				pFc = (tpSirMacFrameCtl)
2445  						(qdf_nbuf_data(tx_frame));
2446  			}
2447  		} else {
2448  			uint16_t mmie_size;
2449  			int32_t mgmtcipherset;
2450  
2451  			mgmtcipherset = wlan_crypto_get_param(iface->vdev,
2452  						WLAN_CRYPTO_PARAM_MGMT_CIPHER);
2453  			if (mgmtcipherset <= 0) {
2454  				wma_err("Invalid key cipher %d", mgmtcipherset);
2455  				cds_packet_free((void *)tx_frame);
2456  				return -EINVAL;
2457  			}
2458  
2459  			if (mgmtcipherset & (1 << WLAN_CRYPTO_CIPHER_AES_CMAC))
2460  				mmie_size = cds_get_mmie_size();
2461  			else
2462  				mmie_size = cds_get_gmac_mmie_size();
2463  
2464  			/* Allocate extra bytes for MMIE */
2465  			newFrmLen = frmLen + mmie_size;
2466  			qdf_status = cds_packet_alloc((uint16_t) newFrmLen,
2467  						      (void **)&pFrame,
2468  						      (void **)&pPacket);
2469  
2470  			if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
2471  				wma_alert("Failed to allocate %d bytes for RMF status code (%x)",
2472  					newFrmLen,
2473  					qdf_status);
2474  				/* Free the original packet memory */
2475  				cds_packet_free((void *)tx_frame);
2476  				goto error;
2477  			}
2478  			/*
2479  			 * Initialize the frame with 0's and only fill
2480  			 * MAC header and data. MMIE field will be
2481  			 * filled by wlan_crypto_add_mmie API
2482  			 */
2483  			qdf_mem_zero(pFrame, newFrmLen);
2484  			qdf_mem_copy(pFrame, wh, sizeof(*wh));
2485  			qdf_mem_copy(pFrame + sizeof(*wh),
2486  				     pData + sizeof(*wh), frmLen - sizeof(*wh));
2487  
2488  			/* The API expect length without the mmie size */
2489  			if (!wlan_crypto_add_mmie(iface->vdev, pFrame,
2490  						  frmLen)) {
2491  				wma_alert("Failed to attach MMIE");
2492  				/* Free the original packet memory */
2493  				cds_packet_free((void *)tx_frame);
2494  				cds_packet_free((void *)pPacket);
2495  				goto error;
2496  			}
2497  			cds_packet_free((void *)tx_frame);
2498  			tx_frame = pPacket;
2499  			pData = pFrame;
2500  			frmLen = newFrmLen;
2501  			pFc = (tpSirMacFrameCtl) (qdf_nbuf_data(tx_frame));
2502  		}
2503  		/*
2504  		 * Some target which support sending mgmt frame based on htt
2505  		 * would DMA write this PMF tx frame buffer, it may cause smmu
2506  		 * check permission fault, set a flag to do bi-direction DMA
2507  		 * map, normal tx unmap is enough for this case.
2508  		 */
2509  		QDF_NBUF_CB_TX_DMA_BI_MAP((qdf_nbuf_t)tx_frame) = 1;
2510  	}
2511  	mHdr = (tpSirMacMgmtHdr)qdf_nbuf_data(tx_frame);
2512  	if ((frmType == TXRX_FRM_802_11_MGMT) &&
2513  	    (pFc->subType == SIR_MAC_MGMT_PROBE_RSP)) {
2514  		uint64_t adjusted_tsf_le;
2515  		struct ieee80211_frame *wh =
2516  			(struct ieee80211_frame *)qdf_nbuf_data(tx_frame);
2517  
2518  		/* Make the TSF offset negative to match TSF in beacons */
2519  		adjusted_tsf_le = cpu_to_le64(0ULL -
2520  					      wma_handle->interfaces[vdev_id].
2521  					      tsfadjust);
2522  		A_MEMCPY(&wh[1], &adjusted_tsf_le, sizeof(adjusted_tsf_le));
2523  	}
2524  	if (frmType == TXRX_FRM_802_11_DATA) {
2525  		qdf_nbuf_t ret;
2526  		qdf_nbuf_t skb = (qdf_nbuf_t) tx_frame;
2527  
2528  		struct wma_decap_info_t decap_info;
2529  		struct ieee80211_frame *wh =
2530  			(struct ieee80211_frame *)qdf_nbuf_data(skb);
2531  		unsigned long curr_timestamp = qdf_mc_timer_get_system_ticks();
2532  
2533  		/*
2534  		 * 1) TxRx Module expects data input to be 802.3 format
2535  		 * So Decapsulation has to be done.
2536  		 * 2) Only one Outstanding Data pending for Ack is allowed
2537  		 */
2538  		if (tx_frm_ota_comp_cb) {
2539  			if (wma_handle->umac_data_ota_ack_cb) {
2540  				/*
2541  				 * If last data frame was sent more than 2 secs
2542  				 * ago and still we didn't receive ack/nack from
2543  				 * fw then allow Tx of this data frame
2544  				 */
2545  				if (curr_timestamp >=
2546  				    wma_handle->last_umac_data_ota_timestamp +
2547  				    200) {
2548  					wma_err("No Tx Ack for last data frame for more than 2 secs, allow Tx of current data frame");
2549  				} else {
2550  					wma_err("Already one Data pending for Ack, reject Tx of data frame");
2551  					cds_packet_free((void *)tx_frame);
2552  					return QDF_STATUS_E_FAILURE;
2553  				}
2554  			}
2555  		} else {
2556  			/*
2557  			 * Data Frames are sent through TxRx Non Standard Data
2558  			 * path so Ack Complete Cb is must
2559  			 */
2560  			wma_err("No Ack Complete Cb. Don't Allow");
2561  			cds_packet_free((void *)tx_frame);
2562  			return QDF_STATUS_E_FAILURE;
2563  		}
2564  
2565  		/* Take out 802.11 header from skb */
2566  		decap_info.hdr_len = wma_ieee80211_hdrsize(wh);
2567  		qdf_mem_copy(decap_info.hdr, wh, decap_info.hdr_len);
2568  		qdf_nbuf_pull_head(skb, decap_info.hdr_len);
2569  
2570  		/*  Decapsulate to 802.3 format */
2571  		wma_decap_to_8023(skb, &decap_info);
2572  
2573  		/* Zero out skb's context buffer for the driver to use */
2574  		qdf_mem_zero(skb->cb, sizeof(skb->cb));
2575  
2576  		/* Terminate the (single-element) list of tx frames */
2577  		skb->next = NULL;
2578  
2579  		/* Store the Ack Complete Cb */
2580  		wma_handle->umac_data_ota_ack_cb = tx_frm_ota_comp_cb;
2581  
2582  		/* Store the timestamp and nbuf for this data Tx */
2583  		wma_handle->last_umac_data_ota_timestamp = curr_timestamp;
2584  		wma_handle->last_umac_data_nbuf = skb;
2585  
2586  		/* Send the Data frame to TxRx in Non Standard Path */
2587  		cdp_hl_tdls_flag_reset(soc,
2588  			vdev_id, tdls_flag);
2589  
2590  		ret = cdp_tx_non_std(soc,
2591  			vdev_id,
2592  			OL_TX_SPEC_NO_FREE, skb);
2593  
2594  		cdp_hl_tdls_flag_reset(soc,
2595  			vdev_id, false);
2596  
2597  		if (ret) {
2598  			wma_err("TxRx Rejected. Fail to do Tx");
2599  			/* Call Download Cb so that umac can free the buffer */
2600  			if (tx_frm_download_comp_cb)
2601  				tx_frm_download_comp_cb(wma_handle->mac_context,
2602  						tx_frame,
2603  						WMA_TX_FRAME_BUFFER_FREE);
2604  			wma_handle->umac_data_ota_ack_cb = NULL;
2605  			wma_handle->last_umac_data_nbuf = NULL;
2606  			return QDF_STATUS_E_FAILURE;
2607  		}
2608  
2609  		/* Call Download Callback if passed */
2610  		if (tx_frm_download_comp_cb)
2611  			tx_frm_download_comp_cb(wma_handle->mac_context,
2612  						tx_frame,
2613  						WMA_TX_FRAME_BUFFER_NO_FREE);
2614  
2615  		return QDF_STATUS_SUCCESS;
2616  	}
2617  
2618  	ctrl_pdev = cdp_get_ctrl_pdev_from_vdev(soc, vdev_id);
2619  	if (!ctrl_pdev) {
2620  		wma_err("ol_pdev_handle is NULL");
2621  		cds_packet_free((void *)tx_frame);
2622  		return QDF_STATUS_E_FAILURE;
2623  	}
2624  	is_high_latency = cdp_cfg_is_high_latency(soc, ctrl_pdev);
2625  	is_wmi_mgmt_tx = wmi_service_enabled(wma_handle->wmi_handle,
2626  					     wmi_service_mgmt_tx_wmi);
2627  
2628  	downld_comp_required = tx_frm_download_comp_cb && is_high_latency &&
2629  				(!is_wmi_mgmt_tx) && tx_frm_ota_comp_cb;
2630  
2631  	/* Fill the frame index to send */
2632  	if (pFc->type == SIR_MAC_MGMT_FRAME) {
2633  		if (tx_frm_ota_comp_cb) {
2634  			if (downld_comp_required)
2635  				tx_frm_index =
2636  					GENERIC_DOWNLD_COMP_ACK_COMP_INDEX;
2637  			else
2638  				tx_frm_index = GENERIC_NODOWLOAD_ACK_COMP_INDEX;
2639  
2640  		} else {
2641  			tx_frm_index =
2642  				GENERIC_NODOWNLD_NOACK_COMP_INDEX;
2643  		}
2644  
2645  	}
2646  
2647  	/*
2648  	 * If Download Complete is required
2649  	 * Wait for download complete
2650  	 */
2651  	if (downld_comp_required) {
2652  		/* Store Tx Comp Cb */
2653  		wma_handle->tx_frm_download_comp_cb = tx_frm_download_comp_cb;
2654  
2655  		/* Reset the Tx Frame Complete Event */
2656  		qdf_status = qdf_event_reset(
2657  				&wma_handle->tx_frm_download_comp_event);
2658  
2659  		if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
2660  			wma_alert("Event Reset failed tx comp event %x",
2661  				 qdf_status);
2662  			cds_packet_free((void *)tx_frame);
2663  			goto error;
2664  		}
2665  	}
2666  
2667  	/* If the frame has to be sent at BD Rate2 inform TxRx */
2668  	if (tx_flag & HAL_USE_BD_RATE2_FOR_MANAGEMENT_FRAME)
2669  		use_6mbps = 1;
2670  
2671  	if (pFc->subType == SIR_MAC_MGMT_PROBE_RSP) {
2672  		if (wma_is_vdev_in_ap_mode(wma_handle, vdev_id) &&
2673  		    wma_handle->interfaces[vdev_id].ch_freq)
2674  			chanfreq = wma_handle->interfaces[vdev_id].ch_freq;
2675  		else
2676  			chanfreq = channel_freq;
2677  		wma_debug("Probe response frame on channel %d vdev:%d",
2678  			 chanfreq, vdev_id);
2679  		if (wma_is_vdev_in_ap_mode(wma_handle, vdev_id) && !chanfreq)
2680  			wma_err("AP oper chan is zero");
2681  	} else if (pFc->subType == SIR_MAC_MGMT_ACTION ||
2682  			pFc->subType == SIR_MAC_MGMT_AUTH) {
2683  		chanfreq = channel_freq;
2684  	} else {
2685  		chanfreq = 0;
2686  	}
2687  
2688  	if (pFc->type == SIR_MAC_MGMT_FRAME) {
2689  		if (((mac->mlme_cfg->gen.debug_packet_log &
2690  		    DEBUG_PKTLOG_TYPE_MGMT) &&
2691  		    (pFc->subType != SIR_MAC_MGMT_PROBE_REQ) &&
2692  		    (pFc->subType != SIR_MAC_MGMT_PROBE_RSP) &&
2693  		    (pFc->subType != SIR_MAC_MGMT_ACTION)) ||
2694  		    ((mac->mlme_cfg->gen.debug_packet_log &
2695  		      DEBUG_PKTLOG_TYPE_ACTION) &&
2696  		     (pFc->subType == SIR_MAC_MGMT_ACTION)))
2697  			mgmt_txrx_frame_hex_dump(pData, frmLen, true);
2698  	}
2699  	if (wlan_reg_is_5ghz_ch_freq(wma_handle->interfaces[vdev_id].ch_freq))
2700  		is_5g = true;
2701  
2702  	wh = (struct ieee80211_frame *)(qdf_nbuf_data(tx_frame));
2703  
2704  	mlo_link_agnostic =
2705  		wlan_get_mlo_link_agnostic_flag(iface->vdev, wh->i_addr1);
2706  
2707  	mgmt_param.tx_frame = tx_frame;
2708  	mgmt_param.frm_len = frmLen;
2709  	mgmt_param.vdev_id = vdev_id;
2710  	mgmt_param.pdata = pData;
2711  	mgmt_param.chanfreq = chanfreq;
2712  	mgmt_param.qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
2713  	mgmt_param.use_6mbps = use_6mbps;
2714  	mgmt_param.tx_type = tx_frm_index;
2715  	mgmt_param.peer_rssi = peer_rssi;
2716  	if (wlan_vdev_mlme_get_opmode(iface->vdev) == QDF_STA_MODE &&
2717  	    wlan_vdev_mlme_is_mlo_vdev(iface->vdev) &&
2718  	    (wlan_vdev_mlme_is_active(iface->vdev) == QDF_STATUS_SUCCESS) &&
2719  	    frmType == TXRX_FRM_802_11_MGMT &&
2720  	    pFc->subType != SIR_MAC_MGMT_PROBE_REQ &&
2721  	    pFc->subType != SIR_MAC_MGMT_AUTH &&
2722  	    action != (ACTION_CATEGORY_PUBLIC << 8 | TDLS_DISCOVERY_RESPONSE) &&
2723  	    action != (ACTION_CATEGORY_BACK << 8 | ADDBA_RESPONSE) &&
2724  	    mlo_link_agnostic)
2725  		mgmt_param.mlo_link_agnostic = true;
2726  
2727  	if (tx_flag & HAL_USE_INCORRECT_KEY_PMF)
2728  		mgmt_param.tx_flags |= MGMT_TX_USE_INCORRECT_KEY;
2729  
2730  	/*
2731  	 * Update the tx_params TLV only for rates
2732  	 * other than 1Mbps and 6 Mbps
2733  	 */
2734  	if (rid < RATEID_DEFAULT &&
2735  	    (rid != RATEID_1MBPS && !(rid == RATEID_6MBPS && is_5g))) {
2736  		wma_debug("using rate id: %d for Tx", rid);
2737  		mgmt_param.tx_params_valid = true;
2738  		wma_update_tx_send_params(&mgmt_param.tx_param, rid);
2739  	}
2740  
2741  	psoc = wma_handle->psoc;
2742  	if (!psoc) {
2743  		wma_err("psoc ctx is NULL");
2744  		cds_packet_free((void *)tx_frame);
2745  		goto error;
2746  	}
2747  
2748  	if (!wma_handle->pdev) {
2749  		wma_err("pdev ctx is NULL");
2750  		cds_packet_free((void *)tx_frame);
2751  		goto error;
2752  	}
2753  
2754  	pdev_id = wlan_objmgr_pdev_get_pdev_id(wma_handle->pdev);
2755  	mac_addr = wh->i_addr1;
2756  	peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr, WLAN_MGMT_NB_ID);
2757  	if (!peer) {
2758  		mac_addr = wh->i_addr2;
2759  		peer = wlan_objmgr_get_peer(psoc, pdev_id, mac_addr,
2760  					WLAN_MGMT_NB_ID);
2761  		if (!peer) {
2762  			vdev = wlan_objmgr_get_vdev_by_id_from_psoc(psoc,
2763  								    vdev_id,
2764  								    WLAN_MGMT_NB_ID);
2765  			if (!vdev) {
2766  				wma_err("vdev is null");
2767  				cds_packet_free((void *)tx_frame);
2768  				goto error;
2769  			}
2770  			mld_addr = wlan_vdev_mlme_get_mldaddr(vdev);
2771  			wlan_objmgr_vdev_release_ref(vdev, WLAN_MGMT_NB_ID);
2772  			if (!mld_addr) {
2773  				wma_err("mld addr is null");
2774  				cds_packet_free((void *)tx_frame);
2775  				goto error;
2776  			}
2777  			wma_debug("mld mac addr " QDF_MAC_ADDR_FMT,
2778  				  QDF_MAC_ADDR_REF(mld_addr));
2779  			peer = wlan_objmgr_get_peer(psoc, pdev_id,
2780  						    mld_addr,
2781  						    WLAN_MGMT_NB_ID);
2782  			if (!peer) {
2783  				wma_err("peer is null");
2784  				cds_packet_free((void *)tx_frame);
2785  				goto error;
2786  			}
2787  		}
2788  	}
2789  
2790  	if (ucfg_pkt_capture_get_pktcap_mode(psoc) &
2791  	    PKT_CAPTURE_MODE_MGMT_ONLY) {
2792  		ucfg_pkt_capture_mgmt_tx(wma_handle->pdev,
2793  					 tx_frame,
2794  					 wma_handle->interfaces[vdev_id].ch_freq,
2795  					 mgmt_param.tx_param.preamble_type);
2796  	}
2797  
2798  	status = wlan_mgmt_txrx_mgmt_frame_tx(peer, wma_handle->mac_context,
2799  					      (qdf_nbuf_t)tx_frame, NULL,
2800  					      tx_frm_ota_comp_cb,
2801  					      WLAN_UMAC_COMP_MLME,
2802  					      &mgmt_param);
2803  
2804  	wlan_objmgr_peer_release_ref(peer, WLAN_MGMT_NB_ID);
2805  	if (status != QDF_STATUS_SUCCESS) {
2806  		wma_err_rl("mgmt tx failed");
2807  		qdf_nbuf_free((qdf_nbuf_t)tx_frame);
2808  		goto error;
2809  	}
2810  
2811  	/*
2812  	 * Failed to send Tx Mgmt Frame
2813  	 */
2814  	if (status) {
2815  	/* Call Download Cb so that umac can free the buffer */
2816  		uint32_t rem;
2817  
2818  		if (tx_frm_download_comp_cb)
2819  			tx_frm_download_comp_cb(wma_handle->mac_context,
2820  						tx_frame,
2821  						WMA_TX_FRAME_BUFFER_FREE);
2822  		rem = qdf_do_div_rem(wma_handle->tx_fail_cnt,
2823  				     MAX_PRINT_FAILURE_CNT);
2824  		if (!rem)
2825  			wma_err("Failed to send Mgmt Frame");
2826  		else
2827  			wma_debug("Failed to send Mgmt Frame");
2828  		wma_handle->tx_fail_cnt++;
2829  		goto error;
2830  	}
2831  
2832  	if (!tx_frm_download_comp_cb)
2833  		return QDF_STATUS_SUCCESS;
2834  
2835  	/*
2836  	 * Wait for Download Complete
2837  	 * if required
2838  	 */
2839  	if (downld_comp_required) {
2840  		/*
2841  		 * Wait for Download Complete
2842  		 * @ Integrated : Dxe Complete
2843  		 * @ Discrete : Target Download Complete
2844  		 */
2845  		qdf_status =
2846  			qdf_wait_for_event_completion(&wma_handle->
2847  					      tx_frm_download_comp_event,
2848  					      WMA_TX_FRAME_COMPLETE_TIMEOUT);
2849  
2850  		if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
2851  			wma_nofl_alert("Wait Event failed txfrm_comp_event");
2852  			/*
2853  			 * @Integrated: Something Wrong with Dxe
2854  			 *   TODO: Some Debug Code
2855  			 * Here We need to trigger SSR since
2856  			 * since system went into a bad state where
2857  			 * we didn't get Download Complete for almost
2858  			 * WMA_TX_FRAME_COMPLETE_TIMEOUT (1 sec)
2859  			 */
2860  			/* display scheduler stats */
2861  			return cdp_display_stats(soc, CDP_SCHEDULER_STATS,
2862  						QDF_STATS_VERBOSITY_LEVEL_HIGH);
2863  		}
2864  	}
2865  
2866  	return QDF_STATUS_SUCCESS;
2867  
2868  error:
2869  	wma_handle->tx_frm_download_comp_cb = NULL;
2870  	wma_handle->umac_data_ota_ack_cb = NULL;
2871  	return QDF_STATUS_E_FAILURE;
2872  }
2873  
wma_ds_peek_rx_packet_info(cds_pkt_t * pkt,void ** pkt_meta)2874  QDF_STATUS wma_ds_peek_rx_packet_info(cds_pkt_t *pkt, void **pkt_meta)
2875  {
2876  	if (!pkt) {
2877  		wma_err("wma:Invalid parameter sent on wma_peek_rx_pkt_info");
2878  		return QDF_STATUS_E_FAULT;
2879  	}
2880  
2881  	*pkt_meta = &(pkt->pkt_meta);
2882  
2883  	return QDF_STATUS_SUCCESS;
2884  }
2885  
2886  #ifdef HL_RX_AGGREGATION_HOLE_DETECTION
ol_rx_aggregation_hole(uint32_t hole_info)2887  void ol_rx_aggregation_hole(uint32_t hole_info)
2888  {
2889  	struct sir_sme_rx_aggr_hole_ind *rx_aggr_hole_event;
2890  	uint32_t alloc_len;
2891  	cds_msg_t cds_msg = { 0 };
2892  	QDF_STATUS status;
2893  
2894  	alloc_len = sizeof(*rx_aggr_hole_event) +
2895  		sizeof(rx_aggr_hole_event->hole_info_array[0]);
2896  	rx_aggr_hole_event = qdf_mem_malloc(alloc_len);
2897  	if (!rx_aggr_hole_event)
2898  		return;
2899  
2900  	rx_aggr_hole_event->hole_cnt = 1;
2901  	rx_aggr_hole_event->hole_info_array[0] = hole_info;
2902  
2903  	cds_msg.type = eWNI_SME_RX_AGGR_HOLE_IND;
2904  	cds_msg.bodyptr = rx_aggr_hole_event;
2905  	cds_msg.bodyval = 0;
2906  
2907  	status = cds_mq_post_message(CDS_MQ_ID_SME, &cds_msg);
2908  	if (status != QDF_STATUS_SUCCESS) {
2909  		qdf_mem_free(rx_aggr_hole_event);
2910  		return;
2911  	}
2912  }
2913  #endif
2914  
2915  /**
2916   * ol_rx_err() - ol rx err handler
2917   * @pdev: ol pdev
2918   * @vdev_id: vdev id
2919   * @peer_mac_addr: peer mac address
2920   * @tid: TID
2921   * @tsf32: TSF
2922   * @err_type: error type
2923   * @rx_frame: rx frame
2924   * @pn: PN Number
2925   * @key_id: key id
2926   *
2927   * This function handles rx error and send MIC error failure to LIM
2928   *
2929   * Return: none
2930   */
2931  /*
2932   * Local prototype added to temporarily address warning caused by
2933   * -Wmissing-prototypes. A more correct solution will come later
2934   * as a solution to IR-196435 at which point this prototype will
2935   * be removed.
2936   */
2937  void ol_rx_err(void *pdev, uint8_t vdev_id,
2938  	       uint8_t *peer_mac_addr, int tid, uint32_t tsf32,
2939  	       enum ol_rx_err_type err_type, qdf_nbuf_t rx_frame,
2940  	       uint64_t *pn, uint8_t key_id);
void ol_rx_err(void *pdev, uint8_t vdev_id,
	       uint8_t *peer_mac_addr, int tid, uint32_t tsf32,
	       enum ol_rx_err_type err_type, qdf_nbuf_t rx_frame,
	       uint64_t *pn, uint8_t key_id)
{
	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
	struct scheduler_msg msg = {0};
	struct mic_failure_ind *ind;
	qdf_ether_header_t *eth_hdr;
	uint8_t *bssid;

	if (!wma)
		return;

	/* Only TKIP MIC failures are forwarded to SME */
	if (err_type != OL_RX_ERR_TKIP_MIC)
		return;

	/* Frame must be large enough to hold an ethernet header */
	if (qdf_nbuf_len(rx_frame) < sizeof(*eth_hdr))
		return;

	eth_hdr = (qdf_ether_header_t *)qdf_nbuf_data(rx_frame);

	ind = qdf_mem_malloc(sizeof(*ind));
	if (!ind)
		return;

	ind->messageType = eWNI_SME_MIC_FAILURE_IND;
	ind->length = sizeof(*ind);
	ind->sessionId = vdev_id;

	bssid = wma_get_vdev_bssid(wma->interfaces[vdev_id].vdev);
	if (!bssid) {
		wma_err("Failed to get bssid for vdev_%d", vdev_id);
		qdf_mem_free(ind);
		return;
	}
	qdf_copy_macaddr(&ind->bssId, (struct qdf_mac_addr *)bssid);

	/* Transmitter, source and destination addresses for the report */
	qdf_mem_copy(ind->info.taMacAddr, peer_mac_addr, sizeof(tSirMacAddr));
	qdf_mem_copy(ind->info.srcMacAddr, eth_hdr->ether_shost,
		     sizeof(tSirMacAddr));
	qdf_mem_copy(ind->info.dstMacAddr, eth_hdr->ether_dhost,
		     sizeof(tSirMacAddr));
	ind->info.keyId = key_id;
	ind->info.multicast = IEEE80211_IS_MULTICAST(eth_hdr->ether_dhost);
	qdf_mem_copy(ind->info.TSC, pn, SIR_CIPHER_SEQ_CTR_SIZE);

	msg.type = eWNI_SME_MIC_FAILURE_IND;
	msg.bodyptr = (void *)ind;

	/* Ownership of ind passes to SME on a successful post */
	if (scheduler_post_message(QDF_MODULE_ID_TXRX,
				   QDF_MODULE_ID_SME,
				   QDF_MODULE_ID_SME,
				   &msg) != QDF_STATUS_SUCCESS) {
		wma_err("could not post mic failure indication to SME");
		qdf_mem_free((void *)ind);
	}
}
3003  
wma_tx_abort(uint8_t vdev_id)3004  void wma_tx_abort(uint8_t vdev_id)
3005  {
3006  #define PEER_ALL_TID_BITMASK 0xffffffff
3007  	tp_wma_handle wma;
3008  	uint32_t peer_tid_bitmap = PEER_ALL_TID_BITMASK;
3009  	struct wma_txrx_node *iface;
3010  	uint8_t *bssid;
3011  	struct peer_flush_params param = {0};
3012  
3013  	wma = cds_get_context(QDF_MODULE_ID_WMA);
3014  	if (!wma)
3015  		return;
3016  
3017  	iface = &wma->interfaces[vdev_id];
3018  	if (!iface->vdev) {
3019  		wma_err("iface->vdev is NULL");
3020  		return;
3021  	}
3022  
3023  	bssid = wma_get_vdev_bssid(iface->vdev);
3024  	if (!bssid) {
3025  		wma_err("Failed to get bssid for vdev_%d", vdev_id);
3026  		return;
3027  	}
3028  
3029  	wma_debug("vdevid %d bssid "QDF_MAC_ADDR_FMT, vdev_id,
3030  		  QDF_MAC_ADDR_REF(bssid));
3031  	wma_vdev_set_pause_bit(vdev_id, PAUSE_TYPE_HOST);
3032  	cdp_fc_vdev_pause(cds_get_context(QDF_MODULE_ID_SOC), vdev_id,
3033  			  OL_TXQ_PAUSE_REASON_TX_ABORT, 0);
3034  
3035  	/* Flush all TIDs except MGMT TID for this peer in Target */
3036  	peer_tid_bitmap &= ~(0x1 << WMI_MGMT_TID);
3037  	param.peer_tid_bitmap = peer_tid_bitmap;
3038  	param.vdev_id = vdev_id;
3039  	wmi_unified_peer_flush_tids_send(wma->wmi_handle, bssid,
3040  					 &param);
3041  }
3042  
wma_delete_invalid_peer_entries(uint8_t vdev_id,uint8_t * peer_mac_addr)3043  void wma_delete_invalid_peer_entries(uint8_t vdev_id, uint8_t *peer_mac_addr)
3044  {
3045  	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
3046  	uint8_t i;
3047  	struct wma_txrx_node *iface;
3048  
3049  	if (!wma)
3050  		return;
3051  
3052  	iface = &wma->interfaces[vdev_id];
3053  
3054  	if (peer_mac_addr) {
3055  		for (i = 0; i < INVALID_PEER_MAX_NUM; i++) {
3056  			if (qdf_mem_cmp
3057  				      (iface->invalid_peers[i].rx_macaddr,
3058  				      peer_mac_addr,
3059  				      QDF_MAC_ADDR_SIZE) == 0) {
3060  				qdf_mem_zero(iface->invalid_peers[i].rx_macaddr,
3061  					     sizeof(QDF_MAC_ADDR_SIZE));
3062  				break;
3063  			}
3064  		}
3065  		if (i == INVALID_PEER_MAX_NUM)
3066  			wma_debug("peer_mac_addr "QDF_MAC_ADDR_FMT" is not found",
3067  				  QDF_MAC_ADDR_REF(peer_mac_addr));
3068  	} else {
3069  		qdf_mem_zero(iface->invalid_peers,
3070  			     sizeof(iface->invalid_peers));
3071  	}
3072  }
3073  
/**
 * wma_rx_invalid_peer_ind() - handle an invalid-peer rx indication
 * @vdev_id: vdev id on which the frame from an unknown peer was received
 * @wh: pointer to the received 802.11 frame header
 *
 * Records the transmitter address (addr2) of the offending frame in the
 * vdev's invalid_peers[] cache and, on first sighting, posts a
 * SIR_LIM_RX_INVALID_PEER message so LIM can send a deauth. Repeat
 * indications for the same transmitter are rate-limited by the cache.
 *
 * Ownership: rx_inv_msg is handed to the SIR_LIM_RX_INVALID_PEER message
 * receiver via wma_send_msg(); it is freed locally only on the duplicate
 * path.
 *
 * NOTE(review): the return type is uint8_t but the error paths return
 * -EINVAL/-ENOMEM, which truncate to 235/254 — callers testing for
 * negative values will never see an error. Fixing this requires a
 * signature change; confirm against callers before altering.
 *
 * Return: 0 on success or duplicate; truncated -EINVAL/-ENOMEM on failure
 */
uint8_t wma_rx_invalid_peer_ind(uint8_t vdev_id, void *wh)
{
	struct ol_rx_inv_peer_params *rx_inv_msg;
	struct ieee80211_frame *wh_l = (struct ieee80211_frame *)wh;
	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
	uint8_t i, index;
	bool invalid_peer_found = false;
	struct wma_txrx_node *iface;

	if (!wma)
		return -EINVAL;

	iface = &wma->interfaces[vdev_id];
	rx_inv_msg = qdf_mem_malloc(sizeof(struct ol_rx_inv_peer_params));
	if (!rx_inv_msg)
		return -ENOMEM;

	/* Next free slot in the circular invalid-peer cache */
	index = iface->invalid_peer_idx;
	rx_inv_msg->vdev_id = vdev_id;
	/* RA = addr1 (us), TA = addr2 (the unknown transmitter) */
	qdf_mem_copy(rx_inv_msg->ra, wh_l->i_addr1, QDF_MAC_ADDR_SIZE);
	qdf_mem_copy(rx_inv_msg->ta, wh_l->i_addr2, QDF_MAC_ADDR_SIZE);


	/* Have we already reported this transmitter? */
	for (i = 0; i < INVALID_PEER_MAX_NUM; i++) {
		if (qdf_mem_cmp
			      (iface->invalid_peers[i].rx_macaddr,
			      rx_inv_msg->ta,
			      QDF_MAC_ADDR_SIZE) == 0) {
			invalid_peer_found = true;
			break;
		}
	}

	if (!invalid_peer_found) {
		/* Cache the transmitter so repeats are ignored */
		qdf_mem_copy(iface->invalid_peers[index].rx_macaddr,
			     rx_inv_msg->ta,
			    QDF_MAC_ADDR_SIZE);

		/* reset count if reached max */
		iface->invalid_peer_idx =
			(index + 1) % INVALID_PEER_MAX_NUM;

		/* send deauth */
		wma_debug("vdev_id: %d RA: "QDF_MAC_ADDR_FMT" TA: "QDF_MAC_ADDR_FMT,
			  vdev_id, QDF_MAC_ADDR_REF(rx_inv_msg->ra),
			  QDF_MAC_ADDR_REF(rx_inv_msg->ta));

		/* rx_inv_msg ownership transfers to the message receiver */
		wma_send_msg(wma,
			     SIR_LIM_RX_INVALID_PEER,
			     (void *)rx_inv_msg, 0);
	} else {
		wma_debug_rl("Ignore invalid peer indication as received more than once "
			QDF_MAC_ADDR_FMT,
			QDF_MAC_ADDR_REF(rx_inv_msg->ta));
		qdf_mem_free(rx_inv_msg);
	}

	return 0;
}
3133  
3134  static bool
wma_drop_delba(tp_wma_handle wma,uint8_t vdev_id,enum cdp_delba_rcode cdp_reason_code)3135  wma_drop_delba(tp_wma_handle wma, uint8_t vdev_id,
3136  	       enum cdp_delba_rcode cdp_reason_code)
3137  {
3138  	struct wlan_objmgr_vdev *vdev;
3139  	qdf_time_t last_ts, ts = qdf_mc_timer_get_system_time();
3140  	bool drop = false;
3141  
3142  	vdev = wlan_objmgr_get_vdev_by_id_from_psoc(wma->psoc, vdev_id,
3143  						    WLAN_MLME_CM_ID);
3144  	if (!vdev) {
3145  		wma_err("vdev is NULL");
3146  		return drop;
3147  	}
3148  	if (!wlan_mlme_is_ba_2k_jump_iot_ap(vdev))
3149  		goto done;
3150  
3151  	last_ts = wlan_mlme_get_last_delba_sent_time(vdev);
3152  	if ((last_ts && cdp_reason_code == CDP_DELBA_2K_JUMP) &&
3153  	    (ts - last_ts) < CDP_DELBA_INTERVAL_MS) {
3154  		wma_debug("Drop DELBA, last sent ts: %lu current ts: %lu",
3155  			  last_ts, ts);
3156  		drop = true;
3157  	}
3158  
3159  	wlan_mlme_set_last_delba_sent_time(vdev, ts);
3160  
3161  done:
3162  	wlan_objmgr_vdev_release_ref(vdev, WLAN_MLME_CM_ID);
3163  
3164  	return drop;
3165  }
3166  
wma_dp_send_delba_ind(uint8_t vdev_id,uint8_t * peer_macaddr,uint8_t tid,uint8_t reason_code,enum cdp_delba_rcode cdp_reason_code)3167  int wma_dp_send_delba_ind(uint8_t vdev_id, uint8_t *peer_macaddr,
3168  			  uint8_t tid, uint8_t reason_code,
3169  			  enum cdp_delba_rcode cdp_reason_code)
3170  {
3171  	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
3172  	struct lim_delba_req_info *req;
3173  
3174  	if (!wma || !peer_macaddr) {
3175  		wma_err("wma handle or mac addr is NULL");
3176  		return -EINVAL;
3177  	}
3178  
3179  	if (wma_drop_delba(wma, vdev_id, cdp_reason_code))
3180  		return 0;
3181  
3182  	req = qdf_mem_malloc(sizeof(*req));
3183  	if (!req)
3184  		return -ENOMEM;
3185  	req->vdev_id = vdev_id;
3186  	qdf_mem_copy(req->peer_macaddr, peer_macaddr, QDF_MAC_ADDR_SIZE);
3187  	req->tid = tid;
3188  	req->reason_code = reason_code;
3189  	wma_debug("req delba_ind vdev %d "QDF_MAC_ADDR_FMT" tid %d reason %d",
3190  		 vdev_id, QDF_MAC_ADDR_REF(peer_macaddr), tid, reason_code);
3191  	wma_send_msg_high_priority(wma, SIR_HAL_REQ_SEND_DELBA_REQ_IND,
3192  				   (void *)req, 0);
3193  
3194  	return 0;
3195  }
3196  
wma_is_roam_in_progress(uint32_t vdev_id)3197  bool wma_is_roam_in_progress(uint32_t vdev_id)
3198  {
3199  	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
3200  	enum QDF_OPMODE opmode;
3201  
3202  	if (!wma_is_vdev_valid(vdev_id))
3203  		return false;
3204  
3205  	if (!wma || !wma->interfaces[vdev_id].vdev)
3206  		return false;
3207  
3208  	opmode = wlan_vdev_mlme_get_opmode(wma->interfaces[vdev_id].vdev);
3209  	if (opmode != QDF_STA_MODE && opmode != QDF_P2P_CLIENT_MODE)
3210  		return false;
3211  
3212  	return wlan_cm_is_vdev_roam_started(wma->interfaces[vdev_id].vdev);
3213  }
3214