1 /*
2  * Copyright (c) 2011-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*=== includes ===*/
21 /* header files for OS primitives */
22 #include <osdep.h>              /* uint32_t, etc. */
23 #include <qdf_mem.h>         /* qdf_mem_malloc,free */
24 #include <qdf_types.h>          /* qdf_device_t, qdf_print */
25 #include <qdf_lock.h>           /* qdf_spinlock */
26 #include <qdf_atomic.h>         /* qdf_atomic_read */
27 #include <qdf_debugfs.h>
28 
29 /* header files for utilities */
30 #include "queue.h"          /* TAILQ */
31 
32 /* header files for configuration API */
33 #include <ol_cfg.h>             /* ol_cfg_is_high_latency */
34 #include <ol_if_athvar.h>
35 
36 /* header files for HTT API */
37 #include <ol_htt_api.h>
38 #include <ol_htt_tx_api.h>
39 
40 /* header files for our own APIs */
41 #include <ol_txrx_api.h>
42 #include <ol_txrx_dbg.h>
43 #include <cdp_txrx_ocb.h>
44 #include <ol_txrx_ctrl_api.h>
45 #include <cdp_txrx_stats.h>
46 #include <ol_txrx_osif_api.h>
47 /* header files for our internal definitions */
48 #include <ol_txrx_internal.h>   /* TXRX_ASSERT, etc. */
49 #include <wdi_event.h>          /* WDI events */
50 #include <ol_tx.h>              /* ol_tx_ll */
51 #include <ol_rx.h>              /* ol_rx_deliver */
52 #include <ol_txrx_peer_find.h>  /* ol_txrx_peer_find_attach, etc. */
53 #include <ol_rx_pn.h>           /* ol_rx_pn_check, etc. */
54 #include <ol_rx_fwd.h>          /* ol_rx_fwd_check, etc. */
55 #include <ol_rx_reorder_timeout.h>      /* OL_RX_REORDER_TIMEOUT_INIT, etc. */
56 #include <ol_rx_reorder.h>
57 #include <ol_tx_send.h>         /* ol_tx_discard_target_frms */
58 #include <ol_tx_desc.h>         /* ol_tx_desc_frame_free */
59 #include <ol_tx_queue.h>
60 #include <ol_tx_sched.h>           /* ol_tx_sched_attach, etc. */
61 #include <ol_txrx.h>
62 #include <ol_txrx_types.h>
63 #include <ol_cfg.h>
64 #include <cdp_txrx_flow_ctrl_legacy.h>
65 #include <cdp_txrx_cmn_reg.h>
66 #include <cdp_txrx_bus.h>
67 #include <cdp_txrx_ipa.h>
68 #include <cdp_txrx_pmf.h>
69 #include "wma.h"
70 #include "hif.h"
71 #include "hif_main.h"
72 #include <cdp_txrx_peer_ops.h>
73 #ifndef REMOVE_PKT_LOG
74 #include "pktlog_ac.h"
75 #endif
76 #include <wlan_policy_mgr_api.h>
77 #include "epping_main.h"
78 #include <a_types.h>
79 #include <cdp_txrx_handle.h>
80 #include <cdp_txrx_cmn_reg.h>
81 #include "wlan_qct_sys.h"
82 
83 #include <htt_internal.h>
84 #include <ol_txrx_ipa.h>
85 #include "wlan_roam_debug.h"
86 #include "cfg_ucfg_api.h"
87 #ifdef DP_SUPPORT_RECOVERY_NOTIFY
88 #include <qdf_notifier.h>
89 #include <qdf_hang_event_notifier.h>
90 #endif
91 
92 #define DPT_DEBUGFS_PERMS	(QDF_FILE_USR_READ |	\
93 				QDF_FILE_USR_WRITE |	\
94 				QDF_FILE_GRP_READ |	\
95 				QDF_FILE_OTH_READ)
96 
97 #define DPT_DEBUGFS_NUMBER_BASE	10
98 /**
99  * enum dpt_set_param_debugfs - dpt set params
100  * @DPT_SET_PARAM_PROTO_BITMAP: set proto bitmap
101  * @DPT_SET_PARAM_NR_RECORDS: set num of records
102  * @DPT_SET_PARAM_VERBOSITY: set verbosity
 * @DPT_SET_PARAM_NUM_RECORDS_TO_DUMP: set num of records to dump
 * @DPT_SET_PARAM_MAX: end marker, keep last
103  */
104 enum dpt_set_param_debugfs {
105 	DPT_SET_PARAM_PROTO_BITMAP = 1,
106 	DPT_SET_PARAM_NR_RECORDS = 2,
107 	DPT_SET_PARAM_VERBOSITY = 3,
108 	DPT_SET_PARAM_NUM_RECORDS_TO_DUMP = 4,
109 	DPT_SET_PARAM_MAX,
110 };
111 
112 static void ol_vdev_rx_set_intrabss_fwd(struct cdp_soc_t *soc_hdl,
113 					uint8_t vdev_id, bool val);
114 uint32_t ol_txrx_get_tx_pending(struct cdp_pdev *pdev_handle);
115 extern void
116 ol_txrx_set_wmm_param(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
117 		      struct ol_tx_wmm_param_t wmm_param);
118 
119 /* thresh for peer's cached buf queue beyond which the elements are dropped */
120 #define OL_TXRX_CACHED_BUFQ_THRESH 128
121 
122 #ifdef DP_SUPPORT_RECOVERY_NOTIFY
123 static
124 int ol_peer_recovery_notifier_cb(struct notifier_block *block,
125 				 unsigned long state, void *data)
126 {
127 	struct qdf_notifer_data *notif_data = data;
128 	qdf_notif_block *notif_block;
129 	struct ol_txrx_peer_t *peer;
130 	struct peer_hang_data hang_data = {0};
131 	enum peer_debug_id_type dbg_id;
132 
133 	if (!data || !block)
134 		return -EINVAL;
135 
136 	notif_block = qdf_container_of(block, qdf_notif_block, notif_block);
137 
138 	peer = notif_block->priv_data;
139 	if (!peer)
140 		return -EINVAL;
141 
142 	if (notif_data->offset + sizeof(struct peer_hang_data) >
143 			QDF_WLAN_HANG_FW_OFFSET)
144 		return NOTIFY_STOP_MASK;
145 
146 	QDF_HANG_EVT_SET_HDR(&hang_data.tlv_header,
147 			     HANG_EVT_TAG_DP_PEER_INFO,
148 			     QDF_HANG_GET_STRUCT_TLVLEN(struct peer_hang_data));
149 
150 	qdf_mem_copy(&hang_data.peer_mac_addr, &peer->mac_addr.raw,
151 		     QDF_MAC_ADDR_SIZE);
152 
153 	for (dbg_id = 0; dbg_id < PEER_DEBUG_ID_MAX; dbg_id++)
154 		if (qdf_atomic_read(&peer->access_list[dbg_id]))
155 			hang_data.peer_timeout_bitmask |= (1 << dbg_id);
156 
157 	qdf_mem_copy(notif_data->hang_data + notif_data->offset,
158 		     &hang_data, sizeof(struct peer_hang_data));
159 	notif_data->offset += sizeof(struct peer_hang_data);
160 
161 	return 0;
162 }
163 
164 static qdf_notif_block ol_peer_recovery_notifier = {
165 	.notif_block.notifier_call = ol_peer_recovery_notifier_cb,
166 };
167 
168 static
169 QDF_STATUS ol_register_peer_recovery_notifier(struct ol_txrx_peer_t *peer)
170 {
171 	ol_peer_recovery_notifier.priv_data = peer;
172 
173 	return qdf_hang_event_register_notifier(&ol_peer_recovery_notifier);
174 }
175 
176 static
177 QDF_STATUS ol_unregister_peer_recovery_notifier(void)
178 {
179 	return qdf_hang_event_unregister_notifier(&ol_peer_recovery_notifier);
180 }
181 #else
182 static inline
183 QDF_STATUS ol_register_peer_recovery_notifier(struct ol_txrx_peer_t *peer)
184 {
185 	return QDF_STATUS_SUCCESS;
186 }
187 
188 static
189 QDF_STATUS ol_unregister_peer_recovery_notifier(void)
190 {
191 	return QDF_STATUS_SUCCESS;
192 }
193 #endif
194 
195 /**
196  * ol_tx_mark_first_wakeup_packet() - set flag to indicate that the
197  *    fw supports marking the first packet after wow wakeup
198  * @soc_hdl: Datapath soc handle
199  * @pdev_id: id of data path pdev handle
200  * @value: 1 for enabled, 0 for disabled
201  *
202  * Return: None
203  */
204 static void ol_tx_mark_first_wakeup_packet(struct cdp_soc_t *soc_hdl,
205 					   uint8_t pdev_id, uint8_t value)
206 {
207 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
208 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
209 
210 	if (!pdev) {
211 		ol_txrx_err("pdev is NULL");
212 		return;
213 	}
214 
215 	htt_mark_first_wakeup_packet(pdev->htt_pdev, value);
216 }
217 
218 /**
219  * ol_tx_set_is_mgmt_over_wmi_enabled() - set flag to indicate whether mgmt
220  *                                        over wmi is enabled
221  * @value: 1 for enabled, 0 for disabled
222  *
223  * Return: None
224  */
225 void ol_tx_set_is_mgmt_over_wmi_enabled(uint8_t value)
226 {
227 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
228 	ol_txrx_pdev_handle pdev;
229 
230 	if (qdf_unlikely(!soc))
231 		return;
232 
233 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
234 	if (!pdev) {
235 		ol_txrx_err("pdev is NULL");
236 		return;
237 	}
238 
239 	pdev->is_mgmt_over_wmi_enabled = value;
240 }
241 
242 /**
243  * ol_tx_get_is_mgmt_over_wmi_enabled() - get value of is_mgmt_over_wmi_enabled
244  *
245  * Return: is_mgmt_over_wmi_enabled
246  */
247 uint8_t ol_tx_get_is_mgmt_over_wmi_enabled(void)
248 {
249 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
250 	ol_txrx_pdev_handle pdev;
251 
252 	if (qdf_unlikely(!soc))
253 		return 0;
254 
255 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
256 	if (!pdev) {
257 		ol_txrx_err("pdev is NULL");
258 		return 0;
259 	}
260 
261 	return pdev->is_mgmt_over_wmi_enabled;
262 }
263 
264 
265 #ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
266 static void *
267 ol_txrx_find_peer_by_addr_and_vdev(struct cdp_pdev *ppdev,
268 	struct cdp_vdev *pvdev, uint8_t *peer_addr)
269 {
270 	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
271 	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
272 	struct ol_txrx_peer_t *peer;
273 
274 	peer = ol_txrx_peer_vdev_find_hash(pdev, vdev, peer_addr, 0, 1);
275 	if (!peer)
276 		return NULL;
277 	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
278 	return peer;
279 }
280 
281 /**
282  * ol_txrx_get_vdevid() - Get virtual interface id which peer registered
283  * @soc_hdl: data path soc handle
284  * @peer_mac: peer mac address
285  * @vdev_id: virtual interface id which peer registered
286  *
287  * Get virtual interface id which peer registered
288  *
289  * Return: QDF_STATUS_SUCCESS on success
290  *         QDF_STATUS_E_FAILURE if the peer is not found
291  */
292 static QDF_STATUS ol_txrx_get_vdevid(struct cdp_soc_t *soc_hdl,
293 				     uint8_t *peer_mac, uint8_t *vdev_id)
294 {
295 	uint8_t pdev_id = OL_TXRX_PDEV_ID;
296 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
297 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
298 	struct ol_txrx_peer_t *peer =
299 		ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac, 0, 1,
300 						    PEER_DEBUG_ID_OL_INTERNAL);
301 
302 	if (!peer) {
303 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
304 			  "peer argument is null!!");
305 		return QDF_STATUS_E_FAILURE;
306 	}
307 
308 	*vdev_id = peer->vdev->vdev_id;
309 	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
310 
311 	return QDF_STATUS_SUCCESS;
312 }
313 
314 ol_txrx_vdev_handle
315 ol_txrx_get_vdev_by_peer_addr(struct cdp_pdev *ppdev,
316 			      struct qdf_mac_addr peer_addr)
317 {
318 	struct ol_txrx_pdev_t *pdev = cdp_pdev_to_ol_txrx_pdev_t(ppdev);
319 	struct ol_txrx_peer_t *peer = NULL;
320 	ol_txrx_vdev_handle vdev;
321 
322 	if (!pdev) {
323 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
324 			  "PDEV not found for peer_addr: " QDF_MAC_ADDR_FMT,
325 			  QDF_MAC_ADDR_REF(peer_addr.bytes));
326 		return NULL;
327 	}
328 
329 	peer = ol_txrx_peer_get_ref_by_addr(pdev, peer_addr.bytes,
330 					    PEER_DEBUG_ID_OL_INTERNAL);
331 
332 	if (!peer) {
333 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
334 			  "Peer not found for peer_addr:" QDF_MAC_ADDR_FMT,
335 			  QDF_MAC_ADDR_REF(peer_addr.bytes));
336 		return NULL;
337 	}
338 
339 	vdev = peer->vdev;
340 	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
341 
342 	return vdev;
343 }
344 
345 /**
346  * ol_txrx_wrapper_get_vdev_by_peer_addr() - Get vdev handle by peer mac address
347  * @ppdev: data path device instance
348  * @peer_addr: peer mac address
349  *
350  * Get virtual interface handle by local peer mac address
351  *
352  * Return: Virtual interface instance handle
353  *         NULL in case cannot find
354  */
355 static struct cdp_vdev *
356 ol_txrx_wrapper_get_vdev_by_peer_addr(struct cdp_pdev *ppdev,
357 				      struct qdf_mac_addr peer_addr)
358 {
359 	return (struct cdp_vdev *)ol_txrx_get_vdev_by_peer_addr(ppdev,
360 								peer_addr);
361 }
362 
363 /**
364  * ol_txrx_find_peer_exist() - find peer if already exists
365  * @soc_hdl: datapath soc handle
366  * @pdev_id: physical device instance id
367  * @peer_addr: peer mac address
368  *
369  * Return: true or false
370  */
371 static bool ol_txrx_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
372 				    uint8_t *peer_addr)
373 {
374 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
375 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
376 
377 	if (!pdev)
378 		return false;
379 
380 	return !!ol_txrx_find_peer_by_addr(ol_txrx_pdev_t_to_cdp_pdev(pdev),
381 					   peer_addr);
382 }
383 
384 /**
385  * ol_txrx_find_peer_exist_on_vdev() - find if duplicate peer exists
386  * on the given vdev
387  * @soc_hdl: datapath soc handle
388  * @vdev_id: vdev instance id
389  * @peer_addr: peer mac address
390  *
391  * Return: true or false
392  */
393 static bool ol_txrx_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl,
394 					    uint8_t vdev_id,
395 					    uint8_t *peer_addr)
396 {
397 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
398 	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
399 								     vdev_id);
400 
401 	if (!vdev)
402 		return false;
403 
404 	return !!ol_txrx_find_peer_by_addr_and_vdev(
405 					ol_txrx_pdev_t_to_cdp_pdev(vdev->pdev),
406 					ol_txrx_vdev_t_to_cdp_vdev(vdev),
407 					peer_addr);
408 }
409 
410 /**
411  * ol_txrx_find_peer_exist_on_other_vdev() - find if duplicate peer exists
412  * on other than the given vdev
413  * @soc_hdl: datapath soc handle
414  * @vdev_id: vdev instance id
415  * @peer_addr: peer mac address
416  * @max_bssid: max number of bssids
417  *
418  * Return: true or false
419  */
420 static bool ol_txrx_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl,
421 						  uint8_t vdev_id,
422 						  uint8_t *peer_addr,
423 						  uint16_t max_bssid)
424 {
425 	int i;
426 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
427 	struct ol_txrx_vdev_t *vdev;
428 
429 	for (i = 0; i < max_bssid; i++) {
430 		vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc, i);
431 		/* Need to check vdevs other than the vdev_id */
432 		if (vdev_id == i || !vdev)
433 			continue;
434 		if (ol_txrx_find_peer_by_addr_and_vdev(
435 					ol_txrx_pdev_t_to_cdp_pdev(vdev->pdev),
436 					ol_txrx_vdev_t_to_cdp_vdev(vdev),
437 					peer_addr)) {
438 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
439 				  "%s: Duplicate peer "QDF_MAC_ADDR_FMT" already exist on vdev %d",
440 				  __func__, QDF_MAC_ADDR_REF(peer_addr), i);
441 			return true;
442 		}
443 	}
444 
445 	return false;
446 }
447 
448 /**
449  * ol_txrx_find_peer_by_addr() - find peer via peer mac addr
450  * @ppdev: pointer of type cdp_pdev
451  * @peer_addr: peer mac addr
452  *
453  * This function finds a peer with the given mac address and returns a
454  * handle to it.
455  * Note that this function does not increment the peer->ref_cnt.
456  * This means that the peer may be deleted in some other parallel context
457  * after it has been found.
458  * Return: peer handle if peer is found, NULL if peer is not found.
459  */
460 void *ol_txrx_find_peer_by_addr(struct cdp_pdev *ppdev,
461 				uint8_t *peer_addr)
462 {
463 	struct ol_txrx_peer_t *peer;
464 	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
465 
466 	peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_addr, 0, 1,
467 						   PEER_DEBUG_ID_OL_INTERNAL);
468 	if (!peer)
469 		return NULL;
470 	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
471 	return peer;
472 }
473 
474 /**
475  * ol_txrx_peer_get_ref_by_addr() - get peer ref via peer mac addr
476  * @pdev: pointer of type ol_txrx_pdev_handle
477  * @peer_addr: peer mac addr
478  * @dbg_id: debug id to track the caller
479  *
480  * This function finds the peer with the given mac address and returns a
481  * reference-held handle to it. The function increments peer->ref_cnt, which
482  * keeps the peer valid; the caller must therefore call the corresponding
483  * API - ol_txrx_peer_release_ref - to release the peer reference.
484  * Sample usage:
485  *    {
486  *      //the API call below increments the peer->ref_cnt
487  *      peer = ol_txrx_peer_get_ref_by_addr(pdev, peer_addr, dbg_id);
488  *
489  *      // Once peer usage is done
490  *
491  *      //the API call below decrements the peer->ref_cnt
492  *       ol_txrx_peer_release_ref(peer, dbg_id);
493  *    }
494  *
495  * Return: peer handle if the peer is found, NULL if peer is not found.
496  */
497 ol_txrx_peer_handle ol_txrx_peer_get_ref_by_addr(ol_txrx_pdev_handle pdev,
498 						 u8 *peer_addr,
499 						 enum peer_debug_id_type dbg_id)
500 {
501 	struct ol_txrx_peer_t *peer;
502 
503 	peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_addr, 0, 1,
504 						   dbg_id);
505 	if (!peer)
506 		return NULL;
507 
508 	return peer;
509 }
510 
511 /**
512  * @brief Find a txrx peer handle from a peer's local ID
513  * @param ppdev - the data physical device object
514  * @param local_peer_id - the ID txrx assigned locally to the peer in question
515  * @param dbg_id - debug_id to track caller
516  * @return handle to the txrx peer object
517  * @details
518  *  The control SW typically uses the txrx peer handle to refer to the peer.
519  *  In unusual circumstances, if it is infeasible for the control SW to
520  *  maintain the txrx peer handle but it can maintain a small integer local
521  *  peer ID, this function allows the peer handle to be retrieved, based on
522  *  the local peer ID.
523  *
524  * Note that this function increments the peer->ref_cnt.
525  * This makes sure that the peer will be valid. This also means the caller
526  * needs to call the corresponding API - ol_txrx_peer_release_ref - to
527  * release the reference.
528  *
530  * Sample usage:
531  *    {
532  *      //the API call below increments the peer->ref_cnt
533  *      peer = ol_txrx_peer_get_ref_by_local_id(pdev,local_peer_id, dbg_id);
534  *
535  *      // Once peer usage is done
536  *
537  *      //the API call below decrements the peer->ref_cnt
538  *      ol_txrx_peer_release_ref(peer, dbg_id);
539  *    }
540  *
541  * Return: peer handle if the peer is found, NULL if peer is not found.
542  */
543 ol_txrx_peer_handle
544 ol_txrx_peer_get_ref_by_local_id(struct cdp_pdev *ppdev,
545 			      uint8_t local_peer_id,
546 			      enum peer_debug_id_type dbg_id)
547 {
548 	struct ol_txrx_peer_t *peer = NULL;
549 	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
550 
551 	if ((local_peer_id == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
552 	    (local_peer_id >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
553 		return NULL;
554 	}
555 
556 	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
557 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
558 	peer = pdev->local_peer_ids.map[local_peer_id];
559 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
560 	if (peer && peer->valid)
561 		ol_txrx_peer_get_ref(peer, dbg_id);
562 	else
563 		peer = NULL;
564 	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
565 
566 	return peer;
567 }
568 
569 static void ol_txrx_local_peer_id_pool_init(struct ol_txrx_pdev_t *pdev)
570 {
571 	int i;
572 
573 	/* point the freelist to the first ID */
574 	pdev->local_peer_ids.freelist = 0;
575 
576 	/* link each ID to the next one */
577 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
578 		pdev->local_peer_ids.pool[i] = i + 1;
579 		pdev->local_peer_ids.map[i] = NULL;
580 	}
581 
582 	/* link the last ID to itself, to mark the end of the list */
583 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
584 	pdev->local_peer_ids.pool[i] = i;
585 
586 	qdf_spinlock_create(&pdev->local_peer_ids.lock);
587 }
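
/*
 * Illustration only (not part of the driver logic): with a hypothetical
 * OL_TXRX_NUM_LOCAL_PEER_IDS of 4, the pool/freelist built above looks like
 * this after init:
 *
 *   freelist = 0
 *   pool[]   = { 1, 2, 3, 4, 4 }   // pool[4] == 4 is the end-of-list marker
 *   map[]    = { NULL, NULL, NULL, NULL }
 *
 * ol_txrx_local_peer_id_alloc() below pops the head: the first alloc hands
 * out ID 0 and advances freelist to pool[0] == 1; ol_txrx_local_peer_id_free()
 * pushes a released ID back onto the head of the freelist.
 */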
588 
589 static void
590 ol_txrx_local_peer_id_alloc(struct ol_txrx_pdev_t *pdev,
591 			    struct ol_txrx_peer_t *peer)
592 {
593 	int i;
594 
595 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
596 	i = pdev->local_peer_ids.freelist;
597 	if (pdev->local_peer_ids.pool[i] == i) {
598 		/* the list is empty, except for the list-end marker */
599 		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
600 	} else {
601 		/* take the head ID and advance the freelist */
602 		peer->local_id = i;
603 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
604 		pdev->local_peer_ids.map[i] = peer;
605 	}
606 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
607 }
608 
609 static void
610 ol_txrx_local_peer_id_free(struct ol_txrx_pdev_t *pdev,
611 			   struct ol_txrx_peer_t *peer)
612 {
613 	int i = peer->local_id;
614 
615 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
616 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
617 		return;
618 	}
619 	/* put this ID on the head of the freelist */
620 	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
621 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
622 	pdev->local_peer_ids.freelist = i;
623 	pdev->local_peer_ids.map[i] = NULL;
624 	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
625 }
626 
627 static void ol_txrx_local_peer_id_cleanup(struct ol_txrx_pdev_t *pdev)
628 {
629 	qdf_spinlock_destroy(&pdev->local_peer_ids.lock);
630 }
631 
632 #else
633 #define ol_txrx_local_peer_id_pool_init(pdev)   /* no-op */
634 #define ol_txrx_local_peer_id_alloc(pdev, peer) /* no-op */
635 #define ol_txrx_local_peer_id_free(pdev, peer)  /* no-op */
636 #define ol_txrx_local_peer_id_cleanup(pdev)     /* no-op */
637 #endif
638 
639 #if defined(CONFIG_DP_TRACE) && defined(WLAN_DEBUGFS)
640 /**
641  * ol_txrx_read_dpt_buff_debugfs() - read dp trace buffer
642  * @file: file to read
643  * @arg: pdev object
644  *
645  * Return: QDF_STATUS
646  */
647 static QDF_STATUS ol_txrx_read_dpt_buff_debugfs(qdf_debugfs_file_t file,
648 						void *arg)
649 {
650 	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)arg;
651 	uint32_t i = 0;
652 	QDF_STATUS status = QDF_STATUS_SUCCESS;
653 
654 	if (pdev->state == QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID)
655 		return QDF_STATUS_E_INVAL;
656 	else if (pdev->state == QDF_DPT_DEBUGFS_STATE_SHOW_COMPLETE) {
657 		pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INIT;
658 		return QDF_STATUS_SUCCESS;
659 	}
660 
661 	i = qdf_dpt_get_curr_pos_debugfs(file, pdev->state);
662 	status =  qdf_dpt_dump_stats_debugfs(file, i);
663 	if (status == QDF_STATUS_E_FAILURE)
664 		pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_IN_PROGRESS;
665 	else if (status == QDF_STATUS_SUCCESS)
666 		pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_COMPLETE;
667 
668 	return status;
669 }
670 
671 /**
672  * ol_txrx_conv_str_to_int_debugfs() - convert string to int
673  * @buf: buffer containing string
674  * @len: buffer len
675  * @proto_bitmap: defines the protocol to be tracked
676  * @nr_records: defines the nth packet which is traced
677  * @verbosity: defines the verbosity level
678  * @num_records_to_dump: defines the number of records to be dumped
679  *
680  * This function expects the char buffer to be null terminated; otherwise
 * the results may be unexpected.
681  *
682  * Return: 0 on success
683  */
684 static int ol_txrx_conv_str_to_int_debugfs(char *buf, qdf_size_t len,
685 					   int *proto_bitmap,
686 					   int *nr_records,
687 					   int *verbosity,
688 					   int *num_records_to_dump)
689 {
690 	int num_value = DPT_SET_PARAM_PROTO_BITMAP;
691 	int ret, param_value = 0;
692 	char *buf_param = buf;
693 	int i;
694 
695 	for (i = 1; i < DPT_SET_PARAM_MAX; i++) {
696 		/* Walk forward until a space is found, since qdf_kstrtoint()
697 		 * parses up to the null character. Replace the space with a
698 		 * null character so each value can be parsed separately.
699 		 * Terminate the loop either at the null character or when
700 		 * len reaches 0.
701 		 */
702 		while (*buf && len) {
703 			if (*buf == ' ') {
704 				*buf = '\0';
705 				buf++;
706 				len--;
707 				break;
708 			}
709 			buf++;
710 			len--;
711 		}
712 		/* get the parameter */
713 		ret = qdf_kstrtoint(buf_param,
714 				    DPT_DEBUGFS_NUMBER_BASE,
715 				    &param_value);
716 		if (ret) {
717 			QDF_TRACE(QDF_MODULE_ID_TXRX,
718 				  QDF_TRACE_LEVEL_ERROR,
719 				  "%s: Error while parsing buffer. ret %d",
720 				  __func__, ret);
721 			return ret;
722 		}
723 		switch (num_value) {
724 		case DPT_SET_PARAM_PROTO_BITMAP:
725 			*proto_bitmap = param_value;
726 			break;
727 		case DPT_SET_PARAM_NR_RECORDS:
728 			*nr_records = param_value;
729 			break;
730 		case DPT_SET_PARAM_VERBOSITY:
731 			*verbosity = param_value;
732 			break;
733 		case DPT_SET_PARAM_NUM_RECORDS_TO_DUMP:
734 			if (param_value > MAX_QDF_DP_TRACE_RECORDS)
735 				param_value = MAX_QDF_DP_TRACE_RECORDS;
736 			*num_records_to_dump = param_value;
737 			break;
738 		default:
739 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
740 				  "%s %d: :Set command needs exactly 4 arguments in format <proto_bitmap> <number of record> <Verbosity> <number of records to dump>.",
741 				__func__, __LINE__);
742 			break;
743 		}
744 		num_value++;
745 		/* buf_param should now point to the next param value. */
746 		buf_param = buf;
747 	}
748 
749 	/* *buf not yet at the terminating null implies more than 4 params were passed. */
750 	if (*buf) {
751 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
752 			  "%s %d: :Set command needs exactly 4 arguments in format <proto_bitmap> <number of record> <Verbosity> <number of records to dump>.",
753 			__func__, __LINE__);
754 		return -EINVAL;
755 	}
756 	return 0;
757 }
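
/*
 * Example of the expected input format (illustrative values only): a write of
 * "4 3 2 100" to the debugfs file created below as "dump_set_dpt_logs" is
 * split at spaces by the parser above and yields proto_bitmap = 4,
 * nr_records = 3, verbosity = 2 and num_records_to_dump = 100, i.e. exactly
 * the four values in the order defined by enum dpt_set_param_debugfs.
 */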
758 
759 /**
760  * ol_txrx_write_dpt_buff_debugfs() - set dp trace parameters
761  * @priv: pdev object
762  * @buf: buff to get value for dpt parameters
763  * @len: buf length
764  *
765  * Return: QDF_STATUS
766  */
767 static QDF_STATUS ol_txrx_write_dpt_buff_debugfs(void *priv,
768 					      const char *buf,
769 					      qdf_size_t len)
770 {
771 	int ret;
772 	int proto_bitmap = 0;
773 	int nr_records = 0;
774 	int verbosity = 0;
775 	int num_records_to_dump = 0;
776 	char *buf1 = NULL;
777 
778 	if (!buf || !len) {
779 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
780 			  "%s: null buffer or len. len %u",
781 				__func__, (uint8_t)len);
782 		return QDF_STATUS_E_FAULT;
783 	}
784 
785 	buf1 = (char *)qdf_mem_malloc(len);
786 	if (!buf1)
787 		return QDF_STATUS_E_FAULT;
788 
789 	qdf_mem_copy(buf1, buf, len);
790 	ret = ol_txrx_conv_str_to_int_debugfs(buf1, len, &proto_bitmap,
791 					      &nr_records, &verbosity,
792 					      &num_records_to_dump);
793 	if (ret) {
794 		qdf_mem_free(buf1);
795 		return QDF_STATUS_E_INVAL;
796 	}
797 
798 	qdf_dpt_set_value_debugfs(proto_bitmap, nr_records, verbosity,
799 				  num_records_to_dump);
800 	qdf_mem_free(buf1);
801 	return QDF_STATUS_SUCCESS;
802 }
803 
804 static int ol_txrx_debugfs_init(struct ol_txrx_pdev_t *pdev)
805 {
806 	pdev->dpt_debugfs_fops.show = ol_txrx_read_dpt_buff_debugfs;
807 	pdev->dpt_debugfs_fops.write = ol_txrx_write_dpt_buff_debugfs;
808 	pdev->dpt_debugfs_fops.priv = pdev;
809 
810 	pdev->dpt_stats_log_dir = qdf_debugfs_create_dir("dpt_stats", NULL);
811 
812 	if (!pdev->dpt_stats_log_dir) {
813 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
814 				"%s: error while creating debugfs dir for %s",
815 				__func__, "dpt_stats");
816 		pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID;
817 		return -EBUSY;
818 	}
819 
820 	if (!qdf_debugfs_create_file("dump_set_dpt_logs", DPT_DEBUGFS_PERMS,
821 				     pdev->dpt_stats_log_dir,
822 				     &pdev->dpt_debugfs_fops)) {
823 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
824 				"%s: debug Entry creation failed!",
825 				__func__);
826 		pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID;
827 		return -EBUSY;
828 	}
829 
830 	pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INIT;
831 	return 0;
832 }
833 
834 static void ol_txrx_debugfs_exit(ol_txrx_pdev_handle pdev)
835 {
836 	qdf_debugfs_remove_dir_recursive(pdev->dpt_stats_log_dir);
837 }
838 #else
839 static inline int ol_txrx_debugfs_init(struct ol_txrx_pdev_t *pdev)
840 {
841 	return 0;
842 }
843 
844 static inline void ol_txrx_debugfs_exit(ol_txrx_pdev_handle pdev)
845 {
846 }
847 #endif
848 
849 /**
850  * ol_txrx_pdev_attach() - allocate txrx pdev
851  * @soc: datapath soc handle
852  * @params: pdev attach params (HTC handle, qdf device, pdev id)
855  *
856  * Return: QDF_STATUS_SUCCESS on success
857  *		QDF error code for failure
858  */
859 static QDF_STATUS
860 ol_txrx_pdev_attach(ol_txrx_soc_handle soc,
861 		    struct cdp_pdev_attach_params *params)
862 {
863 	struct ol_txrx_soc_t *ol_soc = cdp_soc_t_to_ol_txrx_soc_t(soc);
864 	struct ol_txrx_pdev_t *pdev;
865 	struct cdp_cfg *cfg_pdev = cds_get_context(QDF_MODULE_ID_CFG);
866 	QDF_STATUS status;
867 	int i, tid;
868 
869 	if (params->pdev_id == OL_TXRX_INVALID_PDEV_ID)
870 		return QDF_STATUS_E_INVAL;
871 
872 	pdev = qdf_mem_malloc(sizeof(*pdev));
873 	if (!pdev) {
874 		status = QDF_STATUS_E_NOMEM;
875 		goto fail0;
876 	}
877 
878 	/* init LL/HL cfg here */
879 	pdev->cfg.is_high_latency = ol_cfg_is_high_latency(cfg_pdev);
880 	/*
881 	 * Credit reporting through HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND
882 	 * enabled or not.
883 	 */
884 	pdev->cfg.credit_update_enabled =
885 		ol_cfg_is_credit_update_enabled(cfg_pdev);
886 
887 	/* Explicitly request TX Completions from FW */
888 	pdev->cfg.request_tx_comp = cds_is_ptp_rx_opt_enabled() ||
889 		cds_is_packet_log_enabled();
890 
891 	pdev->cfg.default_tx_comp_req = !ol_cfg_tx_free_at_download(cfg_pdev);
892 
893 	/* store provided params */
894 	pdev->ctrl_pdev = cfg_pdev;
895 	pdev->osdev = params->qdf_osdev;
896 	pdev->id = params->pdev_id;
897 	pdev->soc = ol_soc;
898 	ol_soc->pdev_list[params->pdev_id] = pdev;
899 
900 	for (i = 0; i < htt_num_sec_types; i++)
901 		pdev->sec_types[i] = (enum ol_sec_type)i;
902 
903 	TXRX_STATS_INIT(pdev);
904 	ol_txrx_tso_stats_init(pdev);
905 	ol_txrx_fw_stats_desc_pool_init(pdev, FW_STATS_DESC_POOL_SIZE);
906 
907 	TAILQ_INIT(&pdev->vdev_list);
908 
909 	TAILQ_INIT(&pdev->inactive_peer_list);
910 
911 	TAILQ_INIT(&pdev->req_list);
912 	pdev->req_list_depth = 0;
913 	qdf_spinlock_create(&pdev->req_list_spinlock);
914 	qdf_spinlock_create(&pdev->tx_mutex);
915 
916 	/* do initial set up of the peer ID -> peer object lookup map */
917 	if (ol_txrx_peer_find_attach(pdev)) {
918 		status = QDF_STATUS_E_FAILURE;
919 		goto fail1;
920 	}
921 
922 	/* initialize the counter of the target's tx buffer availability */
923 	qdf_atomic_init(&pdev->target_tx_credit);
924 	qdf_atomic_init(&pdev->orig_target_tx_credit);
925 	qdf_atomic_init(&pdev->pad_reserve_tx_credit);
926 	qdf_atomic_add(1, &pdev->pad_reserve_tx_credit);
927 
928 	if (ol_cfg_is_high_latency(cfg_pdev)) {
929 		qdf_spinlock_create(&pdev->tx_queue_spinlock);
930 		pdev->tx_sched.scheduler = ol_tx_sched_attach(pdev);
931 		if (!pdev->tx_sched.scheduler) {
932 			status = QDF_STATUS_E_FAILURE;
933 			goto fail2;
934 		}
935 	}
936 	ol_txrx_pdev_txq_log_init(pdev);
937 	ol_txrx_pdev_grp_stats_init(pdev);
938 
939 	pdev->htt_pdev =
940 		htt_pdev_alloc(pdev, cfg_pdev,
941 			       params->htc_handle, params->qdf_osdev);
942 	if (!pdev->htt_pdev) {
943 		status = QDF_STATUS_E_FAILURE;
944 		goto fail3;
945 	}
946 
947 	htt_register_rx_pkt_dump_callback(pdev->htt_pdev,
948 			ol_rx_pkt_dump_call);
949 
950 	/*
951 	 * Init the tid --> category table.
952 	 * Regular tids (0-15) map to their AC.
953 	 * Extension tids get their own categories.
954 	 */
955 	for (tid = 0; tid < OL_TX_NUM_QOS_TIDS; tid++) {
956 		int ac = TXRX_TID_TO_WMM_AC(tid);
957 
958 		pdev->tid_to_ac[tid] = ac;
959 	}
960 	pdev->tid_to_ac[OL_TX_NON_QOS_TID] =
961 		OL_TX_SCHED_WRR_ADV_CAT_NON_QOS_DATA;
962 	pdev->tid_to_ac[OL_TX_MGMT_TID] =
963 		OL_TX_SCHED_WRR_ADV_CAT_UCAST_MGMT;
964 	pdev->tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_MCAST_BCAST] =
965 		OL_TX_SCHED_WRR_ADV_CAT_MCAST_DATA;
966 	pdev->tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_DEFAULT_MGMT] =
967 		OL_TX_SCHED_WRR_ADV_CAT_MCAST_MGMT;
968 
969 	if (ol_cfg_is_flow_steering_enabled(pdev->ctrl_pdev))
970 		pdev->peer_id_unmap_ref_cnt =
971 			TXRX_RFS_ENABLE_PEER_ID_UNMAP_COUNT;
972 	else
973 		pdev->peer_id_unmap_ref_cnt =
974 			TXRX_RFS_DISABLE_PEER_ID_UNMAP_COUNT;
975 
976 	if (cds_get_conparam() == QDF_GLOBAL_MONITOR_MODE)
977 		pdev->chan_noise_floor = NORMALIZED_TO_NOISE_FLOOR;
978 
979 	ol_txrx_debugfs_init(pdev);
980 
981 	return QDF_STATUS_SUCCESS;
982 
983 fail3:
984 	ol_txrx_peer_find_detach(pdev);
985 
986 fail2:
987 	if (ol_cfg_is_high_latency(cfg_pdev))
988 		qdf_spinlock_destroy(&pdev->tx_queue_spinlock);
989 
990 fail1:
991 	qdf_spinlock_destroy(&pdev->req_list_spinlock);
992 	qdf_spinlock_destroy(&pdev->tx_mutex);
993 	ol_txrx_tso_stats_deinit(pdev);
994 	ol_txrx_fw_stats_desc_pool_deinit(pdev);
995 	qdf_mem_free(pdev);
996 
997 fail0:
998 	return status;
999 }
1000 
1001 #if !defined(REMOVE_PKT_LOG) && !defined(QVIT)
1002 /**
1003  * htt_pkt_log_init() - API to initialize packet log
1004  * @soc_hdl: Datapath soc handle
1005  * @pdev_id: id of data path pdev handle
1006  * @scn: HIF context
1007  *
1008  * Return: void
1009  */
1010 void htt_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn)
1011 {
1012 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1013 	ol_txrx_pdev_handle handle =
1014 				ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
1015 
	if (!handle) {
		ol_txrx_err("pdev handle is NULL");
		return;
	}

1016 	if (handle->pkt_log_init) {
1017 		ol_txrx_err("pktlog already initialized");
1018 		return;
1019 	}
1020 
1021 	if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE &&
1022 			!QDF_IS_EPPING_ENABLED(cds_get_conparam())) {
1023 		pktlog_sethandle(&handle->pl_dev, scn);
1024 		pktlog_set_pdev_id(handle->pl_dev, pdev_id);
1025 		pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);
1026 		if (pktlogmod_init(scn))
1027 			qdf_print(" pktlogmod_init failed");
1028 		else
1029 			handle->pkt_log_init = true;
1030 	} else {
1031 		ol_txrx_err("Invalid conn mode: %d", cds_get_conparam());
1032 	}
1033 }
1034 
1035 /**
1036  * htt_pktlogmod_exit() - API to cleanup pktlog info
1037  * @handle: Pdev handle
1039  *
1040  * Return: void
1041  */
1042 static void htt_pktlogmod_exit(struct ol_txrx_pdev_t *handle)
1043 {
1044 	if (!handle->pkt_log_init) {
1045 		ol_txrx_err("pktlog is not initialized");
1046 		return;
1047 	}
1048 
1049 	if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE &&
1050 		!QDF_IS_EPPING_ENABLED(cds_get_conparam())) {
1051 		pktlogmod_exit(handle);
1052 		handle->pkt_log_init = false;
1053 	} else {
1054 		ol_txrx_err("Invalid conn mode: %d", cds_get_conparam());
1055 	}
1056 }
1057 
1058 #else
1059 void htt_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev, void *scn) { }
1060 static void htt_pktlogmod_exit(ol_txrx_pdev_handle handle)  { }
1061 #endif
1062 
1063 #ifdef QCA_LL_PDEV_TX_FLOW_CONTROL
1064 /**
1065  * ol_txrx_pdev_set_threshold() - set pdev pool stop/start threshold
1066  * @pdev: txrx pdev
1067  *
1068  * Return: void
1069  */
1070 static void ol_txrx_pdev_set_threshold(struct ol_txrx_pdev_t *pdev)
1071 {
1072 	uint32_t stop_threshold;
1073 	uint32_t start_threshold;
1074 	uint16_t desc_pool_size = pdev->tx_desc.pool_size;
1075 
1076 	stop_threshold = ol_cfg_get_tx_flow_stop_queue_th(pdev->ctrl_pdev);
1077 	start_threshold = stop_threshold +
1078 		ol_cfg_get_tx_flow_start_queue_offset(pdev->ctrl_pdev);
1079 	pdev->tx_desc.start_th = (start_threshold * desc_pool_size) / 100;
1080 	pdev->tx_desc.stop_th = (stop_threshold * desc_pool_size) / 100;
1081 	pdev->tx_desc.stop_priority_th =
1082 		(TX_PRIORITY_TH * pdev->tx_desc.stop_th) / 100;
1083 	if (pdev->tx_desc.stop_priority_th >= MAX_TSO_SEGMENT_DESC)
1084 		pdev->tx_desc.stop_priority_th -= MAX_TSO_SEGMENT_DESC;
1085 
1086 	pdev->tx_desc.start_priority_th =
1087 		(TX_PRIORITY_TH * pdev->tx_desc.start_th) / 100;
1088 	if (pdev->tx_desc.start_priority_th >= MAX_TSO_SEGMENT_DESC)
1089 		pdev->tx_desc.start_priority_th -= MAX_TSO_SEGMENT_DESC;
1090 	pdev->tx_desc.status = FLOW_POOL_ACTIVE_UNPAUSED;
1091 }
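
/*
 * Worked example with hypothetical numbers (the actual percentages come from
 * the INI-driven ol_cfg_* helpers above): with desc_pool_size = 1024, a stop
 * threshold of 15% and a start offset of 10%, the code computes
 * stop_th = (15 * 1024) / 100 = 153 descriptors and
 * start_th = ((15 + 10) * 1024) / 100 = 256 descriptors, so the flow pool
 * pauses and resumes around those two watermarks. The *_priority_th values
 * are a TX_PRIORITY_TH percentage of those thresholds, leaving headroom for
 * high-priority frames (adjusted by MAX_TSO_SEGMENT_DESC for TSO).
 */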
1092 #else
1093 static inline void ol_txrx_pdev_set_threshold(struct ol_txrx_pdev_t *pdev)
1094 {
1095 }
1096 #endif
1097 
1098 /**
1099  * ol_txrx_pdev_post_attach() - attach txrx pdev
1100  * @soc_hdl: datapath soc handle
1101  * @pdev_id: physical device instance id
1102  *
1103  * Return: 0 for success
1104  */
1105 int
1106 ol_txrx_pdev_post_attach(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
1107 {
1108 	uint16_t i;
1109 	uint16_t fail_idx = 0;
1110 	int ret = 0;
1111 	uint16_t desc_pool_size;
1112 	struct hif_opaque_softc *osc =  cds_get_context(QDF_MODULE_ID_HIF);
1113 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1114 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
1115 
1116 	uint16_t desc_element_size = sizeof(union ol_tx_desc_list_elem_t);
1117 	union ol_tx_desc_list_elem_t *c_element;
1118 	unsigned int sig_bit;
1119 	uint16_t desc_per_page;
1120 
1121 	if (!osc || !pdev) {
1122 		ret = -EINVAL;
1123 		goto ol_attach_fail;
1124 	}
1125 
1126 	/*
1127 	 * For LL, limit the number of host's tx descriptors to match
1128 	 * the number of target FW tx descriptors.
1129 	 * This simplifies the FW, by ensuring the host will never
1130 	 * download more tx descriptors than the target has space for.
1131 	 * The FW will drop/free low-priority tx descriptors when it
1132 	 * starts to run low, so that in theory the host should never
1133 	 * run out of tx descriptors.
1134 	 */
1135 
1136 	/*
1137 	 * LL - initialize the target credit ourselves.
1138 	 * HL - wait for a HTT target credit initialization
1139 	 * during htt_attach.
1140 	 */
1141 	desc_pool_size = ol_tx_get_desc_global_pool_size(pdev);
1142 	ol_tx_init_pdev(pdev);
1143 
1144 	ol_tx_desc_dup_detect_init(pdev, desc_pool_size);
1145 
1146 	ol_tx_setup_fastpath_ce_handles(osc, pdev);
1147 
1148 	if ((ol_txrx_get_new_htt_msg_format(pdev)))
1149 		ol_set_cfg_new_htt_format(pdev->ctrl_pdev, true);
1150 	else
1151 		ol_set_cfg_new_htt_format(pdev->ctrl_pdev, false);
1152 
1153 	ret = htt_attach(pdev->htt_pdev, desc_pool_size);
1154 	if (ret)
1155 		goto htt_attach_fail;
1156 
1157 	/* Attach micro controller data path offload resource */
1158 	if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev)) {
1159 		ret = htt_ipa_uc_attach(pdev->htt_pdev);
1160 		if (ret)
1161 			goto uc_attach_fail;
1162 	}
1163 
1164 	/* Calculate single element reserved size power of 2 */
1165 	pdev->tx_desc.desc_reserved_size = qdf_get_pwr2(desc_element_size);
1166 	qdf_mem_multi_pages_alloc(pdev->osdev, &pdev->tx_desc.desc_pages,
1167 		pdev->tx_desc.desc_reserved_size, desc_pool_size, 0, true);
1168 	if ((0 == pdev->tx_desc.desc_pages.num_pages) ||
1169 		(!pdev->tx_desc.desc_pages.cacheable_pages)) {
1170 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1171 			"Page alloc fail");
1172 		ret = -ENOMEM;
1173 		goto page_alloc_fail;
1174 	}
1175 	desc_per_page = pdev->tx_desc.desc_pages.num_element_per_page;
1176 	pdev->tx_desc.offset_filter = desc_per_page - 1;
1177 	/* Calculate page divider to find page number */
1178 	sig_bit = 0;
1179 	while (desc_per_page) {
1180 		sig_bit++;
1181 		desc_per_page = desc_per_page >> 1;
1182 	}
1183 	pdev->tx_desc.page_divider = (sig_bit - 1);
1184 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1185 		"page_divider 0x%x, offset_filter 0x%x num elem %d, ol desc num page %d, ol desc per page %d",
1186 		pdev->tx_desc.page_divider, pdev->tx_desc.offset_filter,
1187 		desc_pool_size, pdev->tx_desc.desc_pages.num_pages,
1188 		pdev->tx_desc.desc_pages.num_element_per_page);
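
	/*
	 * Illustration with a hypothetical page size: if num_element_per_page
	 * were 256, the loop above runs 9 times, so page_divider = 8 and
	 * offset_filter = 0xff. A descriptor index i can then be resolved
	 * without a divide as page = i >> page_divider and
	 * offset within the page = i & offset_filter, which is how the
	 * descriptor lookup (ol_tx_desc_find()) locates a descriptor.
	 */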
1189 
1190 	/*
1191 	 * Each SW tx desc (used only within the tx datapath SW) has a
1192 	 * matching HTT tx desc (used for downloading tx meta-data to FW/HW).
1193 	 * Go ahead and allocate the HTT tx desc and link it with the SW tx
1194 	 * desc now, to avoid doing it during time-critical transmit.
1195 	 */
1196 	pdev->tx_desc.pool_size = desc_pool_size;
1197 	pdev->tx_desc.freelist =
1198 		(union ol_tx_desc_list_elem_t *)
1199 		(*pdev->tx_desc.desc_pages.cacheable_pages);
1200 	c_element = pdev->tx_desc.freelist;
1201 	for (i = 0; i < desc_pool_size; i++) {
1202 		void *htt_tx_desc;
1203 		void *htt_frag_desc = NULL;
1204 		qdf_dma_addr_t frag_paddr = 0;
1205 		qdf_dma_addr_t paddr;
1206 
1207 		if (i == (desc_pool_size - 1))
1208 			c_element->next = NULL;
1209 		else
1210 			c_element->next = (union ol_tx_desc_list_elem_t *)
1211 				ol_tx_desc_find(pdev, i + 1);
1212 
1213 		htt_tx_desc = htt_tx_desc_alloc(pdev->htt_pdev, &paddr, i);
1214 		if (!htt_tx_desc) {
1215 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
1216 				  "%s: failed to alloc HTT tx desc (%d of %d)",
1217 				__func__, i, desc_pool_size);
1218 			fail_idx = i;
1219 			ret = -ENOMEM;
1220 			goto desc_alloc_fail;
1221 		}
1222 
1223 		c_element->tx_desc.htt_tx_desc = htt_tx_desc;
1224 		c_element->tx_desc.htt_tx_desc_paddr = paddr;
1225 		ret = htt_tx_frag_alloc(pdev->htt_pdev,
1226 					i, &frag_paddr, &htt_frag_desc);
1227 		if (ret) {
1228 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1229 				"%s: failed to alloc HTT frag dsc (%d/%d)",
1230 				__func__, i, desc_pool_size);
1231 			/* Is there a leak here, is this handling correct? */
1232 			fail_idx = i;
1233 			goto desc_alloc_fail;
1234 		}
1235 		if (!ret && htt_frag_desc) {
1236 			/*
1237 			 * Initialize the first 6 words (TSO flags)
1238 			 * of the frag descriptor
1239 			 */
1240 			memset(htt_frag_desc, 0, 6 * sizeof(uint32_t));
1241 			c_element->tx_desc.htt_frag_desc = htt_frag_desc;
1242 			c_element->tx_desc.htt_frag_desc_paddr = frag_paddr;
1243 		}
1244 #ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
1245 		c_element->tx_desc.pkt_type = 0xff;
1246 #ifdef QCA_COMPUTE_TX_DELAY
1247 		c_element->tx_desc.entry_timestamp_ticks =
1248 			0xffffffff;
1249 #endif
1250 #endif
1251 		c_element->tx_desc.id = i;
1252 		qdf_atomic_init(&c_element->tx_desc.ref_cnt);
1253 		c_element = c_element->next;
1254 		fail_idx = i;
1255 	}
1256 
1257 	/* link SW tx descs into a freelist */
1258 	pdev->tx_desc.num_free = desc_pool_size;
1259 	ol_txrx_dbg("first tx_desc:0x%pK Last tx desc:0x%pK",
1260 		    (uint32_t *)pdev->tx_desc.freelist,
1261 		    (uint32_t *)(pdev->tx_desc.freelist + desc_pool_size));
1262 
1263 	ol_txrx_pdev_set_threshold(pdev);
1264 
1265 	/* check what format of frames are expected to be delivered by the OS */
1266 	pdev->frame_format = ol_cfg_frame_type(pdev->ctrl_pdev);
1267 	if (pdev->frame_format == wlan_frm_fmt_native_wifi)
1268 		pdev->htt_pkt_type = htt_pkt_type_native_wifi;
1269 	else if (pdev->frame_format == wlan_frm_fmt_802_3) {
1270 		if (ol_cfg_is_ce_classify_enabled(pdev->ctrl_pdev))
1271 			pdev->htt_pkt_type = htt_pkt_type_eth2;
1272 		else
1273 			pdev->htt_pkt_type = htt_pkt_type_ethernet;
1274 	} else {
1275 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1276 			  "%s Invalid standard frame type: %d",
1277 			  __func__, pdev->frame_format);
1278 		ret = -EINVAL;
1279 		goto control_init_fail;
1280 	}
1281 
1282 	/* setup the global rx defrag waitlist */
1283 	TAILQ_INIT(&pdev->rx.defrag.waitlist);
1284 
1285 	/* configure where defrag timeout and duplicate detection is handled */
1286 	pdev->rx.flags.defrag_timeout_check =
1287 		pdev->rx.flags.dup_check =
1288 		ol_cfg_rx_host_defrag_timeout_duplicate_check(pdev->ctrl_pdev);
1289 
1290 #ifdef QCA_SUPPORT_SW_TXRX_ENCAP
1291 	/* Need to revisit this part. Currently, hardcoded to Riva's caps */
1292 	pdev->target_tx_tran_caps = wlan_frm_tran_cap_raw;
1293 	pdev->target_rx_tran_caps = wlan_frm_tran_cap_raw;
1294 	/*
1295 	 * The Riva HW de-aggregate doesn't have capability to generate 802.11
1296 	 * header for non-first subframe of A-MSDU.
1297 	 */
1298 	pdev->sw_subfrm_hdr_recovery_enable = 1;
1299 	/*
1300 	 * The Riva HW doesn't have the capability to set Protected Frame bit
1301 	 * in the MAC header for encrypted data frame.
1302 	 */
1303 	pdev->sw_pf_proc_enable = 1;
1304 
1305 	if (pdev->frame_format == wlan_frm_fmt_802_3) {
1306 		/*
1307 		 * sw llc process is only needed in
1308 		 * 802.3 to 802.11 transform case
1309 		 */
1310 		pdev->sw_tx_llc_proc_enable = 1;
1311 		pdev->sw_rx_llc_proc_enable = 1;
1312 	} else {
1313 		pdev->sw_tx_llc_proc_enable = 0;
1314 		pdev->sw_rx_llc_proc_enable = 0;
1315 	}
1316 
1317 	switch (pdev->frame_format) {
1318 	case wlan_frm_fmt_raw:
1319 		pdev->sw_tx_encap =
1320 			pdev->target_tx_tran_caps & wlan_frm_tran_cap_raw
1321 			? 0 : 1;
1322 		pdev->sw_rx_decap =
1323 			pdev->target_rx_tran_caps & wlan_frm_tran_cap_raw
1324 			? 0 : 1;
1325 		break;
1326 	case wlan_frm_fmt_native_wifi:
1327 		pdev->sw_tx_encap =
1328 			pdev->
1329 			target_tx_tran_caps & wlan_frm_tran_cap_native_wifi
1330 			? 0 : 1;
1331 		pdev->sw_rx_decap =
1332 			pdev->
1333 			target_rx_tran_caps & wlan_frm_tran_cap_native_wifi
1334 			? 0 : 1;
1335 		break;
1336 	case wlan_frm_fmt_802_3:
1337 		pdev->sw_tx_encap =
1338 			pdev->target_tx_tran_caps & wlan_frm_tran_cap_8023
1339 			? 0 : 1;
1340 		pdev->sw_rx_decap =
1341 			pdev->target_rx_tran_caps & wlan_frm_tran_cap_8023
1342 			? 0 : 1;
1343 		break;
1344 	default:
1345 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1346 			  "Invalid std frame type; [en/de]cap: f:%x t:%x r:%x",
1347 			  pdev->frame_format,
1348 			  pdev->target_tx_tran_caps, pdev->target_rx_tran_caps);
1349 		ret = -EINVAL;
1350 		goto control_init_fail;
1351 	}
1352 #endif
1353 
1354 	/*
1355 	 * Determine what rx processing steps are done within the host.
1356 	 * Possibilities:
1357 	 * 1.  Nothing - rx->tx forwarding and rx PN entirely within target.
1358 	 *     (This is unlikely; even if the target is doing rx->tx forwarding,
1359 	 *     the host should be doing rx->tx forwarding too, as a back up for
1360 	 *     the target's rx->tx forwarding, in case the target runs short on
1361 	 *     memory, and can't store rx->tx frames that are waiting for
1362 	 *     missing prior rx frames to arrive.)
1363 	 * 2.  Just rx -> tx forwarding.
1364 	 *     This is the typical configuration for HL, and a likely
1365 	 *     configuration for LL STA or small APs (e.g. retail APs).
1366 	 * 3.  Both PN check and rx -> tx forwarding.
1367 	 *     This is the typical configuration for large LL APs.
1368 	 * Host-side PN check without rx->tx forwarding is not a valid
1369 	 * configuration, since the PN check needs to be done prior to
1370 	 * the rx->tx forwarding.
1371 	 */
1372 	if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) {
1373 		/*
1374 		 * PN check, rx-tx forwarding and rx reorder is done by
1375 		 * the target
1376 		 */
1377 		if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev))
1378 			pdev->rx_opt_proc = ol_rx_in_order_deliver;
1379 		else
1380 			pdev->rx_opt_proc = ol_rx_fwd_check;
1381 	} else {
1382 		if (ol_cfg_rx_pn_check(pdev->ctrl_pdev)) {
1383 			if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) {
1384 				/*
1385 				 * PN check done on host,
1386 				 * rx->tx forwarding not done at all.
1387 				 */
1388 				pdev->rx_opt_proc = ol_rx_pn_check_only;
1389 			} else if (ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
1390 				/*
1391 				 * Both PN check and rx->tx forwarding done
1392 				 * on host.
1393 				 */
1394 				pdev->rx_opt_proc = ol_rx_pn_check;
1395 			} else {
1396 #define TRACESTR01 "invalid config: if rx PN check is on the host,"\
1397 "rx->tx forwarding check needs to also be on the host"
1398 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1399 					  QDF_TRACE_LEVEL_ERROR,
1400 					  "%s: %s", __func__, TRACESTR01);
1401 #undef TRACESTR01
1402 				ret = -EINVAL;
1403 				goto control_init_fail;
1404 			}
1405 		} else {
1406 			/* PN check done on target */
1407 			if ((!ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) &&
1408 			    ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
1409 				/*
1410 				 * rx->tx forwarding done on host (possibly as
1411 				 * back-up for target-side primary rx->tx
1412 				 * forwarding)
1413 				 */
1414 				pdev->rx_opt_proc = ol_rx_fwd_check;
1415 			} else {
1416 				/*
1417 				 * rx->tx forwarding either done in target,
1418 				 * or not done at all
1419 				 */
1420 				pdev->rx_opt_proc = ol_rx_deliver;
1421 			}
1422 		}
1423 	}
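
	/*
	 * Summary of the rx_opt_proc selection above (derived directly from
	 * the branches):
	 *
	 *   full reorder offload + rx fwd disabled  -> ol_rx_in_order_deliver
	 *   full reorder offload + rx fwd enabled   -> ol_rx_fwd_check
	 *   host PN check + rx fwd disabled         -> ol_rx_pn_check_only
	 *   host PN check + host rx fwd check       -> ol_rx_pn_check
	 *   target PN check + host rx fwd check     -> ol_rx_fwd_check
	 *   everything else                         -> ol_rx_deliver
	 */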
1424 
1425 	/* initialize mutexes for tx desc alloc and peer lookup */
1426 	qdf_spinlock_create(&pdev->peer_ref_mutex);
1427 	qdf_spinlock_create(&pdev->rx.mutex);
1428 	qdf_spinlock_create(&pdev->last_real_peer_mutex);
1429 	qdf_spinlock_create(&pdev->peer_map_unmap_lock);
1430 	OL_TXRX_PEER_STATS_MUTEX_INIT(pdev);
1431 
1432 	if (OL_RX_REORDER_TRACE_ATTACH(pdev) != A_OK) {
1433 		ret = -ENOMEM;
1434 		goto reorder_trace_attach_fail;
1435 	}
1436 
1437 	if (OL_RX_PN_TRACE_ATTACH(pdev) != A_OK) {
1438 		ret = -ENOMEM;
1439 		goto pn_trace_attach_fail;
1440 	}
1441 
1442 	/*
1443 	 * WDI event attach
1444 	 */
1445 	wdi_event_attach(pdev);
1446 
1447 	/*
1448 	 * Initialize rx PN check characteristics for different security types.
1449 	 */
1450 	qdf_mem_zero(&pdev->rx_pn[0], sizeof(pdev->rx_pn));
1451 
1452 	/* TKIP: 48-bit TSC, CCMP: 48-bit PN */
1453 	pdev->rx_pn[htt_sec_type_tkip].len =
1454 		pdev->rx_pn[htt_sec_type_tkip_nomic].len =
1455 			pdev->rx_pn[htt_sec_type_aes_ccmp].len = 48;
1456 
1457 	pdev->rx_pn[htt_sec_type_aes_ccmp_256].len =
1458 		pdev->rx_pn[htt_sec_type_aes_gcmp].len =
1459 			pdev->rx_pn[htt_sec_type_aes_gcmp_256].len = 48;
1460 
1461 	pdev->rx_pn[htt_sec_type_tkip].cmp =
1462 		pdev->rx_pn[htt_sec_type_tkip_nomic].cmp =
1463 			pdev->rx_pn[htt_sec_type_aes_ccmp].cmp = ol_rx_pn_cmp48;
1464 
1465 	pdev->rx_pn[htt_sec_type_aes_ccmp_256].cmp =
1466 		pdev->rx_pn[htt_sec_type_aes_gcmp].cmp =
1467 		    pdev->rx_pn[htt_sec_type_aes_gcmp_256].cmp = ol_rx_pn_cmp48;
1468 
1469 	/* WAPI: 128-bit PN */
1470 	pdev->rx_pn[htt_sec_type_wapi].len = 128;
1471 	pdev->rx_pn[htt_sec_type_wapi].cmp = ol_rx_pn_wapi_cmp;
1472 
1473 	OL_RX_REORDER_TIMEOUT_INIT(pdev);
1474 
1475 	ol_txrx_dbg("Created pdev %pK", pdev);
1476 
1477 	pdev->cfg.host_addba = ol_cfg_host_addba(pdev->ctrl_pdev);
1478 
1479 #ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
1480 #define OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT 3
1481 
1482 /* #if 1 -- TODO: clean this up */
1483 #define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT	\
1484 	/* avg = 100% * new + 0% * old */ \
1485 	(1 << OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT)
1486 /*
1487  * #else
1488  * #define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT
1489  *	//avg = 25% * new + 25% * old
1490  *	(1 << (OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT-2))
1491  * #endif
1492  */
1493 	pdev->rssi_update_shift = OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT;
1494 	pdev->rssi_new_weight = OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT;
1495 #endif
1496 
1497 	ol_txrx_local_peer_id_pool_init(pdev);
1498 
1499 	pdev->cfg.ll_pause_txq_limit =
1500 		ol_tx_cfg_max_tx_queue_depth_ll(pdev->ctrl_pdev);
1501 
1502 	/* TX flow control for peer who is in very bad link status */
1503 	ol_tx_badpeer_flow_cl_init(pdev);
1504 
1505 #ifdef QCA_COMPUTE_TX_DELAY
1506 	qdf_mem_zero(&pdev->tx_delay, sizeof(pdev->tx_delay));
1507 	qdf_spinlock_create(&pdev->tx_delay.mutex);
1508 
1509 	/* initialize compute interval with 5 seconds (ESE default) */
1510 	pdev->tx_delay.avg_period_ticks = qdf_system_msecs_to_ticks(5000);
1511 	{
1512 		uint32_t bin_width_1000ticks;
1513 
1514 		bin_width_1000ticks =
1515 			qdf_system_msecs_to_ticks
1516 				(QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS
1517 				 * 1000);
1518 		/*
1519 		 * Compute a factor and shift that together are equal to the
1520 		 * inverse of the bin_width time, so that rather than dividing
1521 		 * by the bin width time, approximately the same result can be
1522 		 * obtained much more efficiently by a multiply + shift.
1523 		 * multiply_factor >> shift = 1 / bin_width_time, so
1524 		 * multiply_factor = (1 << shift) / bin_width_time.
1525 		 *
1526 		 * Pick the shift semi-arbitrarily.
1527 		 * If we knew statically what the bin_width would be, we could
1528 		 * choose a shift that minimizes the error.
1529 		 * Since the bin_width is determined dynamically, simply use a
1530 		 * shift that is about half of the uint32_t size.  This should
1531 		 * result in a relatively large multiplier value, which
1532 		 * minimizes error from rounding the multiplier to an integer.
1533 		 * The rounding error only becomes significant if the tick units
1534 		 * are on the order of 1 microsecond.  In most systems, it is
1535 		 * expected that the tick units will be relatively low-res,
1536 		 * on the order of 1 millisecond.  In such systems the rounding
1537 		 * error is negligible.
1538 		 * It would be more accurate to dynamically try out different
1539 		 * shifts and choose the one that results in the smallest
1540 		 * rounding error, but that extra level of fidelity is
1541 		 * not needed.
1542 		 */
1543 		pdev->tx_delay.hist_internal_bin_width_shift = 16;
1544 		pdev->tx_delay.hist_internal_bin_width_mult =
1545 			((1 << pdev->tx_delay.hist_internal_bin_width_shift) *
1546 			 1000 + (bin_width_1000ticks >> 1)) /
1547 			bin_width_1000ticks;
1548 	}
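
	/*
	 * Worked example with hypothetical units: with 1 ms ticks and a 10 ms
	 * internal bin width, bin_width_1000ticks = 10000, so
	 * hist_internal_bin_width_mult = ((1 << 16) * 1000 + 5000) / 10000
	 * = 6554. A delay of 100 ticks then maps to (100 * 6554) >> 16 = 10
	 * bins, i.e. the same result as dividing by the 10-tick bin width,
	 * but using only a multiply and a shift.
	 */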
1549 #endif /* QCA_COMPUTE_TX_DELAY */
1550 
1551 	/* Thermal Mitigation */
1552 	ol_tx_throttle_init(pdev);
1553 
1554 	ol_tso_seg_list_init(pdev, desc_pool_size);
1555 
1556 	ol_tso_num_seg_list_init(pdev, desc_pool_size);
1557 
1558 	ol_tx_register_flow_control(pdev);
1559 
1560 	return 0;            /* success */
1561 
1562 pn_trace_attach_fail:
1563 	OL_RX_REORDER_TRACE_DETACH(pdev);
1564 
1565 reorder_trace_attach_fail:
1566 	qdf_spinlock_destroy(&pdev->peer_ref_mutex);
1567 	qdf_spinlock_destroy(&pdev->rx.mutex);
1568 	qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
1569 	qdf_spinlock_destroy(&pdev->peer_map_unmap_lock);
1570 	OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
1571 
1572 control_init_fail:
1573 desc_alloc_fail:
1574 	for (i = 0; i < fail_idx; i++)
1575 		htt_tx_desc_free(pdev->htt_pdev,
1576 			(ol_tx_desc_find(pdev, i))->htt_tx_desc);
1577 
1578 	qdf_mem_multi_pages_free(pdev->osdev,
1579 		&pdev->tx_desc.desc_pages, 0, true);
1580 
1581 page_alloc_fail:
1582 	if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
1583 		htt_ipa_uc_detach(pdev->htt_pdev);
1584 uc_attach_fail:
1585 	htt_detach(pdev->htt_pdev);
1586 htt_attach_fail:
1587 	ol_tx_desc_dup_detect_deinit(pdev);
1588 ol_attach_fail:
1589 	return ret;            /* fail */
1590 }
1591 
1592 /**
1593  * ol_txrx_pdev_attach_target() - send target configuration
1594  *
1595  * @soc_hdl - data path soc handle
1596  * @pdev_id - device instance id
1597  *
1598  * The majority of the data SW setup is done by the pdev_attach
1599  * functions, but this function completes the data SW setup by
1600  * sending datapath configuration messages to the target.
1601  *
1602  * Return: 0 - success, 1 - failure
1603  */
1604 static int ol_txrx_pdev_attach_target(struct cdp_soc_t *soc_hdl,
1605 				      uint8_t pdev_id)
1606 {
1607 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1608 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
1609 
1610 	if (!pdev)
1611 		return QDF_STATUS_E_FAULT;
1612 
1613 	return htt_attach_target(pdev->htt_pdev) == QDF_STATUS_SUCCESS ? 0 : 1;
1614 }
1615 
1616 /**
1617  * ol_tx_free_descs_inuse - free tx descriptors which are in use
1618  * @pdev - the physical device for which tx descs need to be freed
1619  *
1620  * Cycle through the list of TX descriptors (for a pdev) which are in use,
1621  * for which TX completion has not been received and free them. Should be
1622  * called only when the interrupts are off and all lower layer RX is stopped.
1623  * Otherwise there may be a race condition with TX completions.
1624  *
1625  * Return: None
1626  */
1627 static void ol_tx_free_descs_inuse(ol_txrx_pdev_handle pdev)
1628 {
1629 	int i;
1630 	void *htt_tx_desc;
1631 	struct ol_tx_desc_t *tx_desc;
1632 	int num_freed_tx_desc = 0;
1633 
1634 	for (i = 0; i < pdev->tx_desc.pool_size; i++) {
1635 		tx_desc = ol_tx_desc_find(pdev, i);
1636 		/*
1637 		 * Confirm that each tx descriptor is "empty", i.e. it has
1638 		 * no tx frame attached.
1639 		 * In particular, check that there are no frames that have
1640 		 * been given to the target to transmit, for which the
1641 		 * target has never provided a response.
1642 		 *
1643 		 * Rome supports mgmt Tx via HTT interface, not via WMI.
1644 		 * When a mgmt frame is sent, 2 tx descs are allocated:
1645 		 * mgmt_txrx_desc is allocated in wlan_mgmt_txrx_mgmt_frame_tx,
1646 		 * ol_tx_desc is allocated in ol_txrx_mgmt_send_ext.
1647 		 * They point to same net buffer.
1648 		 * net buffer is mapped in htt_tx_desc_init.
1649 		 *
1650 		 * When SSR occurs while a Rome STA is connected, a deauth frame
1651 		 * is sent, but no tx completion arrives (firmware already hung).
1652 		 * Pending mgmt frames are unmapped and freed when the vdev is
1653 		 * destroyed.
1654 		 * hdd_reset_all_adapters->hdd_stop_adapter->hdd_vdev_destroy
1655 		 * ->wma_handle_vdev_detach->wlan_mgmt_txrx_vdev_drain
1656 		 * ->wma_mgmt_frame_fill_peer_cb
1657 		 * ->mgmt_txrx_tx_completion_handler.
1658 		 *
1659 		 * There is no need to unmap and free mgmt frames' net buffers
1660 		 * again during data path cleanup; just free the ol_tx_desc.
1661 		 * hdd_wlan_stop_modules->cds_post_disable->cdp_pdev_pre_detach
1662 		 * ->ol_txrx_pdev_pre_detach->ol_tx_free_descs_inuse.
1663 		 */
1664 		if (qdf_atomic_read(&tx_desc->ref_cnt)) {
1665 			if (!ol_tx_get_is_mgmt_over_wmi_enabled() &&
1666 			    tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE) {
1667 				qdf_atomic_init(&tx_desc->ref_cnt);
1668 				ol_txrx_dbg("Pending mgmt frames nbuf unmapped and freed already when vdev destroyed");
1669 				/* free the tx desc */
1670 				ol_tx_desc_free(pdev, tx_desc);
1671 			} else {
1672 				ol_txrx_dbg("Warning: freeing tx frame (no compltn)");
1673 				ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1);
1674 			}
1675 			num_freed_tx_desc++;
1676 		}
1677 		htt_tx_desc = tx_desc->htt_tx_desc;
1678 		htt_tx_desc_free(pdev->htt_pdev, htt_tx_desc);
1679 	}
1680 
1681 	if (num_freed_tx_desc)
1682 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
1683 		"freed %d tx frames for which no resp from target",
1684 		num_freed_tx_desc);
1685 
1686 }
1687 
1688 /**
1689  * ol_txrx_pdev_pre_detach() - detach the data SW state
1690  * @soc_hdl - datapath soc handle
1691  * @pdev_id - the data physical device id being removed
1692  * @force - delete the pdev (and its vdevs and peers) even if
1693  * there are outstanding references by the target to the vdevs
1694  * and peers within the pdev
1695  *
1696  * This function is used when the WLAN driver is being removed to
1697  * detach the host data component within the driver.
1698  *
1699  * Return: none
1700  */
1701 static void ol_txrx_pdev_pre_detach(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1702 					  int force)
1703 {
1704 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1705 	struct ol_txrx_pdev_t *pdev = ol_txrx_get_pdev_from_pdev_id(soc,
1706 								    pdev_id);
1707 
1708 	/* preconditions */
1709 	TXRX_ASSERT2(pdev);
1710 
1711 	/* check that the pdev has no vdevs allocated */
1712 	TXRX_ASSERT1(TAILQ_EMPTY(&pdev->vdev_list));
1713 
1714 #ifdef QCA_SUPPORT_TX_THROTTLE
1715 	/* Thermal Mitigation */
1716 	qdf_timer_stop(&pdev->tx_throttle.phase_timer);
1717 	qdf_timer_free(&pdev->tx_throttle.phase_timer);
1718 #ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
1719 	qdf_timer_stop(&pdev->tx_throttle.tx_timer);
1720 	qdf_timer_free(&pdev->tx_throttle.tx_timer);
1721 #endif
1722 #endif
1723 
1724 	if (force) {
1725 		/*
1726 		 * The assertion above confirms that all vdevs within this pdev
1727 		 * were detached.  However, they may not have actually been
1728 		 * deleted.
1729 		 * If the vdev had peers which never received a PEER_UNMAP msg
1730 		 * from the target, then there are still zombie peer objects,
1731 		 * and the vdev parents of the zombie peers are also zombies,
1732 		 * hanging around until their final peer gets deleted.
1733 		 * Go through the peer hash table and delete any peers left.
1734 		 * As a side effect, this will complete the deletion of any
1735 		 * vdevs that are waiting for their peers to finish deletion.
1736 		 */
1737 		ol_txrx_dbg("Force delete for pdev %pK",
1738 			   pdev);
1739 		ol_txrx_peer_find_hash_erase(pdev);
1740 		ol_txrx_peer_free_inactive_list(pdev);
1741 	}
1742 
1743 	/* to get flow pool status before freeing descs */
1744 	ol_tx_dump_flow_pool_info(cds_get_context(QDF_MODULE_ID_SOC));
1745 	ol_tx_free_descs_inuse(pdev);
1746 	ol_tx_deregister_flow_control(pdev);
1747 
1748 	/*
1749 	 * ol_tso_seg_list_deinit should happen after
1750 	 * ol_tx_deinit_tx_desc_inuse as it tries to access the tso seg freelist
1751 	 * which is being de-initialized in ol_tso_seg_list_deinit
1752 	 */
1753 	ol_tso_seg_list_deinit(pdev);
1754 	ol_tso_num_seg_list_deinit(pdev);
1755 
1756 	/* Stop the communication between HTT and target at first */
1757 	htt_detach_target(pdev->htt_pdev);
1758 
1759 	qdf_mem_multi_pages_free(pdev->osdev,
1760 		&pdev->tx_desc.desc_pages, 0, true);
1761 	pdev->tx_desc.freelist = NULL;
1762 
1763 	/* Detach micro controller data path offload resource */
1764 	if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
1765 		htt_ipa_uc_detach(pdev->htt_pdev);
1766 
1767 	htt_detach(pdev->htt_pdev);
1768 	ol_tx_desc_dup_detect_deinit(pdev);
1769 
1770 	qdf_spinlock_destroy(&pdev->peer_ref_mutex);
1771 	qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
1772 	qdf_spinlock_destroy(&pdev->rx.mutex);
1773 	qdf_spinlock_destroy(&pdev->peer_map_unmap_lock);
1774 #ifdef QCA_SUPPORT_TX_THROTTLE
1775 	/* Thermal Mitigation */
1776 	qdf_spinlock_destroy(&pdev->tx_throttle.mutex);
1777 #endif
1778 
1779 	/* TX flow control for peer who is in very bad link status */
1780 	ol_tx_badpeer_flow_cl_deinit(pdev);
1781 
1782 	OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
1783 
1784 	OL_RX_REORDER_TRACE_DETACH(pdev);
1785 	OL_RX_PN_TRACE_DETACH(pdev);
1786 
1787 	htt_pktlogmod_exit(pdev);
1788 
1789 	/*
1790 	 * WDI event detach
1791 	 */
1792 	wdi_event_detach(pdev);
1793 
1794 	ol_txrx_local_peer_id_cleanup(pdev);
1795 
1796 #ifdef QCA_COMPUTE_TX_DELAY
1797 	qdf_spinlock_destroy(&pdev->tx_delay.mutex);
1798 #endif
1799 
1800 	return;
1801 }
1802 
1803 /**
1804  * ol_txrx_pdev_detach() - delete the data SW state
1805  * @soc_hdl - data path soc handle
1806  * @pdev_id - device instance id
1807  * @force - delete the pdev (and its vdevs and peers) even if
1808  * there are outstanding references by the target to the vdevs
1809  * and peers within the pdev
1810  *
1811  * This function is used when the WLAN driver is being removed to
1812  * remove the host data component within the driver.
1813  * All virtual devices within the physical device need to be deleted
1814  * (ol_txrx_vdev_detach) before the physical device itself is deleted.
1815  *
1816  * Return: Success or Failure
1817  */
1818 static QDF_STATUS ol_txrx_pdev_detach(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1819 				      int force)
1820 {
1821 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1822 	struct ol_txrx_pdev_t *pdev = ol_txrx_get_pdev_from_pdev_id(soc,
1823 								    pdev_id);
1824 	struct ol_txrx_stats_req_internal *req, *temp_req;
1825 	int i = 0;
1826 
1827 	if (!soc) {
1828 		ol_txrx_err("soc is NULL");
1829 		return QDF_STATUS_E_FAILURE;
1830 	}
1831 
1832 	/* check to ensure the txrx pdev structure is not NULL */
1833 	if (!pdev) {
1834 		ol_txrx_err("pdev is NULL");
1835 		return QDF_STATUS_E_FAILURE;
1836 	}
1837 
1838 	qdf_spin_lock_bh(&pdev->req_list_spinlock);
1839 	if (pdev->req_list_depth > 0)
1840 		ol_txrx_err(
1841 			"Warning: the txrx req list is not empty, depth=%d",
1842 			pdev->req_list_depth
1843 			);
1844 	TAILQ_FOREACH_SAFE(req, &pdev->req_list, req_list_elem, temp_req) {
1845 		TAILQ_REMOVE(&pdev->req_list, req, req_list_elem);
1846 		pdev->req_list_depth--;
1847 		ol_txrx_err(
1848 			"%d: %pK,verbose(%d), concise(%d), up_m(0x%x), reset_m(0x%x)",
1849 			i++,
1850 			req,
1851 			req->base.print.verbose,
1852 			req->base.print.concise,
1853 			req->base.stats_type_upload_mask,
1854 			req->base.stats_type_reset_mask
1855 			);
1856 		qdf_mem_free(req);
1857 	}
1858 	qdf_spin_unlock_bh(&pdev->req_list_spinlock);
1859 
1860 	qdf_spinlock_destroy(&pdev->req_list_spinlock);
1861 	qdf_spinlock_destroy(&pdev->tx_mutex);
1862 
1863 	OL_RX_REORDER_TIMEOUT_CLEANUP(pdev);
1864 
1865 	if (pdev->cfg.is_high_latency)
1866 		ol_tx_sched_detach(pdev);
1867 
1868 	htt_deregister_rx_pkt_dump_callback(pdev->htt_pdev);
1869 
1870 	htt_pdev_free(pdev->htt_pdev);
1871 	ol_txrx_peer_find_detach(pdev);
1872 	ol_txrx_tso_stats_deinit(pdev);
1873 	ol_txrx_fw_stats_desc_pool_deinit(pdev);
1874 
1875 	ol_txrx_pdev_txq_log_destroy(pdev);
1876 	ol_txrx_pdev_grp_stat_destroy(pdev);
1877 
1878 	ol_txrx_debugfs_exit(pdev);
1879 	ol_unregister_peer_recovery_notifier();
1880 
1881 	soc->pdev_list[pdev->id] = NULL;
1882 	qdf_mem_free(pdev);
1883 
1884 	return QDF_STATUS_SUCCESS;
1885 }
1886 
1887 #if defined(QCA_HL_NETDEV_FLOW_CONTROL)
1888 
1889 /**
1890  * ol_txrx_vdev_per_vdev_tx_desc_init() - initialise per vdev tx desc count
1891  * related variables.
1892  * @vdev: the virtual device object
1893  *
1894  * Return: None
1895  */
1896 static inline void
1897 ol_txrx_vdev_per_vdev_tx_desc_init(struct ol_txrx_vdev_t *vdev)
1898 {
1899 	qdf_atomic_init(&vdev->tx_desc_count);
1900 	vdev->tx_desc_limit = 0;
1901 	vdev->queue_restart_th = 0;
1902 	vdev->prio_q_paused = 0;
1903 	vdev->queue_stop_th = 0;
1904 }
1905 #else
1906 
1907 static inline void
1908 ol_txrx_vdev_per_vdev_tx_desc_init(struct ol_txrx_vdev_t *vdev)
1909 {
1910 }
1911 #endif /* QCA_HL_NETDEV_FLOW_CONTROL */
1912 
1913 /**
1914  * ol_txrx_vdev_attach - Allocate and initialize the data object
1915  * for a new virtual device.
1916  *
1917  * @soc_hdl - data path soc handle
1918  * @pdev_id - physical device instance id
1919  * @vdev_mac_addr - the MAC address of the virtual device
1920  * @vdev_id - the ID used to identify the virtual device to the target
1921  * @op_mode - whether this virtual device is operating as an AP,
1922  * an IBSS, or a STA
1923  * @subtype:  Subtype of the operating vdev
1924  *
1925  * Return: QDF_STATUS_SUCCESS on success,
1926  *	   QDF error code on failure
1927  */
1928 static QDF_STATUS
1929 ol_txrx_vdev_attach(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1930 		    struct cdp_vdev_info *vdev_info)
1931 {
1932 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1933 	struct ol_txrx_pdev_t *pdev = ol_txrx_get_pdev_from_pdev_id(soc,
1934 								    pdev_id);
1935 	uint8_t *vdev_mac_addr = vdev_info->vdev_mac_addr;
1936 	uint8_t vdev_id = vdev_info->vdev_id;
1937 	enum wlan_op_mode op_mode = vdev_info->op_mode;
1938 	enum wlan_op_subtype subtype = vdev_info->subtype;
1939 	enum QDF_OPMODE qdf_opmode = vdev_info->qdf_opmode;
1940 
1941 	struct ol_txrx_vdev_t *vdev;
1942 	QDF_STATUS qdf_status;
1943 
1944 	/* preconditions */
1945 	TXRX_ASSERT2(pdev);
1946 	TXRX_ASSERT2(vdev_mac_addr);
1947 
1948 	if (qdf_unlikely(!soc)) {
1949 		ol_txrx_err("soc is NULL");
1950 		return QDF_STATUS_E_INVAL;
1951 	}
1952 
1953 	vdev = qdf_mem_malloc(sizeof(*vdev));
1954 	if (!vdev)
1955 		return QDF_STATUS_E_NOMEM;    /* failure */
1956 
1957 	/* store provided params */
1958 	vdev->pdev = pdev;
1959 	vdev->vdev_id = vdev_id;
1960 	vdev->opmode = op_mode;
1961 	vdev->subtype = subtype;
1962 	vdev->qdf_opmode = qdf_opmode;
1963 
1964 	vdev->delete.pending = 0;
1965 	vdev->safemode = 0;
1966 	vdev->drop_unenc = 1;
1967 	vdev->num_filters = 0;
1968 	vdev->fwd_tx_packets = 0;
1969 	vdev->fwd_rx_packets = 0;
1970 
1971 	ol_txrx_vdev_per_vdev_tx_desc_init(vdev);
1972 
1973 	qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr,
1974 		     QDF_MAC_ADDR_SIZE);
1975 
1976 	TAILQ_INIT(&vdev->peer_list);
1977 	vdev->last_real_peer = NULL;
1978 
1979 #ifdef QCA_IBSS_SUPPORT
1980 	vdev->ibss_peer_num = 0;
1981 	vdev->ibss_peer_heart_beat_timer = 0;
1982 #endif
1983 
1984 	ol_txrx_vdev_txqs_init(vdev);
1985 
1986 	qdf_spinlock_create(&vdev->ll_pause.mutex);
1987 	vdev->ll_pause.paused_reason = 0;
1988 	vdev->ll_pause.txq.head = vdev->ll_pause.txq.tail = NULL;
1989 	vdev->ll_pause.txq.depth = 0;
1990 	qdf_atomic_init(&vdev->delete.detaching);
1991 	qdf_timer_init(pdev->osdev,
1992 			       &vdev->ll_pause.timer,
1993 			       ol_tx_vdev_ll_pause_queue_send, vdev,
1994 			       QDF_TIMER_TYPE_SW);
1995 	qdf_atomic_init(&vdev->os_q_paused);
1996 	qdf_atomic_set(&vdev->os_q_paused, 0);
1997 	vdev->tx_fl_lwm = 0;
1998 	vdev->tx_fl_hwm = 0;
1999 	vdev->rx = NULL;
2000 	vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
2001 	qdf_mem_zero(&vdev->last_peer_mac_addr,
2002 			sizeof(union ol_txrx_align_mac_addr_t));
2003 	qdf_spinlock_create(&vdev->flow_control_lock);
2004 	vdev->osif_flow_control_cb = NULL;
2005 	vdev->osif_flow_control_is_pause = NULL;
2006 	vdev->osif_fc_ctx = NULL;
2007 
2008 	vdev->txrx_stats.txack_success = 0;
2009 	vdev->txrx_stats.txack_failed = 0;
2010 
2011 	vdev->bundling_required = false;
2012 	qdf_spinlock_create(&vdev->bundle_queue.mutex);
2013 	vdev->bundle_queue.txq.head = NULL;
2014 	vdev->bundle_queue.txq.tail = NULL;
2015 	vdev->bundle_queue.txq.depth = 0;
2016 	qdf_timer_init(
2017 		pdev->osdev,
2018 		&vdev->bundle_queue.timer,
2019 		ol_tx_hl_vdev_bundle_timer,
2020 		vdev, QDF_TIMER_TYPE_SW);
2021 
2022 	/* Default MAX Q depth for every VDEV */
2023 	vdev->ll_pause.max_q_depth =
2024 		ol_tx_cfg_max_tx_queue_depth_ll(vdev->pdev->ctrl_pdev);
2025 	qdf_status = qdf_event_create(&vdev->wait_delete_comp);
2026 
2027 	ol_txrx_vdev_init_tcp_del_ack(vdev);
2028 
2029 	/* add this vdev into the pdev's list */
2030 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
2031 	if (QDF_GLOBAL_MONITOR_MODE == cds_get_conparam())
2032 		pdev->monitor_vdev = vdev;
2033 
2034 	ol_txrx_hl_tdls_flag_reset(soc_hdl, vdev_id, false);
2035 
2036 	ol_txrx_dbg(
2037 		   "Created vdev %pK ("QDF_MAC_ADDR_FMT")",
2038 		   vdev,
2039 		   QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
2040 
2041 	/*
2042 	 * We've verified that htt_op_mode == wlan_op_mode,
2043 	 * so no translation is needed.
2044 	 */
2045 	htt_vdev_attach(pdev->htt_pdev, vdev_id, op_mode);
2046 
2047 	return QDF_STATUS_SUCCESS;
2048 }
2049 
2050 /**
2051  * ol_txrx_vdev_register - Link a vdev's data object with the
2052  * matching OS shim vdev object.
2053  *
2054  * @soc_hdl: datapath soc handle
2055  * @vdev_id: the virtual device's id
2056  * @osif_vdev: the virtual device's OS shim object
2057  * @txrx_ops: (pointers to) functions used for tx and rx data xfer
2058  *
2059  *  The data object for a virtual device is created by the
2060  *  function ol_txrx_vdev_attach.  However, rather than fully
2061  *  linking the data vdev object with the vdev objects from the
2062  *  other subsystems that the data vdev object interacts with,
2063  *  the txrx_vdev_attach function focuses primarily on creating
2064  *  the data vdev object. After the creation of both the data
2065  *  vdev object and the OS shim vdev object, this
2066  *  txrx_osif_vdev_attach function is used to connect the two
2067  *  vdev objects, so the data SW can use the OS shim vdev handle
2068  *  when passing rx data received by a vdev up to the OS shim.
2069  */
2070 static QDF_STATUS ol_txrx_vdev_register(struct cdp_soc_t *soc_hdl,
2071 					uint8_t vdev_id,
2072 					ol_osif_vdev_handle osif_vdev,
2073 					struct ol_txrx_ops *txrx_ops)
2074 {
2075 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
2076 	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
2077 								     vdev_id);
2078 
2079 	if (qdf_unlikely(!vdev) || qdf_unlikely(!txrx_ops)) {
2080 		qdf_print("vdev/txrx_ops is NULL!");
2081 		qdf_assert(0);
2082 		return QDF_STATUS_E_FAILURE;
2083 	}
2084 
2085 	vdev->osif_dev = osif_vdev;
2086 	vdev->rx = txrx_ops->rx.rx;
2087 	vdev->stats_rx = txrx_ops->rx.stats_rx;
2088 	vdev->tx_comp = txrx_ops->tx.tx_comp;
2089 	vdev->vdev_del_notify = txrx_ops->vdev_del_notify;
2090 	txrx_ops->tx.tx = ol_tx_data;
2091 
2092 	return QDF_STATUS_SUCCESS;
2093 }
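/*
 * Example (editor's sketch, not part of the original source): an OS shim
 * typically populates struct ol_txrx_ops with its own handlers before
 * registering. The field names match the assignments above; the callback
 * names are hypothetical, and real callers reach this static function
 * through the cdp dispatch layer rather than calling it directly.
 *
 *	struct ol_txrx_ops ops = { 0 };
 *
 *	ops.rx.rx = osif_rx_deliver;            // rx frames handed to OS shim
 *	ops.tx.tx_comp = osif_tx_complete;      // optional tx completion hook
 *	ops.vdev_del_notify = osif_vdev_deleted;
 *	if (ol_txrx_vdev_register(soc_hdl, vdev_id, osif_vdev, &ops) ==
 *	    QDF_STATUS_SUCCESS)
 *		osif_priv->tx_fn = ops.tx.tx;   // now points at ol_tx_data
 */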
2094 
2095 /**
2096  * ol_txrx_set_privacy_filters - set the privacy filter
2097  * @vdev - the data virtual device object
2098  * @filter - filters to be set
2099  * @num - the number of filters
2100  *
2101  * Rx related. Set the privacy filters. When receiving rx packets,
2102  * check the ether type, filter type and packet type to decide
2103  * whether to discard these packets.
2104  */
2105 static void
2106 ol_txrx_set_privacy_filters(ol_txrx_vdev_handle vdev,
2107 			    void *filters, uint32_t num)
2108 {
2109 	qdf_mem_copy(vdev->privacy_filters, filters,
2110 		     num * sizeof(struct privacy_exemption));
2111 	vdev->num_filters = num;
2112 }
2113 
2114 #if defined(CONFIG_HL_SUPPORT) || defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)
2115 
2116 static void
2117 ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
2118 {
2119 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
2120 	int i;
2121 	struct ol_tx_desc_t *tx_desc;
2122 
2123 	qdf_spin_lock_bh(&pdev->tx_mutex);
2124 	for (i = 0; i < pdev->tx_desc.pool_size; i++) {
2125 		tx_desc = ol_tx_desc_find(pdev, i);
2126 		if (tx_desc->vdev == vdev)
2127 			tx_desc->vdev = NULL;
2128 	}
2129 	qdf_spin_unlock_bh(&pdev->tx_mutex);
2130 }
2131 
2132 #else
2133 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
2134 static void ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
2135 {
2136 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
2137 	struct ol_tx_flow_pool_t *pool;
2138 	int i;
2139 	struct ol_tx_desc_t *tx_desc;
2140 
2141 	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
2142 	for (i = 0; i < pdev->tx_desc.pool_size; i++) {
2143 		tx_desc = ol_tx_desc_find(pdev, i);
2144 		if (!qdf_atomic_read(&tx_desc->ref_cnt))
2145 			/* not in use */
2146 			continue;
2147 
2148 		pool = tx_desc->pool;
2149 		qdf_spin_lock_bh(&pool->flow_pool_lock);
2150 		if (tx_desc->vdev == vdev)
2151 			tx_desc->vdev = NULL;
2152 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
2153 	}
2154 	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
2155 }
2156 
2157 #else
2158 static void
2159 ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
2160 {
2161 }
2162 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
2163 #endif /* CONFIG_HL_SUPPORT */
2164 
2165 /**
2166  * ol_txrx_vdev_detach - Deallocate the specified data virtual
2167  * device object.
2168  * @soc_hdl - data path soc handle
2169  * @vdev_id: vdev id
2170  * @callback: function to call (if non-NULL) once the vdev has
2171  * been wholly deleted
2172  * @callback_context: context to provide in the callback
2173  *
2174  * All peers associated with the virtual device need to be deleted
2175  * (ol_txrx_peer_detach) before the virtual device itself is deleted.
2176  * However, for the peers to be fully deleted, the peer deletion has to
2177  * percolate through the target data FW and back up to the host data SW.
2178  * Thus, even though the host control SW may have issued a peer_detach
2179  * call for each of the vdev's peers, the peer objects may still be
2180  * allocated, pending removal of all references to them by the target FW.
2181  * In this case, though the vdev_detach function call will still return
2182  * immediately, the vdev itself won't actually be deleted, until the
2183  * deletions of all its peers complete.
2184  * The caller can provide a callback function pointer to be notified when
2185  * the vdev deletion actually happens - whether it's directly within the
2186  * vdev_detach call, or if it's deferred until all in-progress peer
2187  * deletions have completed.
2188  */
2189 static QDF_STATUS
2190 ol_txrx_vdev_detach(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2191 		    ol_txrx_vdev_delete_cb callback, void *context)
2192 {
2193 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
2194 	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
2195 								     vdev_id);
2196 	struct ol_txrx_pdev_t *pdev;
2197 	ol_txrx_vdev_delete_cb vdev_del_notify;
2198 	void *vdev_del_context;
2199 
2200 	if (qdf_unlikely(!vdev))
2201 		return QDF_STATUS_E_FAILURE;
2202 
2203 	/* preconditions */
2204 	TXRX_ASSERT2(vdev);
2205 	pdev = vdev->pdev;
2206 
2207 	/* prevent anyone from restarting the ll_pause timer again */
2208 	qdf_atomic_set(&vdev->delete.detaching, 1);
2209 
2210 	vdev_del_notify = vdev->vdev_del_notify;
2211 	vdev_del_context = vdev->osif_dev;
2212 	ol_txrx_vdev_tx_queue_free(vdev);
2213 
2214 	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
2215 	qdf_timer_stop(&vdev->ll_pause.timer);
2216 	vdev->ll_pause.is_q_timer_on = false;
2217 	while (vdev->ll_pause.txq.head) {
2218 		qdf_nbuf_t next = qdf_nbuf_next(vdev->ll_pause.txq.head);
2219 
2220 		qdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
2221 		qdf_nbuf_tx_free(vdev->ll_pause.txq.head, QDF_NBUF_PKT_ERROR);
2222 		vdev->ll_pause.txq.head = next;
2223 	}
2224 	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
2225 
2226 	/* ll_pause timer should be deleted without any locks held, and
2227 	 * no timer function should be executed after this point because
2228 	 * qdf_timer_free deletes the timer synchronously.
2229 	 */
2230 	qdf_timer_free(&vdev->ll_pause.timer);
2231 	qdf_spinlock_destroy(&vdev->ll_pause.mutex);
2232 
2233 	qdf_timer_free(&vdev->bundle_queue.timer);
2234 	qdf_spinlock_destroy(&vdev->bundle_queue.mutex);
2235 
2236 	qdf_spin_lock_bh(&vdev->flow_control_lock);
2237 	vdev->osif_flow_control_cb = NULL;
2238 	vdev->osif_flow_control_is_pause = NULL;
2239 	vdev->osif_fc_ctx = NULL;
2240 	qdf_spin_unlock_bh(&vdev->flow_control_lock);
2241 	qdf_spinlock_destroy(&vdev->flow_control_lock);
2242 
2243 	/* remove the vdev from its parent pdev's list */
2244 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
2245 
2246 	/*
2247 	 * Use peer_ref_mutex while accessing peer_list, in case
2248 	 * a peer is in the process of being removed from the list.
2249 	 */
2250 	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
2251 	/* check that the vdev has no peers allocated */
2252 	if (!TAILQ_EMPTY(&vdev->peer_list)) {
2253 		/* debug print - will be removed later */
2254 		ol_txrx_dbg(
2255 			   "not deleting vdev object %pK ("QDF_MAC_ADDR_FMT") until deletion finishes for all its peers",
2256 			   vdev,
2257 			   QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
2258 		/* indicate that the vdev needs to be deleted */
2259 		vdev->delete.pending = 1;
2260 		vdev->delete.callback = callback;
2261 		vdev->delete.context = context;
2262 		qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
2263 		return QDF_STATUS_E_FAILURE;
2264 	}
2265 	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
2266 	qdf_event_destroy(&vdev->wait_delete_comp);
2267 
2268 	ol_txrx_dbg(
2269 		   "deleting vdev obj %pK ("QDF_MAC_ADDR_FMT")",
2270 		   vdev,
2271 		   QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
2272 
2273 	htt_vdev_detach(pdev->htt_pdev, vdev->vdev_id);
2274 
2275 	/*
2276 	 * ol_tx_desc_free might access invalid vdev content referred to by a
2277 	 * tx desc, since this vdev might be detached asynchronously in
2278 	 * another thread.
2279 	 *
2280 	 * Go through tx desc pool to set corresponding tx desc's vdev to NULL
2281 	 * when detach this vdev, and add vdev checking in the ol_tx_desc_free
2282 	 * to avoid crash.
2283 	 *
2284 	 */
2285 	ol_txrx_tx_desc_reset_vdev(vdev);
2286 
2287 	/*
2288 	 * Doesn't matter if there are outstanding tx frames -
2289 	 * they will be freed once the target sends a tx completion
2290 	 * message for them.
2291 	 */
2292 	qdf_mem_free(vdev);
2293 	if (callback)
2294 		callback(context);
2295 
2296 	if (vdev_del_notify)
2297 		vdev_del_notify(vdev_del_context);
2298 
2299 	return QDF_STATUS_SUCCESS;
2300 }
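/*
 * Example (editor's sketch, illustrative only): because peer deletion may
 * still be pending in the target, a caller should treat ol_txrx_vdev_detach
 * above as potentially deferred and rely on the callback for final cleanup.
 * The helper names and the osif_vdev_priv context are hypothetical.
 *
 *	static void osif_vdev_detach_done(void *context)
 *	{
 *		struct osif_vdev_priv *priv = context;
 *
 *		complete(&priv->vdev_delete_comp);
 *	}
 *
 *	// Returns QDF_STATUS_E_FAILURE when deletion is deferred until the
 *	// last peer is gone; in either case the callback signals completion.
 *	ol_txrx_vdev_detach(soc_hdl, vdev_id, osif_vdev_detach_done, priv);
 *	wait_for_completion(&priv->vdev_delete_comp);
 */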
2301 
2302 /**
2303  * ol_txrx_flush_rx_frames() - flush cached rx frames
2304  * @peer: peer
2305  * @drop: set flag to drop frames
2306  *
2307  * Return: None
2308  */
2309 void ol_txrx_flush_rx_frames(struct ol_txrx_peer_t *peer,
2310 			     bool drop)
2311 {
2312 	struct ol_txrx_cached_bufq_t *bufqi;
2313 	struct ol_rx_cached_buf *cache_buf;
2314 	QDF_STATUS ret;
2315 	ol_txrx_rx_fp data_rx = NULL;
2316 
2317 	if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
2318 		qdf_atomic_dec(&peer->flush_in_progress);
2319 		return;
2320 	}
2321 
2322 	qdf_assert(peer->vdev);
2323 	qdf_spin_lock_bh(&peer->peer_info_lock);
2324 	bufqi = &peer->bufq_info;
2325 
2326 	if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->rx)
2327 		data_rx = peer->vdev->rx;
2328 	else
2329 		drop = true;
2330 	qdf_spin_unlock_bh(&peer->peer_info_lock);
2331 
2332 	qdf_spin_lock_bh(&bufqi->bufq_lock);
2333 	cache_buf = list_entry((&bufqi->cached_bufq)->next,
2334 				typeof(*cache_buf), list);
2335 	while (!list_empty(&bufqi->cached_bufq)) {
2336 		list_del(&cache_buf->list);
2337 		bufqi->curr--;
2338 		qdf_assert(bufqi->curr >= 0);
2339 		qdf_spin_unlock_bh(&bufqi->bufq_lock);
2340 		if (drop) {
2341 			qdf_nbuf_free(cache_buf->buf);
2342 		} else {
2343 			/* Flush the cached frames to HDD */
2344 			ret = data_rx(peer->vdev->osif_dev, cache_buf->buf);
2345 			if (ret != QDF_STATUS_SUCCESS)
2346 				qdf_nbuf_free(cache_buf->buf);
2347 		}
2348 		qdf_mem_free(cache_buf);
2349 		qdf_spin_lock_bh(&bufqi->bufq_lock);
2350 		cache_buf = list_entry((&bufqi->cached_bufq)->next,
2351 				typeof(*cache_buf), list);
2352 	}
2353 	bufqi->qdepth_no_thresh = bufqi->curr;
2354 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
2355 	qdf_atomic_dec(&peer->flush_in_progress);
2356 }
2357 
2358 static void ol_txrx_flush_cache_rx_queue(void)
2359 {
2360 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
2361 	struct ol_txrx_peer_t *peer;
2362 	struct ol_txrx_vdev_t *vdev;
2363 	ol_txrx_pdev_handle pdev;
2364 
2365 	if (qdf_unlikely(!soc))
2366 		return;
2367 
2368 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
2369 	if (!pdev)
2370 		return;
2371 
2372 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
2373 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2374 			ol_txrx_flush_rx_frames(peer, 1);
2375 		}
2376 	}
2377 }
2378 
2379 /* Define short name to use in cds_trigger_recovery */
2380 #define PEER_DEL_TIMEOUT QDF_PEER_DELETION_TIMEDOUT
2381 
2382 /**
2383  * ol_txrx_dump_peer_access_list() - dump peer access list
2384  * @peer: peer handle
2385  *
2386  * This function will dump if any peer debug ids are still accessing peer
2387  *
2388  * Return: None
2389  */
2390 static void ol_txrx_dump_peer_access_list(ol_txrx_peer_handle peer)
2391 {
2392 	u32 i;
2393 	u32 pending_ref;
2394 
2395 	for (i = 0; i < PEER_DEBUG_ID_MAX; i++) {
2396 		pending_ref = qdf_atomic_read(&peer->access_list[i]);
2397 		if (pending_ref)
2398 			ol_txrx_info_high("id %d pending refs %d",
2399 					  i, pending_ref);
2400 	}
2401 }
2402 
2403 /**
2404  * ol_txrx_peer_attach - Allocate and set up references for a
2405  * data peer object.
2406  * @soc_hdl - data path soc handle
2407  * @vdev_id - virtual device instance id
2408  * @peer_mac_addr - MAC address of the new peer
2409  *
2410  * When an association with a peer starts, the host's control SW
2411  * uses this function to inform the host data SW.
2412  * The host data SW allocates its own peer object, and stores a
2413  * reference to the control peer object within the data peer object.
2414  * The host data SW also stores a reference to the virtual device
2415  * that the peer is associated with.  This virtual device handle is
2416  * used when the data SW delivers rx data frames to the OS shim layer.
2417  * The host data SW returns a handle to the new peer data object,
2418  * so a reference within the control peer object can be set to the
2419  * data peer object.
2420  *
2421  * Return: QDF status code
2422  */
2423 static QDF_STATUS
2424 ol_txrx_peer_attach(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2425 		    uint8_t *peer_mac_addr, enum cdp_peer_type peer_type)
2426 {
2427 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
2428 	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
2429 								     vdev_id);
2430 	struct ol_txrx_peer_t *peer;
2431 	struct ol_txrx_peer_t *temp_peer;
2432 	uint8_t i;
2433 	bool wait_on_deletion = false;
2434 	unsigned long rc;
2435 	struct ol_txrx_pdev_t *pdev;
2436 	bool cmp_wait_mac = false;
2437 	uint8_t zero_mac_addr[QDF_MAC_ADDR_SIZE] = { 0, 0, 0, 0, 0, 0 };
2438 	u8 check_valid = 0;
2439 
2440 	/* preconditions */
2441 	TXRX_ASSERT2(vdev);
2442 	TXRX_ASSERT2(peer_mac_addr);
2443 
2444 	pdev = vdev->pdev;
2445 	TXRX_ASSERT2(pdev);
2446 
2447 	if (pdev->enable_peer_unmap_conf_support)
2448 		check_valid = 1;
2449 
2450 	if (qdf_mem_cmp(&zero_mac_addr, &vdev->last_peer_mac_addr,
2451 				QDF_MAC_ADDR_SIZE))
2452 		cmp_wait_mac = true;
2453 
2454 	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
2455 	/* check for duplicate existing peer */
2456 	TAILQ_FOREACH(temp_peer, &vdev->peer_list, peer_list_elem) {
2457 		if (!ol_txrx_peer_find_mac_addr_cmp(&temp_peer->mac_addr,
2458 			(union ol_txrx_align_mac_addr_t *)peer_mac_addr) &&
2459 			(check_valid == 0 || temp_peer->valid)) {
2460 			ol_txrx_info_high(
2461 				"vdev_id %d ("QDF_MAC_ADDR_FMT") already exists",
2462 				vdev->vdev_id,
2463 				QDF_MAC_ADDR_REF(peer_mac_addr));
2464 			if (qdf_atomic_read(&temp_peer->delete_in_progress)) {
2465 				vdev->wait_on_peer_id = temp_peer->local_id;
2466 				qdf_event_reset(&vdev->wait_delete_comp);
2467 				wait_on_deletion = true;
2468 				break;
2469 			} else {
2470 				qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
2471 				return QDF_STATUS_E_FAILURE;
2472 			}
2473 		}
2474 		if (cmp_wait_mac && !ol_txrx_peer_find_mac_addr_cmp(
2475 					&temp_peer->mac_addr,
2476 					&vdev->last_peer_mac_addr) &&
2477 					(check_valid == 0 ||
2478 					 temp_peer->valid)) {
2479 			ol_txrx_info_high(
2480 				"vdev_id %d ("QDF_MAC_ADDR_FMT") old peer exists",
2481 				vdev->vdev_id,
2482 				QDF_MAC_ADDR_REF(vdev->last_peer_mac_addr.raw));
2483 			if (qdf_atomic_read(&temp_peer->delete_in_progress)) {
2484 				vdev->wait_on_peer_id = temp_peer->local_id;
2485 				qdf_event_reset(&vdev->wait_delete_comp);
2486 				wait_on_deletion = true;
2487 				break;
2488 			} else {
2489 				qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
2490 				ol_txrx_err("peer not found");
2491 				return QDF_STATUS_E_FAILURE;
2492 			}
2493 		}
2494 	}
2495 	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
2496 
2497 	qdf_mem_zero(&vdev->last_peer_mac_addr,
2498 			sizeof(union ol_txrx_align_mac_addr_t));
2499 	if (wait_on_deletion) {
2500 		/* wait for peer deletion */
2501 		rc = qdf_wait_for_event_completion(&vdev->wait_delete_comp,
2502 					   PEER_DELETION_TIMEOUT);
2503 		if (QDF_STATUS_SUCCESS != rc) {
2504 			ol_txrx_err("error waiting for peer_id(%d) deletion, status %d",
2505 				    vdev->wait_on_peer_id, (int) rc);
2506 			/* Added for debugging only */
2507 			ol_txrx_dump_peer_access_list(temp_peer);
2508 			wlan_roam_debug_dump_table();
2509 			vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
2510 
2511 			return QDF_STATUS_E_FAILURE;
2512 		}
2513 	}
2514 
2515 	peer = qdf_mem_malloc(sizeof(*peer));
2516 	if (!peer)
2517 		return QDF_STATUS_E_NOMEM;
2518 
2519 	/* store provided params */
2520 	peer->vdev = vdev;
2521 	qdf_mem_copy(&peer->mac_addr.raw[0], peer_mac_addr,
2522 		     QDF_MAC_ADDR_SIZE);
2523 
2524 	ol_txrx_peer_txqs_init(pdev, peer);
2525 
2526 	INIT_LIST_HEAD(&peer->bufq_info.cached_bufq);
2527 	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
2528 	/* add this peer into the vdev's list */
2529 	TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
2530 	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
2531 	/* check whether this is a real peer (peer mac addr != vdev mac addr) */
2532 	if (ol_txrx_peer_find_mac_addr_cmp(&vdev->mac_addr, &peer->mac_addr)) {
2533 		qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
2534 		vdev->last_real_peer = peer;
2535 		qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
2536 	}
2537 
2538 	peer->rx_opt_proc = pdev->rx_opt_proc;
2539 
2540 	ol_rx_peer_init(pdev, peer);
2541 
2542 	/* initialize the peer_id */
2543 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
2544 		peer->peer_ids[i] = HTT_INVALID_PEER;
2545 
2546 	qdf_spinlock_create(&peer->peer_info_lock);
2547 	qdf_spinlock_create(&peer->bufq_info.bufq_lock);
2548 
2549 	peer->bufq_info.thresh = OL_TXRX_CACHED_BUFQ_THRESH;
2550 
2551 	qdf_atomic_init(&peer->delete_in_progress);
2552 	qdf_atomic_init(&peer->flush_in_progress);
2553 	qdf_atomic_init(&peer->ref_cnt);
2554 	qdf_atomic_init(&peer->del_ref_cnt);
2555 
2556 	for (i = 0; i < PEER_DEBUG_ID_MAX; i++)
2557 		qdf_atomic_init(&peer->access_list[i]);
2558 
2559 	/* keep one reference for attach */
2560 	ol_txrx_peer_get_ref(peer, PEER_DEBUG_ID_OL_PEER_ATTACH);
2561 
2562 	/* Set a flag to indicate peer create is pending in firmware */
2563 	qdf_atomic_init(&peer->fw_create_pending);
2564 	qdf_atomic_set(&peer->fw_create_pending, 1);
2565 
2566 	peer->valid = 1;
2567 	qdf_timer_init(pdev->osdev, &peer->peer_unmap_timer,
2568 		       peer_unmap_timer_handler, peer, QDF_TIMER_TYPE_SW);
2569 
2570 	ol_txrx_peer_find_hash_add(pdev, peer);
2571 
2572 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2573 		   "vdev %pK created peer %pK ref_cnt %d ("QDF_MAC_ADDR_FMT")",
2574 		   vdev, peer, qdf_atomic_read(&peer->ref_cnt),
2575 		   QDF_MAC_ADDR_REF(peer->mac_addr.raw));
2576 	/*
2577 	 * For every peer MAP message, search and set bss_peer if applicable
2578 	 */
2579 	if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
2580 				QDF_MAC_ADDR_SIZE))
2581 		peer->bss_peer = 1;
2582 
2583 	/*
2584 	 * The peer starts in the "disc" state while association is in progress.
2585 	 * Once association completes, the peer will get updated to "auth" state
2586 	 * by a call to ol_txrx_peer_state_update if the peer is in open mode,
2587 	 * or else to the "conn" state. For non-open mode, the peer will
2588 	 * progress to "auth" state once the authentication completes.
2589 	 */
2590 	peer->state = OL_TXRX_PEER_STATE_INVALID;
2591 	ol_txrx_peer_state_update(soc_hdl, peer->mac_addr.raw,
2592 				  OL_TXRX_PEER_STATE_DISC);
2593 
2594 #ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
2595 	peer->rssi_dbm = HTT_RSSI_INVALID;
2596 #endif
2597 	if ((QDF_GLOBAL_MONITOR_MODE == cds_get_conparam()) &&
2598 	    !pdev->self_peer) {
2599 		pdev->self_peer = peer;
2600 		/*
2601 		 * No Tx in monitor mode, otherwise results in target assert.
2602 		 * Setting disable_intrabss_fwd to true
2603 		 */
2604 		ol_vdev_rx_set_intrabss_fwd(soc_hdl, vdev_id, true);
2605 	}
2606 
2607 	ol_txrx_local_peer_id_alloc(pdev, peer);
2608 
2609 	return QDF_STATUS_SUCCESS;
2610 }
2611 
2612 #undef PEER_DEL_TIMEOUT
2613 
2614 /*
2615  * Discarding tx filter - removes all data frames (disconnected state)
2616  */
2617 static A_STATUS ol_tx_filter_discard(struct ol_txrx_msdu_info_t *tx_msdu_info)
2618 {
2619 	return A_ERROR;
2620 }
2621 
2622 /*
2623  * Non-authentication tx filter - filters out data frames that are not
2624  * related to authentication, but allows EAPOL (PAE) or WAPI (WAI)
2625  * data frames (connected state)
2626  */
2627 static A_STATUS ol_tx_filter_non_auth(struct ol_txrx_msdu_info_t *tx_msdu_info)
2628 {
2629 	return
2630 		(tx_msdu_info->htt.info.ethertype == ETHERTYPE_PAE ||
2631 		 tx_msdu_info->htt.info.ethertype ==
2632 		 ETHERTYPE_WAI) ? A_OK : A_ERROR;
2633 }
2634 
2635 /*
2636  * Pass-through tx filter - lets all data frames through (authenticated state)
2637  */
2638 static A_STATUS ol_tx_filter_pass_thru(struct ol_txrx_msdu_info_t *tx_msdu_info)
2639 {
2640 	return A_OK;
2641 }
2642 
2643 /**
2644  * ol_txrx_peer_get_peer_mac_addr() - return mac_addr from peer handle.
2645  * @peer: handle to peer
2646  *
2647  * returns the mac address for modules which do not know the peer type
2648  *
2649  * Return: the mac_addr from peer
2650  */
2651 static uint8_t *
2652 ol_txrx_peer_get_peer_mac_addr(void *ppeer)
2653 {
2654 	ol_txrx_peer_handle peer = ppeer;
2655 
2656 	if (!peer)
2657 		return NULL;
2658 
2659 	return peer->mac_addr.raw;
2660 }
2661 
2662 /**
2663  * ol_txrx_get_pn_info() - Returns pn info from peer
2664  * @soc_hdl: soc handle
2665  * @peer_mac: mac address of the peer
2666  * @vdev_id: vdev identifier
2667  * @last_pn_valid: return last_rmf_pn_valid value from peer.
2668  * @last_pn: return last_rmf_pn value from peer.
2669  * @rmf_pn_replays: return rmf_pn_replays value from peer.
2670  *
2671  * Return: NONE
2672  */
2673 static void
2674 ol_txrx_get_pn_info(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
2675 		    uint8_t vdev_id, uint8_t **last_pn_valid,
2676 		    uint64_t **last_pn, uint32_t **rmf_pn_replays)
2677 {
2678 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
2679 	ol_txrx_pdev_handle pdev;
2680 	ol_txrx_peer_handle peer;
2681 
2682 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
2683 	if (!pdev) {
2684 		ol_txrx_err("pdev is NULL");
2685 		return;
2686 	}
2687 
2688 	peer =  ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac, 0, 1,
2689 						    PEER_DEBUG_ID_OL_INTERNAL);
2690 	if (!peer)
2691 		return;
2692 
2693 	*last_pn_valid = &peer->last_rmf_pn_valid;
2694 	*last_pn = &peer->last_rmf_pn;
2695 	*rmf_pn_replays = &peer->rmf_pn_replays;
2696 
2697 	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
2698 }
2699 
2700 /**
2701  * ol_txrx_get_opmode() - Return operation mode of vdev
2702  * @soc_hdl: Datapath soc handle
2703  * @vdev_id: id of vdev
2704  *
2705  * Return: interface opmode if SUCCESS,
2706  *	   0 if interface does not exist.
2707  */
2708 static int ol_txrx_get_opmode(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
2709 {
2710 	struct ol_txrx_vdev_t *vdev;
2711 
2712 	vdev = (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
2713 	if (!vdev) {
2714 		ol_txrx_err("vdev for id %d is NULL", vdev_id);
2715 		return 0;
2716 	}
2717 
2718 	return vdev->opmode;
2719 }
2720 
2721 /**
2722  * ol_txrx_get_peer_state() - Return peer state of peer
2723  * @soc_hdl: datapath soc handle
2724  * @vdev_id: virtual interface id
2725  * @peer_mac: peer mac addr
2726  * @slowpath: called from slow path or not
2727  *
2728  * Return: return peer state
2729  */
2730 static int ol_txrx_get_peer_state(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2731 				  uint8_t *peer_mac, bool slowpath)
2732 {
2733 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
2734 	ol_txrx_pdev_handle pdev =
2735 		ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
2736 	ol_txrx_peer_handle peer;
2737 	enum ol_txrx_peer_state peer_state;
2738 
2739 	if (!pdev)
2740 		return QDF_STATUS_E_FAILURE;
2741 
2742 	peer =  ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac, 0, 1,
2743 						    PEER_DEBUG_ID_OL_INTERNAL);
2744 	if (!peer)
2745 		return QDF_STATUS_E_FAILURE;
2746 
2747 	peer_state = peer->state;
2748 	if (peer->vdev->vdev_id != vdev_id)
2749 		peer_state = OL_TXRX_PEER_STATE_INVALID;
2750 	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
2751 
2752 	return peer_state;
2753 }
2754 
2755 /**
2756  * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
2757  * @soc_hdl: datapath soc handle
2758  * @vdev_id: virtual interface id
2759  *
2760  * Return: vdev mac address
2761  */
2762 static uint8_t *
2763 ol_txrx_get_vdev_mac_addr(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
2764 {
2765 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
2766 	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
2767 								     vdev_id);
2768 
2769 	if (!vdev)
2770 		return NULL;
2771 
2772 	return vdev->mac_addr.raw;
2773 }
2774 
2775 #ifdef currently_unused
2776 /**
2777  * ol_txrx_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of
2778  * vdev
2779  * @vdev: vdev handle
2780  *
2781  * Return: Handle to struct qdf_mac_addr
2782  */
2783 struct qdf_mac_addr *
2784 ol_txrx_get_vdev_struct_mac_addr(ol_txrx_vdev_handle vdev)
2785 {
2786 	return (struct qdf_mac_addr *)&(vdev->mac_addr);
2787 }
2788 #endif
2789 
2790 #ifdef currently_unused
2791 /**
2792  * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev
2793  * @vdev: vdev handle
2794  *
2795  * Return: Handle to pdev
2796  */
2797 ol_txrx_pdev_handle ol_txrx_get_pdev_from_vdev(ol_txrx_vdev_handle vdev)
2798 {
2799 	return vdev->pdev;
2800 }
2801 #endif
2802 
2803 /**
2804  * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
2805  * @soc_hdl: datapath soc handle
2806  * @vdev_id: virtual interface id
2807  *
2808  * Return: Handle to control pdev
2809  */
2810 static struct cdp_cfg *
2811 ol_txrx_get_ctrl_pdev_from_vdev(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
2812 {
2813 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
2814 	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
2815 								     vdev_id);
2816 
2817 	if (!vdev)
2818 		return NULL;
2819 
2820 	return vdev->pdev->ctrl_pdev;
2821 }
2822 
2823 /**
2824  * ol_txrx_is_rx_fwd_disabled() - returns the rx_fwd_disabled status on vdev
2825  * @vdev: vdev handle
2826  *
2827  * Return: Rx Fwd disabled status
2828  */
2829 static uint8_t
2830 ol_txrx_is_rx_fwd_disabled(struct cdp_vdev *pvdev)
2831 {
2832 	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
2833 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)
2834 					vdev->pdev->ctrl_pdev;
2835 	return cfg->rx_fwd_disabled;
2836 }
2837 
2838 #ifdef QCA_IBSS_SUPPORT
2839 /**
2840  * ol_txrx_update_ibss_add_peer_num_of_vdev() - update and return peer num
2841  * @soc_hdl: datapath soc handle
2842  * @vdev_id: virtual interface id
2843  * @peer_num_delta: peer nums to be adjusted
2844  *
2845  * Return: -1 for failure or total peer nums after adjustment.
2846  */
2847 static int16_t
2848 ol_txrx_update_ibss_add_peer_num_of_vdev(struct cdp_soc_t *soc_hdl,
2849 					 uint8_t vdev_id,
2850 					 int16_t peer_num_delta)
2851 {
2852 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
2853 	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
2854 								     vdev_id);
2855 	int16_t new_peer_num;
2856 
2857 	if (!vdev)
2858 		return QDF_STATUS_E_FAILURE;
2859 
2860 	new_peer_num = vdev->ibss_peer_num + peer_num_delta;
2861 	if (new_peer_num > MAX_PEERS || new_peer_num < 0)
2862 		return OL_TXRX_INVALID_NUM_PEERS;
2863 
2864 	vdev->ibss_peer_num = new_peer_num;
2865 
2866 	return new_peer_num;
2867 }
2868 
2869 /**
2870  * ol_txrx_set_ibss_vdev_heart_beat_timer() - Update ibss vdev heart
2871  * beat timer
2872  * @soc_hdl: Datapath soc handle
2873  * @vdev_id: id of vdev
2874  * @timer_value_sec: new heart beat timer value
2875  *
2876  * Return: Old timer value set in vdev.
2877  */
2878 static uint16_t
2879 ol_txrx_set_ibss_vdev_heart_beat_timer(struct cdp_soc_t *soc_hdl,
2880 				       uint8_t vdev_id,
2881 				       uint16_t timer_value_sec)
2882 {
2883 	struct ol_txrx_vdev_t *vdev =
2884 		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
2885 	uint16_t old_timer_value = vdev->ibss_peer_heart_beat_timer;
2886 
2887 	vdev->ibss_peer_heart_beat_timer = timer_value_sec;
2888 
2889 	return old_timer_value;
2890 }
2891 #else /* !QCA_IBSS_SUPPORT */
2892 static inline int16_t
2893 ol_txrx_update_ibss_add_peer_num_of_vdev(struct cdp_soc_t *soc_hdl,
2894 					 uint8_t vdev_id,
2895 					 int16_t peer_num_delta)
2896 {
2897 	return 0;
2898 }
2899 
2900 static uint16_t ol_txrx_set_ibss_vdev_heart_beat_timer(
2901 				struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2902 				uint16_t timer_value_sec)
2903 {
2904 	return 0;
2905 }
2906 #endif /* QCA_IBSS_SUPPORT */
2907 
2908 #ifdef WLAN_FEATURE_DSRC
2909 /**
2910  * ol_txrx_set_ocb_chan_info() - set OCB channel info to vdev.
2911  * @soc_hdl: Datapath soc handle
2912  * @vdev_id: id of vdev
2913  * @ocb_set_chan: OCB channel information to be set in vdev.
2914  *
2915  * Return: NONE
2916  */
2917 static void
2918 ol_txrx_set_ocb_chan_info(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2919 			  struct ol_txrx_ocb_set_chan ocb_set_chan)
2920 {
2921 	struct ol_txrx_vdev_t *vdev =
2922 		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
2923 
2924 	if (qdf_unlikely(!vdev)) {
2925 		ol_txrx_err("vdev is NULL");
2926 		return;
2927 	}
2928 
2929 	vdev->ocb_channel_info = ocb_set_chan.ocb_channel_info;
2930 	vdev->ocb_channel_count = ocb_set_chan.ocb_channel_count;
2931 }
2932 
2933 /**
2934  * ol_txrx_get_ocb_chan_info() - return handle to vdev ocb_channel_info
2935  * @soc_hdl: Datapath soc handle
2936  * @vdev_id: id of vdev
2937  *
2938  * Return: handle to struct ol_txrx_ocb_chan_info
2939  */
2940 static struct ol_txrx_ocb_chan_info *
2941 ol_txrx_get_ocb_chan_info(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
2942 {
2943 	struct ol_txrx_vdev_t *vdev =
2944 		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
2945 
2946 	if (qdf_unlikely(!vdev)) {
2947 		ol_txrx_err("vdev is NULL");
2948 		return NULL;
2949 	}
2950 
2951 	return vdev->ocb_channel_info;
2952 }
2953 #endif
2954 
2955 QDF_STATUS ol_txrx_peer_state_update(struct cdp_soc_t *soc_hdl,
2956 				     uint8_t *peer_mac,
2957 				     enum ol_txrx_peer_state state)
2958 {
2959 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
2960 	ol_txrx_pdev_handle pdev =
2961 		ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
2962 	struct ol_txrx_peer_t *peer;
2963 	int    peer_ref_cnt;
2964 
2965 	if (qdf_unlikely(!pdev)) {
2966 		ol_txrx_err("Pdev is NULL");
2967 		qdf_assert(0);
2968 		return QDF_STATUS_E_INVAL;
2969 	}
2970 
2971 	peer =  ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac, 0, 1,
2972 						    PEER_DEBUG_ID_OL_INTERNAL);
2973 	if (!peer) {
2974 		ol_txrx_err(
2975 			   "peer is null for peer_mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
2976 			   peer_mac[0], peer_mac[1], peer_mac[2], peer_mac[3],
2977 			   peer_mac[4], peer_mac[5]);
2978 		return QDF_STATUS_E_INVAL;
2979 	}
2980 
2981 	/* TODO: Should we send WMI command of the connection state? */
2982 	/* avoid multiple auth state change. */
2983 	if (peer->state == state) {
2984 #ifdef TXRX_PRINT_VERBOSE_ENABLE
2985 		ol_txrx_dbg("no state change, returns directly");
2986 #endif
2987 		peer_ref_cnt = ol_txrx_peer_release_ref
2988 						(peer,
2989 						 PEER_DEBUG_ID_OL_INTERNAL);
2990 		return QDF_STATUS_SUCCESS;
2991 	}
2992 
2993 	ol_txrx_dbg("change from %d to %d",
2994 		    peer->state, state);
2995 
2996 	peer->tx_filter = (state == OL_TXRX_PEER_STATE_AUTH)
2997 		? ol_tx_filter_pass_thru
2998 		: ((state == OL_TXRX_PEER_STATE_CONN)
2999 		   ? ol_tx_filter_non_auth
3000 		   : ol_tx_filter_discard);
3001 
3002 	if (peer->vdev->pdev->cfg.host_addba) {
3003 		if (state == OL_TXRX_PEER_STATE_AUTH) {
3004 			int tid;
3005 			/*
3006 			 * Pause all regular (non-extended) TID tx queues until
3007 			 * data arrives and ADDBA negotiation has completed.
3008 			 */
3009 			ol_txrx_dbg("pause peer and unpause mgmt/non-qos");
3010 			ol_txrx_peer_pause(peer); /* pause all tx queues */
3011 			/* unpause mgmt and non-QoS tx queues */
3012 			for (tid = OL_TX_NUM_QOS_TIDS;
3013 			     tid < OL_TX_NUM_TIDS; tid++)
3014 				ol_txrx_peer_tid_unpause(peer, tid);
3015 		}
3016 	}
3017 	peer_ref_cnt = ol_txrx_peer_release_ref(peer,
3018 						PEER_DEBUG_ID_OL_INTERNAL);
3019 	/*
3020 	 * after ol_txrx_peer_release_ref, peer object cannot be accessed
3021 	 * if the return code was 0
3022 	 */
3023 	if (peer_ref_cnt > 0)
3024 		/*
3025 		 * Set the state after the Pause to avoid the race condition
3026 		 * with ADDBA check in tx path
3027 		 */
3028 		peer->state = state;
3029 	return QDF_STATUS_SUCCESS;
3030 }
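/*
 * Example (editor's sketch): the state progression described in the
 * peer-attach comment above maps onto this function as a sequence of calls,
 * each of which selects the peer's tx filter:
 *
 *	// association starting: discard filter until connected
 *	ol_txrx_peer_state_update(soc_hdl, peer_mac, OL_TXRX_PEER_STATE_DISC);
 *	// associated, keys not yet installed: EAPOL/WAPI-only filter
 *	ol_txrx_peer_state_update(soc_hdl, peer_mac, OL_TXRX_PEER_STATE_CONN);
 *	// key exchange done: pass-through filter
 *	ol_txrx_peer_state_update(soc_hdl, peer_mac, OL_TXRX_PEER_STATE_AUTH);
 */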
3031 
3032 void
3033 ol_txrx_peer_keyinstalled_state_update(struct ol_txrx_peer_t *peer, uint8_t val)
3034 {
3035 	peer->keyinstalled = val;
3036 }
3037 
3038 void
3039 ol_txrx_peer_update(ol_txrx_vdev_handle vdev,
3040 		    uint8_t *peer_mac,
3041 		    union ol_txrx_peer_update_param_t *param,
3042 		    enum ol_txrx_peer_update_select_t select)
3043 {
3044 	struct ol_txrx_peer_t *peer;
3045 
3046 	peer = ol_txrx_peer_find_hash_find_get_ref(vdev->pdev, peer_mac, 0, 1,
3047 						   PEER_DEBUG_ID_OL_INTERNAL);
3048 	if (!peer) {
3049 		ol_txrx_dbg("peer is null");
3050 		return;
3051 	}
3052 
3053 	switch (select) {
3054 	case ol_txrx_peer_update_qos_capable:
3055 	{
3056 		/* Save qos_capable in the txrx peer here; save it again
3057 		 * when HTT_ISOC_T2H_MSG_TYPE_PEER_INFO arrives.
3058 		 */
3059 		peer->qos_capable = param->qos_capable;
3060 		/*
3061 		 * The following function call assumes that the peer has a
3062 		 * single ID. This is currently true, and
3063 		 * is expected to remain true.
3064 		 */
3065 		htt_peer_qos_update(peer->vdev->pdev->htt_pdev,
3066 				    peer->peer_ids[0],
3067 				    peer->qos_capable);
3068 		break;
3069 	}
3070 	case ol_txrx_peer_update_uapsdMask:
3071 	{
3072 		peer->uapsd_mask = param->uapsd_mask;
3073 		htt_peer_uapsdmask_update(peer->vdev->pdev->htt_pdev,
3074 					  peer->peer_ids[0],
3075 					  peer->uapsd_mask);
3076 		break;
3077 	}
3078 	case ol_txrx_peer_update_peer_security:
3079 	{
3080 		enum ol_sec_type sec_type = param->sec_type;
3081 		enum htt_sec_type peer_sec_type = htt_sec_type_none;
3082 
3083 		switch (sec_type) {
3084 		case ol_sec_type_none:
3085 			peer_sec_type = htt_sec_type_none;
3086 			break;
3087 		case ol_sec_type_wep128:
3088 			peer_sec_type = htt_sec_type_wep128;
3089 			break;
3090 		case ol_sec_type_wep104:
3091 			peer_sec_type = htt_sec_type_wep104;
3092 			break;
3093 		case ol_sec_type_wep40:
3094 			peer_sec_type = htt_sec_type_wep40;
3095 			break;
3096 		case ol_sec_type_tkip:
3097 			peer_sec_type = htt_sec_type_tkip;
3098 			break;
3099 		case ol_sec_type_tkip_nomic:
3100 			peer_sec_type = htt_sec_type_tkip_nomic;
3101 			break;
3102 		case ol_sec_type_aes_ccmp:
3103 			peer_sec_type = htt_sec_type_aes_ccmp;
3104 			break;
3105 		case ol_sec_type_wapi:
3106 			peer_sec_type = htt_sec_type_wapi;
3107 			break;
3108 		default:
3109 			peer_sec_type = htt_sec_type_none;
3110 			break;
3111 		}
3112 
3113 		peer->security[txrx_sec_ucast].sec_type =
3114 			peer->security[txrx_sec_mcast].sec_type =
3115 				peer_sec_type;
3116 
3117 		break;
3118 	}
3119 	default:
3120 	{
3121 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3122 			  "ERROR: unknown param %d in %s", select,
3123 			  __func__);
3124 		break;
3125 	}
3126 	} /* switch */
3127 	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
3128 }
3129 
3130 uint8_t
3131 ol_txrx_peer_uapsdmask_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
3132 {
3133 
3134 	struct ol_txrx_peer_t *peer;
3135 
3136 	peer = ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
3137 	if (peer)
3138 		return peer->uapsd_mask;
3139 	return 0;
3140 }
3141 
3142 uint8_t
3143 ol_txrx_peer_qoscapable_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
3144 {
3145 
3146 	struct ol_txrx_peer_t *peer_t =
3147 		ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
3148 	if (peer_t)
3149 		return peer_t->qos_capable;
3150 	return 0;
3151 }
3152 
3153 /**
3154  * ol_txrx_peer_free_tids() - free tids for the peer
3155  * @peer: peer handle
3156  *
3157  * Return: None
3158  */
3159 static inline void ol_txrx_peer_free_tids(ol_txrx_peer_handle peer)
3160 {
3161 	int i = 0;
3162 	/*
3163 	 * 'array' is allocated in addba handler and is supposed to be
3164 	 * freed in delba handler. There is the case (for example, in
3165 	 * SSR) where delba handler is not called. Because array points
3166 	 * to address of 'base' by default and is reallocated in addba
3167 	 * handler later, only free the memory when the array does not
3168 	 * point to base.
3169 	 */
3170 	for (i = 0; i < OL_TXRX_NUM_EXT_TIDS; i++) {
3171 		if (peer->tids_rx_reorder[i].array !=
3172 		    &peer->tids_rx_reorder[i].base) {
3173 			ol_txrx_dbg("delete reorder arr, tid:%d", i);
3174 			qdf_mem_free(peer->tids_rx_reorder[i].array);
3175 			ol_rx_reorder_init(&peer->tids_rx_reorder[i],
3176 					   (uint8_t)i);
3177 		}
3178 	}
3179 }
3180 
3181 /**
3182  * ol_txrx_peer_drop_pending_frames() - drop pending frames in the RX queue
3183  * @peer: peer handle
3184  *
3185  * Drop pending packets pertaining to the peer from the RX thread queue.
3186  *
3187  * Return: None
3188  */
3189 static void ol_txrx_peer_drop_pending_frames(struct ol_txrx_peer_t *peer)
3190 {
3191 	p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
3192 
3193 	if (sched_ctx)
3194 		cds_drop_rxpkt_by_staid(sched_ctx, peer->local_id);
3195 }
3196 
3197 /**
3198  * ol_txrx_peer_release_ref() - release peer reference
3199  * @peer: peer handle
3200  *
3201  * Release peer reference and delete peer if refcount is 0
3202  *
3203  * Return: Resulting peer ref_cnt after this function is invoked
3204  */
3205 int ol_txrx_peer_release_ref(ol_txrx_peer_handle peer,
3206 			     enum peer_debug_id_type debug_id)
3207 {
3208 	int    rc;
3209 	struct ol_txrx_vdev_t *vdev;
3210 	struct ol_txrx_pdev_t *pdev;
3211 	bool ref_silent = true;
3212 	int access_list = 0;
3213 	uint32_t err_code = 0;
3214 	int del_rc;
3215 
3216 	/* preconditions */
3217 	TXRX_ASSERT2(peer);
3218 
3219 	vdev = peer->vdev;
3220 	if (!vdev) {
3221 		ol_txrx_err("The vdev is not present anymore");
3222 		return -EINVAL;
3223 	}
3224 
3225 	pdev = vdev->pdev;
3226 	if (!pdev) {
3227 		ol_txrx_err("The pdev is not present anymore");
3228 		err_code = 0xbad2;
3229 		goto ERR_STATE;
3230 	}
3231 
3232 	if (debug_id >= PEER_DEBUG_ID_MAX || debug_id < 0) {
3233 		ol_txrx_err("incorrect debug_id %d ", debug_id);
3234 		err_code = 0xbad3;
3235 		goto ERR_STATE;
3236 	}
3237 
3238 	if (debug_id == PEER_DEBUG_ID_OL_RX_THREAD)
3239 		ref_silent = true;
3240 
3241 	if (!ref_silent)
3242 		wlan_roam_debug_log(vdev->vdev_id, DEBUG_PEER_UNREF_DELETE,
3243 				    DEBUG_INVALID_PEER_ID, &peer->mac_addr.raw,
3244 				    peer, 0xdead,
3245 				    qdf_atomic_read(&peer->ref_cnt));
3246 
3247 
3248 	/*
3249 	 * Hold the lock all the way from checking if the peer ref count
3250 	 * is zero until the peer references are removed from the hash
3251 	 * table and vdev list (if the peer ref count is zero).
3252 	 * This protects against a new HL tx operation starting to use the
3253 	 * peer object just after this function has concluded it is no longer in use.
3254 	 * Furthermore, the lock needs to be held while checking whether the
3255 	 * vdev's list of peers is empty, to make sure that list is not modified
3256 	 * concurrently with the empty check.
3257 	 */
3258 	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
3259 
3260 	/*
3261 	 * Check the reference count before deleting the peer; we have
3262 	 * seen this function be re-entered, and deleting the peer twice
3263 	 * leads to a dead-lock.
3264 	 * (A double-free should never happen, so assert if it does.)
3265 	 */
3266 	rc = qdf_atomic_read(&(peer->ref_cnt));
3267 
3268 	if (rc == 0) {
3269 		qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
3270 		ol_txrx_err("The Peer is not present anymore");
3271 		qdf_assert(0);
3272 		return -EACCES;
3273 	}
3274 	/*
3275 	 * now decrement rc; this will be the return code.
3276 	 * 0 : peer deleted
3277 	 * >0: peer ref removed, but still has other references
3278 	 * <0: sanity failed - no changes to the state of the peer
3279 	 */
3280 	rc--;
3281 
3282 	if (!qdf_atomic_read(&peer->access_list[debug_id])) {
3283 		qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
3284 		ol_txrx_err("peer %pK ref was not taken by %d",
3285 			    peer, debug_id);
3286 		ol_txrx_dump_peer_access_list(peer);
3287 		QDF_BUG(0);
3288 		return -EACCES;
3289 	}
3290 	qdf_atomic_dec(&peer->access_list[debug_id]);
3291 
3292 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
3293 		u16 peer_id;
3294 		wlan_roam_debug_log(vdev->vdev_id,
3295 				    DEBUG_DELETING_PEER_OBJ,
3296 				    DEBUG_INVALID_PEER_ID,
3297 				    &peer->mac_addr.raw, peer, 0,
3298 				    qdf_atomic_read(&peer->ref_cnt));
3299 		peer_id = peer->local_id;
3300 
3301 		/* Drop all pending frames in the rx thread queue */
3302 		ol_txrx_peer_drop_pending_frames(peer);
3303 
3304 		/* remove the reference to the peer from the hash table */
3305 		ol_txrx_peer_find_hash_remove(pdev, peer);
3306 
3307 		/* remove the peer from its parent vdev's list */
3308 		TAILQ_REMOVE(&peer->vdev->peer_list, peer, peer_list_elem);
3309 
3310 		/* cleanup the Rx reorder queues for this peer */
3311 		ol_rx_peer_cleanup(vdev, peer);
3312 
3313 		qdf_spinlock_destroy(&peer->peer_info_lock);
3314 		qdf_spinlock_destroy(&peer->bufq_info.bufq_lock);
3315 
3316 		/* peer is removed from peer_list */
3317 		qdf_atomic_set(&peer->delete_in_progress, 0);
3318 
3319 		/*
3320 		 * Set wait_delete_comp event if the current peer id matches
3321 		 * with registered peer id.
3322 		 */
3323 		if (peer_id == vdev->wait_on_peer_id) {
3324 			qdf_event_set(&vdev->wait_delete_comp);
3325 			vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
3326 		}
3327 
3328 		qdf_timer_sync_cancel(&peer->peer_unmap_timer);
3329 		qdf_timer_free(&peer->peer_unmap_timer);
3330 
3331 		/* check whether the parent vdev has no peers left */
3332 		if (TAILQ_EMPTY(&vdev->peer_list)) {
3333 			/*
3334 			 * Check if the parent vdev was waiting for its peers
3335 			 * to be deleted, in order for it to be deleted too.
3336 			 */
3337 			if (vdev->delete.pending) {
3338 				ol_txrx_vdev_delete_cb vdev_delete_cb =
3339 					vdev->delete.callback;
3340 				void *vdev_delete_context =
3341 					vdev->delete.context;
3342 				ol_txrx_vdev_delete_cb vdev_del_notify =
3343 						vdev->vdev_del_notify;
3344 				void *vdev_del_context = vdev->osif_dev;
3345 				/*
3346 				 * Now that there are no references to the peer,
3347 				 * we can release the peer reference lock.
3348 				 */
3349 				qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
3350 
3351 				/*
3352 				 * ol_tx_desc_free might access invalid vdev
3353 				 * contents through a tx desc, since this vdev
3354 				 * might be detached asynchronously by another
3355 				 * thread.
3356 				 *
3357 				 * So walk the tx desc pool and set each
3358 				 * corresponding tx desc's vdev to NULL when
3359 				 * detaching this vdev, and check the vdev in
3360 				 * ol_tx_desc_free to avoid a crash.
3361 				 */
3362 				ol_txrx_tx_desc_reset_vdev(vdev);
3363 				ol_txrx_dbg(
3364 					"deleting vdev object %pK ("QDF_MAC_ADDR_FMT") - its last peer is done",
3365 					vdev,
3366 					QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
3367 				/* all peers are gone, go ahead and delete it */
3368 				qdf_mem_free(vdev);
3369 				if (vdev_delete_cb)
3370 					vdev_delete_cb(vdev_delete_context);
3371 
3372 				if (vdev_del_notify)
3373 					vdev_del_notify(vdev_del_context);
3374 			} else {
3375 				qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
3376 			}
3377 		} else {
3378 			qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
3379 		}
3380 
3381 		del_rc = qdf_atomic_read(&peer->del_ref_cnt);
3382 
3383 		ol_txrx_info_high("[%d][%d]: Deleting peer %pK ref_cnt -> %d del_ref_cnt -> %d %s",
3384 				  debug_id,
3385 				  qdf_atomic_read(&peer->access_list[debug_id]),
3386 				  peer, rc, del_rc,
3387 				  qdf_atomic_read(&peer->fw_create_pending) ==
3388 				  1 ? "(No Maps received)" : "");
3389 
3390 		ol_txrx_peer_tx_queue_free(pdev, peer);
3391 
3392 		/* Remove mappings from peer_id to peer object */
3393 		ol_txrx_peer_clear_map_peer(pdev, peer);
3394 
3395 		/* Remove peer pointer from local peer ID map */
3396 		ol_txrx_local_peer_id_free(pdev, peer);
3397 
3398 		ol_txrx_peer_free_tids(peer);
3399 
3400 		ol_txrx_dump_peer_access_list(peer);
3401 
3402 		if (QDF_GLOBAL_MONITOR_MODE == cds_get_conparam() &&
3403 		    pdev->self_peer == peer)
3404 			pdev->self_peer = NULL;
3405 
3406 		if (!del_rc)
3407 			qdf_mem_free(peer);
3408 	} else {
3409 		access_list = qdf_atomic_read(&peer->access_list[debug_id]);
3410 		qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
3411 		if (!ref_silent)
3412 			ol_txrx_info_high("[%d][%d]: ref delete peer %pK ref_cnt -> %d",
3413 					  debug_id, access_list, peer, rc);
3414 	}
3415 	return rc;
3416 ERR_STATE:
3417 	wlan_roam_debug_log(vdev->vdev_id, DEBUG_PEER_UNREF_DELETE,
3418 			    DEBUG_INVALID_PEER_ID, &peer->mac_addr.raw,
3419 			    peer, err_code, qdf_atomic_read(&peer->ref_cnt));
3420 	return -EINVAL;
3421 }
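
/*
 * Usage sketch (based on the callers further down in this file, e.g.
 * ol_txrx_clear_peer() and ol_txrx_peer_flush_frags()): every reference
 * taken with ol_txrx_peer_get_ref_by_addr() or
 * ol_txrx_peer_find_hash_find_get_ref() must be balanced by exactly one
 * ol_txrx_peer_release_ref() call with the same debug id, for example:
 *
 *	peer = ol_txrx_peer_get_ref_by_addr(pdev, mac_addr,
 *					    PEER_DEBUG_ID_OL_INTERNAL);
 *	if (peer) {
 *		... use peer ...
 *		ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
 *	}
 */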
3422 
3423 /**
3424  * ol_txrx_clear_peer_internal() - ol internal function to clear peer
3425  * @peer: pointer to ol txrx peer structure
3426  *
3427  * Return: QDF Status
3428  */
3429 static QDF_STATUS
3430 ol_txrx_clear_peer_internal(struct ol_txrx_peer_t *peer)
3431 {
3432 	p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
3433 	/* Drop pending Rx frames in CDS */
3434 	if (sched_ctx)
3435 		cds_drop_rxpkt_by_staid(sched_ctx, peer->local_id);
3436 
3437 	/* Purge the cached rx frame queue */
3438 	ol_txrx_flush_rx_frames(peer, 1);
3439 
3440 	qdf_spin_lock_bh(&peer->peer_info_lock);
3441 	peer->state = OL_TXRX_PEER_STATE_DISC;
3442 	qdf_spin_unlock_bh(&peer->peer_info_lock);
3443 
3444 	return QDF_STATUS_SUCCESS;
3445 }
3446 
3447 /**
3448  * ol_txrx_clear_peer() - clear peer
3449  * @peer_addr: peer mac address
3450  *
3451  * Return: QDF Status
3452  */
3453 static QDF_STATUS
3454 ol_txrx_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3455 		   struct qdf_mac_addr peer_addr)
3456 {
3457 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
3458 	struct ol_txrx_pdev_t *pdev =
3459 			ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
3460 	struct ol_txrx_peer_t *peer;
3461 	QDF_STATUS status;
3462 
3463 	if (!pdev) {
3464 		ol_txrx_err("Unable to find pdev!");
3465 		return QDF_STATUS_E_FAILURE;
3466 	}
3467 
3468 	peer = ol_txrx_peer_get_ref_by_addr(pdev, peer_addr.bytes,
3469 					    PEER_DEBUG_ID_OL_INTERNAL);
3470 
3471 	/* Return success, if the peer is already cleared by
3472 	 * data path via peer detach function.
3473 	 */
3474 	if (!peer)
3475 		return QDF_STATUS_SUCCESS;
3476 
3477 	ol_txrx_dbg("Clear peer rx frames: " QDF_MAC_ADDR_FMT,
3478 		    QDF_MAC_ADDR_REF(peer->mac_addr.raw));
3479 	ol_txrx_clear_peer_internal(peer);
3480 	status = ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
3481 
3482 	return status;
3483 }
3484 
3485 /**
3486  * peer_unmap_timer_handler() - peer unmap timer function
3487  * @data: peer object pointer
3488  *
3489  * Return: none
3490  */
3491 void peer_unmap_timer_handler(void *data)
3492 {
3493 	ol_txrx_peer_handle peer = (ol_txrx_peer_handle)data;
3494 
3495 	if (!peer)
3496 		return;
3497 
3498 	ol_txrx_err("all unmap events not received for peer %pK, ref_cnt %d",
3499 		    peer, qdf_atomic_read(&peer->ref_cnt));
3500 	ol_txrx_err("peer %pK ("QDF_MAC_ADDR_FMT")",
3501 		    peer,
3502 		    QDF_MAC_ADDR_REF(peer->mac_addr.raw));
3503 	ol_register_peer_recovery_notifier(peer);
3504 
3505 	cds_trigger_recovery(QDF_PEER_UNMAP_TIMEDOUT);
3506 }
3507 
3508 
3509 /**
3510  * ol_txrx_peer_detach() - Delete a peer's data object.
3511  *
3512  * @soc_hdl: datapath soc handle
3513  * @vdev_id: virtual interface id
3514  * @peer_mac: peer MAC address
3515  * @bitmap: bitmap indicating special handling of request.
3516  * @peer_type: link or mld peer
3517  * When the host's control SW disassociates a peer, it calls
3518  * this function to detach and delete the peer. The reference
3519  * stored in the control peer object to the data peer
3520  * object (set up by a call to ol_peer_store()) is provided.
3521  *
3522  * Return: SUCCESS or Failure
3523  */
3524 static QDF_STATUS ol_txrx_peer_detach(struct cdp_soc_t *soc_hdl,
3525 				      uint8_t vdev_id, uint8_t *peer_mac,
3526 				      uint32_t bitmap,
3527 				      enum cdp_peer_type peer_type)
3528 {
3529 	ol_txrx_peer_handle peer;
3530 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
3531 	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
3532 								     vdev_id);
3533 
3534 	if (!vdev)
3535 		return QDF_STATUS_E_FAILURE;
3536 
3537 	peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)vdev->pdev,
3538 					 peer_mac);
3539 	if (!peer)
3540 		return QDF_STATUS_E_FAILURE;
3541 
3542 	ol_txrx_info_high("peer %pK, peer->ref_cnt %d",
3543 			  peer, qdf_atomic_read(&peer->ref_cnt));
3544 
3545 	/* redirect peer's rx delivery function to point to a discard func */
3546 	peer->rx_opt_proc = ol_rx_discard;
3547 
3548 	peer->valid = 0;
3549 
3550 	/* flush all rx packets before clearing up the peer local_id */
3551 	ol_txrx_clear_peer_internal(peer);
3552 
3553 	/* debug print to dump rx reorder state */
3554 	/* htt_rx_reorder_log_print(vdev->pdev->htt_pdev); */
3555 
3556 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
3557 		   "%s:peer %pK ("QDF_MAC_ADDR_FMT")",
3558 		   __func__, peer,
3559 		   QDF_MAC_ADDR_REF(peer->mac_addr.raw));
3560 
3561 	qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
3562 	if (vdev->last_real_peer == peer)
3563 		vdev->last_real_peer = NULL;
3564 	qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
3565 	htt_rx_reorder_log_print(peer->vdev->pdev->htt_pdev);
3566 
3567 	/*
3568 	 * set delete_in_progress to identify that wma
3569 	 * is waiting for the unmap message for this peer
3570 	 */
3571 	qdf_atomic_set(&peer->delete_in_progress, 1);
3572 
3573 	if (!(bitmap & (1 << CDP_PEER_DO_NOT_START_UNMAP_TIMER))) {
3574 		if (vdev->opmode == wlan_op_mode_sta) {
3575 			qdf_mem_copy(&peer->vdev->last_peer_mac_addr,
3576 				&peer->mac_addr,
3577 				sizeof(union ol_txrx_align_mac_addr_t));
3578 
3579 			/*
3580 			 * Create a timer to track unmap events when the
3581 			 * sta peer gets deleted.
3582 			 */
3583 			qdf_timer_start(&peer->peer_unmap_timer,
3584 					OL_TXRX_PEER_UNMAP_TIMEOUT);
3585 			ol_txrx_info_high
3586 				("started peer_unmap_timer for peer %pK",
3587 				  peer);
3588 		}
3589 	}
3590 
3591 	/*
3592 	 * Remove the reference added during peer_attach.
3593 	 * The peer will still be left allocated until the
3594 	 * PEER_UNMAP message arrives to remove the other
3595 	 * reference, added by the PEER_MAP message.
3596 	 */
3597 	peer->state = OL_TXRX_PEER_STATE_INVALID;
3598 	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_PEER_ATTACH);
3599 
3600 	return QDF_STATUS_SUCCESS;
3601 }
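
/*
 * Note: for STA vdevs the peer_unmap_timer started above acts as a
 * watchdog; if all PEER_UNMAP events have not arrived before it expires,
 * peer_unmap_timer_handler() (defined earlier in this file) triggers
 * recovery via cds_trigger_recovery(QDF_PEER_UNMAP_TIMEDOUT).
 */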
3602 
3603 /**
3604  * ol_txrx_peer_detach_force_delete() - Detach and delete a peer's data object
3605  * @soc_hdl: datapath soc handle
3606  * @vdev_id: virtual interface id
3607  * @peer_mac: peer mac address
3608  *
3609  * Detach a peer and force peer object to be removed. It is called during
3610  * roaming scenario when the firmware has already deleted a peer.
3611  * Remove it from the peer_id_to_object map. Peer object is actually freed
3612  * when last reference is deleted.
3613  *
3614  * Return: None
3615  */
3616 static void ol_txrx_peer_detach_force_delete(struct cdp_soc_t *soc_hdl,
3617 					     uint8_t vdev_id, uint8_t *peer_mac)
3618 {
3619 	struct ol_txrx_peer_t *peer;
3620 	struct ol_txrx_pdev_t *pdev;
3621 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
3622 	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
3623 								     vdev_id);
3624 
3625 	if (!vdev || !vdev->pdev)
3626 		return;
3627 
3628 	pdev = vdev->pdev;
3629 	peer = ol_txrx_find_peer_by_addr(ol_txrx_pdev_t_to_cdp_pdev(pdev),
3630 					 peer_mac);
3631 	if (!peer)
3632 		return;
3633 
3634 	/* Clear the peer_id_to_obj map entries */
3635 	ol_txrx_peer_remove_obj_map_entries(pdev, peer);
3636 	ol_txrx_peer_detach(soc_hdl, vdev_id, peer_mac,
3637 			    1 << CDP_PEER_DELETE_NO_SPECIAL,
3638 			    CDP_LINK_PEER_TYPE);
3639 }
3640 
3641 /**
3642  * ol_txrx_peer_detach_sync() - peer detach sync callback
3643  * @soc_hdl: datapath soc handle
3644  * @vdev_id: virtual interface id
3645  * @peer_mac: peer mac address
3646  * @peer_unmap_sync: peer unmap sync cb.
3647  * @bitmap: bitmap indicating special handling of request.
3648  *
3649  * Return: None
3650  */
3651 static void ol_txrx_peer_detach_sync(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3652 				     uint8_t *peer_mac,
3653 				     ol_txrx_peer_unmap_sync_cb peer_unmap_sync,
3654 				     uint32_t bitmap)
3655 {
3656 	struct ol_txrx_pdev_t *pdev;
3657 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
3658 	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
3659 								     vdev_id);
3660 
3661 	if (!vdev || !vdev->pdev)
3662 		return;
3663 
3664 	pdev = vdev->pdev;
3665 	if (!pdev->peer_unmap_sync_cb)
3666 		pdev->peer_unmap_sync_cb = peer_unmap_sync;
3667 
3668 	ol_txrx_peer_detach(soc_hdl, vdev_id, peer_mac, bitmap,
3669 			    CDP_LINK_PEER_TYPE);
3670 }
3671 
3672 /**
3673  * ol_txrx_peer_unmap_sync_cb_set() - set peer unmap sync callback
3674  * @soc_hdl: datapath soc handle
3675  * @pdev_id: physical device instance id
3676  * @peer_unmap_sync: peer unmap sync callback
3677  *
3678  * Return: None
3679  */
3680 static void ol_txrx_peer_unmap_sync_cb_set(
3681 				struct cdp_soc_t *soc_hdl,
3682 				uint8_t pdev_id,
3683 				ol_txrx_peer_unmap_sync_cb peer_unmap_sync)
3684 {
3685 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
3686 	struct ol_txrx_pdev_t *pdev = ol_txrx_get_pdev_from_pdev_id(soc,
3687 								    pdev_id);
3688 
3689 	if (!pdev)
3690 		return;
3691 
3692 	if (!pdev->peer_unmap_sync_cb)
3693 		pdev->peer_unmap_sync_cb = peer_unmap_sync;
3694 }
3695 
3696 /**
3697  * ol_txrx_peer_flush_frags() - Flush fragments for a particular peer
3698  * @soc_hdl: datapath soc handle
3699  * @vdev_id: virtual device id
3700  * @peer_mac: peer mac address
3701  *
3702  * Return: None
3703  */
3704 static void
3705 ol_txrx_peer_flush_frags(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3706 			 uint8_t *peer_mac)
3707 {
3708 	struct ol_txrx_peer_t *peer;
3709 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
3710 	struct ol_txrx_pdev_t *pdev =
3711 		ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
3712 
3713 	if (!pdev)
3714 		return;
3715 
3716 	peer =  ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac, 0, 1,
3717 						    PEER_DEBUG_ID_OL_INTERNAL);
3718 	if (!peer)
3719 		return;
3720 
3721 	ol_rx_reorder_peer_cleanup(peer->vdev, peer);
3722 
3723 	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
3724 }
3725 
3726 /**
3727  * ol_txrx_dump_tx_desc() - dump tx desc total and free count
3728  * @pdev_handle: Pointer to txrx pdev
3729  *
3730  * Return: none
3731  */
3732 static void ol_txrx_dump_tx_desc(ol_txrx_pdev_handle pdev_handle)
3733 {
3734 	struct ol_txrx_pdev_t *pdev = (ol_txrx_pdev_handle) pdev_handle;
3735 	uint32_t total, num_free;
3736 
3737 	if (ol_cfg_is_high_latency(pdev->ctrl_pdev))
3738 		total = qdf_atomic_read(&pdev->orig_target_tx_credit);
3739 	else
3740 		total = ol_tx_get_desc_global_pool_size(pdev);
3741 
3742 	num_free = ol_tx_get_total_free_desc(pdev);
3743 
3744 	ol_txrx_info_high(
3745 		   "total tx credit %d num_free %d",
3746 		   total, num_free);
3747 
3748 }
3749 
3750 /**
3751  * ol_txrx_wait_for_pending_tx() - wait for tx queue to be empty
3752  * @timeout: timeout in ms
3753  *
3754  * Wait for tx queue to be empty, return timeout error if
3755  * queue doesn't empty before timeout occurs.
3756  *
3757  * Return:
3758  *    QDF_STATUS_SUCCESS if the queue empties,
3759  *    QDF_STATUS_E_TIMEOUT in case of timeout,
3760  *    QDF_STATUS_E_FAULT in case of missing handle
3761  */
3762 static QDF_STATUS ol_txrx_wait_for_pending_tx(int timeout)
3763 {
3764 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
3765 	struct ol_txrx_pdev_t *txrx_pdev;
3766 
3767 	if (qdf_unlikely(!soc))
3768 		return QDF_STATUS_E_FAULT;
3769 
3770 	txrx_pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
3771 	if (!txrx_pdev) {
3772 		ol_txrx_err("txrx context is null");
3773 		return QDF_STATUS_E_FAULT;
3774 	}
3775 
3776 	while (ol_txrx_get_tx_pending((struct cdp_pdev *)txrx_pdev)) {
3777 		qdf_sleep(OL_ATH_TX_DRAIN_WAIT_DELAY);
3778 		if (timeout <= 0) {
3779 			ol_txrx_err("tx frames are pending");
3780 			ol_txrx_dump_tx_desc(txrx_pdev);
3781 			return QDF_STATUS_E_TIMEOUT;
3782 		}
3783 		timeout = timeout - OL_ATH_TX_DRAIN_WAIT_DELAY;
3784 	}
3785 	return QDF_STATUS_SUCCESS;
3786 }
3787 
3788 #ifndef QCA_WIFI_3_0_EMU
3789 #define SUSPEND_DRAIN_WAIT 500
3790 #else
3791 #define SUSPEND_DRAIN_WAIT 3000
3792 #endif
3793 
3794 #ifdef FEATURE_RUNTIME_PM
3795 /**
3796  * ol_txrx_runtime_suspend() - ensure TXRX is ready to runtime suspend
3797  * @soc_hdl: Datapath soc handle
3798  * @pdev_id: id of data path pdev handle
3799  *
3800  * TXRX is ready to runtime suspend if there are no pending packets
3801  * in the tx queue.
3802  *
3803  * Return: QDF_STATUS
3804  */
3805 static QDF_STATUS ol_txrx_runtime_suspend(struct cdp_soc_t *soc_hdl,
3806 					  uint8_t pdev_id)
3807 {
3808 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
3809 	struct cdp_pdev *txrx_pdev = (struct cdp_pdev *)
3810 				ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
3811 
3812 	if (ol_txrx_get_tx_pending(txrx_pdev))
3813 		return QDF_STATUS_E_BUSY;
3814 	else
3815 		return QDF_STATUS_SUCCESS;
3816 }
3817 
3818 /**
3819  * ol_txrx_runtime_resume() - ensure TXRX is ready to runtime resume
3820  * @soc_hdl: Datapath soc handle
3821  * @pdev_id: id of data path pdev handle
3822  *
3823  * This is a dummy function for symmetry.
3824  *
3825  * Return: QDF_STATUS_SUCCESS
3826  */
3827 static QDF_STATUS ol_txrx_runtime_resume(struct cdp_soc_t *soc_hdl,
3828 					 uint8_t pdev_id)
3829 {
3830 	return QDF_STATUS_SUCCESS;
3831 }
3832 #endif
3833 
3834 /**
3835  * ol_txrx_bus_suspend() - bus suspend
3836  * @soc_hdl: Datapath soc handle
3837  * @pdev_id: id of data path pdev handle
3838  *
3839  * Ensure that ol_txrx is ready for bus suspend
3840  *
3841  * Return: QDF_STATUS
3842  */
3843 static QDF_STATUS ol_txrx_bus_suspend(struct cdp_soc_t *soc_hdl,
3844 				      uint8_t pdev_id)
3845 {
3846 	return ol_txrx_wait_for_pending_tx(SUSPEND_DRAIN_WAIT);
3847 }
3848 
3849 /**
3850  * ol_txrx_bus_resume() - bus resume
3851  * @soc_hdl: Datapath soc handle
3852  * @pdev_id: id of data path pdev handle
3853  *
3854  * Dummy function for symmetry
3855  *
3856  * Return: QDF_STATUS_SUCCESS
3857  */
3858 static QDF_STATUS ol_txrx_bus_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
3859 {
3860 	return QDF_STATUS_SUCCESS;
3861 }
3862 
3863 /**
3864  * ol_txrx_get_tx_pending - Get the number of pending transmit
3865  * frames that are awaiting completion.
3866  *
3867  * @ppdev: the data physical device object
3868  * Mainly used in the cleanup path to make sure all buffers have been freed
3869  *
3870  * Return: count of pending frames
3871  */
3872 uint32_t ol_txrx_get_tx_pending(struct cdp_pdev *ppdev)
3873 {
3874 	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
3875 	uint32_t total;
3876 
3877 	if (ol_cfg_is_high_latency(pdev->ctrl_pdev))
3878 		total = qdf_atomic_read(&pdev->orig_target_tx_credit);
3879 	else
3880 		total = ol_tx_get_desc_global_pool_size(pdev);
3881 
3882 	return total - ol_tx_get_total_free_desc(pdev);
3883 }
3884 
3885 void ol_txrx_discard_tx_pending(ol_txrx_pdev_handle pdev_handle)
3886 {
3887 	ol_tx_desc_list tx_descs;
3888 	/*
3889 	 * First let hif do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
3890 	 * then let htt do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
3891 	 * which is the same as the normal data send completion path
3892 	 */
3893 	htt_tx_pending_discard(pdev_handle->htt_pdev);
3894 
3895 	TAILQ_INIT(&tx_descs);
3896 	ol_tx_queue_discard(pdev_handle, true, &tx_descs);
3897 	/* Discard Frames in Discard List */
3898 	ol_tx_desc_frame_list_free(pdev_handle, &tx_descs, 1 /* error */);
3899 
3900 	ol_tx_discard_target_frms(pdev_handle);
3901 }
3902 
3903 static inline
3904 uint64_t ol_txrx_stats_ptr_to_u64(struct ol_txrx_stats_req_internal *req)
3905 {
3906 	return (uint64_t) ((size_t) req);
3907 }
3908 
3909 static inline
3910 struct ol_txrx_stats_req_internal *ol_txrx_u64_to_stats_ptr(uint64_t cookie)
3911 {
3912 	return (struct ol_txrx_stats_req_internal *)((size_t) cookie);
3913 }
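
/*
 * These two helpers simply round-trip a stats request pointer through an
 * opaque 64-bit cookie, e.g.
 *
 *	uint64_t cookie = ol_txrx_stats_ptr_to_u64(req);
 *	req = ol_txrx_u64_to_stats_ptr(cookie);
 *
 * so such a cookie is only meaningful on the host that created it.
 */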
3914 
3915 #ifdef currently_unused
3916 void
3917 ol_txrx_fw_stats_cfg(ol_txrx_vdev_handle vdev,
3918 		     uint8_t cfg_stats_type, uint32_t cfg_val)
3919 {
3920 	uint8_t dummy_cookie = 0;
3921 
3922 	htt_h2t_dbg_stats_get(vdev->pdev->htt_pdev, 0 /* upload mask */,
3923 			      0 /* reset mask */,
3924 			      cfg_stats_type, cfg_val, dummy_cookie);
3925 }
3926 #endif
3927 
3928 /**
3929  * ol_txrx_fw_stats_desc_pool_init() - Initialize the fw stats descriptor pool
3930  * @pdev: handle to ol txrx pdev
3931  * @pool_size: Size of fw stats descriptor pool
3932  *
3933  * Return: 0 for success, error code on failure.
3934  */
3935 int ol_txrx_fw_stats_desc_pool_init(struct ol_txrx_pdev_t *pdev,
3936 				    uint8_t pool_size)
3937 {
3938 	int i;
3939 
3940 	if (!pdev) {
3941 		ol_txrx_err("pdev is NULL");
3942 		return -EINVAL;
3943 	}
3944 	pdev->ol_txrx_fw_stats_desc_pool.pool = qdf_mem_malloc(pool_size *
3945 		sizeof(struct ol_txrx_fw_stats_desc_elem_t));
3946 	if (!pdev->ol_txrx_fw_stats_desc_pool.pool)
3947 		return -ENOMEM;
3948 
3949 	pdev->ol_txrx_fw_stats_desc_pool.freelist =
3950 		&pdev->ol_txrx_fw_stats_desc_pool.pool[0];
3951 	pdev->ol_txrx_fw_stats_desc_pool.pool_size = pool_size;
3952 
3953 	for (i = 0; i < (pool_size - 1); i++) {
3954 		pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.desc_id = i;
3955 		pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.req = NULL;
3956 		pdev->ol_txrx_fw_stats_desc_pool.pool[i].next =
3957 			&pdev->ol_txrx_fw_stats_desc_pool.pool[i + 1];
3958 	}
3959 	pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.desc_id = i;
3960 	pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.req = NULL;
3961 	pdev->ol_txrx_fw_stats_desc_pool.pool[i].next = NULL;
3962 	qdf_spinlock_create(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3963 	qdf_atomic_init(&pdev->ol_txrx_fw_stats_desc_pool.initialized);
3964 	qdf_atomic_set(&pdev->ol_txrx_fw_stats_desc_pool.initialized, 1);
3965 	return 0;
3966 }
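
/*
 * After initialization the descriptor pool forms a singly linked freelist:
 *
 *	freelist -> pool[0] -> pool[1] -> ... -> pool[pool_size - 1] -> NULL
 *
 * ol_txrx_fw_stats_desc_alloc() pops descriptors from the head of this list
 * and ol_txrx_fw_stats_desc_get_req() pushes them back, both under
 * pool_lock.
 */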
3967 
3968 /**
3969  * ol_txrx_fw_stats_desc_pool_deinit() - Deinitialize the
3970  * fw stats descriptor pool
3971  * @pdev: handle to ol txrx pdev
3972  *
3973  * Return: None
3974  */
3975 void ol_txrx_fw_stats_desc_pool_deinit(struct ol_txrx_pdev_t *pdev)
3976 {
3977 	if (!pdev) {
3978 		ol_txrx_err("pdev is NULL");
3979 		return;
3980 	}
3981 	if (!qdf_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
3982 		ol_txrx_err("Pool is not initialized");
3983 		return;
3984 	}
3985 	if (!pdev->ol_txrx_fw_stats_desc_pool.pool) {
3986 		ol_txrx_err("Pool is not allocated");
3987 		return;
3988 	}
3989 	qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3990 	qdf_atomic_set(&pdev->ol_txrx_fw_stats_desc_pool.initialized, 0);
3991 	qdf_mem_free(pdev->ol_txrx_fw_stats_desc_pool.pool);
3992 	pdev->ol_txrx_fw_stats_desc_pool.pool = NULL;
3993 
3994 	pdev->ol_txrx_fw_stats_desc_pool.freelist = NULL;
3995 	pdev->ol_txrx_fw_stats_desc_pool.pool_size = 0;
3996 	qdf_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3997 }
3998 
3999 /**
4000  * ol_txrx_fw_stats_desc_alloc() - Get fw stats descriptor from fw stats
4001  * free descriptor pool
4002  * @pdev: handle to ol txrx pdev
4003  *
4004  * Return: pointer to fw stats descriptor, NULL on failure
4005  */
4006 struct ol_txrx_fw_stats_desc_t
4007 	*ol_txrx_fw_stats_desc_alloc(struct ol_txrx_pdev_t *pdev)
4008 {
4009 	struct ol_txrx_fw_stats_desc_t *desc = NULL;
4010 
4011 	qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
4012 	if (!qdf_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
4013 		qdf_spin_unlock_bh(&pdev->
4014 				   ol_txrx_fw_stats_desc_pool.pool_lock);
4015 		ol_txrx_err("Pool deinitialized");
4016 		return NULL;
4017 	}
4018 	if (pdev->ol_txrx_fw_stats_desc_pool.freelist) {
4019 		desc = &pdev->ol_txrx_fw_stats_desc_pool.freelist->desc;
4020 		pdev->ol_txrx_fw_stats_desc_pool.freelist =
4021 			pdev->ol_txrx_fw_stats_desc_pool.freelist->next;
4022 	}
4023 	qdf_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
4024 
4025 	if (desc)
4026 		ol_txrx_dbg("desc_id %d allocated", desc->desc_id);
4027 	else
4028 		ol_txrx_err("fw stats descriptors are exhausted");
4029 
4030 	return desc;
4031 }
4032 
4033 /**
4034  * ol_txrx_fw_stats_desc_get_req() - Get the request stored in a fw stats
4035  * descriptor and put the descriptor back into the free pool
4036  * @pdev: handle to ol txrx pdev
4037  * @desc_id: descriptor id (cookie) received from the target
4038  *
4039  * Return: pointer to the stats request associated with @desc_id
4040  */
4041 struct ol_txrx_stats_req_internal
4042 	*ol_txrx_fw_stats_desc_get_req(struct ol_txrx_pdev_t *pdev,
4043 				       unsigned char desc_id)
4044 {
4045 	struct ol_txrx_fw_stats_desc_elem_t *desc_elem;
4046 	struct ol_txrx_stats_req_internal *req;
4047 
4048 	qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
4049 	if (!qdf_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
4050 		qdf_spin_unlock_bh(&pdev->
4051 				   ol_txrx_fw_stats_desc_pool.pool_lock);
4052 		ol_txrx_err("Desc ID %u Pool deinitialized", desc_id);
4053 		return NULL;
4054 	}
4055 	desc_elem = &pdev->ol_txrx_fw_stats_desc_pool.pool[desc_id];
4056 	req = desc_elem->desc.req;
4057 	desc_elem->desc.req = NULL;
4058 	desc_elem->next =
4059 		pdev->ol_txrx_fw_stats_desc_pool.freelist;
4060 	pdev->ol_txrx_fw_stats_desc_pool.freelist = desc_elem;
4061 	qdf_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
4062 	return req;
4063 }
4064 
4065 /**
4066  * ol_txrx_fw_stats_get() - Get fw stats
4067  *
4068  * @soc_hdl: datapath soc handle
4069  * @vdev_id: virtual interface id
4070  * @req: specifications of stats request
4071  * @per_vdev: bool input whether stats requested per vdev or not
4072  * @response_expected: bool input whether expecting response or not
4073  *
4074  * Return: success or failure
4075  */
4076 static A_STATUS
4077 ol_txrx_fw_stats_get(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4078 		     struct ol_txrx_stats_req *req, bool per_vdev,
4079 		     bool response_expected)
4080 {
4081 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
4082 	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
4083 								     vdev_id);
4084 	struct ol_txrx_pdev_t *pdev;
4085 	uint8_t cookie = FW_STATS_DESC_POOL_SIZE;
4086 	struct ol_txrx_stats_req_internal *non_volatile_req;
4087 	struct ol_txrx_fw_stats_desc_t *desc = NULL;
4088 	struct ol_txrx_fw_stats_desc_elem_t *elem = NULL;
4089 
4090 	if (!vdev)
4091 		return A_EINVAL;
4092 
4093 	pdev = vdev->pdev;
4094 	if (!pdev ||
4095 	    req->stats_type_upload_mask >= 1 << HTT_DBG_NUM_STATS ||
4096 	    req->stats_type_reset_mask >= 1 << HTT_DBG_NUM_STATS) {
4097 		return A_EINVAL;
4098 	}
4099 
4100 	/*
4101 	 * Allocate a non-transient stats request object.
4102 	 * (The one provided as an argument is likely allocated on the stack.)
4103 	 */
4104 	non_volatile_req = qdf_mem_malloc(sizeof(*non_volatile_req));
4105 	if (!non_volatile_req)
4106 		return A_NO_MEMORY;
4107 
4108 	/* copy the caller's specifications */
4109 	non_volatile_req->base = *req;
4110 	non_volatile_req->serviced = 0;
4111 	non_volatile_req->offset = 0;
4112 	if (response_expected) {
4113 		desc = ol_txrx_fw_stats_desc_alloc(pdev);
4114 		if (!desc) {
4115 			qdf_mem_free(non_volatile_req);
4116 			return A_NO_MEMORY;
4117 		}
4118 
4119 		/* use the desc id as the cookie */
4120 		cookie = desc->desc_id;
4121 		desc->req = non_volatile_req;
4122 		qdf_spin_lock_bh(&pdev->req_list_spinlock);
4123 		TAILQ_INSERT_TAIL(&pdev->req_list, non_volatile_req, req_list_elem);
4124 		pdev->req_list_depth++;
4125 		qdf_spin_unlock_bh(&pdev->req_list_spinlock);
4126 	}
4127 
4128 	if (htt_h2t_dbg_stats_get(pdev->htt_pdev,
4129 				  req->stats_type_upload_mask,
4130 				  req->stats_type_reset_mask,
4131 				  HTT_H2T_STATS_REQ_CFG_STAT_TYPE_INVALID, 0,
4132 				  cookie)) {
4133 		if (response_expected) {
4134 			qdf_spin_lock_bh(&pdev->req_list_spinlock);
4135 			TAILQ_REMOVE(&pdev->req_list, non_volatile_req,
4136 				     req_list_elem);
4137 			pdev->req_list_depth--;
4138 			qdf_spin_unlock_bh(&pdev->req_list_spinlock);
4139 			if (desc) {
4140 				qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.
4141 						 pool_lock);
4142 				desc->req = NULL;
4143 				elem = container_of(desc,
4144 						    struct ol_txrx_fw_stats_desc_elem_t,
4145 						    desc);
4146 				elem->next =
4147 					pdev->ol_txrx_fw_stats_desc_pool.freelist;
4148 				pdev->ol_txrx_fw_stats_desc_pool.freelist = elem;
4149 				qdf_spin_unlock_bh(&pdev->
4150 						   ol_txrx_fw_stats_desc_pool.
4151 						   pool_lock);
4152 			}
4153 		}
4154 
4155 		qdf_mem_free(non_volatile_req);
4156 		return A_ERROR;
4157 	}
4158 
4159 	if (response_expected == false)
4160 		qdf_mem_free(non_volatile_req);
4161 
4162 	return A_OK;
4163 }
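
/*
 * The desc_id passed as the HTT cookie above is echoed back by the target
 * and handled in ol_txrx_fw_stats_handler() below, which retrieves the
 * pending request via ol_txrx_fw_stats_desc_get_req() and copies the
 * reported stats into the caller-supplied buffer (req->base.copy.buf).
 */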
4164 
4165 void
4166 ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
4167 			 uint8_t cookie, uint8_t *stats_info_list)
4168 {
4169 	enum htt_dbg_stats_type type;
4170 	enum htt_cmn_dbg_stats_type cmn_type = HTT_DBG_CMN_NUM_STATS_INVALID;
4171 	enum htt_dbg_stats_status status;
4172 	int length;
4173 	uint8_t *stats_data;
4174 	struct ol_txrx_stats_req_internal *req, *tmp;
4175 	int more = 0;
4176 	int found = 0;
4177 
4178 	if (cookie >= FW_STATS_DESC_POOL_SIZE) {
4179 		ol_txrx_err("Cookie is not valid");
4180 		return;
4181 	}
4182 	req = ol_txrx_fw_stats_desc_get_req(pdev, (uint8_t)cookie);
4183 	if (!req) {
4184 		ol_txrx_err("Request not retrieved for cookie %u",
4185 			    (uint8_t)cookie);
4186 		return;
4187 	}
4188 	qdf_spin_lock_bh(&pdev->req_list_spinlock);
4189 	TAILQ_FOREACH(tmp, &pdev->req_list, req_list_elem) {
4190 		if (req == tmp) {
4191 			found = 1;
4192 			break;
4193 		}
4194 	}
4195 	qdf_spin_unlock_bh(&pdev->req_list_spinlock);
4196 
4197 	if (!found) {
4198 		ol_txrx_err(
4199 			"req(%pK) from firmware can't be found in the list", req);
4200 		return;
4201 	}
4202 
4203 	do {
4204 		htt_t2h_dbg_stats_hdr_parse(stats_info_list, &type, &status,
4205 					    &length, &stats_data);
4206 		if (status == HTT_DBG_STATS_STATUS_SERIES_DONE)
4207 			break;
4208 		if (status == HTT_DBG_STATS_STATUS_PRESENT ||
4209 		    status == HTT_DBG_STATS_STATUS_PARTIAL) {
4210 			uint8_t *buf;
4211 			int bytes = 0;
4212 
4213 			if (status == HTT_DBG_STATS_STATUS_PARTIAL)
4214 				more = 1;
4215 			if (req->base.print.verbose || req->base.print.concise)
4216 				/* provide the header along with the data */
4217 				htt_t2h_stats_print(stats_info_list,
4218 						    req->base.print.concise);
4219 
4220 			switch (type) {
4221 			case HTT_DBG_STATS_WAL_PDEV_TXRX:
4222 				bytes = sizeof(struct wlan_dbg_stats);
4223 				if (req->base.copy.buf) {
4224 					int lmt;
4225 
4226 					lmt = sizeof(struct wlan_dbg_stats);
4227 					if (req->base.copy.byte_limit < lmt)
4228 						lmt = req->base.copy.byte_limit;
4229 					buf = req->base.copy.buf + req->offset;
4230 					qdf_mem_copy(buf, stats_data, lmt);
4231 				}
4232 				break;
4233 			case HTT_DBG_STATS_RX_REORDER:
4234 				bytes = sizeof(struct rx_reorder_stats);
4235 				if (req->base.copy.buf) {
4236 					int lmt;
4237 
4238 					lmt = sizeof(struct rx_reorder_stats);
4239 					if (req->base.copy.byte_limit < lmt)
4240 						lmt = req->base.copy.byte_limit;
4241 					buf = req->base.copy.buf + req->offset;
4242 					qdf_mem_copy(buf, stats_data, lmt);
4243 				}
4244 				break;
4245 			case HTT_DBG_STATS_RX_RATE_INFO:
4246 				bytes = sizeof(wlan_dbg_rx_rate_info_t);
4247 				if (req->base.copy.buf) {
4248 					int lmt;
4249 
4250 					lmt = sizeof(wlan_dbg_rx_rate_info_t);
4251 					if (req->base.copy.byte_limit < lmt)
4252 						lmt = req->base.copy.byte_limit;
4253 					buf = req->base.copy.buf + req->offset;
4254 					qdf_mem_copy(buf, stats_data, lmt);
4255 				}
4256 				break;
4257 
4258 			case HTT_DBG_STATS_TX_RATE_INFO:
4259 				bytes = sizeof(wlan_dbg_tx_rate_info_t);
4260 				if (req->base.copy.buf) {
4261 					int lmt;
4262 
4263 					lmt = sizeof(wlan_dbg_tx_rate_info_t);
4264 					if (req->base.copy.byte_limit < lmt)
4265 						lmt = req->base.copy.byte_limit;
4266 					buf = req->base.copy.buf + req->offset;
4267 					qdf_mem_copy(buf, stats_data, lmt);
4268 				}
4269 				break;
4270 
4271 			case HTT_DBG_STATS_TX_PPDU_LOG:
4272 				bytes = 0;
4273 				/* TO DO: specify how many bytes are present */
4274 				/* TO DO: add copying to the requestor's buf */
4275 				fallthrough;
4276 			case HTT_DBG_STATS_RX_REMOTE_RING_BUFFER_INFO:
4277 				bytes = sizeof(struct
4278 						rx_remote_buffer_mgmt_stats);
4279 				if (req->base.copy.buf) {
4280 					int limit;
4281 
4282 					limit = sizeof(struct
4283 						rx_remote_buffer_mgmt_stats);
4284 					if (req->base.copy.byte_limit < limit)
4285 						limit = req->base.copy.
4286 							byte_limit;
4287 					buf = req->base.copy.buf + req->offset;
4288 					qdf_mem_copy(buf, stats_data, limit);
4289 				}
4290 				break;
4291 
4292 			case HTT_DBG_STATS_TXBF_INFO:
4293 				bytes = sizeof(struct wlan_dbg_txbf_data_stats);
4294 				if (req->base.copy.buf) {
4295 					int limit;
4296 
4297 					limit = sizeof(struct
4298 						wlan_dbg_txbf_data_stats);
4299 					if (req->base.copy.byte_limit < limit)
4300 						limit = req->base.copy.
4301 							byte_limit;
4302 					buf = req->base.copy.buf + req->offset;
4303 					qdf_mem_copy(buf, stats_data, limit);
4304 				}
4305 				break;
4306 
4307 			case HTT_DBG_STATS_SND_INFO:
4308 				bytes = sizeof(struct wlan_dbg_txbf_snd_stats);
4309 				if (req->base.copy.buf) {
4310 					int limit;
4311 
4312 					limit = sizeof(struct
4313 						wlan_dbg_txbf_snd_stats);
4314 					if (req->base.copy.byte_limit < limit)
4315 						limit = req->base.copy.
4316 							byte_limit;
4317 					buf = req->base.copy.buf + req->offset;
4318 					qdf_mem_copy(buf, stats_data, limit);
4319 				}
4320 				break;
4321 
4322 			case HTT_DBG_STATS_TX_SELFGEN_INFO:
4323 				bytes = sizeof(struct
4324 					wlan_dbg_tx_selfgen_stats);
4325 				if (req->base.copy.buf) {
4326 					int limit;
4327 
4328 					limit = sizeof(struct
4329 						wlan_dbg_tx_selfgen_stats);
4330 					if (req->base.copy.byte_limit < limit)
4331 						limit = req->base.copy.
4332 							byte_limit;
4333 					buf = req->base.copy.buf + req->offset;
4334 					qdf_mem_copy(buf, stats_data, limit);
4335 				}
4336 				break;
4337 
4338 			case HTT_DBG_STATS_ERROR_INFO:
4339 				bytes =
4340 				  sizeof(struct wlan_dbg_wifi2_error_stats);
4341 				if (req->base.copy.buf) {
4342 					int limit;
4343 
4344 					limit = sizeof(struct
4345 						wlan_dbg_wifi2_error_stats);
4346 					if (req->base.copy.byte_limit < limit)
4347 						limit = req->base.copy.
4348 							byte_limit;
4349 					buf = req->base.copy.buf + req->offset;
4350 					qdf_mem_copy(buf, stats_data, limit);
4351 				}
4352 				break;
4353 
4354 			case HTT_DBG_STATS_TXBF_MUSU_NDPA_PKT:
4355 				bytes =
4356 				  sizeof(struct rx_txbf_musu_ndpa_pkts_stats);
4357 				if (req->base.copy.buf) {
4358 					int limit;
4359 
4360 					limit = sizeof(struct
4361 						rx_txbf_musu_ndpa_pkts_stats);
4362 					if (req->base.copy.byte_limit <	limit)
4363 						limit =
4364 						req->base.copy.byte_limit;
4365 					buf = req->base.copy.buf + req->offset;
4366 					qdf_mem_copy(buf, stats_data, limit);
4367 				}
4368 				break;
4369 
4370 			default:
4371 				break;
4372 			}
4373 			buf = req->base.copy.buf ?
4374 				req->base.copy.buf : stats_data;
4375 
4376 			/* Not implemented for MCL */
4377 			if (req->base.callback.fp)
4378 				req->base.callback.fp(req->base.callback.ctxt,
4379 						      cmn_type, buf, bytes);
4380 		}
4381 		stats_info_list += length;
4382 	} while (1);
4383 
4384 	if (!more) {
4385 		qdf_spin_lock_bh(&pdev->req_list_spinlock);
4386 		TAILQ_FOREACH(tmp, &pdev->req_list, req_list_elem) {
4387 			if (req == tmp) {
4388 				TAILQ_REMOVE(&pdev->req_list, req, req_list_elem);
4389 				pdev->req_list_depth--;
4390 				qdf_mem_free(req);
4391 				break;
4392 			}
4393 		}
4394 		qdf_spin_unlock_bh(&pdev->req_list_spinlock);
4395 	}
4396 }
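
/*
 * Each HTT stats segment is parsed in the loop above until the target
 * signals HTT_DBG_STATS_STATUS_SERIES_DONE; the request is removed from
 * pdev->req_list and freed only once no more PARTIAL segments are
 * expected (i.e. 'more' stayed 0).
 */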
4397 
4398 #ifndef ATH_PERF_PWR_OFFLOAD /*---------------------------------------------*/
4399 int ol_txrx_debug(ol_txrx_vdev_handle vdev, int debug_specs)
4400 {
4401 	if (debug_specs & TXRX_DBG_MASK_OBJS) {
4402 #if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
4403 		ol_txrx_pdev_display(vdev->pdev, 0);
4404 #else
4405 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
4406 			  "The pdev,vdev,peer display functions are disabled.\n To enable them, recompile with TXRX_DEBUG_LEVEL > 5");
4407 #endif
4408 	}
4409 	if (debug_specs & TXRX_DBG_MASK_STATS)
4410 		ol_txrx_stats_display(vdev->pdev,
4411 				      QDF_STATS_VERBOSITY_LEVEL_HIGH);
4412 	if (debug_specs & TXRX_DBG_MASK_PROT_ANALYZE) {
4413 #if defined(ENABLE_TXRX_PROT_ANALYZE)
4414 		ol_txrx_prot_ans_display(vdev->pdev);
4415 #else
4416 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
4417 			  "txrx protocol analysis is disabled.\n To enable it, recompile with ENABLE_TXRX_PROT_ANALYZE defined");
4418 #endif
4419 	}
4420 	if (debug_specs & TXRX_DBG_MASK_RX_REORDER_TRACE) {
4421 #if defined(ENABLE_RX_REORDER_TRACE)
4422 		ol_rx_reorder_trace_display(vdev->pdev, 0, 0);
4423 #else
4424 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
4425 			  "rx reorder seq num trace is disabled.\n To enable it, recompile with ENABLE_RX_REORDER_TRACE defined");
4426 #endif
4427 
4428 	}
4429 	return 0;
4430 }
4431 #endif
4432 
4433 #ifdef currently_unused
4434 int ol_txrx_aggr_cfg(ol_txrx_vdev_handle vdev,
4435 		     int max_subfrms_ampdu, int max_subfrms_amsdu)
4436 {
4437 	return htt_h2t_aggr_cfg_msg(vdev->pdev->htt_pdev,
4438 				    max_subfrms_ampdu, max_subfrms_amsdu);
4439 }
4440 #endif
4441 
4442 #if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
4443 void ol_txrx_pdev_display(ol_txrx_pdev_handle pdev, int indent)
4444 {
4445 	struct ol_txrx_vdev_t *vdev;
4446 
4447 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4448 		  "%*s%s:\n", indent, " ", "txrx pdev");
4449 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4450 		  "%*spdev object: %pK", indent + 4, " ", pdev);
4451 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4452 		  "%*svdev list:", indent + 4, " ");
4453 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4454 		      ol_txrx_vdev_display(vdev, indent + 8);
4455 	}
4456 	ol_txrx_peer_find_display(pdev, indent + 4);
4457 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4458 		  "%*stx desc pool: %d elems @ %pK", indent + 4, " ",
4459 		  pdev->tx_desc.pool_size, pdev->tx_desc.array);
4460 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW, " ");
4461 	htt_display(pdev->htt_pdev, indent);
4462 }
4463 
4464 void ol_txrx_vdev_display(ol_txrx_vdev_handle vdev, int indent)
4465 {
4466 	struct ol_txrx_peer_t *peer;
4467 
4468 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4469 		  "%*stxrx vdev: %pK\n", indent, " ", vdev);
4470 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4471 		  "%*sID: %d\n", indent + 4, " ", vdev->vdev_id);
4472 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4473 		  "%*sMAC addr: %d:%d:%d:%d:%d:%d",
4474 		  indent + 4, " ",
4475 		  vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
4476 		  vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
4477 		  vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
4478 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4479 		  "%*speer list:", indent + 4, " ");
4480 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4481 		      ol_txrx_peer_display(peer, indent + 8);
4482 	}
4483 }
4484 
4485 void ol_txrx_peer_display(ol_txrx_peer_handle peer, int indent)
4486 {
4487 	int i;
4488 
4489 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4490 		  "%*stxrx peer: %pK", indent, " ", peer);
4491 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
4492 		if (peer->peer_ids[i] != HTT_INVALID_PEER) {
4493 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4494 				  "%*sID: %d", indent + 4, " ",
4495 				  peer->peer_ids[i]);
4496 		}
4497 	}
4498 }
4499 #endif /* TXRX_DEBUG_LEVEL */
4500 
4501 /**
4502  * ol_txrx_stats() - write ol layer tx queue stats into a text buffer
4503  * @vdev_id: vdev_id
4504  * @buffer: pointer to buffer
4505  * @buf_len: length of the buffer
4506  *
4507  * Return: length of string
4508  */
4509 static int
4510 ol_txrx_stats(uint8_t vdev_id, char *buffer, unsigned int buf_len)
4511 {
4512 	uint32_t len = 0;
4513 
4514 	struct ol_txrx_vdev_t *vdev =
4515 			(struct ol_txrx_vdev_t *)
4516 			ol_txrx_get_vdev_from_vdev_id(vdev_id);
4517 
4518 	if (!vdev) {
4519 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4520 			  "%s: vdev is NULL", __func__);
4521 		snprintf(buffer, buf_len, "vdev not found");
4522 		return len;
4523 	}
4524 
4525 	len = scnprintf(buffer, buf_len,
4526 			"\n\nTXRX stats:\nllQueue State : %s\npause %u unpause %u\noverflow %u\nllQueue timer state : %s",
4527 			((vdev->ll_pause.is_q_paused == false) ?
4528 			 "UNPAUSED" : "PAUSED"),
4529 			vdev->ll_pause.q_pause_cnt,
4530 			vdev->ll_pause.q_unpause_cnt,
4531 			vdev->ll_pause.q_overflow_cnt,
4532 			((vdev->ll_pause.is_q_timer_on == false)
4533 			 ? "NOT-RUNNING" : "RUNNING"));
4534 	return len;
4535 }
4536 
4537 #ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
4538 /**
4539  * ol_txrx_disp_peer_cached_bufq_stats() - display peer cached_bufq stats
4540  * @peer: peer pointer
4541  *
4542  * Return: None
4543  */
4544 static void ol_txrx_disp_peer_cached_bufq_stats(struct ol_txrx_peer_t *peer)
4545 {
4546 	txrx_nofl_info("cached_bufq: curr %d drops %d hwm %d whatifs %d thresh %d",
4547 		       peer->bufq_info.curr,
4548 		       peer->bufq_info.dropped,
4549 		       peer->bufq_info.high_water_mark,
4550 		       peer->bufq_info.qdepth_no_thresh,
4551 		       peer->bufq_info.thresh);
4552 }
4553 
4554 /**
4555  * ol_txrx_disp_peer_stats() - display peer stats
4556  * @pdev: pdev pointer
4557  *
4558  * Return: None
4559  */
4560 static void ol_txrx_disp_peer_stats(ol_txrx_pdev_handle pdev)
4561 {	int i;
4562 	struct ol_txrx_peer_t *peer;
4563 	struct hif_opaque_softc *osc =  cds_get_context(QDF_MODULE_ID_HIF);
4564 
4565 	if (osc && hif_is_load_or_unload_in_progress(HIF_GET_SOFTC(osc)))
4566 		return;
4567 
4568 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
4569 		qdf_spin_lock_bh(&pdev->peer_ref_mutex);
4570 		qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
4571 		peer = pdev->local_peer_ids.map[i];
4572 		if (peer) {
4573 			ol_txrx_peer_get_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
4574 		}
4575 		qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
4576 		qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
4577 
4578 		if (peer) {
4579 			txrx_nofl_info("stats: peer 0x%pK local peer id %d",
4580 				       peer, i);
4581 			ol_txrx_disp_peer_cached_bufq_stats(peer);
4582 			ol_txrx_peer_release_ref(peer,
4583 						 PEER_DEBUG_ID_OL_INTERNAL);
4584 		}
4585 	}
4586 }
4587 #else
4588 static void ol_txrx_disp_peer_stats(ol_txrx_pdev_handle pdev)
4589 {
4590 	txrx_nofl_info("peer stats not supported w/o QCA_SUPPORT_TXRX_LOCAL_PEER_ID");
4591 }
4592 #endif
4593 
4594 void ol_txrx_stats_display(ol_txrx_pdev_handle pdev,
4595 			   enum qdf_stats_verbosity_level level)
4596 {
4597 	u64 tx_dropped =
4598 		pdev->stats.pub.tx.dropped.download_fail.pkts
4599 		  + pdev->stats.pub.tx.dropped.target_discard.pkts
4600 		  + pdev->stats.pub.tx.dropped.no_ack.pkts
4601 		  + pdev->stats.pub.tx.dropped.target_drop.pkts
4602 		  + pdev->stats.pub.tx.dropped.others.pkts;
4603 
4604 	if (level == QDF_STATS_VERBOSITY_LEVEL_LOW) {
4605 		txrx_nofl_dbg("STATS |%u %u|TX: %lld tso %lld ok %lld drops(%u-%lld %u-%lld %u-%lld %u-%lld ?-%lld hR-%lld)|RX: %lld drops(E %lld PI %lld ME %lld) fwd(S %d F %d SF %d)|",
4606 			      pdev->tx_desc.num_free,
4607 			      pdev->tx_desc.pool_size,
4608 			      pdev->stats.pub.tx.from_stack.pkts,
4609 			      pdev->stats.pub.tx.tso.tso_pkts.pkts,
4610 			      pdev->stats.pub.tx.delivered.pkts,
4611 			      htt_tx_status_download_fail,
4612 			      pdev->stats.pub.tx.dropped.download_fail.pkts,
4613 			      htt_tx_status_discard,
4614 			      pdev->stats.pub.tx.dropped.
4615 					target_discard.pkts,
4616 			      htt_tx_status_no_ack,
4617 			      pdev->stats.pub.tx.dropped.no_ack.pkts,
4618 			      htt_tx_status_drop,
4619 			      pdev->stats.pub.tx.dropped.target_drop.pkts,
4620 			      pdev->stats.pub.tx.dropped.others.pkts,
4621 			      pdev->stats.pub.tx.dropped.host_reject.pkts,
4622 			      pdev->stats.pub.rx.delivered.pkts,
4623 			      pdev->stats.pub.rx.dropped_err.pkts,
4624 			      pdev->stats.pub.rx.dropped_peer_invalid.pkts,
4625 			      pdev->stats.pub.rx.dropped_mic_err.pkts,
4626 			      pdev->stats.pub.rx.intra_bss_fwd.
4627 					packets_stack,
4628 			      pdev->stats.pub.rx.intra_bss_fwd.
4629 					packets_fwd,
4630 			      pdev->stats.pub.rx.intra_bss_fwd.
4631 					packets_stack_n_fwd);
4632 		return;
4633 	}
4634 
4635 	txrx_nofl_info("TX PATH Statistics:");
4636 	txrx_nofl_info("sent %lld msdus (%lld B), host rejected %lld (%lld B), dropped %lld (%lld B)",
4637 		       pdev->stats.pub.tx.from_stack.pkts,
4638 		       pdev->stats.pub.tx.from_stack.bytes,
4639 		       pdev->stats.pub.tx.dropped.host_reject.pkts,
4640 		       pdev->stats.pub.tx.dropped.host_reject.bytes,
4641 			  tx_dropped,
4642 		       pdev->stats.pub.tx.dropped.download_fail.bytes
4643 			  + pdev->stats.pub.tx.dropped.target_discard.bytes
4644 			  + pdev->stats.pub.tx.dropped.target_drop.bytes
4645 			  + pdev->stats.pub.tx.dropped.no_ack.bytes);
4646 	txrx_nofl_info("successfully delivered: %lld (%lld B), download fail: %lld (%lld B), target discard: %lld (%lld B), no ack: %lld (%lld B),target drop: %lld (%lld B), others: %lld (%lld B)",
4647 		       pdev->stats.pub.tx.delivered.pkts,
4648 		       pdev->stats.pub.tx.delivered.bytes,
4649 		       pdev->stats.pub.tx.dropped.download_fail.pkts,
4650 		       pdev->stats.pub.tx.dropped.download_fail.bytes,
4651 		       pdev->stats.pub.tx.dropped.target_discard.pkts,
4652 		       pdev->stats.pub.tx.dropped.target_discard.bytes,
4653 		       pdev->stats.pub.tx.dropped.no_ack.pkts,
4654 		       pdev->stats.pub.tx.dropped.no_ack.bytes,
4655 		       pdev->stats.pub.tx.dropped.target_drop.pkts,
4656 		       pdev->stats.pub.tx.dropped.target_drop.bytes,
4657 		       pdev->stats.pub.tx.dropped.others.pkts,
4658 		       pdev->stats.pub.tx.dropped.others.bytes);
4659 	txrx_nofl_info("Tx completions per HTT message:\n"
4660 		       "Single Packet  %d\n"
4661 		       " 2-10 Packets  %d\n"
4662 		       "11-20 Packets  %d\n"
4663 		       "21-30 Packets  %d\n"
4664 		       "31-40 Packets  %d\n"
4665 		       "41-50 Packets  %d\n"
4666 		       "51-60 Packets  %d\n"
4667 		       "  60+ Packets  %d\n",
4668 		       pdev->stats.pub.tx.comp_histogram.pkts_1,
4669 		       pdev->stats.pub.tx.comp_histogram.pkts_2_10,
4670 		       pdev->stats.pub.tx.comp_histogram.pkts_11_20,
4671 		       pdev->stats.pub.tx.comp_histogram.pkts_21_30,
4672 		       pdev->stats.pub.tx.comp_histogram.pkts_31_40,
4673 		       pdev->stats.pub.tx.comp_histogram.pkts_41_50,
4674 		       pdev->stats.pub.tx.comp_histogram.pkts_51_60,
4675 		       pdev->stats.pub.tx.comp_histogram.pkts_61_plus);
4676 
4677 	txrx_nofl_info("RX PATH Statistics:");
4678 	txrx_nofl_info("%lld ppdus, %lld mpdus, %lld msdus, %lld bytes\n"
4679 		       "dropped: err %lld (%lld B), peer_invalid %lld (%lld B), mic_err %lld (%lld B)\n"
4680 		       "msdus with frag_ind: %d msdus with offload_ind: %d",
4681 		       pdev->stats.priv.rx.normal.ppdus,
4682 		       pdev->stats.priv.rx.normal.mpdus,
4683 		       pdev->stats.pub.rx.delivered.pkts,
4684 		       pdev->stats.pub.rx.delivered.bytes,
4685 		       pdev->stats.pub.rx.dropped_err.pkts,
4686 		       pdev->stats.pub.rx.dropped_err.bytes,
4687 		       pdev->stats.pub.rx.dropped_peer_invalid.pkts,
4688 		       pdev->stats.pub.rx.dropped_peer_invalid.bytes,
4689 		       pdev->stats.pub.rx.dropped_mic_err.pkts,
4690 		       pdev->stats.pub.rx.dropped_mic_err.bytes,
4691 		       pdev->stats.pub.rx.msdus_with_frag_ind,
4692 		       pdev->stats.pub.rx.msdus_with_offload_ind);
4693 
4694 	txrx_nofl_info("  fwd to stack %d, fwd to fw %d, fwd to stack & fw  %d\n",
4695 		       pdev->stats.pub.rx.intra_bss_fwd.packets_stack,
4696 		       pdev->stats.pub.rx.intra_bss_fwd.packets_fwd,
4697 		       pdev->stats.pub.rx.intra_bss_fwd.packets_stack_n_fwd);
4698 
4699 	txrx_nofl_info("packets per HTT message:\n"
4700 		       "Single Packet  %d\n"
4701 		       " 2-10 Packets  %d\n"
4702 		       "11-20 Packets  %d\n"
4703 		       "21-30 Packets  %d\n"
4704 		       "31-40 Packets  %d\n"
4705 		       "41-50 Packets  %d\n"
4706 		       "51-60 Packets  %d\n"
4707 		       "  60+ Packets  %d\n",
4708 		       pdev->stats.pub.rx.rx_ind_histogram.pkts_1,
4709 		       pdev->stats.pub.rx.rx_ind_histogram.pkts_2_10,
4710 		       pdev->stats.pub.rx.rx_ind_histogram.pkts_11_20,
4711 		       pdev->stats.pub.rx.rx_ind_histogram.pkts_21_30,
4712 		       pdev->stats.pub.rx.rx_ind_histogram.pkts_31_40,
4713 		       pdev->stats.pub.rx.rx_ind_histogram.pkts_41_50,
4714 		       pdev->stats.pub.rx.rx_ind_histogram.pkts_51_60,
4715 		       pdev->stats.pub.rx.rx_ind_histogram.pkts_61_plus);
4716 
4717 	ol_txrx_disp_peer_stats(pdev);
4718 }
4719 
4720 void ol_txrx_stats_clear(ol_txrx_pdev_handle pdev)
4721 {
4722 	qdf_mem_zero(&pdev->stats, sizeof(pdev->stats));
4723 }
4724 
4725 #if defined(ENABLE_TXRX_PROT_ANALYZE)
4726 
4727 void ol_txrx_prot_ans_display(ol_txrx_pdev_handle pdev)
4728 {
4729 	ol_txrx_prot_an_display(pdev->prot_an_tx_sent);
4730 	ol_txrx_prot_an_display(pdev->prot_an_rx_sent);
4731 }
4732 
4733 #endif /* ENABLE_TXRX_PROT_ANALYZE */
4734 
4735 #ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
4736 int16_t ol_txrx_peer_rssi(ol_txrx_peer_handle peer)
4737 {
4738 	return (peer->rssi_dbm == HTT_RSSI_INVALID) ?
4739 	       OL_TXRX_RSSI_INVALID : peer->rssi_dbm;
4740 }
4741 #endif /* #ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI */
4742 
4743 #ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
4744 A_STATUS
4745 ol_txrx_peer_stats_copy(ol_txrx_pdev_handle pdev,
4746 			ol_txrx_peer_handle peer, ol_txrx_peer_stats_t *stats)
4747 {
4748 	qdf_assert(pdev && peer && stats);
4749 	qdf_spin_lock_bh(&pdev->peer_stat_mutex);
4750 	qdf_mem_copy(stats, &peer->stats, sizeof(*stats));
4751 	qdf_spin_unlock_bh(&pdev->peer_stat_mutex);
4752 	return A_OK;
4753 }
4754 #endif /* QCA_ENABLE_OL_TXRX_PEER_STATS */
4755 
4756 /**
4757  * ol_vdev_rx_set_intrabss_fwd() - set intra-BSS rx forwarding for a vdev
4758  *
4759  * @soc_hdl: datapath soc handle
4760  * @vdev_id: virtual interface id
4761  * @val: true to disable intra-BSS rx forwarding, false to enable it
4762  *
4763  * Return: void
4764  */
4765 static void ol_vdev_rx_set_intrabss_fwd(struct cdp_soc_t *soc_hdl,
4766 					uint8_t vdev_id, bool val)
4767 {
4768 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
4769 	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
4770 								     vdev_id);
4771 
4772 	if (!vdev)
4773 		return;
4774 
4775 	vdev->disable_intrabss_fwd = val;
4776 }
4777 
4778 /**
4779  * ol_txrx_update_mac_id() - update mac_id for vdev
4780  * @soc_hdl: Datapath soc handle
4781  * @vdev_id: vdev id
4782  * @mac_id: mac id
4783  *
4784  * Return: none
4785  */
4786 static void ol_txrx_update_mac_id(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4787 				  uint8_t mac_id)
4788 {
4789 	struct ol_txrx_vdev_t *vdev =
4790 			(struct ol_txrx_vdev_t *)
4791 			ol_txrx_get_vdev_from_vdev_id(vdev_id);
4792 
4793 	if (!vdev) {
4794 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4795 			  "%s: Invalid vdev_id %d", __func__, vdev_id);
4796 		return;
4797 	}
4798 	vdev->mac_id = mac_id;
4799 }
4800 
4801 /**
4802  * ol_txrx_get_tx_ack_stats() - get tx ack count
4803  * @soc_hdl: Datapath soc handle
4804  * @vdev_id: vdev_id
4805  *
4806  * Return: tx ack count
4807  */
4808 static uint32_t ol_txrx_get_tx_ack_stats(struct cdp_soc_t *soc_hdl,
4809 					 uint8_t vdev_id)
4810 {
4811 	struct ol_txrx_vdev_t *vdev =
4812 		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
4813 
4814 	if (!vdev) {
4815 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4816 			  "%s: Invalid vdev_id %d", __func__, vdev_id);
4817 		return 0;
4818 	}
4819 
4820 	return vdev->txrx_stats.txack_success;
4821 }
4822 
4823 /**
4824  * ol_txrx_display_stats() - Display OL TXRX display stats
4825  * @soc_hdl: Datapath soc handle
4826  * @value: Module id for which stats need to be displayed
4827  * @verb_level: verbose level of stats to be displayed
4828  *
4829  * Return: status
4830  */
4831 static QDF_STATUS
4832 ol_txrx_display_stats(struct cdp_soc_t *soc_hdl, uint16_t value,
4833 		      enum qdf_stats_verbosity_level verb_level)
4834 {
4835 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
4836 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(
4837 							soc,
4838 							OL_TXRX_PDEV_ID);
4839 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4840 
4841 	if (!pdev) {
4842 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4843 			  "%s: pdev is NULL", __func__);
4844 		return QDF_STATUS_E_NULL_VALUE;
4845 	}
4846 
4847 	switch (value) {
4848 	case CDP_TXRX_PATH_STATS:
4849 		ol_txrx_stats_display(pdev, verb_level);
4850 		break;
4851 	case CDP_TXRX_TSO_STATS:
4852 		ol_txrx_stats_display_tso(pdev);
4853 		break;
4854 	case CDP_DUMP_TX_FLOW_POOL_INFO:
4855 		if (verb_level == QDF_STATS_VERBOSITY_LEVEL_LOW)
4856 			ol_tx_dump_flow_pool_info_compact(pdev);
4857 		else
4858 			ol_tx_dump_flow_pool_info(soc_hdl);
4859 		break;
4860 	case CDP_TXRX_DESC_STATS:
4861 		qdf_nbuf_tx_desc_count_display();
4862 		break;
4863 	case CDP_WLAN_RX_BUF_DEBUG_STATS:
4864 		htt_display_rx_buf_debug(pdev->htt_pdev);
4865 		break;
4866 #ifdef CONFIG_HL_SUPPORT
4867 	case CDP_SCHEDULER_STATS:
4868 		ol_tx_sched_cur_state_display(pdev);
4869 		ol_tx_sched_stats_display(pdev);
4870 		break;
4871 	case CDP_TX_QUEUE_STATS:
4872 		ol_tx_queue_log_display(pdev);
4873 		break;
4874 #ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
4875 	case CDP_CREDIT_STATS:
4876 		ol_tx_dump_group_credit_stats(pdev);
4877 		break;
4878 #endif
4879 
4880 #ifdef DEBUG_HL_LOGGING
4881 	case CDP_BUNDLE_STATS:
4882 		htt_dump_bundle_stats(pdev->htt_pdev);
4883 		break;
4884 #endif
4885 #endif
4886 	default:
4887 		status = QDF_STATUS_E_INVAL;
4888 		break;
4889 	}
4890 	return status;
4891 }
4892 
4893 /**
4894  * ol_txrx_clear_stats() - Clear OL TXRX stats
4895  * @soc_hdl: datapath soc handle
4896  * @pdev_id: pdev identifier
4897  * @value: module id for which stats need to be cleared
4898  *
4899  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_INVAL otherwise
4900  */
4901 static QDF_STATUS ol_txrx_clear_stats(struct cdp_soc_t *soc_hdl,
4902 				      uint8_t pdev_id, uint8_t value)
4903 {
4904 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
4905 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
4906 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4907 
4908 	if (!pdev) {
4909 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4910 			  "%s: pdev is NULL", __func__);
4911 		return QDF_STATUS_E_INVAL;
4912 	}
4913 
4914 	switch (value) {
4915 	case CDP_TXRX_PATH_STATS:
4916 		ol_txrx_stats_clear(pdev);
4917 		break;
4918 	case CDP_TXRX_TSO_STATS:
4919 		ol_txrx_tso_stats_clear(pdev);
4920 		break;
4921 	case CDP_DUMP_TX_FLOW_POOL_INFO:
4922 		ol_tx_clear_flow_pool_stats();
4923 		break;
4924 	case CDP_TXRX_DESC_STATS:
4925 		qdf_nbuf_tx_desc_count_clear();
4926 		break;
4927 #ifdef CONFIG_HL_SUPPORT
4928 	case CDP_SCHEDULER_STATS:
4929 		ol_tx_sched_stats_clear(pdev);
4930 		break;
4931 	case CDP_TX_QUEUE_STATS:
4932 		ol_tx_queue_log_clear(pdev);
4933 		break;
4934 #ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
4935 	case CDP_CREDIT_STATS:
4936 		ol_tx_clear_group_credit_stats(pdev);
4937 		break;
4938 #endif
4939 	case CDP_BUNDLE_STATS:
4940 		htt_clear_bundle_stats(pdev->htt_pdev);
4941 		break;
4942 #endif
4943 	default:
4944 		status = QDF_STATUS_E_INVAL;
4945 		break;
4946 	}
4947 
4948 	return status;
4949 }
4950 
4951 /**
4952  * ol_txrx_drop_nbuf_list() - drop an nbuf list
4953  * @buf_list: buffer list to be dropped
4954  *
4955  * Return: int (number of bufs dropped)
4956  */
4957 static inline int ol_txrx_drop_nbuf_list(qdf_nbuf_t buf_list)
4958 {
4959 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
4960 	ol_txrx_pdev_handle pdev;
4961 	int num_dropped = 0;
4962 	qdf_nbuf_t buf, next_buf;
4963 
4964 	if (qdf_unlikely(!soc))
4965 		return 0;
4966 
4967 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
4968 	if (!pdev) {
4969 		ol_txrx_err("pdev is NULL");
4970 		return 0;
4971 	}
4972 
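	/*
	 * Walk the list: mark each frame as a peer-cached frame, count the
	 * drop in the dropped_peer_invalid MSDU stats, then free the buffer.
	 */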
4973 	buf = buf_list;
4974 	while (buf) {
4975 		QDF_NBUF_CB_RX_PEER_CACHED_FRM(buf) = 1;
4976 		next_buf = qdf_nbuf_queue_next(buf);
4977 		if (pdev)
4978 			TXRX_STATS_MSDU_INCR(pdev,
4979 				 rx.dropped_peer_invalid, buf);
4980 		qdf_nbuf_free(buf);
4981 		buf = next_buf;
4982 		num_dropped++;
4983 	}
4984 	return num_dropped;
4985 }
4986 
4987 /**
4988  * ol_rx_data_handler() - data rx handler
4989  * @pdev: dev handle
4990  * @buf_list: buffer list
4991  * @staid: Station id
4992  *
4993  * Return: None
4994  */
4995 static void ol_rx_data_handler(struct ol_txrx_pdev_t *pdev,
4996 			       qdf_nbuf_t buf_list, uint16_t staid)
4997 {
4998 	void *osif_dev;
4999 	uint8_t drop_count = 0;
5000 	qdf_nbuf_t buf, next_buf;
5001 	QDF_STATUS ret;
5002 	ol_txrx_rx_fp data_rx = NULL;
5003 	struct ol_txrx_peer_t *peer;
5004 
5005 	if (qdf_unlikely(!pdev))
5006 		goto free_buf;
5007 
5008 	/* Do not use peer directly. Derive peer from staid to
5009 	 * make sure that peer is valid.
5010 	 */
5011 	peer = ol_txrx_peer_get_ref_by_local_id((struct cdp_pdev *)pdev,
5012 			staid, PEER_DEBUG_ID_OL_RX_THREAD);
5013 	if (!peer)
5014 		goto free_buf;
5015 
5016 	qdf_spin_lock_bh(&peer->peer_info_lock);
5017 	if (qdf_unlikely(!(peer->state >= OL_TXRX_PEER_STATE_CONN) ||
5018 					 !peer->vdev->rx)) {
5019 		qdf_spin_unlock_bh(&peer->peer_info_lock);
5020 		ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_RX_THREAD);
5021 		goto free_buf;
5022 	}
5023 
5024 	data_rx = peer->vdev->rx;
5025 	osif_dev = peer->vdev->osif_dev;
5026 	qdf_spin_unlock_bh(&peer->peer_info_lock);
5027 
5028 	qdf_spin_lock_bh(&peer->bufq_info.bufq_lock);
5029 	if (!list_empty(&peer->bufq_info.cached_bufq)) {
5030 		qdf_spin_unlock_bh(&peer->bufq_info.bufq_lock);
5031 		/* Flush the cached frames to HDD before passing new rx frame */
5032 		ol_txrx_flush_rx_frames(peer, 0);
5033 	} else
5034 		qdf_spin_unlock_bh(&peer->bufq_info.bufq_lock);
5035 
5036 	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_RX_THREAD);
5037 
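	/* Deliver each MSDU to the OS shim one at a time; frames the shim
	 * rejects are counted as rx.dropped_err and freed here.
	 */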
5038 	buf = buf_list;
5039 	while (buf) {
5040 		next_buf = qdf_nbuf_queue_next(buf);
5041 		qdf_nbuf_set_next(buf, NULL);   /* Add NULL terminator */
5042 		ret = data_rx(osif_dev, buf);
5043 		if (ret != QDF_STATUS_SUCCESS) {
5044 			ol_txrx_err("Frame Rx to HDD failed");
5045 			if (pdev)
5046 				TXRX_STATS_MSDU_INCR(pdev, rx.dropped_err, buf);
5047 			qdf_nbuf_free(buf);
5048 		}
5049 		buf = next_buf;
5050 	}
5051 	return;
5052 
5053 free_buf:
5054 	drop_count = ol_txrx_drop_nbuf_list(buf_list);
5055 	ol_txrx_warn("Dropped frames %u", drop_count);
5056 }
5057 
5058 /**
5059  * ol_rx_data_cb() - data rx callback
5060  * @context: dev handle
5061  * @buf_list: buffer list
5062  * @staid: Station id
5063  *
5064  * Return: None
5065  */
5066 static inline void
5067 ol_rx_data_cb(void *context, qdf_nbuf_t buf_list, uint16_t staid)
5068 {
5069 	struct ol_txrx_pdev_t *pdev = context;
5070 
5071 	ol_rx_data_handler(pdev, buf_list, staid);
5072 }
5073 
5074 /* print for every 16th packet */
5075 #define OL_TXRX_PRINT_RATE_LIMIT_THRESH 0x0f
5076 struct ol_rx_cached_buf *cache_buf;
5077 
5078 /** helper function to drop packets
5079  *  Note: caller must hold the cached bufq lock before invoking
5080  *  this function. Also, it assumes that the pointers passed in
5081  *  are valid (non-NULL)
5082  */
5083 static inline void ol_txrx_drop_frames(
5084 					struct ol_txrx_cached_bufq_t *bufqi,
5085 					qdf_nbuf_t rx_buf_list)
5086 {
5087 	uint32_t dropped = ol_txrx_drop_nbuf_list(rx_buf_list);
5088 
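	/* Update the drop accounting on the cached bufq and refresh the
	 * high water mark.
	 */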
5089 	bufqi->dropped += dropped;
5090 	bufqi->qdepth_no_thresh += dropped;
5091 
5092 	if (bufqi->qdepth_no_thresh > bufqi->high_water_mark)
5093 		bufqi->high_water_mark = bufqi->qdepth_no_thresh;
5094 }
5095 
5096 static QDF_STATUS ol_txrx_enqueue_rx_frames(
5097 					struct ol_txrx_peer_t *peer,
5098 					struct ol_txrx_cached_bufq_t *bufqi,
5099 					qdf_nbuf_t rx_buf_list)
5100 {
5101 	struct ol_rx_cached_buf *cache_buf;
5102 	qdf_nbuf_t buf, next_buf;
5103 	static uint32_t count;
5104 
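	/* Rate-limit the "data before registration" log to one print per
	 * sixteen calls (mask OL_TXRX_PRINT_RATE_LIMIT_THRESH).
	 */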
5105 	if ((count++ & OL_TXRX_PRINT_RATE_LIMIT_THRESH) == 0)
5106 		ol_txrx_info_high(
5107 		   "Data on the peer before it is registered bufq->curr %d bufq->drops %d",
5108 		   bufqi->curr, bufqi->dropped);
5109 
5110 	qdf_spin_lock_bh(&bufqi->bufq_lock);
5111 	if (bufqi->curr >= bufqi->thresh) {
5112 		ol_txrx_drop_frames(bufqi, rx_buf_list);
5113 		qdf_spin_unlock_bh(&bufqi->bufq_lock);
5114 		return QDF_STATUS_E_FAULT;
5115 	}
5116 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
5117 
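	/* Wrap each frame in a cache entry and append it to the peer's
	 * cached bufq; if the peer has become invalid meanwhile, drop the
	 * rest of the list.
	 */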
5118 	buf = rx_buf_list;
5119 	while (buf) {
5120 		QDF_NBUF_CB_RX_PEER_CACHED_FRM(buf) = 1;
5121 		next_buf = qdf_nbuf_queue_next(buf);
5122 		cache_buf = qdf_mem_malloc(sizeof(*cache_buf));
5123 		if (!cache_buf) {
5124 			qdf_nbuf_free(buf);
5125 		} else {
5126 			/* Add NULL terminator */
5127 			qdf_nbuf_set_next(buf, NULL);
5128 			cache_buf->buf = buf;
5129 			if (peer && peer->valid) {
5130 				qdf_spin_lock_bh(&bufqi->bufq_lock);
5131 				list_add_tail(&cache_buf->list,
5132 				      &bufqi->cached_bufq);
5133 				bufqi->curr++;
5134 				qdf_spin_unlock_bh(&bufqi->bufq_lock);
5135 			} else {
5136 				qdf_mem_free(cache_buf);
5137 				rx_buf_list = buf;
5138 				qdf_nbuf_set_next(rx_buf_list, next_buf);
5139 				qdf_spin_lock_bh(&bufqi->bufq_lock);
5140 				ol_txrx_drop_frames(bufqi, rx_buf_list);
5141 				qdf_spin_unlock_bh(&bufqi->bufq_lock);
5142 				return QDF_STATUS_E_FAULT;
5143 			}
5144 		}
5145 		buf = next_buf;
5146 	}
5147 	return QDF_STATUS_SUCCESS;
5148 }
5149 /**
5150  * ol_rx_data_process() - process rx frame
5151  * @peer: peer
5152  * @rx_buf_list: rx buffer list
5153  *
5154  * Return: None
5155  */
5156 void ol_rx_data_process(struct ol_txrx_peer_t *peer,
5157 			qdf_nbuf_t rx_buf_list)
5158 {
5159 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
5160 	ol_txrx_pdev_handle pdev;
5161 	/*
5162 	 * Firmware data path active response will use shim RX thread
5163 	 * T2H MSG running on SIRQ context,
5164 	 * IPA kernel module API should not be called on SIRQ CTXT
5165 	 */
5166 	ol_txrx_rx_fp data_rx = NULL;
5167 
5168 	if (qdf_unlikely(!soc))
5169 		goto drop_rx_buf;
5170 
5171 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
5172 	if ((!peer) || (!pdev)) {
5173 		ol_txrx_err("peer/pdev is NULL");
5174 		goto drop_rx_buf;
5175 	}
5176 
5177 	qdf_assert(peer->vdev);
5178 
5179 	qdf_spin_lock_bh(&peer->peer_info_lock);
5180 	if (peer->state >= OL_TXRX_PEER_STATE_CONN)
5181 		data_rx = peer->vdev->rx;
5182 	qdf_spin_unlock_bh(&peer->peer_info_lock);
5183 
5184 	/*
5185 	 * If there is a data frame from peer before the peer is
5186 	 * registered for data service, enqueue them on to pending queue
5187 	 * which will be flushed to HDD once that station is registered.
5188 	 */
5189 	if (!data_rx) {
5190 		if (ol_txrx_enqueue_rx_frames(peer, &peer->bufq_info,
5191 					      rx_buf_list)
5192 				!= QDF_STATUS_SUCCESS)
5193 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
5194 				  "%s: failed to enqueue rx frm to cached_bufq",
5195 				  __func__);
5196 	} else {
5197 #ifdef WLAN_DP_LEGACY_OL_RX_THREAD
5198 		/*
5199 		 * If the kernel is SMP, schedule rx thread to
5200 		 * better use multicores.
5201 		 */
5202 		if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
5203 			ol_rx_data_handler(pdev, rx_buf_list, peer->local_id);
5204 		} else {
5205 			p_cds_sched_context sched_ctx =
5206 				get_cds_sched_ctxt();
5207 			struct cds_ol_rx_pkt *pkt;
5208 
5209 			if (unlikely(!sched_ctx))
5210 				goto drop_rx_buf;
5211 
5212 			pkt = cds_alloc_ol_rx_pkt(sched_ctx);
5213 			if (!pkt)
5214 				goto drop_rx_buf;
5215 
5216 			pkt->callback = ol_rx_data_cb;
5217 			pkt->context = pdev;
5218 			pkt->Rxpkt = rx_buf_list;
5219 			pkt->staId = peer->local_id;
5220 			cds_indicate_rxpkt(sched_ctx, pkt);
5221 		}
5222 #else                           /* WLAN_DP_LEGACY_OL_RX_THREAD */
5223 		ol_rx_data_handler(pdev, rx_buf_list, peer->local_id);
5224 #endif /* WLAN_DP_LEGACY_OL_RX_THREAD */
5225 	}
5226 
5227 	return;
5228 
5229 drop_rx_buf:
5230 	ol_txrx_drop_nbuf_list(rx_buf_list);
5231 }
5232 
5233 /**
5234  * ol_txrx_register_peer() - register peer
5235  * @sta_desc: sta descriptor
5236  *
5237  * Return: QDF Status
5238  */
5239 static QDF_STATUS ol_txrx_register_peer(struct ol_txrx_desc_type *sta_desc)
5240 {
5241 	struct ol_txrx_peer_t *peer;
5242 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
5243 	ol_txrx_pdev_handle pdev;
5244 	union ol_txrx_peer_update_param_t param;
5245 	struct privacy_exemption privacy_filter;
5246 
5247 	if (!soc) {
5248 		ol_txrx_err("Soc is NULL");
5249 		return QDF_STATUS_E_INVAL;
5250 	}
5251 
5252 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
5253 
5254 	if (!pdev) {
5255 		ol_txrx_err("Pdev is NULL");
5256 		return QDF_STATUS_E_INVAL;
5257 	}
5258 
5259 	peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
5260 					 sta_desc->peer_addr.bytes);
5261 
5262 	if (!peer)
5263 		return QDF_STATUS_E_FAULT;
5264 
5265 	qdf_spin_lock_bh(&peer->peer_info_lock);
5266 	peer->state = OL_TXRX_PEER_STATE_CONN;
5267 	qdf_spin_unlock_bh(&peer->peer_info_lock);
5268 
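	/* Record the peer's QoS capability through the peer update
	 * interface.
	 */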
5269 	param.qos_capable = sta_desc->is_qos_enabled;
5270 	ol_txrx_peer_update(peer->vdev, peer->mac_addr.raw, &param,
5271 			    ol_txrx_peer_update_qos_capable);
5272 
5273 	if (sta_desc->is_wapi_supported) {
5274 		/*Privacy filter to accept unencrypted WAI frames */
5275 		privacy_filter.ether_type = ETHERTYPE_WAI;
5276 		privacy_filter.filter_type = PRIVACY_FILTER_ALWAYS;
5277 		privacy_filter.packet_type = PRIVACY_FILTER_PACKET_BOTH;
5278 		ol_txrx_set_privacy_filters(peer->vdev, &privacy_filter, 1);
5279 	}
5280 
5281 	ol_txrx_flush_rx_frames(peer, 0);
5282 	return QDF_STATUS_SUCCESS;
5283 }
5284 
5285 /**
5286  * ol_txrx_register_ocb_peer - Function to register the OCB peer
5287  * @mac_addr: MAC address of the self peer
5288  *
5289  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
5290  */
5291 static QDF_STATUS ol_txrx_register_ocb_peer(uint8_t *mac_addr)
5292 {
5293 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
5294 	ol_txrx_pdev_handle pdev;
5295 	ol_txrx_peer_handle peer;
5296 
5297 	if (!soc) {
5298 		ol_txrx_err("Unable to find soc!");
5299 		return QDF_STATUS_E_FAILURE;
5300 	}
5301 
5302 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
5303 
5304 	if (!pdev) {
5305 		ol_txrx_err("Unable to find pdev!");
5306 		return QDF_STATUS_E_FAILURE;
5307 	}
5308 
5309 	peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
5310 					 mac_addr);
5311 	if (!peer) {
5312 		ol_txrx_err("Unable to find OCB peer!");
5313 		return QDF_STATUS_E_FAILURE;
5314 	}
5315 
5316 	ol_txrx_set_ocb_peer(pdev, peer);
5317 
5318 	/* Set peer state to authenticated */
5319 	ol_txrx_peer_state_update((struct cdp_soc_t *)soc, peer->mac_addr.raw,
5320 				  OL_TXRX_PEER_STATE_AUTH);
5321 
5322 	return QDF_STATUS_SUCCESS;
5323 }
5324 
5325 /**
5326  * ol_txrx_set_ocb_peer - Function to store the OCB peer
5327  * @pdev: Handle to the HTT instance
5328  * @peer: Pointer to the peer
5329  */
5330 void ol_txrx_set_ocb_peer(struct ol_txrx_pdev_t *pdev,
5331 			  struct ol_txrx_peer_t *peer)
5332 {
5333 	if (!pdev)
5334 		return;
5335 
5336 	pdev->ocb_peer = peer;
5337 	pdev->ocb_peer_valid = (NULL != peer);
5338 }
5339 
5340 /**
5341  * ol_txrx_get_ocb_peer - Function to retrieve the OCB peer
5342  * @pdev: Handle to the HTT instance
5343  * @peer: Pointer to the returned peer
5344  *
5345  * Return: true if the peer is valid, false if not
5346  */
5347 bool ol_txrx_get_ocb_peer(struct ol_txrx_pdev_t *pdev,
5348 			  struct ol_txrx_peer_t **peer)
5349 {
5350 	int rc;
5351 
5352 	if ((!pdev) || (!peer)) {
5353 		rc = false;
5354 		goto exit;
5355 	}
5356 
5357 	if (pdev->ocb_peer_valid) {
5358 		*peer = pdev->ocb_peer;
5359 		rc = true;
5360 	} else {
5361 		rc = false;
5362 	}
5363 
5364 exit:
5365 	return rc;
5366 }
5367 
5368 /**
5369  * ol_txrx_register_pause_cb() - register pause callback
5370  * @soc_hdl: Datapath soc handle
5371  * @pause_cb: pause callback
5372  *
5373  * Return: QDF status
5374  */
5375 static QDF_STATUS ol_txrx_register_pause_cb(struct cdp_soc_t *soc_hdl,
5376 					    tx_pause_callback pause_cb)
5377 {
5378 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
5379 	ol_txrx_pdev_handle pdev;
5380 
5381 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
5382 	if (!pdev || !pause_cb) {
5383 		ol_txrx_err("pdev or pause_cb is NULL");
5384 		return QDF_STATUS_E_INVAL;
5385 	}
5386 	pdev->pause_cb = pause_cb;
5387 	return QDF_STATUS_SUCCESS;
5388 }
5389 
5390 #ifdef RECEIVE_OFFLOAD
5391 /**
5392  * ol_txrx_offld_flush_handler() - offld flush handler
5393  * @context: dev handle
5394  * @rxpkt: rx data
5395  * @staid: station id
5396  *
5397  * This function handles an offld flush indication.
5398  * If the rx thread is enabled, it will be invoked by the rx
5399  * thread else it will be called in the tasklet context
5400  *
5401  * Return: none
5402  */
5403 static void ol_txrx_offld_flush_handler(void *context,
5404 					qdf_nbuf_t rxpkt,
5405 					uint16_t staid)
5406 {
5407 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
5408 	ol_txrx_pdev_handle pdev;
5409 
5410 	if (qdf_unlikely(!soc)) {
5411 		qdf_assert(0);
5412 		return;
5413 	}
5414 
5415 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
5416 	if (qdf_unlikely(!pdev)) {
5417 		ol_txrx_err("Invalid pdev context");
5418 		qdf_assert(0);
5419 		return;
5420 	}
5421 
5422 	if (pdev->offld_flush_cb)
5423 		pdev->offld_flush_cb(context);
5424 	else
5425 		ol_txrx_err("offld_flush_cb NULL");
5426 }
5427 
5428 /**
5429  * ol_txrx_offld_flush() - offld flush callback
5430  * @data: opaque data pointer
5431  *
5432  * This is the callback registered with CE to trigger
5433  * an offld flush
5434  *
5435  * Return: none
5436  */
5437 static void ol_txrx_offld_flush(void *data)
5438 {
5439 	p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
5440 	struct cds_ol_rx_pkt *pkt;
5441 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
5442 	ol_txrx_pdev_handle pdev;
5443 
5444 	if (qdf_unlikely(!sched_ctx))
5445 		return;
5446 
5447 	if (qdf_unlikely(!soc))
5448 		return;
5449 
5450 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
5451 	if (qdf_unlikely(!pdev)) {
5452 		ol_txrx_err("TXRX module context is NULL");
5453 		return;
5454 	}
5455 
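	/* Without a dedicated rx thread the flush handler runs inline;
	 * otherwise it is handed off to the rx thread as an rx pkt entry.
	 */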
5456 	if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
5457 		ol_txrx_offld_flush_handler(data, NULL, 0);
5458 	} else {
5459 		pkt = cds_alloc_ol_rx_pkt(sched_ctx);
5460 		if (qdf_unlikely(!pkt))
5461 			return;
5462 
5463 		pkt->callback = ol_txrx_offld_flush_handler;
5464 		pkt->context = data;
5465 		pkt->Rxpkt = NULL;
5466 		pkt->staId = 0;
5467 		cds_indicate_rxpkt(sched_ctx, pkt);
5468 	}
5469 }
5470 
5471 /**
5472  * ol_register_offld_flush_cb() - register the offld flush callback
5473  * @offld_flush_cb: flush callback function
5475  *
5476  * Store the offld flush callback provided and in turn
5477  * register OL's offld flush handler with CE
5478  *
5479  * Return: none
5480  */
5481 static void ol_register_offld_flush_cb(void (offld_flush_cb)(void *))
5482 {
5483 	struct hif_opaque_softc *hif_device;
5484 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
5485 	ol_txrx_pdev_handle pdev;
5486 
5487 	if (qdf_unlikely(!soc)) {
5488 		TXRX_ASSERT2(0);
5489 		goto out;
5490 	}
5491 
5492 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
5493 	if (!pdev) {
5494 		ol_txrx_err("pdev NULL!");
5495 		TXRX_ASSERT2(0);
5496 		goto out;
5497 	}
5498 	if (pdev->offld_flush_cb) {
5499 		ol_txrx_info("offld already initialised");
5500 		if (pdev->offld_flush_cb != offld_flush_cb) {
5501 			ol_txrx_err(
5502 				   "offld_flush_cb differs from the previously registered callback");
5503 			TXRX_ASSERT2(0);
5504 			goto out;
5505 		}
5506 		goto out;
5507 	}
5508 	pdev->offld_flush_cb = offld_flush_cb;
5509 	hif_device = cds_get_context(QDF_MODULE_ID_HIF);
5510 
5511 	if (qdf_unlikely(!hif_device)) {
5512 		qdf_assert(0);
5513 		goto out;
5514 	}
5515 
5516 	hif_offld_flush_cb_register(hif_device, ol_txrx_offld_flush);
5517 
5518 out:
5519 	return;
5520 }
5521 
5522 /**
5523  * ol_deregister_offld_flush_cb() - deregister the offld flush callback
5524  *
5525  * Remove the offld flush callback provided and in turn
5526  * deregister OL's offld flush handler with CE
5527  *
5528  * Return: none
5529  */
5530 static void ol_deregister_offld_flush_cb(void)
5531 {
5532 	struct hif_opaque_softc *hif_device;
5533 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
5534 	ol_txrx_pdev_handle pdev;
5535 
5536 	if (qdf_unlikely(!soc))
5537 		return;
5538 
5539 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
5540 	if (!pdev) {
5541 		ol_txrx_err("pdev NULL!");
5542 		return;
5543 	}
5544 	hif_device = cds_get_context(QDF_MODULE_ID_HIF);
5545 
5546 	if (qdf_unlikely(!hif_device)) {
5547 		qdf_assert(0);
5548 		return;
5549 	}
5550 
5551 	hif_offld_flush_cb_deregister(hif_device);
5552 
5553 	pdev->offld_flush_cb = NULL;
5554 }
5555 #endif /* RECEIVE_OFFLOAD */
5556 
5557 /**
5558  * ol_register_data_stall_detect_cb() - register data stall callback
5559  * @soc_hdl: Datapath soc handle
5560  * @pdev_id: id of data path pdev handle
5561  * @data_stall_detect_callback: data stall callback function
5562  *
5563  *
5564  * Return: QDF_STATUS Enumeration
5565  */
5566 static QDF_STATUS ol_register_data_stall_detect_cb(
5567 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
5568 			data_stall_detect_cb data_stall_detect_callback)
5569 {
5570 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
5571 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
5572 
5573 	if (!pdev) {
5574 		ol_txrx_err("pdev NULL!");
5575 		return QDF_STATUS_E_INVAL;
5576 	}
5577 	pdev->data_stall_detect_callback = data_stall_detect_callback;
5578 	return QDF_STATUS_SUCCESS;
5579 }
5580 
5581 /**
5582  * ol_deregister_data_stall_detect_cb() - de-register data stall callback
5583  * @soc_hdl: Datapath soc handle
5584  * @pdev_id: id of data path pdev handle
5585  * @data_stall_detect_callback: data stall callback function
5586  *
5587  *
5588  * Return: QDF_STATUS Enumeration
5589  */
5590 static QDF_STATUS ol_deregister_data_stall_detect_cb(
5591 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
5592 			data_stall_detect_cb data_stall_detect_callback)
5593 {
5594 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
5595 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
5596 
5597 	if (!pdev) {
5598 		ol_txrx_err("pdev NULL!");
5599 		return QDF_STATUS_E_INVAL;
5600 	}
5601 	pdev->data_stall_detect_callback = NULL;
5602 	return QDF_STATUS_SUCCESS;
5603 }
5604 
5605 /**
5606  * ol_txrx_post_data_stall_event() - post data stall event
5607  * @indicator: Module triggering data stall
5608  * @data_stall_type: data stall event type
5609  * @pdev_id: pdev id
5610  * @vdev_id_bitmap: vdev id bitmap
5611  * @recovery_type: data stall recovery type
5612  *
5613  * Return: None
5614  */
5615 static void ol_txrx_post_data_stall_event(
5616 				struct cdp_soc_t *soc_hdl,
5617 				enum data_stall_log_event_indicator indicator,
5618 				enum data_stall_log_event_type data_stall_type,
5619 				uint32_t pdev_id, uint32_t vdev_id_bitmap,
5620 				enum data_stall_log_recovery_type recovery_type)
5621 {
5622 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
5623 	struct data_stall_event_info data_stall_info;
5624 	ol_txrx_pdev_handle pdev;
5625 
5626 	if (qdf_unlikely(!soc)) {
5627 		ol_txrx_err("soc is NULL");
5628 		return;
5629 	}
5630 
5631 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
5632 	if (!pdev) {
5633 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5634 			  "%s: pdev is NULL.", __func__);
5635 		return;
5636 	}
5637 
5638 	if (!pdev->data_stall_detect_callback) {
5639 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5640 			  "%s: data stall cb not registered", __func__);
5641 		return;
5642 	}
5643 
5644 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
5645 		  "%s: data_stall_type: %x pdev_id: %d",
5646 		  __func__, data_stall_type, pdev_id);
5647 
5648 	data_stall_info.indicator = indicator;
5649 	data_stall_info.data_stall_type = data_stall_type;
5650 	data_stall_info.vdev_id_bitmap = vdev_id_bitmap;
5651 	data_stall_info.pdev_id = pdev_id;
5652 	data_stall_info.recovery_type = recovery_type;
5653 
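	/* For FW rx refill failures, also log the HTT rx ring state and
	 * record the refill failure before invoking the stall callback.
	 */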
5654 	if (data_stall_info.data_stall_type ==
5655 				DATA_STALL_LOG_FW_RX_REFILL_FAILED) {
5656 		htt_log_rx_ring_info(pdev->htt_pdev);
5657 		htt_rx_refill_failure(pdev->htt_pdev);
5658 	}
5659 
5660 	pdev->data_stall_detect_callback(&data_stall_info);
5661 }
5662 
5663 void
5664 ol_txrx_dump_pkt(qdf_nbuf_t nbuf, uint32_t nbuf_paddr, int len)
5665 {
5666 	qdf_print(" Pkt: VA 0x%pK PA 0x%llx len %d\n",
5667 		  qdf_nbuf_data(nbuf), (unsigned long long int)nbuf_paddr, len);
5668 	print_hex_dump(KERN_DEBUG, "Pkt:   ", DUMP_PREFIX_ADDRESS, 16, 4,
5669 		       qdf_nbuf_data(nbuf), len, true);
5670 }
5671 
5672 struct cdp_vdev *ol_txrx_get_vdev_from_vdev_id(uint8_t vdev_id)
5673 {
5674 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
5675 	ol_txrx_vdev_handle vdev = NULL;
5676 
5677 	if (qdf_unlikely(!soc))
5678 		return NULL;
5679 
5680 	vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc, vdev_id);
5681 
5682 	return ol_txrx_vdev_t_to_cdp_vdev(vdev);
5683 }
5684 
5685 struct ol_txrx_vdev_t *ol_txrx_get_vdev_from_soc_vdev_id(
5686 				struct ol_txrx_soc_t *soc, uint8_t vdev_id)
5687 {
5688 	ol_txrx_pdev_handle pdev;
5689 	ol_txrx_vdev_handle vdev = NULL;
5690 
5691 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
5692 	if (qdf_unlikely(!pdev))
5693 		return NULL;
5694 
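	/* Linear search of the pdev's vdev list for a matching vdev_id */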
5695 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5696 		if (vdev->vdev_id == vdev_id)
5697 			break;
5698 	}
5699 
5700 	return vdev;
5701 }
5702 
5703 /**
5704  * ol_txrx_get_mon_vdev_from_pdev() - get monitor mode vdev from pdev
5705  * @soc_hdl: datapath soc handle
5706  * @pdev_id: the physical device id the virtual device belongs to
5707  *
5708  * Return: vdev id
5709  *            error if not found.
5710  */
5711 uint8_t ol_txrx_get_mon_vdev_from_pdev(struct cdp_soc_t *soc_hdl,
5712 				       uint8_t pdev_id)
5713 {
5714 	struct ol_txrx_soc_t *soc = (struct ol_txrx_soc_t *)soc_hdl;
5715 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
5716 
5717 	if (qdf_unlikely(!pdev))
5718 		return -EINVAL;
5719 
5720 	return pdev->monitor_vdev->vdev_id;
5721 }
5722 
5723 /**
5724  * ol_txrx_set_wisa_mode() - set wisa mode
5725  * @soc_hdl: Datapath soc handle
5726  * @vdev_id: vdev_id
5727  * @enable: enable flag
5728  *
5729  * Return: QDF STATUS
5730  */
5731 static QDF_STATUS ol_txrx_set_wisa_mode(struct cdp_soc_t *soc_hdl,
5732 					uint8_t vdev_id, bool enable)
5733 {
5734 	struct ol_txrx_vdev_t *vdev =
5735 		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
5736 
5737 	if (!vdev)
5738 		return QDF_STATUS_E_INVAL;
5739 
5740 	vdev->is_wisa_mode_enable = enable;
5741 	return QDF_STATUS_SUCCESS;
5742 }
5743 
5744 /**
5745  * ol_txrx_get_vdev_id() - get interface id from interface context
5746  * @pvdev: vdev handle
5747  *
5748  * Return: virtual interface id
5749  */
5750 static uint16_t ol_txrx_get_vdev_id(struct cdp_vdev *pvdev)
5751 {
5752 	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
5753 
5754 	return vdev->vdev_id;
5755 }
5756 
5757 /**
5758  * ol_txrx_soc_attach_target() - attach soc target
5759  * @soc: soc handle
5760  *
5761  * MCL legacy OL does nothing here
5762  *
5763  * Return: 0
5764  */
5765 static QDF_STATUS ol_txrx_soc_attach_target(ol_txrx_soc_handle soc)
5766 {
5767 	/* MCL legacy OL does nothing here */
5768 	return QDF_STATUS_SUCCESS;
5769 }
5770 
5771 /**
5772  * ol_txrx_soc_detach() - detach soc target
5773  * @soc: soc handle
5774  *
5775  * Frees the soc structure allocated in ol_txrx_soc_attach()
5776  *
5777  * Return: none
5778  */
5779 static void ol_txrx_soc_detach(struct cdp_soc_t *soc)
5780 {
5781 	qdf_mem_free(soc);
5782 }
5783 
5784 #ifdef REMOVE_PKT_LOG
5785 /**
5786  * ol_txrx_pkt_log_con_service() - connect packet log service
5787  * @soc_hdl: Datapath soc handle
5788  * @pdev_id: id of data path pdev handle
5789  * @scn: device context
5790  *
5791  * Return: none
5792  */
5793 static void ol_txrx_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
5794 					uint8_t pdev_id, void *scn)
5795 {
5796 }
5797 
5798 /**
5799  * ol_txrx_pkt_log_exit() - cleanup packet log info
5800  * @soc_hdl: Datapath soc handle
5801  * @pdev_id: id of data path pdev handle
5802  *
5803  * Return: none
5804  */
5805 static void ol_txrx_pkt_log_exit(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
5806 {
5807 }
5808 
5809 #else
5810 static void ol_txrx_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
5811 					uint8_t pdev_id, void *scn)
5812 {
5813 	htt_pkt_log_init(soc_hdl, pdev_id, scn);
5814 	pktlog_htc_attach();
5815 }
5816 
5817 static void ol_txrx_pkt_log_exit(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
5818 {
5819 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
5820 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
5821 
5822 	if (!pdev) {
5823 		ol_txrx_err("pdev handle is NULL");
5824 		return;
5825 	}
5826 
5827 	htt_pktlogmod_exit(pdev);
5828 }
5829 #endif
5830 
5831 /* OL wrapper functions for CDP abstraction */
5832 /**
5833  * ol_txrx_wrapper_flush_rx_frames() - flush rx frames on the queue
5834  * @soc: data path soc handle
5835  * @pdev_id: datapath pdev identifier
5836  * @peer_mac: peer mac address
5837  * @drop: rx packets drop or deliver
5838  *
5839  * Return: none
5840  */
5841 static void ol_txrx_wrapper_flush_rx_frames(struct cdp_soc_t *soc_hdl,
5842 					    uint8_t pdev_id, void *peer_mac,
5843 					    bool drop)
5844 {
5845 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
5846 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
5847 	struct ol_txrx_peer_t *peer;
5848 
5849 	if (!pdev) {
5850 		ol_txrx_err("pdev is NULL");
5851 		return;
5852 	}
5853 
5854 	peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac, 0, 1,
5855 						   PEER_DEBUG_ID_OL_INTERNAL);
5856 	if (!peer) {
5857 		ol_txrx_err("peer "QDF_MAC_ADDR_FMT" not found",
5858 			    QDF_MAC_ADDR_REF(peer_mac));
5859 		return;
5860 	}
5861 
5862 	ol_txrx_flush_rx_frames(peer, drop);
5863 }
5864 
5865 /**
5866  * ol_txrx_wrapper_register_peer() - register peer
5867  * @soc_hdl: datapath soc handle
 * @pdev_id: pdev identifier
5868  * @sta_desc: peer description
5869  *
5870  * Return: QDF STATUS
5871  */
5872 static QDF_STATUS ol_txrx_wrapper_register_peer(
5873 					struct cdp_soc_t *soc_hdl,
5874 					uint8_t pdev_id,
5875 					struct ol_txrx_desc_type *sta_desc)
5876 {
5877 	return ol_txrx_register_peer(sta_desc);
5878 }
5879 
5880 /**
5881  * ol_txrx_wrapper_cfg_is_high_latency() - check if the bus is high latency
5882  * @cfg_pdev: cfg pdev handle
5883  *
5884  * Return: 1 high latency bus
5885  *         0 low latency bus
5886  */
5887 static int ol_txrx_wrapper_cfg_is_high_latency(struct cdp_cfg *cfg_pdev)
5888 {
5889 	return ol_cfg_is_high_latency(cfg_pdev);
5890 }
5891 
5892 /**
5893  * ol_txrx_wrapper_peer_state_update() - specify the peer's authentication state
5894  * @soc_hdl - datapath soc handle
5895  * @peer_mac - mac address of which peer has changed its state
5896  * @state - the new state of the peer
5897  *
5898  *  Specify the peer's authentication state (none, connected, authenticated)
5899  *  to allow the data SW to determine whether to filter out invalid data frames.
5900  *  (In the "connected" state, where security is enabled, but authentication
5901  *  has not completed, tx and rx data frames other than EAPOL or WAPI should
5902  *  be discarded.)
5903  *  This function is only relevant for systems in which the tx and rx filtering
5904  *  are done in the host rather than in the target.
5905  *
5906  * Return: QDF Status
5907  */
5908 static QDF_STATUS ol_txrx_wrapper_peer_state_update(
5909 						struct cdp_soc_t *soc_hdl,
5910 						uint8_t *peer_mac,
5911 						enum ol_txrx_peer_state state)
5912 {
5913 	return ol_txrx_peer_state_update(soc_hdl, peer_mac, state);
5914 }
5915 
5916 /**
5917  * ol_txrx_wrapper_set_flow_control_parameters() - set flow control parameters
5918  * @cfg_ctx: cfg context
5919  * @cfg_param: cfg parameters
5920  *
5921  * Return: none
5922  */
5923 static void
5924 ol_txrx_wrapper_set_flow_control_parameters(struct cdp_cfg *cfg_pdev,
5925 		void *cfg_param)
5926 {
5927 	return ol_tx_set_flow_control_parameters(
5928 		cfg_pdev,
5929 		(struct txrx_pdev_cfg_param_t *)cfg_param);
5930 }
5931 
5932 /**
5933  * ol_txrx_get_cfg() - get ini/cfg values in legacy dp
5934  * @soc_hdl: soc context
5935  * @cfg_param: cfg parameters
5936  *
5937  * Return: none
5938  */
5939 static uint32_t ol_txrx_get_cfg(struct cdp_soc_t *soc_hdl, enum cdp_dp_cfg cfg)
5940 {
5941 	struct txrx_pdev_cfg_t *cfg_ctx;
5942 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
5943 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(
5944 							soc,
5945 							OL_TXRX_PDEV_ID);
5946 	uint32_t value = 0;
5947 
5948 	if (!pdev) {
5949 		qdf_print("pdev is NULL");
5950 		return 0;
5951 	}
5952 
5953 	cfg_ctx = (struct txrx_pdev_cfg_t *)(pdev->ctrl_pdev);
5954 	switch (cfg) {
5955 	case cfg_dp_enable_data_stall:
5956 		value = cfg_ctx->enable_data_stall_detection;
5957 		break;
5958 	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
5959 		value = cfg_ctx->ip_tcp_udp_checksum_offload;
5960 		break;
5961 	case cfg_dp_enable_p2p_ip_tcp_udp_checksum_offload:
5962 		value = cfg_ctx->p2p_ip_tcp_udp_checksum_offload;
5963 		break;
5964 	case cfg_dp_enable_nan_ip_tcp_udp_checksum_offload:
5965 		value = cfg_ctx->nan_tcp_udp_checksumoffload;
5966 		break;
5967 	case cfg_dp_tso_enable:
5968 		value = cfg_ctx->tso_enable;
5969 		break;
5970 	case cfg_dp_lro_enable:
5971 		value = cfg_ctx->lro_enable;
5972 		break;
5973 	case cfg_dp_sg_enable:
5974 		value = cfg_ctx->sg_enable;
5975 		break;
5976 	case cfg_dp_gro_enable:
5977 		value = cfg_ctx->gro_enable;
5978 		break;
5979 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
5980 	case cfg_dp_tx_flow_start_queue_offset:
5981 		value = cfg_ctx->tx_flow_start_queue_offset;
5982 		break;
5983 	case cfg_dp_tx_flow_stop_queue_threshold:
5984 		value = cfg_ctx->tx_flow_stop_queue_th;
5985 		break;
5986 #endif
5987 	case cfg_dp_ipa_uc_tx_buf_size:
5988 		value = cfg_ctx->uc_tx_buffer_size;
5989 		break;
5990 	case cfg_dp_ipa_uc_tx_partition_base:
5991 		value = cfg_ctx->uc_tx_partition_base;
5992 		break;
5993 	case cfg_dp_ipa_uc_rx_ind_ring_count:
5994 		value = cfg_ctx->uc_rx_indication_ring_count;
5995 		break;
5996 	case cfg_dp_enable_flow_steering:
5997 		value = cfg_ctx->enable_flow_steering;
5998 		break;
5999 	case cfg_dp_reorder_offload_supported:
6000 		value = cfg_ctx->is_full_reorder_offload;
6001 		break;
6002 	case cfg_dp_ce_classify_enable:
6003 		value = cfg_ctx->ce_classify_enabled;
6004 		break;
6005 	case cfg_dp_disable_intra_bss_fwd:
6006 		value = cfg_ctx->disable_intra_bss_fwd;
6007 		break;
6008 	case cfg_dp_pktlog_buffer_size:
6009 		value = cfg_ctx->pktlog_buffer_size;
6010 		break;
6011 	default:
6012 		value =  0;
6013 		break;
6014 	}
6015 
6016 	return value;
6017 }
6018 
6019 /*
6020  * ol_get_pdev_param: function to get parameters from pdev
6021  * @cdp_soc: txrx soc handle
6022  * @pdev_id: id of pdev handle
6023  * @param: parameter type to be retrieved
6024  * @val: pointer to hold the retrieved value
6025  *
6026  * Return: SUCCESS or FAILURE
6027  */
6028 static QDF_STATUS ol_get_pdev_param(struct cdp_soc_t *soc_hdl,  uint8_t pdev_id,
6029 				    enum cdp_pdev_param_type param,
6030 				    cdp_config_param_type *val)
6031 {
6032 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
6033 	struct ol_txrx_pdev_t *olpdev = ol_txrx_get_pdev_from_pdev_id(soc,
6034 								      pdev_id);
6035 	struct cdp_pdev *pdev = ol_txrx_pdev_t_to_cdp_pdev(olpdev);
6036 
6037 	if (!pdev)
6038 		return QDF_STATUS_E_FAILURE;
6039 
6040 	switch (param) {
6041 	case CDP_TX_PENDING:
6042 		val->cdp_pdev_param_tx_pending = ol_txrx_get_tx_pending(pdev);
6043 		break;
6044 	default:
6045 		return QDF_STATUS_E_INVAL;
6046 	}
6047 
6048 	return QDF_STATUS_SUCCESS;
6049 }
6050 
6051 /*
6052  * ol_set_pdev_param: function to set parameters in pdev
6053  * @cdp_soc: txrx soc handle
6054  * @pdev_id: id of pdev handle
6055  * @param: parameter type to be set
6056  * @val: value to be set
6057  *
6058  * Return: SUCCESS or FAILURE
6059  */
6060 static QDF_STATUS ol_set_pdev_param(struct cdp_soc_t *soc_hdl,  uint8_t pdev_id,
6061 				    enum cdp_pdev_param_type param,
6062 				    cdp_config_param_type val)
6063 {
6064 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
6065 	struct ol_txrx_pdev_t *olpdev = ol_txrx_get_pdev_from_pdev_id(soc,
6066 								      pdev_id);
6067 	struct cdp_pdev *pdev = ol_txrx_pdev_t_to_cdp_pdev(olpdev);
6068 
6069 	if (!pdev)
6070 		return QDF_STATUS_E_FAILURE;
6071 
6072 	switch (param) {
6073 	case CDP_MONITOR_CHANNEL:
6074 	{
6075 		ol_htt_mon_note_chan(pdev, val.cdp_pdev_param_monitor_chan);
6076 		break;
6077 	}
6078 	default:
6079 		return QDF_STATUS_E_INVAL;
6080 	}
6081 
6082 	return QDF_STATUS_SUCCESS;
6083 }
6084 
6085 #ifdef WDI_EVENT_ENABLE
6086 void *ol_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
6087 {
6088 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
6089 	struct ol_txrx_pdev_t *pdev = ol_txrx_get_pdev_from_pdev_id(soc,
6090 								    pdev_id);
6091 
6092 	if (pdev)
6093 		return pdev->pl_dev;
6094 
6095 	return NULL;
6096 }
6097 #endif
6098 
6099 /**
6100  * ol_register_packetdump_callback() - registers
6101  *  tx data packet, tx mgmt. packet and rx data packet
6102  *  dump callback handler.
6103  *
6104  * @soc_hdl: Datapath soc handle
6105  * @pdev_id: id of data path pdev handle
6106  * @ol_tx_packetdump_cb: tx packetdump cb
6107  * @ol_rx_packetdump_cb: rx packetdump cb
6108  *
6109  * This function is used to register tx data pkt, tx mgmt.
6110  * pkt and rx data pkt dump callback
6111  *
6112  * Return: None
6113  *
6114  */
6115 static inline
6116 void ol_register_packetdump_callback(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
6117 				     ol_txrx_pktdump_cb ol_tx_packetdump_cb,
6118 				     ol_txrx_pktdump_cb ol_rx_packetdump_cb)
6119 {
6120 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
6121 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
6122 
6123 	if (!pdev) {
6124 		ol_txrx_err("pdev is NULL");
6125 		return;
6126 	}
6127 
6128 	pdev->ol_tx_packetdump_cb = ol_tx_packetdump_cb;
6129 	pdev->ol_rx_packetdump_cb = ol_rx_packetdump_cb;
6130 }
6131 
6132 /**
6133  * ol_deregister_packetdump_callback() - deregisters
6134  *  tx data packet, tx mgmt. packet and rx data packet
6135  *  dump callback handler
6136  * @soc_hdl: Datapath soc handle
6137  * @pdev_id: id of data path pdev handle
6138  *
6139  * This function is used to deregister tx data pkt.,
6140  * tx mgmt. pkt and rx data pkt. dump callback
6141  *
6142  * Return: None
6143  *
6144  */
6145 static inline
6146 void ol_deregister_packetdump_callback(struct cdp_soc_t *soc_hdl,
6147 				       uint8_t pdev_id)
6148 {
6149 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
6150 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
6151 
6152 	if (!pdev) {
6153 		ol_txrx_err("pdev is NULL");
6154 		return;
6155 	}
6156 
6157 	pdev->ol_tx_packetdump_cb = NULL;
6158 	pdev->ol_rx_packetdump_cb = NULL;
6159 }
6160 
6161 static struct cdp_cmn_ops ol_ops_cmn = {
6162 	.txrx_soc_attach_target = ol_txrx_soc_attach_target,
6163 	.txrx_vdev_attach = ol_txrx_vdev_attach,
6164 	.txrx_vdev_detach = ol_txrx_vdev_detach,
6165 	.txrx_pdev_attach = ol_txrx_pdev_attach,
6166 	.txrx_pdev_attach_target = ol_txrx_pdev_attach_target,
6167 	.txrx_pdev_post_attach = ol_txrx_pdev_post_attach,
6168 	.txrx_pdev_pre_detach = ol_txrx_pdev_pre_detach,
6169 	.txrx_pdev_detach = ol_txrx_pdev_detach,
6170 	.txrx_peer_create = ol_txrx_peer_attach,
6171 	.txrx_peer_setup = NULL,
6172 	.txrx_peer_teardown = NULL,
6173 	.txrx_peer_delete = ol_txrx_peer_detach,
6174 	.txrx_peer_delete_sync = ol_txrx_peer_detach_sync,
6175 	.txrx_vdev_register = ol_txrx_vdev_register,
6176 	.txrx_soc_detach = ol_txrx_soc_detach,
6177 	.txrx_get_vdev_mac_addr = ol_txrx_get_vdev_mac_addr,
6178 	.txrx_get_ctrl_pdev_from_vdev = ol_txrx_get_ctrl_pdev_from_vdev,
6179 	.txrx_get_mon_vdev_from_pdev = ol_txrx_get_mon_vdev_from_pdev,
6180 	.txrx_mgmt_send_ext = ol_txrx_mgmt_send_ext,
6181 	.txrx_mgmt_tx_cb_set = ol_txrx_mgmt_tx_cb_set,
6182 	.txrx_data_tx_cb_set = ol_txrx_data_tx_cb_set,
6183 	.txrx_peer_unmap_sync_cb_set = ol_txrx_peer_unmap_sync_cb_set,
6184 	.flush_cache_rx_queue = ol_txrx_flush_cache_rx_queue,
6185 	.txrx_fw_stats_get = ol_txrx_fw_stats_get,
6186 	.display_stats = ol_txrx_display_stats,
6187 	.txrx_get_cfg = ol_txrx_get_cfg,
6188 	/* TODO: Add other functions */
6189 };
6190 
6191 static struct cdp_misc_ops ol_ops_misc = {
6192 	.set_ibss_vdev_heart_beat_timer =
6193 		ol_txrx_set_ibss_vdev_heart_beat_timer,
6194 #ifdef CONFIG_HL_SUPPORT
6195 	.set_wmm_param = ol_txrx_set_wmm_param,
6196 #endif /* CONFIG_HL_SUPPORT */
6197 	.bad_peer_txctl_set_setting = ol_txrx_bad_peer_txctl_set_setting,
6198 	.bad_peer_txctl_update_threshold =
6199 		ol_txrx_bad_peer_txctl_update_threshold,
6200 	.hl_tdls_flag_reset = ol_txrx_hl_tdls_flag_reset,
6201 	.tx_non_std = ol_tx_non_std,
6202 	.get_vdev_id = ol_txrx_get_vdev_id,
6203 	.get_tx_ack_stats = ol_txrx_get_tx_ack_stats,
6204 	.set_wisa_mode = ol_txrx_set_wisa_mode,
6205 	.txrx_data_stall_cb_register = ol_register_data_stall_detect_cb,
6206 	.txrx_data_stall_cb_deregister = ol_deregister_data_stall_detect_cb,
6207 	.txrx_post_data_stall_event = ol_txrx_post_data_stall_event,
6208 #ifdef FEATURE_RUNTIME_PM
6209 	.runtime_suspend = ol_txrx_runtime_suspend,
6210 	.runtime_resume = ol_txrx_runtime_resume,
6211 #endif /* FEATURE_RUNTIME_PM */
6212 	.get_opmode = ol_txrx_get_opmode,
6213 	.mark_first_wakeup_packet = ol_tx_mark_first_wakeup_packet,
6214 	.update_mac_id = ol_txrx_update_mac_id,
6215 	.flush_rx_frames = ol_txrx_wrapper_flush_rx_frames,
6216 	.get_intra_bss_fwd_pkts_count = ol_get_intra_bss_fwd_pkts_count,
6217 	.pkt_log_init = htt_pkt_log_init,
6218 	.pkt_log_con_service = ol_txrx_pkt_log_con_service,
6219 	.pkt_log_exit = ol_txrx_pkt_log_exit,
6220 	.register_pktdump_cb = ol_register_packetdump_callback,
6221 	.unregister_pktdump_cb = ol_deregister_packetdump_callback,
6222 #ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
6223 	.pdev_reset_driver_del_ack = ol_tx_pdev_reset_driver_del_ack,
6224 	.vdev_set_driver_del_ack_enable = ol_tx_vdev_set_driver_del_ack_enable,
6225 #endif
6226 #ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE
6227 	.vdev_set_bundle_require_flag = ol_tx_vdev_set_bundle_require,
6228 	.pdev_reset_bundle_require_flag = ol_tx_pdev_reset_bundle_require,
6229 #endif
6230 };
6231 
6232 static struct cdp_flowctl_ops ol_ops_flowctl = {
6233 	.register_pause_cb = ol_txrx_register_pause_cb,
6234 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
6235 	.set_desc_global_pool_size = ol_tx_set_desc_global_pool_size,
6236 	.dump_flow_pool_info = ol_tx_dump_flow_pool_info,
6237 	.tx_desc_thresh_reached = ol_tx_desc_thresh_reached,
6238 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
6239 };
6240 
6241 #if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)
6242 static struct cdp_lflowctl_ops ol_ops_l_flowctl = {
6243 	.register_tx_flow_control = ol_txrx_register_tx_flow_control,
6244 	.deregister_tx_flow_control_cb = ol_txrx_deregister_tx_flow_control_cb,
6245 	.flow_control_cb = ol_txrx_flow_control_cb,
6246 	.get_tx_resource = ol_txrx_get_tx_resource,
6247 	.ll_set_tx_pause_q_depth = ol_txrx_ll_set_tx_pause_q_depth,
6248 	.vdev_flush = ol_txrx_vdev_flush,
6249 	.vdev_pause = ol_txrx_vdev_pause,
6250 	.vdev_unpause = ol_txrx_vdev_unpause
6251 }; /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
6252 #elif defined(QCA_HL_NETDEV_FLOW_CONTROL)
6253 static struct cdp_lflowctl_ops ol_ops_l_flowctl = {
6254 	.register_tx_flow_control = ol_txrx_register_hl_flow_control,
6255 	.vdev_flush = ol_txrx_vdev_flush,
6256 	.vdev_pause = ol_txrx_vdev_pause,
6257 	.vdev_unpause = ol_txrx_vdev_unpause,
6258 	.set_vdev_os_queue_status = ol_txrx_set_vdev_os_queue_status,
6259 	.set_vdev_tx_desc_limit = ol_txrx_set_vdev_tx_desc_limit
6260 };
6261 #else /* QCA_HL_NETDEV_FLOW_CONTROL */
6262 static struct cdp_lflowctl_ops ol_ops_l_flowctl = { };
6263 #endif
6264 
6265 #ifdef IPA_OFFLOAD
6266 static struct cdp_ipa_ops ol_ops_ipa = {
6267 	.ipa_get_resource = ol_txrx_ipa_uc_get_resource,
6268 	.ipa_set_doorbell_paddr = ol_txrx_ipa_uc_set_doorbell_paddr,
6269 	.ipa_set_active = ol_txrx_ipa_uc_set_active,
6270 	.ipa_op_response = ol_txrx_ipa_uc_op_response,
6271 	.ipa_register_op_cb = ol_txrx_ipa_uc_register_op_cb,
6272 	.ipa_get_stat = ol_txrx_ipa_uc_get_stat,
6273 	.ipa_tx_data_frame = ol_tx_send_ipa_data_frame,
6274 	.ipa_set_uc_tx_partition_base = ol_cfg_set_ipa_uc_tx_partition_base,
6275 	.ipa_enable_autonomy = ol_txrx_ipa_enable_autonomy,
6276 	.ipa_disable_autonomy = ol_txrx_ipa_disable_autonomy,
6277 	.ipa_setup = ol_txrx_ipa_setup,
6278 	.ipa_cleanup = ol_txrx_ipa_cleanup,
6279 	.ipa_setup_iface = ol_txrx_ipa_setup_iface,
6280 	.ipa_cleanup_iface = ol_txrx_ipa_cleanup_iface,
6281 	.ipa_enable_pipes = ol_txrx_ipa_enable_pipes,
6282 	.ipa_disable_pipes = ol_txrx_ipa_disable_pipes,
6283 	.ipa_set_perf_level = ol_txrx_ipa_set_perf_level,
6284 #ifdef FEATURE_METERING
6285 	.ipa_uc_get_share_stats = ol_txrx_ipa_uc_get_share_stats,
6286 	.ipa_uc_set_quota = ol_txrx_ipa_uc_set_quota,
6287 #endif
6288 	.ipa_tx_buf_smmu_mapping = ol_txrx_ipa_tx_buf_smmu_mapping,
6289 	.ipa_tx_buf_smmu_unmapping = ol_txrx_ipa_tx_buf_smmu_unmapping
6290 };
6291 #endif
6292 
6293 #ifdef RECEIVE_OFFLOAD
6294 static struct cdp_rx_offld_ops ol_rx_offld_ops = {
6295 	.register_rx_offld_flush_cb = ol_register_offld_flush_cb,
6296 	.deregister_rx_offld_flush_cb = ol_deregister_offld_flush_cb
6297 };
6298 #endif
6299 
6300 static struct cdp_bus_ops ol_ops_bus = {
6301 	.bus_suspend = ol_txrx_bus_suspend,
6302 	.bus_resume = ol_txrx_bus_resume
6303 };
6304 
6305 #ifdef WLAN_FEATURE_DSRC
6306 static struct cdp_ocb_ops ol_ops_ocb = {
6307 	.set_ocb_chan_info = ol_txrx_set_ocb_chan_info,
6308 	.get_ocb_chan_info = ol_txrx_get_ocb_chan_info
6309 };
6310 #endif
6311 
6312 static struct cdp_throttle_ops ol_ops_throttle = {
6313 #ifdef QCA_SUPPORT_TX_THROTTLE
6314 	.throttle_init_period = ol_tx_throttle_init_period,
6315 	.throttle_set_level = ol_tx_throttle_set_level
6316 #endif /* QCA_SUPPORT_TX_THROTTLE */
6317 };
6318 
6319 static struct cdp_mob_stats_ops ol_ops_mob_stats = {
6320 	.clear_stats = ol_txrx_clear_stats,
6321 	.stats = ol_txrx_stats
6322 };
6323 
6324 static struct cdp_cfg_ops ol_ops_cfg = {
6325 	.set_cfg_rx_fwd_disabled = ol_set_cfg_rx_fwd_disabled,
6326 	.set_cfg_packet_log_enabled = ol_set_cfg_packet_log_enabled,
6327 	.cfg_attach = ol_pdev_cfg_attach,
6328 	.vdev_rx_set_intrabss_fwd = ol_vdev_rx_set_intrabss_fwd,
6329 	.is_rx_fwd_disabled = ol_txrx_is_rx_fwd_disabled,
6330 	.tx_set_is_mgmt_over_wmi_enabled = ol_tx_set_is_mgmt_over_wmi_enabled,
6331 	.is_high_latency = ol_txrx_wrapper_cfg_is_high_latency,
6332 	.set_flow_control_parameters =
6333 		ol_txrx_wrapper_set_flow_control_parameters,
6334 	.set_flow_steering = ol_set_cfg_flow_steering,
6335 	.set_ptp_rx_opt_enabled = ol_set_cfg_ptp_rx_opt_enabled,
6336 	.set_new_htt_msg_format =
6337 		ol_txrx_set_new_htt_msg_format,
6338 	.set_peer_unmap_conf_support = ol_txrx_set_peer_unmap_conf_support,
6339 	.get_peer_unmap_conf_support = ol_txrx_get_peer_unmap_conf_support,
6340 	.set_tx_compl_tsf64 = ol_txrx_set_tx_compl_tsf64,
6341 	.get_tx_compl_tsf64 = ol_txrx_get_tx_compl_tsf64,
6342 };
6343 
6344 static struct cdp_peer_ops ol_ops_peer = {
6345 	.register_peer = ol_txrx_wrapper_register_peer,
6346 	.clear_peer = ol_txrx_clear_peer,
6347 	.find_peer_exist = ol_txrx_find_peer_exist,
6348 	.find_peer_exist_on_vdev = ol_txrx_find_peer_exist_on_vdev,
6349 	.find_peer_exist_on_other_vdev = ol_txrx_find_peer_exist_on_other_vdev,
6350 	.peer_state_update = ol_txrx_wrapper_peer_state_update,
6351 	.get_vdevid = ol_txrx_get_vdevid,
6352 	.get_vdev_by_peer_addr = ol_txrx_wrapper_get_vdev_by_peer_addr,
6353 	.register_ocb_peer = ol_txrx_register_ocb_peer,
6354 	.peer_get_peer_mac_addr = ol_txrx_peer_get_peer_mac_addr,
6355 	.get_peer_state = ol_txrx_get_peer_state,
6356 	.update_ibss_add_peer_num_of_vdev =
6357 		ol_txrx_update_ibss_add_peer_num_of_vdev,
6358 #if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
6359 	.copy_mac_addr_raw = ol_txrx_copy_mac_addr_raw,
6360 	.add_last_real_peer = ol_txrx_add_last_real_peer,
6361 	.is_vdev_restore_last_peer = is_vdev_restore_last_peer,
6362 	.update_last_real_peer = ol_txrx_update_last_real_peer,
6363 	.set_tdls_offchan_enabled = ol_txrx_set_tdls_offchan_enabled,
6364 	.set_peer_as_tdls_peer = ol_txrx_set_peer_as_tdls_peer,
6365 #endif /* CONFIG_HL_SUPPORT */
6366 	.peer_detach_force_delete = ol_txrx_peer_detach_force_delete,
6367 	.peer_flush_frags = ol_txrx_peer_flush_frags,
6368 };
6369 
6370 static struct cdp_tx_delay_ops ol_ops_delay = {
6371 #ifdef QCA_COMPUTE_TX_DELAY
6372 	.tx_delay = ol_tx_delay,
6373 	.tx_delay_hist = ol_tx_delay_hist,
6374 	.tx_packet_count = ol_tx_packet_count,
6375 	.tx_set_compute_interval = ol_tx_set_compute_interval
6376 #endif /* QCA_COMPUTE_TX_DELAY */
6377 };
6378 
6379 static struct cdp_pmf_ops ol_ops_pmf = {
6380 	.get_pn_info = ol_txrx_get_pn_info
6381 };
6382 
6383 static struct cdp_ctrl_ops ol_ops_ctrl = {
6384 	.txrx_get_pldev = ol_get_pldev,
6385 	.txrx_wdi_event_sub = wdi_event_sub,
6386 	.txrx_wdi_event_unsub = wdi_event_unsub,
6387 	.txrx_get_pdev_param = ol_get_pdev_param,
6388 	.txrx_set_pdev_param = ol_set_pdev_param
6389 };
6390 
6391 /* WIN platform specific structures */
static struct cdp_me_ops ol_ops_me = {
	/* EMPTY FOR MCL */
};

static struct cdp_mon_ops ol_ops_mon = {
	/* EMPTY FOR MCL */
};

static struct cdp_host_stats_ops ol_ops_host_stats = {
	/* EMPTY FOR MCL */
};

static struct cdp_wds_ops ol_ops_wds = {
	/* EMPTY FOR MCL */
};

static struct cdp_raw_ops ol_ops_raw = {
	/* EMPTY FOR MCL */
};

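/*
 * Top-level CDP ops table for the OL data path; installed on each soc
 * by ol_txrx_soc_attach() below.
 */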
static struct cdp_ops ol_txrx_ops = {
	.cmn_drv_ops = &ol_ops_cmn,
	.ctrl_ops = &ol_ops_ctrl,
	.me_ops = &ol_ops_me,
	.mon_ops = &ol_ops_mon,
	.host_stats_ops = &ol_ops_host_stats,
	.wds_ops = &ol_ops_wds,
	.raw_ops = &ol_ops_raw,
	.misc_ops = &ol_ops_misc,
	.cfg_ops = &ol_ops_cfg,
	.flowctl_ops = &ol_ops_flowctl,
	.l_flowctl_ops = &ol_ops_l_flowctl,
#ifdef IPA_OFFLOAD
	.ipa_ops = &ol_ops_ipa,
#endif
#ifdef RECEIVE_OFFLOAD
	.rx_offld_ops = &ol_rx_offld_ops,
#endif
	.bus_ops = &ol_ops_bus,
#ifdef WLAN_FEATURE_DSRC
	.ocb_ops = &ol_ops_ocb,
#endif
	.peer_ops = &ol_ops_peer,
	.throttle_ops = &ol_ops_throttle,
	.mob_stats_ops = &ol_ops_mob_stats,
	.delay_ops = &ol_ops_delay,
	.pmf_ops = &ol_ops_pmf,
};

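/**
 * ol_txrx_soc_attach() - allocate and initialize an ol_txrx soc handle
 * @scn_handle: opaque psoc handle stored in the new soc
 * @dp_ol_if_ops: callbacks the data path may invoke on the control plane
 *
 * Return: opaque cdp soc handle on success, NULL on allocation failure
 */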
ol_txrx_soc_handle ol_txrx_soc_attach(void *scn_handle,
				      struct ol_if_ops *dp_ol_if_ops)
{
	struct ol_txrx_soc_t *soc;

	soc = qdf_mem_malloc(sizeof(*soc));
	if (!soc)
		return NULL;

	soc->psoc = scn_handle;
	soc->cdp_soc.ops = &ol_txrx_ops;
	soc->cdp_soc.ol_ops = dp_ol_if_ops;

	return ol_txrx_soc_t_to_cdp_soc_t(soc);
}

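/**
 * ol_txrx_get_new_htt_msg_format() - check if the new HTT message format
 *				      is enabled
 * @pdev: physical device handle
 *
 * Return: true if the new HTT message format is in use, false otherwise
 *	   (including when @pdev is NULL)
 */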
bool ol_txrx_get_new_htt_msg_format(struct ol_txrx_pdev_t *pdev)
{
	if (!pdev) {
		qdf_print("%s: pdev is NULL", __func__);
		return false;
	}

	return pdev->new_htt_msg_format;
}

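/**
 * ol_txrx_set_new_htt_msg_format() - enable/disable the new HTT message
 *				      format on the pdev
 * @val: non-zero to enable the new format, zero to disable it
 *
 * Return: none
 */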
void ol_txrx_set_new_htt_msg_format(uint8_t val)
{
	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
	ol_txrx_pdev_handle pdev;

	if (qdf_unlikely(!soc))
		return;

	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
	if (!pdev) {
		qdf_print("%s: pdev is NULL", __func__);
		return;
	}
	pdev->new_htt_msg_format = val;
}

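/**
 * ol_txrx_get_peer_unmap_conf_support() - check whether peer unmap
 *					   confirmation support is enabled
 *
 * Return: true if peer unmap confirmation is enabled on the pdev,
 *	   false otherwise (including when soc/pdev are unavailable)
 */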
bool ol_txrx_get_peer_unmap_conf_support(void)
{
	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
	ol_txrx_pdev_handle pdev;

	if (qdf_unlikely(!soc))
		return false;

	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
	if (!pdev) {
		qdf_print("%s: pdev is NULL", __func__);
		return false;
	}
	return pdev->enable_peer_unmap_conf_support;
}

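/**
 * ol_txrx_set_peer_unmap_conf_support() - enable/disable peer unmap
 *					   confirmation support
 * @val: true to enable, false to disable
 *
 * Return: none
 */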
void ol_txrx_set_peer_unmap_conf_support(bool val)
{
	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
	ol_txrx_pdev_handle pdev;

	if (qdf_unlikely(!soc))
		return;

	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
	if (!pdev) {
		qdf_print("%s: pdev is NULL", __func__);
		return;
	}
	pdev->enable_peer_unmap_conf_support = val;
}

#ifdef WLAN_FEATURE_TSF_PLUS
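/**
 * ol_txrx_get_tx_compl_tsf64() - check whether 64-bit TSF reporting in
 *				  tx completions is enabled
 *
 * Return: true if 64-bit TSF tx completion reporting is enabled,
 *	   false otherwise (including when soc/pdev are unavailable)
 */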
bool ol_txrx_get_tx_compl_tsf64(void)
{
	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
	ol_txrx_pdev_handle pdev;

	if (qdf_unlikely(!soc))
		return false;

	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
	if (!pdev) {
		qdf_print("%s: pdev is NULL", __func__);
		return false;
	}
	return pdev->enable_tx_compl_tsf64;
}

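/**
 * ol_txrx_set_tx_compl_tsf64() - enable/disable 64-bit TSF reporting in
 *				  tx completions
 * @val: true to enable, false to disable
 *
 * Return: none
 */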
void ol_txrx_set_tx_compl_tsf64(bool val)
{
	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
	ol_txrx_pdev_handle pdev;

	if (qdf_unlikely(!soc))
		return;

	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
	if (!pdev) {
		ol_txrx_err("pdev is NULL");
		return;
	}

	pdev->enable_tx_compl_tsf64 = val;
}
#else
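/* Stubs used when WLAN_FEATURE_TSF_PLUS is not enabled */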
bool ol_txrx_get_tx_compl_tsf64(void)
{
	return false;
}

void ol_txrx_set_tx_compl_tsf64(bool val)
{
}
#endif