xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.c (revision 2f4b444fb7e689b83a4ab0e7b3b38f0bf4def8e0)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "hal_hw_headers.h"
20 #include "dp_types.h"
21 #include "dp_rx.h"
22 #include "dp_tx.h"
23 #include "dp_peer.h"
24 #include "hal_rx.h"
25 #include "hal_api.h"
26 #include "qdf_nbuf.h"
27 #ifdef MESH_MODE_SUPPORT
28 #include "if_meta_hdr.h"
29 #endif
30 #include "dp_internal.h"
31 #include "dp_ipa.h"
32 #include "dp_hist.h"
33 #include "dp_rx_buffer_pool.h"
34 #ifdef WIFI_MONITOR_SUPPORT
35 #include "dp_htt.h"
36 #include <dp_mon.h>
37 #endif
38 #ifdef FEATURE_WDS
39 #include "dp_txrx_wds.h"
40 #endif
41 
42 #ifdef DUP_RX_DESC_WAR
43 void dp_rx_dump_info_and_assert(struct dp_soc *soc,
44 				hal_ring_handle_t hal_ring,
45 				hal_ring_desc_t ring_desc,
46 				struct dp_rx_desc *rx_desc)
47 {
48 	void *hal_soc = soc->hal_soc;
49 
50 	hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc);
51 	dp_rx_desc_dump(rx_desc);
52 }
53 #else
54 void dp_rx_dump_info_and_assert(struct dp_soc *soc,
55 				hal_ring_handle_t hal_ring_hdl,
56 				hal_ring_desc_t ring_desc,
57 				struct dp_rx_desc *rx_desc)
58 {
59 	hal_soc_handle_t hal_soc = soc->hal_soc;
60 
61 	dp_rx_desc_dump(rx_desc);
62 	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
63 	hal_srng_dump_ring(hal_soc, hal_ring_hdl);
64 	qdf_assert_always(0);
65 }
66 #endif
67 
68 #ifndef QCA_HOST_MODE_WIFI_DISABLED
69 #ifdef RX_DESC_SANITY_WAR
70 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
71 			     hal_ring_handle_t hal_ring_hdl,
72 			     hal_ring_desc_t ring_desc,
73 			     struct dp_rx_desc *rx_desc)
74 {
75 	uint8_t return_buffer_manager;
76 
77 	if (qdf_unlikely(!rx_desc)) {
78 		/*
79 		 * This is an unlikely case where the cookie obtained
80 		 * from the ring_desc is invalid and hence we are not
81 		 * able to find the corresponding rx_desc
82 		 */
83 		goto fail;
84 	}
85 
86 	return_buffer_manager = hal_rx_ret_buf_manager_get(hal_soc, ring_desc);
87 	if (qdf_unlikely(!(return_buffer_manager ==
88 				HAL_RX_BUF_RBM_SW1_BM(soc->wbm_sw0_bm_id) ||
89 			 return_buffer_manager ==
90 				HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id)))) {
91 		goto fail;
92 	}
93 
94 	return QDF_STATUS_SUCCESS;
95 
96 fail:
97 	DP_STATS_INC(soc, rx.err.invalid_cookie, 1);
98 	dp_err("Ring Desc:");
99 	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl,
100 				ring_desc);
101 	return QDF_STATUS_E_NULL_VALUE;
102 
103 }
104 #endif
105 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
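/*
 * Illustrative usage sketch (not part of the driver build): the sanity
 * check above is intended to run inside a REO reap loop, right after the
 * cookie-to-descriptor lookup, along the lines of
 *
 *	rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
 *	if (dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl, ring_desc,
 *			      rx_desc) != QDF_STATUS_SUCCESS)
 *		continue;
 *
 * dp_rx_cookie_2_va_rxdma_buf() is the usual cookie lookup helper; the
 * exact caller and lookup routine may differ per target family.
 */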
106 
107 /**
108  * dp_pdev_frag_alloc_and_map() - Allocate frag for desc buffer and map
109  *
110  * @dp_soc: struct dp_soc *
111  * @nbuf_frag_info_t: nbuf frag info
112  * @dp_pdev: struct dp_pdev *
113  * @rx_desc_pool: Rx desc pool
114  *
115  * Return: QDF_STATUS
116  */
117 #ifdef DP_RX_MON_MEM_FRAG
118 static inline QDF_STATUS
119 dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
120 			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
121 			   struct dp_pdev *dp_pdev,
122 			   struct rx_desc_pool *rx_desc_pool)
123 {
124 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
125 
126 	(nbuf_frag_info_t->virt_addr).vaddr =
127 			qdf_frag_alloc(rx_desc_pool->buf_size);
128 
129 	if (!((nbuf_frag_info_t->virt_addr).vaddr)) {
130 		dp_err("Frag alloc failed");
131 		DP_STATS_INC(dp_pdev, replenish.frag_alloc_fail, 1);
132 		return QDF_STATUS_E_NOMEM;
133 	}
134 
135 	ret = qdf_mem_map_page(dp_soc->osdev,
136 			       (nbuf_frag_info_t->virt_addr).vaddr,
137 			       QDF_DMA_FROM_DEVICE,
138 			       rx_desc_pool->buf_size,
139 			       &nbuf_frag_info_t->paddr);
140 
141 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
142 		qdf_frag_free((nbuf_frag_info_t->virt_addr).vaddr);
143 		dp_err("Frag map failed");
144 		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
145 		return QDF_STATUS_E_FAULT;
146 	}
147 
148 	return QDF_STATUS_SUCCESS;
149 }
150 #else
151 static inline QDF_STATUS
152 dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
153 			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
154 			   struct dp_pdev *dp_pdev,
155 			   struct rx_desc_pool *rx_desc_pool)
156 {
157 	return QDF_STATUS_SUCCESS;
158 }
159 #endif /* DP_RX_MON_MEM_FRAG */
160 
161 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
162 /**
163  * dp_rx_refill_ring_record_entry() - Record an entry into refill_ring history
164  * @soc: Datapath soc structure
165  * @ring_num: Refill ring number
166  * @num_req: number of buffers requested for refill
167  * @num_refill: number of buffers refilled
168  *
169  * Return: None
170  */
171 static inline void
172 dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
173 			       hal_ring_handle_t hal_ring_hdl,
174 			       uint32_t num_req, uint32_t num_refill)
175 {
176 	struct dp_refill_info_record *record;
177 	uint32_t idx;
178 	uint32_t tp;
179 	uint32_t hp;
180 
181 	if (qdf_unlikely(ring_num >= MAX_PDEV_CNT ||
182 			 !soc->rx_refill_ring_history[ring_num]))
183 		return;
184 
185 	idx = dp_history_get_next_index(&soc->rx_refill_ring_history[ring_num]->index,
186 					DP_RX_REFILL_HIST_MAX);
187 
188 	/* No NULL check needed for record since it's an array */
189 	record = &soc->rx_refill_ring_history[ring_num]->entry[idx];
190 
191 	hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &tp, &hp);
192 	record->timestamp = qdf_get_log_timestamp();
193 	record->num_req = num_req;
194 	record->num_refill = num_refill;
195 	record->hp = hp;
196 	record->tp = tp;
197 }
198 #else
199 static inline void
200 dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
201 			       hal_ring_handle_t hal_ring_hdl,
202 			       uint32_t num_req, uint32_t num_refill)
203 {
204 }
205 #endif
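/*
 * Debugging note (sketch, field access assumed from the record layout
 * above): the refill history behaves as a per-ring circular log, so the
 * most recently written entry can be inspected roughly as
 *
 *	uint32_t idx = qdf_atomic_read(
 *			&soc->rx_refill_ring_history[ring_num]->index) %
 *			DP_RX_REFILL_HIST_MAX;
 *	struct dp_refill_info_record *rec =
 *			&soc->rx_refill_ring_history[ring_num]->entry[idx];
 *
 * The precise wrap/masking semantics live in dp_history_get_next_index(),
 * so treat this only as a reading aid, not a supported interface.
 */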
206 
207 /**
208  * dp_pdev_nbuf_alloc_and_map_replenish() - Allocate nbuf for desc buffer and map
209  *
210  * @dp_soc: struct dp_soc *
211  * @mac_id: Mac id
212  * @num_entries_avail: num_entries_avail
213  * @nbuf_frag_info_t: nbuf frag info
214  * @dp_pdev: struct dp_pdev *
215  * @rx_desc_pool: Rx desc pool
216  *
217  * Return: QDF_STATUS
218  */
219 static inline QDF_STATUS
220 dp_pdev_nbuf_alloc_and_map_replenish(struct dp_soc *dp_soc,
221 				     uint32_t mac_id,
222 				     uint32_t num_entries_avail,
223 				     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
224 				     struct dp_pdev *dp_pdev,
225 				     struct rx_desc_pool *rx_desc_pool)
226 {
227 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
228 
229 	(nbuf_frag_info_t->virt_addr).nbuf =
230 		dp_rx_buffer_pool_nbuf_alloc(dp_soc,
231 					     mac_id,
232 					     rx_desc_pool,
233 					     num_entries_avail);
234 	if (!((nbuf_frag_info_t->virt_addr).nbuf)) {
235 		dp_err("nbuf alloc failed");
236 		DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
237 		return QDF_STATUS_E_NOMEM;
238 	}
239 
240 	ret = dp_rx_buffer_pool_nbuf_map(dp_soc, rx_desc_pool,
241 					 nbuf_frag_info_t);
242 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
243 		dp_rx_buffer_pool_nbuf_free(dp_soc,
244 			(nbuf_frag_info_t->virt_addr).nbuf, mac_id);
245 		dp_err("nbuf map failed");
246 		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
247 		return QDF_STATUS_E_FAULT;
248 	}
249 
250 	nbuf_frag_info_t->paddr =
251 		qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);
252 
253 	dp_ipa_handle_rx_buf_smmu_mapping(dp_soc,
254 			       (qdf_nbuf_t)((nbuf_frag_info_t->virt_addr).nbuf),
255 			       rx_desc_pool->buf_size,
256 			       true);
257 
258 	ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
259 			     &nbuf_frag_info_t->paddr,
260 			     rx_desc_pool);
261 	if (ret == QDF_STATUS_E_FAILURE) {
262 		DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
263 		return QDF_STATUS_E_ADDRNOTAVAIL;
264 	}
265 
266 	return QDF_STATUS_SUCCESS;
267 }
268 
269 /*
270  * __dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
271  *			       called during dp rx initialization
272  *			       and at the end of dp_rx_process.
273  *
274  * @dp_soc: core txrx main context
275  * @mac_id: mac_id which is one of 3 mac_ids
276  * @dp_rxdma_srng: dp rxdma circular ring
277  * @rx_desc_pool: Pointer to free Rx descriptor pool
278  * @num_req_buffers: number of buffers to be replenished
279  * @desc_list: list of descs if called from dp_rx_process
280  *	       or NULL during dp rx initialization or out of buffer
281  *	       interrupt.
282  * @tail: tail of descs list
283  * @func_name: name of the caller function
284  * Return: return success or failure
285  */
286 QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
287 				struct dp_srng *dp_rxdma_srng,
288 				struct rx_desc_pool *rx_desc_pool,
289 				uint32_t num_req_buffers,
290 				union dp_rx_desc_list_elem_t **desc_list,
291 				union dp_rx_desc_list_elem_t **tail,
292 				const char *func_name)
293 {
294 	uint32_t num_alloc_desc;
295 	uint16_t num_desc_to_free = 0;
296 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
297 	uint32_t num_entries_avail;
298 	uint32_t count;
299 	int sync_hw_ptr = 1;
300 	struct dp_rx_nbuf_frag_info nbuf_frag_info = {0};
301 	void *rxdma_ring_entry;
302 	union dp_rx_desc_list_elem_t *next;
303 	QDF_STATUS ret;
304 	void *rxdma_srng;
305 
306 	rxdma_srng = dp_rxdma_srng->hal_srng;
307 
308 	if (qdf_unlikely(!dp_pdev)) {
309 		dp_rx_err("%pK: pdev is null for mac_id = %d",
310 			  dp_soc, mac_id);
311 		return QDF_STATUS_E_FAILURE;
312 	}
313 
314 	if (qdf_unlikely(!rxdma_srng)) {
315 		dp_rx_debug("%pK: rxdma srng not initialized", dp_soc);
316 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
317 		return QDF_STATUS_E_FAILURE;
318 	}
319 
320 	dp_rx_debug("%pK: requested %d buffers for replenish",
321 		    dp_soc, num_req_buffers);
322 
323 	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
324 
325 	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
326 						   rxdma_srng,
327 						   sync_hw_ptr);
328 
329 	dp_rx_debug("%pK: no of available entries in rxdma ring: %d",
330 		    dp_soc, num_entries_avail);
331 
332 	if (!(*desc_list) && (num_entries_avail >
333 		((dp_rxdma_srng->num_entries * 3) / 4))) {
334 		num_req_buffers = num_entries_avail;
335 	} else if (num_entries_avail < num_req_buffers) {
336 		num_desc_to_free = num_req_buffers - num_entries_avail;
337 		num_req_buffers = num_entries_avail;
338 	}
339 
340 	if (qdf_unlikely(!num_req_buffers)) {
341 		num_desc_to_free = num_req_buffers;
342 		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
343 		goto free_descs;
344 	}
345 
346 	/*
347 	 * if desc_list is NULL, allocate the descs from freelist
348 	 */
349 	if (!(*desc_list)) {
350 		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
351 							  rx_desc_pool,
352 							  num_req_buffers,
353 							  desc_list,
354 							  tail);
355 
356 		if (!num_alloc_desc) {
357 			dp_rx_err("%pK: no free rx_descs in freelist", dp_soc);
358 			DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
359 					num_req_buffers);
360 			hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
361 			return QDF_STATUS_E_NOMEM;
362 		}
363 
364 		dp_rx_debug("%pK: %d rx desc allocated", dp_soc, num_alloc_desc);
365 		num_req_buffers = num_alloc_desc;
366 	}
367 
368 
369 	count = 0;
370 
371 	while (count < num_req_buffers) {
372 		/* Flag is set during pdev rx_desc_pool initialization */
373 		if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
374 			ret = dp_pdev_frag_alloc_and_map(dp_soc,
375 							 &nbuf_frag_info,
376 							 dp_pdev,
377 							 rx_desc_pool);
378 		else
379 			ret = dp_pdev_nbuf_alloc_and_map_replenish(dp_soc,
380 								   mac_id,
381 					num_entries_avail, &nbuf_frag_info,
382 					dp_pdev, rx_desc_pool);
383 
384 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
385 			if (qdf_unlikely(ret  == QDF_STATUS_E_FAULT))
386 				continue;
387 			break;
388 		}
389 
390 		count++;
391 
392 		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
393 							 rxdma_srng);
394 		qdf_assert_always(rxdma_ring_entry);
395 
396 		next = (*desc_list)->next;
397 
398 		/* Flag is set during pdev rx_desc_pool initialization */
399 		if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
400 			dp_rx_desc_frag_prep(&((*desc_list)->rx_desc),
401 					     &nbuf_frag_info);
402 		else
403 			dp_rx_desc_prep(&((*desc_list)->rx_desc),
404 					&nbuf_frag_info);
405 
406 		/* rx_desc.in_use should be zero at this time */
407 		qdf_assert_always((*desc_list)->rx_desc.in_use == 0);
408 
409 		(*desc_list)->rx_desc.in_use = 1;
410 		(*desc_list)->rx_desc.in_err_state = 0;
411 		dp_rx_desc_update_dbg_info(&(*desc_list)->rx_desc,
412 					   func_name, RX_DESC_REPLENISHED);
413 		dp_verbose_debug("rx_netbuf=%pK, paddr=0x%llx, cookie=%d",
414 				 nbuf_frag_info.virt_addr.nbuf,
415 				 (unsigned long long)(nbuf_frag_info.paddr),
416 				 (*desc_list)->rx_desc.cookie);
417 
418 		hal_rxdma_buff_addr_info_set(dp_soc->hal_soc, rxdma_ring_entry,
419 					     nbuf_frag_info.paddr,
420 						(*desc_list)->rx_desc.cookie,
421 						rx_desc_pool->owner);
422 
423 		*desc_list = next;
424 
425 	}
426 
427 	dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id, rxdma_srng,
428 				       num_req_buffers, count);
429 
430 	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
431 
432 	dp_rx_schedule_refill_thread(dp_soc);
433 
434 	dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u",
435 			 count, num_desc_to_free);
436 
437 	/* No need to count the number of bytes received during replenish.
438 	 * Therefore set replenish.pkts.bytes as 0.
439 	 */
440 	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
441 
442 free_descs:
443 	DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
444 	/*
445 	 * add any available free desc back to the free list
446 	 */
447 	if (*desc_list)
448 		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
449 			mac_id, rx_desc_pool);
450 
451 	return QDF_STATUS_SUCCESS;
452 }
453 
454 qdf_export_symbol(__dp_rx_buffers_replenish);
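/*
 * Illustrative caller sketch (assumption): at the end of an RX reap pass
 * the consumed descriptors are typically handed back through desc_list /
 * tail and the ring is topped up in one call, e.g.
 *
 *	union dp_rx_desc_list_elem_t *desc_list = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *
 *	(reap loop adds each consumed rx_desc to desc_list / tail)
 *
 *	dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, rx_desc_pool,
 *				num_reaped, &desc_list, &tail);
 *
 * dp_rx_buffers_replenish() is the wrapper that supplies the caller name
 * (func_name) to __dp_rx_buffers_replenish(); num_reaped and the srng /
 * pool arguments are placeholders for whatever the caller reaped from.
 */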
455 
456 /*
457  * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
458  *				pkts to RAW mode simulation to
459  *				decapsulate the pkt.
460  *
461  * @vdev: vdev on which RAW mode is enabled
462  * @nbuf_list: list of RAW pkts to process
463  * @peer: peer object from which the pkt is rx
464  *
465  * Return: void
466  */
467 void
468 dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
469 					struct dp_peer *peer)
470 {
471 	qdf_nbuf_t deliver_list_head = NULL;
472 	qdf_nbuf_t deliver_list_tail = NULL;
473 	qdf_nbuf_t nbuf;
474 
475 	nbuf = nbuf_list;
476 	while (nbuf) {
477 		qdf_nbuf_t next = qdf_nbuf_next(nbuf);
478 
479 		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
480 
481 		DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
482 		DP_STATS_INC_PKT(peer, rx.raw, 1, qdf_nbuf_len(nbuf));
483 		/*
484 		 * reset the chfrag_start and chfrag_end bits in nbuf cb
485 		 * as this is a non-amsdu pkt and RAW mode simulation expects
486 		 * these bits to be 0 for non-amsdu pkt.
487 		 */
488 		if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
489 			 qdf_nbuf_is_rx_chfrag_end(nbuf)) {
490 			qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
491 			qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
492 		}
493 
494 		nbuf = next;
495 	}
496 
497 	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
498 				 &deliver_list_tail, peer->mac_addr.raw);
499 
500 	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
501 }
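/*
 * Idiom note (illustrative): DP_RX_LIST_APPEND() above is the standard
 * head/tail pattern used across the RX path to build an nbuf delivery
 * list with O(1) appends before handing it to the OS shim:
 *
 *	qdf_nbuf_t head = NULL, tail = NULL;
 *
 *	DP_RX_LIST_APPEND(head, tail, nbuf);
 *	DP_RX_LIST_APPEND(head, tail, next_nbuf);
 *	vdev->osif_rx(vdev->osif_vdev, head);
 *
 * nbuf / next_nbuf stand in for frames that are already fully prepared
 * for delivery.
 */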
502 
503 #ifndef QCA_HOST_MODE_WIFI_DISABLED
504 
505 #ifndef FEATURE_WDS
506 void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
507 		    struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
508 {
509 }
510 #endif
511 
512 /*
513  * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic
514  *
515  * @soc: core txrx main context
516  * @ta_peer	: source peer entry
517  * @rx_tlv_hdr	: start address of rx tlvs
518  * @nbuf	: nbuf that has to be intrabss forwarded
519  *
520  * Return: bool: true if it is forwarded else false
521  */
522 bool
523 dp_rx_intrabss_fwd(struct dp_soc *soc,
524 			struct dp_peer *ta_peer,
525 			uint8_t *rx_tlv_hdr,
526 			qdf_nbuf_t nbuf,
527 			struct hal_rx_msdu_metadata msdu_metadata)
528 {
529 	uint16_t len;
530 	uint8_t is_frag;
531 	uint16_t da_peer_id = HTT_INVALID_PEER;
532 	struct dp_peer *da_peer = NULL;
533 	bool is_da_bss_peer = false;
534 	struct dp_ast_entry *ast_entry;
535 	qdf_nbuf_t nbuf_copy;
536 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
537 	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
538 	struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
539 					tid_stats.tid_rx_stats[ring_id][tid];
540 
541 	/* check if the destination peer is available in peer table
542 	 * and also check if the source peer and destination peer
543 	 * belong to the same vap and destination peer is not bss peer.
544 	 */
545 
546 	if ((qdf_nbuf_is_da_valid(nbuf) && !qdf_nbuf_is_da_mcbc(nbuf))) {
547 		if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
548 						    nbuf))
549 			return true;
550 
551 		ast_entry = soc->ast_table[msdu_metadata.da_idx];
552 		if (!ast_entry)
553 			return false;
554 
555 		if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
556 			ast_entry->is_active = TRUE;
557 			return false;
558 		}
559 
560 		da_peer_id = ast_entry->peer_id;
561 
562 		if (da_peer_id == HTT_INVALID_PEER)
563 			return false;
564 		/* TA peer cannot be same as peer (DA) on which AST is present;
565 		 * this indicates a change in topology and that AST entries
566 		 * are yet to be updated.
567 		 */
568 		if (da_peer_id == ta_peer->peer_id)
569 			return false;
570 
571 		if (ast_entry->vdev_id != ta_peer->vdev->vdev_id)
572 			return false;
573 
574 		da_peer = dp_peer_get_ref_by_id(soc, da_peer_id,
575 						DP_MOD_ID_RX);
576 		if (!da_peer)
577 			return false;
578 
579 		/* If the source or destination peer is in the isolation
580 		 * list then don't forward; instead push to the bridge stack.
581 		 */
582 		if (dp_get_peer_isolation(ta_peer) ||
583 		    dp_get_peer_isolation(da_peer)) {
584 			dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
585 			return false;
586 		}
587 
588 		is_da_bss_peer = da_peer->bss_peer;
589 		dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
590 
591 		if (!is_da_bss_peer) {
592 			len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
593 			is_frag = qdf_nbuf_is_frag(nbuf);
594 			memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
595 
596 			/* linearize the nbuf just before we send to
597 			 * dp_tx_send()
598 			 */
599 			if (qdf_unlikely(is_frag)) {
600 				if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
601 					return false;
602 
603 				nbuf = qdf_nbuf_unshare(nbuf);
604 				if (!nbuf) {
605 					DP_STATS_INC_PKT(ta_peer,
606 							 rx.intra_bss.fail,
607 							 1,
608 							 len);
609 					/* return true even though the pkt is
610 					 * not forwarded. Basically skb_unshare
611 					 * failed and we want to continue with
612 					 * next nbuf.
613 					 */
614 					tid_stats->fail_cnt[INTRABSS_DROP]++;
615 					return true;
616 				}
617 			}
618 
619 			if (!dp_tx_send((struct cdp_soc_t *)soc,
620 					ta_peer->vdev->vdev_id, nbuf)) {
621 				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
622 						 len);
623 				return true;
624 			} else {
625 				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
626 						len);
627 				tid_stats->fail_cnt[INTRABSS_DROP]++;
628 				return false;
629 			}
630 		}
631 	}
632 	/* if it is a broadcast pkt (e.g. ARP) and it is not its own
633 	 * source, then clone the pkt and send the cloned pkt for
634 	 * intra BSS forwarding and the original pkt up the network stack.
635 	 * Note: how do we handle multicast pkts? Do we forward
636 	 * all multicast pkts as is, or let a higher layer module
637 	 * like igmpsnoop decide whether to forward or not with
638 	 * Mcast enhancement?
639 	 */
640 	else if (qdf_unlikely((qdf_nbuf_is_da_mcbc(nbuf) &&
641 			       !ta_peer->bss_peer))) {
642 		if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
643 						    nbuf))
644 			return true;
645 
646 		if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf))
647 			goto end;
648 
649 		/* If the source peer is in the isolation list
650 		 * then don't forward; instead push to the bridge stack
651 		 */
652 		if (dp_get_peer_isolation(ta_peer))
653 			goto end;
654 
655 		nbuf_copy = qdf_nbuf_copy(nbuf);
656 		if (!nbuf_copy)
657 			goto end;
658 
659 		len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
660 		if (dp_tx_send((struct cdp_soc_t *)soc,
661 			       ta_peer->vdev->vdev_id, nbuf_copy)) {
662 			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, len);
663 			tid_stats->fail_cnt[INTRABSS_DROP]++;
664 			qdf_nbuf_free(nbuf_copy);
665 		} else {
666 			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, len);
667 			tid_stats->intrabss_cnt++;
668 		}
669 	}
670 
671 end:
672 	/* return false as we have to still send the original pkt
673 	 * up the stack
674 	 */
675 	return false;
676 }
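/*
 * Decision summary for the intra-BSS logic above (informational only):
 *
 *	unicast (DA valid, not mcast/bcast):
 *		AST lookup via msdu_metadata.da_idx -> da_peer_id -> da_peer
 *		same vdev, DA not the BSS peer, neither peer isolated
 *			-> linearize/unshare if fragmented, dp_tx_send(),
 *			   return true (consumed)
 *	mcast/bcast and TA is not the BSS peer:
 *		qdf_nbuf_copy() and dp_tx_send() the copy,
 *		return false so the original is still delivered up the stack
 *
 * Every other case falls through with false and the frame is only handed
 * to the network stack.
 */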
677 
678 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
679 
680 #ifdef MESH_MODE_SUPPORT
681 
682 /**
683  * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
684  *
685  * @vdev: DP Virtual device handle
686  * @nbuf: Buffer pointer
687  * @rx_tlv_hdr: start of rx tlv header
688  * @peer: pointer to peer
689  *
690  * This function allocates memory for mesh receive stats and fills the
691  * required stats. It stores the memory address in the skb cb.
692  *
693  * Return: void
694  */
695 
696 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
697 				uint8_t *rx_tlv_hdr, struct dp_peer *peer)
698 {
699 	struct mesh_recv_hdr_s *rx_info = NULL;
700 	uint32_t pkt_type;
701 	uint32_t nss;
702 	uint32_t rate_mcs;
703 	uint32_t bw;
704 	uint8_t primary_chan_num;
705 	uint32_t center_chan_freq;
706 	struct dp_soc *soc = vdev->pdev->soc;
707 
708 	/* fill recv mesh stats */
709 	rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));
710 
711 	/* upper layers are responsible for freeing this memory */
712 
713 	if (!rx_info) {
714 		dp_rx_err("%pK: Memory allocation failed for mesh rx stats",
715 			  vdev->pdev->soc);
716 		DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
717 		return;
718 	}
719 
720 	rx_info->rs_flags = MESH_RXHDR_VER1;
721 	if (qdf_nbuf_is_rx_chfrag_start(nbuf))
722 		rx_info->rs_flags |= MESH_RX_FIRST_MSDU;
723 
724 	if (qdf_nbuf_is_rx_chfrag_end(nbuf))
725 		rx_info->rs_flags |= MESH_RX_LAST_MSDU;
726 
727 	if (hal_rx_tlv_get_is_decrypted(soc->hal_soc, rx_tlv_hdr)) {
728 		rx_info->rs_flags |= MESH_RX_DECRYPTED;
729 		rx_info->rs_keyix = hal_rx_msdu_get_keyid(soc->hal_soc,
730 							  rx_tlv_hdr);
731 		if (vdev->osif_get_key)
732 			vdev->osif_get_key(vdev->osif_vdev,
733 					&rx_info->rs_decryptkey[0],
734 					&peer->mac_addr.raw[0],
735 					rx_info->rs_keyix);
736 	}
737 
738 	rx_info->rs_snr = peer->stats.rx.snr;
739 	rx_info->rs_rssi = rx_info->rs_snr + DP_DEFAULT_NOISEFLOOR;
740 
741 	soc = vdev->pdev->soc;
742 	primary_chan_num = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr);
743 	center_chan_freq = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr) >> 16;
744 
745 	if (soc->cdp_soc.ol_ops && soc->cdp_soc.ol_ops->freq_to_band) {
746 		rx_info->rs_band = soc->cdp_soc.ol_ops->freq_to_band(
747 							soc->ctrl_psoc,
748 							vdev->pdev->pdev_id,
749 							center_chan_freq);
750 	}
751 	rx_info->rs_channel = primary_chan_num;
752 	pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr);
753 	rate_mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr);
754 	bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr);
755 	nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
756 	rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
757 				(bw << 24);
758 
759 	qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);
760 
761 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
762 		FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x, snr %x"),
763 						rx_info->rs_flags,
764 						rx_info->rs_rssi,
765 						rx_info->rs_channel,
766 						rx_info->rs_ratephy1,
767 						rx_info->rs_keyix,
768 						rx_info->rs_snr);
769 
770 }
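/*
 * Layout note (derived from the packing above): rs_ratephy1 carries
 * rate_mcs in bits [7:0], nss in bits [15:8], pkt_type in bits [23:16]
 * and bw in bits [31:24], so a consumer of the mesh rx header could
 * unpack it as
 *
 *	rate_mcs = rs_ratephy1 & 0xff;
 *	nss      = (rs_ratephy1 >> 8) & 0xff;
 *	pkt_type = (rs_ratephy1 >> 16) & 0xff;
 *	bw       = (rs_ratephy1 >> 24) & 0xff;
 */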
771 
772 /**
773  * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
774  * dp_rx_filter_mesh_packets() - Filters out unwanted mesh packets
775  * @vdev: DP Virtual device handle
776  * @nbuf: Buffer pointer
777  * @rx_tlv_hdr: start of rx tlv header
778  *
779  * This checks if the received packet matches any filter-out
780  * category and drops the packet if it matches.
781  *
782  * Return: status (0 indicates drop, 1 indicates no drop)
783  */
784 
785 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
786 					uint8_t *rx_tlv_hdr)
787 {
788 	union dp_align_mac_addr mac_addr;
789 	struct dp_soc *soc = vdev->pdev->soc;
790 
791 	if (qdf_unlikely(vdev->mesh_rx_filter)) {
792 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
793 			if (hal_rx_mpdu_get_fr_ds(soc->hal_soc,
794 						  rx_tlv_hdr))
795 				return  QDF_STATUS_SUCCESS;
796 
797 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
798 			if (hal_rx_mpdu_get_to_ds(soc->hal_soc,
799 						  rx_tlv_hdr))
800 				return  QDF_STATUS_SUCCESS;
801 
802 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
803 			if (!hal_rx_mpdu_get_fr_ds(soc->hal_soc,
804 						   rx_tlv_hdr) &&
805 			    !hal_rx_mpdu_get_to_ds(soc->hal_soc,
806 						   rx_tlv_hdr))
807 				return  QDF_STATUS_SUCCESS;
808 
809 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
810 			if (hal_rx_mpdu_get_addr1(soc->hal_soc,
811 						  rx_tlv_hdr,
812 					&mac_addr.raw[0]))
813 				return QDF_STATUS_E_FAILURE;
814 
815 			if (!qdf_mem_cmp(&mac_addr.raw[0],
816 					&vdev->mac_addr.raw[0],
817 					QDF_MAC_ADDR_SIZE))
818 				return  QDF_STATUS_SUCCESS;
819 		}
820 
821 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
822 			if (hal_rx_mpdu_get_addr2(soc->hal_soc,
823 						  rx_tlv_hdr,
824 						  &mac_addr.raw[0]))
825 				return QDF_STATUS_E_FAILURE;
826 
827 			if (!qdf_mem_cmp(&mac_addr.raw[0],
828 					&vdev->mac_addr.raw[0],
829 					QDF_MAC_ADDR_SIZE))
830 				return  QDF_STATUS_SUCCESS;
831 		}
832 	}
833 
834 	return QDF_STATUS_E_FAILURE;
835 }
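/*
 * Configuration sketch (illustrative, flags defined in the mesh headers):
 * mesh_rx_filter is a bitmap of MESH_FILTER_OUT_* flags, so a vdev set up
 * as, for example,
 *
 *	vdev->mesh_rx_filter = MESH_FILTER_OUT_FROMDS | MESH_FILTER_OUT_RA;
 *
 * makes dp_rx_filter_mesh_packets() return QDF_STATUS_SUCCESS (drop) for
 * any From-DS frame and for any frame whose RA equals the vdev MAC, and
 * QDF_STATUS_E_FAILURE (deliver) otherwise.
 */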
836 
837 #else
838 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
839 				uint8_t *rx_tlv_hdr, struct dp_peer *peer)
840 {
841 }
842 
843 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
844 					uint8_t *rx_tlv_hdr)
845 {
846 	return QDF_STATUS_E_FAILURE;
847 }
848 
849 #endif
850 
851 #ifdef FEATURE_NAC_RSSI
852 /**
853  * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
854  * @soc: DP SOC handle
855  * @mpdu: mpdu for which peer is invalid
856  * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
857  * pool_id have the same mapping)
858  *
859  * Return: 0 in all cases
860  */
861 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
862 				   uint8_t mac_id)
863 {
864 	struct dp_invalid_peer_msg msg;
865 	struct dp_vdev *vdev = NULL;
866 	struct dp_pdev *pdev = NULL;
867 	struct ieee80211_frame *wh;
868 	qdf_nbuf_t curr_nbuf, next_nbuf;
869 	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
870 	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);
871 
872 	if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) {
873 		dp_rx_debug("%pK: Drop decapped frames", soc);
874 		goto free;
875 	}
876 
877 	wh = (struct ieee80211_frame *)rx_pkt_hdr;
878 
879 	if (!DP_FRAME_IS_DATA(wh)) {
880 		dp_rx_debug("%pK: NAWDS valid only for data frames", soc);
881 		goto free;
882 	}
883 
884 	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
885 		dp_rx_err("%pK: Invalid nbuf length", soc);
886 		goto free;
887 	}
888 
889 	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
890 
891 	if (!pdev || qdf_unlikely(pdev->is_pdev_down)) {
892 		dp_rx_err("%pK: PDEV %s", soc, !pdev ? "not found" : "down");
893 		goto free;
894 	}
895 
896 	if (dp_monitor_filter_neighbour_peer(pdev, rx_pkt_hdr) ==
897 	    QDF_STATUS_SUCCESS)
898 		return 0;
899 
900 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
901 		if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
902 				QDF_MAC_ADDR_SIZE) == 0) {
903 			goto out;
904 		}
905 	}
906 
907 	if (!vdev) {
908 		dp_rx_err("%pK: VDEV not found", soc);
909 		goto free;
910 	}
911 
912 out:
913 	msg.wh = wh;
914 	qdf_nbuf_pull_head(mpdu, soc->rx_pkt_tlv_size);
915 	msg.nbuf = mpdu;
916 	msg.vdev_id = vdev->vdev_id;
917 
918 	/*
919 	 * NOTE: Only valid for HKv1.
920 	 * If smart monitor mode is enabled on RE, we are getting invalid
921 	 * peer frames with RA as STA mac of RE and the TA not matching
922 	 * with any NAC list or the BSSID. Such frames need to be dropped
923 	 * in order to avoid HM_WDS false addition.
924 	 */
925 	if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer) {
926 		if (dp_monitor_drop_inv_peer_pkts(vdev) == QDF_STATUS_SUCCESS) {
927 			dp_rx_warn("%pK: Drop inv peer pkts with STA RA:%pm",
928 				   soc, wh->i_addr1);
929 			goto free;
930 		}
931 		pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(
932 				(struct cdp_ctrl_objmgr_psoc *)soc->ctrl_psoc,
933 				pdev->pdev_id, &msg);
934 	}
935 
936 free:
937 	/* Drop and free packet */
938 	curr_nbuf = mpdu;
939 	while (curr_nbuf) {
940 		next_nbuf = qdf_nbuf_next(curr_nbuf);
941 		qdf_nbuf_free(curr_nbuf);
942 		curr_nbuf = next_nbuf;
943 	}
944 
945 	return 0;
946 }
947 
948 /**
949  * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
950  * @soc: DP SOC handle
951  * @mpdu: mpdu for which peer is invalid
952  * @mpdu_done: if an mpdu is completed
953  * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
954  * pool_id have the same mapping)
955  *
956  * Return: None
957  */
958 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
959 					qdf_nbuf_t mpdu, bool mpdu_done,
960 					uint8_t mac_id)
961 {
962 	/* Only trigger the process when mpdu is completed */
963 	if (mpdu_done)
964 		dp_rx_process_invalid_peer(soc, mpdu, mac_id);
965 }
966 #else
967 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
968 				   uint8_t mac_id)
969 {
970 	qdf_nbuf_t curr_nbuf, next_nbuf;
971 	struct dp_pdev *pdev;
972 	struct dp_vdev *vdev = NULL;
973 	struct ieee80211_frame *wh;
974 	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
975 	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);
976 
977 	wh = (struct ieee80211_frame *)rx_pkt_hdr;
978 
979 	if (!DP_FRAME_IS_DATA(wh)) {
980 		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
981 				   "only for data frames");
982 		goto free;
983 	}
984 
985 	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
986 		dp_rx_info_rl("%pK: Invalid nbuf length", soc);
987 		goto free;
988 	}
989 
990 	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
991 	if (!pdev) {
992 		dp_rx_info_rl("%pK: PDEV not found", soc);
993 		goto free;
994 	}
995 
996 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
997 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
998 		if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
999 				QDF_MAC_ADDR_SIZE) == 0) {
1000 			qdf_spin_unlock_bh(&pdev->vdev_list_lock);
1001 			goto out;
1002 		}
1003 	}
1004 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
1005 
1006 	if (!vdev) {
1007 		dp_rx_info_rl("%pK: VDEV not found", soc);
1008 		goto free;
1009 	}
1010 
1011 out:
1012 	if (soc->cdp_soc.ol_ops->rx_invalid_peer)
1013 		soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh);
1014 free:
1015 	/* reset the head and tail pointers */
1016 	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1017 	if (pdev) {
1018 		pdev->invalid_peer_head_msdu = NULL;
1019 		pdev->invalid_peer_tail_msdu = NULL;
1020 	}
1021 
1022 	/* Drop and free packet */
1023 	curr_nbuf = mpdu;
1024 	while (curr_nbuf) {
1025 		next_nbuf = qdf_nbuf_next(curr_nbuf);
1026 		qdf_nbuf_free(curr_nbuf);
1027 		curr_nbuf = next_nbuf;
1028 	}
1029 
1030 	/* Reset the head and tail pointers */
1031 	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1032 	if (pdev) {
1033 		pdev->invalid_peer_head_msdu = NULL;
1034 		pdev->invalid_peer_tail_msdu = NULL;
1035 	}
1036 
1037 	return 0;
1038 }
1039 
1040 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
1041 					qdf_nbuf_t mpdu, bool mpdu_done,
1042 					uint8_t mac_id)
1043 {
1044 	/* Process the nbuf */
1045 	dp_rx_process_invalid_peer(soc, mpdu, mac_id);
1046 }
1047 #endif
1048 
1049 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1050 
1051 #ifdef RECEIVE_OFFLOAD
1052 /**
1053  * dp_rx_print_offload_info() - Print offload info from RX TLV
1054  * @soc: dp soc handle
1055  * @msdu: MSDU for which the offload info is to be printed
1056  *
1057  * Return: None
1058  */
1059 static void dp_rx_print_offload_info(struct dp_soc *soc,
1060 				     qdf_nbuf_t msdu)
1061 {
1062 	dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------");
1063 	dp_verbose_debug("lro_eligible 0x%x",
1064 			 QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu));
1065 	dp_verbose_debug("pure_ack 0x%x", QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu));
1066 	dp_verbose_debug("chksum 0x%x", QDF_NBUF_CB_RX_TCP_CHKSUM(msdu));
1067 	dp_verbose_debug("TCP seq num 0x%x", QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu));
1068 	dp_verbose_debug("TCP ack num 0x%x", QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu));
1069 	dp_verbose_debug("TCP window 0x%x", QDF_NBUF_CB_RX_TCP_WIN(msdu));
1070 	dp_verbose_debug("TCP protocol 0x%x", QDF_NBUF_CB_RX_TCP_PROTO(msdu));
1071 	dp_verbose_debug("TCP offset 0x%x", QDF_NBUF_CB_RX_TCP_OFFSET(msdu));
1072 	dp_verbose_debug("toeplitz 0x%x", QDF_NBUF_CB_RX_FLOW_ID(msdu));
1073 	dp_verbose_debug("---------------------------------------------------------");
1074 }
1075 
1076 /**
1077  * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb
1078  * @soc: DP SOC handle
1079  * @rx_tlv: RX TLV received for the msdu
1080  * @msdu: msdu for which GRO info needs to be filled
1081  * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets
1082  *
1083  * Return: None
1084  */
1085 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
1086 			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
1087 {
1088 	struct hal_offload_info offload_info;
1089 
1090 	if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx))
1091 		return;
1092 
1093 	if (hal_rx_tlv_get_offload_info(soc->hal_soc, rx_tlv, &offload_info))
1094 		return;
1095 
1096 	*rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1;
1097 
1098 	QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = offload_info.lro_eligible;
1099 	QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) = offload_info.tcp_pure_ack;
1100 	QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
1101 			hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
1102 						  rx_tlv);
1103 	QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) = offload_info.tcp_seq_num;
1104 	QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) = offload_info.tcp_ack_num;
1105 	QDF_NBUF_CB_RX_TCP_WIN(msdu) = offload_info.tcp_win;
1106 	QDF_NBUF_CB_RX_TCP_PROTO(msdu) = offload_info.tcp_proto;
1107 	QDF_NBUF_CB_RX_IPV6_PROTO(msdu) = offload_info.ipv6_proto;
1108 	QDF_NBUF_CB_RX_TCP_OFFSET(msdu) = offload_info.tcp_offset;
1109 	QDF_NBUF_CB_RX_FLOW_ID(msdu) = offload_info.flow_id;
1110 
1111 	dp_rx_print_offload_info(soc, msdu);
1112 }
1113 #endif /* RECEIVE_OFFLOAD */
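/*
 * Usage note (sketch): the fields stashed in skb->cb above are read back
 * later through the same accessors by the delivery/aggregation path; a
 * GRO-aware consumer would typically gate aggregation on something like
 *
 *	if (QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) &&
 *	    !QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu))
 *		(aggregate using the TCP seq/ack/window fields from the cb)
 *
 * The actual consumer lives in the OS shim and is not shown here.
 */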
1114 
1115 /**
1116  * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
1117  *
1118  * @soc: DP soc handle
1119  * @nbuf: pointer to msdu.
1120  * @mpdu_len: mpdu length
1121  * @l3_pad_len: L3 padding length by HW
1122  *
1123  * Return: returns true if nbuf is the last msdu of the mpdu, else returns false.
1124  */
1125 static inline bool dp_rx_adjust_nbuf_len(struct dp_soc *soc,
1126 					 qdf_nbuf_t nbuf,
1127 					 uint16_t *mpdu_len,
1128 					 uint32_t l3_pad_len)
1129 {
1130 	bool last_nbuf;
1131 	uint32_t pkt_hdr_size;
1132 
1133 	pkt_hdr_size = soc->rx_pkt_tlv_size + l3_pad_len;
1134 
1135 	if ((*mpdu_len + pkt_hdr_size) > RX_DATA_BUFFER_SIZE) {
1136 		qdf_nbuf_set_pktlen(nbuf, RX_DATA_BUFFER_SIZE);
1137 		last_nbuf = false;
1138 		*mpdu_len -= (RX_DATA_BUFFER_SIZE - pkt_hdr_size);
1139 	} else {
1140 		qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + pkt_hdr_size));
1141 		last_nbuf = true;
1142 		*mpdu_len = 0;
1143 	}
1144 
1145 	return last_nbuf;
1146 }
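/*
 * Worked example (numbers are illustrative only): with a 2048 byte
 * RX_DATA_BUFFER_SIZE, a 384 byte rx_pkt_tlv_size, l3_pad_len of 2 and an
 * incoming mpdu_len of 3000:
 *
 *	1st buffer: 3000 + 386 > 2048 -> pktlen 2048, last_nbuf = false,
 *		    mpdu_len = 3000 - (2048 - 386) = 1338
 *	2nd buffer: 1338 + 384 <= 2048 -> pktlen 1722, last_nbuf = true,
 *		    mpdu_len = 0
 *
 * i.e. each buffer's TLV (and any L3 pad) is charged against that buffer
 * only, and the residual MPDU length is carried across the chain.
 */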
1147 
1148 /**
1149  * dp_get_l3_hdr_pad_len() - get L3 header padding length.
1150  *
1151  * @soc: DP soc handle
1152  * @nbuf: pointer to msdu.
1153  *
1154  * Return: returns padding length in bytes.
1155  */
1156 static inline uint32_t dp_get_l3_hdr_pad_len(struct dp_soc *soc,
1157 					     qdf_nbuf_t nbuf)
1158 {
1159 	uint32_t l3_hdr_pad = 0;
1160 	uint8_t *rx_tlv_hdr;
1161 	struct hal_rx_msdu_metadata msdu_metadata;
1162 
1163 	while (nbuf) {
1164 		if (!qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
1165 			/* a scattered msdu ends where the continuation bit is 0 */
1166 			rx_tlv_hdr = qdf_nbuf_data(nbuf);
1167 			hal_rx_msdu_metadata_get(soc->hal_soc,
1168 						 rx_tlv_hdr,
1169 						 &msdu_metadata);
1170 			l3_hdr_pad = msdu_metadata.l3_hdr_pad;
1171 			break;
1172 		}
1173 		nbuf = nbuf->next;
1174 	}
1175 
1176 	return l3_hdr_pad;
1177 }
1178 
1179 /**
1180  * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
1181  *		     multiple nbufs.
1182  * @soc: DP SOC handle
1183  * @nbuf: pointer to the first msdu of an amsdu.
1184  *
1185  * This function implements the creation of RX frag_list for cases
1186  * where an MSDU is spread across multiple nbufs.
1187  *
1188  * Return: returns the head nbuf which contains complete frag_list.
1189  */
1190 qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf)
1191 {
1192 	qdf_nbuf_t parent, frag_list, next = NULL;
1193 	uint16_t frag_list_len = 0;
1194 	uint16_t mpdu_len;
1195 	bool last_nbuf;
1196 	uint32_t l3_hdr_pad_offset = 0;
1197 
1198 	/*
1199 	 * Use the msdu len obtained from the REO entry descriptor instead,
1200 	 * since there are cases where the RX PKT TLV is corrupted while the
1201 	 * msdu_len from the REO descriptor is right for non-raw RX scatter msdus.
1202 	 */
1203 	mpdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
1204 
1205 	/*
1206 	 * this is a case where the complete msdu fits in one single nbuf.
1207 	 * in this case HW sets both start and end bit and we only need to
1208 	 * reset these bits for RAW mode simulator to decap the pkt
1209 	 */
1210 	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
1211 					qdf_nbuf_is_rx_chfrag_end(nbuf)) {
1212 		qdf_nbuf_set_pktlen(nbuf, mpdu_len + soc->rx_pkt_tlv_size);
1213 		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
1214 		return nbuf;
1215 	}
1216 
1217 	l3_hdr_pad_offset = dp_get_l3_hdr_pad_len(soc, nbuf);
1218 	/*
1219 	 * This is a case where we have multiple msdus (A-MSDU) spread across
1220 	 * multiple nbufs. here we create a fraglist out of these nbufs.
1221 	 *
1222 	 * the moment we encounter a nbuf with continuation bit set we
1223 	 * know for sure we have an MSDU which is spread across multiple
1224 	 * nbufs. We loop through and reap nbufs till we reach last nbuf.
1225 	 */
1226 	parent = nbuf;
1227 	frag_list = nbuf->next;
1228 	nbuf = nbuf->next;
1229 
1230 	/*
1231 	 * set the start bit in the first nbuf we encounter with continuation
1232 	 * bit set. This has the proper mpdu length set as it is the first
1233 	 * msdu of the mpdu. this becomes the parent nbuf and the subsequent
1234 	 * nbufs will form the frag_list of the parent nbuf.
1235 	 */
1236 	qdf_nbuf_set_rx_chfrag_start(parent, 1);
1237 	/*
1238 	 * L3 header padding is only needed for the 1st buffer
1239 	 * in a scattered msdu
1240 	 */
1241 	last_nbuf = dp_rx_adjust_nbuf_len(soc, parent, &mpdu_len,
1242 					  l3_hdr_pad_offset);
1243 
1244 	/*
1245 	 * MSDU cont bit is set but reported MPDU length can fit
1246 	 * in to single buffer
1247 	 *
1248 	 * Increment error stats and avoid SG list creation
1249 	 */
1250 	if (last_nbuf) {
1251 		DP_STATS_INC(soc, rx.err.msdu_continuation_err, 1);
1252 		qdf_nbuf_pull_head(parent,
1253 				   soc->rx_pkt_tlv_size + l3_hdr_pad_offset);
1254 		return parent;
1255 	}
1256 
1257 	/*
1258 	 * this is where we set the length of the fragments which are
1259 	 * associated to the parent nbuf. We iterate through the frag_list
1260 	 * till we hit the last_nbuf of the list.
1261 	 */
1262 	do {
1263 		last_nbuf = dp_rx_adjust_nbuf_len(soc, nbuf, &mpdu_len, 0);
1264 		qdf_nbuf_pull_head(nbuf,
1265 				   soc->rx_pkt_tlv_size);
1266 		frag_list_len += qdf_nbuf_len(nbuf);
1267 
1268 		if (last_nbuf) {
1269 			next = nbuf->next;
1270 			nbuf->next = NULL;
1271 			break;
1272 		}
1273 
1274 		nbuf = nbuf->next;
1275 	} while (!last_nbuf);
1276 
1277 	qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
1278 	qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
1279 	parent->next = next;
1280 
1281 	qdf_nbuf_pull_head(parent,
1282 			   soc->rx_pkt_tlv_size + l3_hdr_pad_offset);
1283 	return parent;
1284 }
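/*
 * Resulting layout (informational sketch): after dp_rx_sg_create() the
 * caller holds a single parent nbuf whose ext/frag list carries the rest
 * of the scattered MSDU:
 *
 *	parent (1st buffer, TLV + L3 pad already pulled)
 *	  +-> frag_list: buf2 -> buf3 -> ... -> last buf (next = NULL)
 *	parent->next points at the first nbuf of the following MPDU, if any
 *
 * qdf_nbuf_len(parent) plus the accumulated frag_list_len together cover
 * the MPDU length reported by the REO descriptor.
 */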
1285 
1286 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1287 
1288 #ifdef QCA_PEER_EXT_STATS
1289 /*
1290  * dp_rx_compute_tid_delay - Compute per TID delay stats
1291  * @stats: pointer to per TID cdp_delay_tid_stats
1292  * @nbuf: NBuffer
1293  *
1294  * Return: Void
1295  */
1296 void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
1297 			     qdf_nbuf_t nbuf)
1298 {
1299 	struct cdp_delay_rx_stats  *rx_delay = &stats->rx_delay;
1300 	uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
1301 
1302 	dp_hist_update_stats(&rx_delay->to_stack_delay, to_stack);
1303 }
1304 #endif /* QCA_PEER_EXT_STATS */
1305 
1306 /**
1307  * dp_rx_compute_delay() - Compute and fill in all timestamps
1308  *				to pass in correct fields
1309  *
1310  * @vdev: vdev handle
1311  * @nbuf: rx nbuf for which the delay is computed
1312  *
1313  * Return: none
1314  */
1315 void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1316 {
1317 	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
1318 	int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get());
1319 	uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
1320 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
1321 	uint32_t interframe_delay =
1322 		(uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp);
1323 
1324 	dp_update_delay_stats(vdev->pdev, to_stack, tid,
1325 			      CDP_DELAY_STATS_REAP_STACK, ring_id);
1326 	/*
1327 	 * Update interframe delay stats calculated at deliver_data_ol point.
1328 	 * Value of vdev->prev_rx_deliver_tstamp will be 0 for 1st frame, so
1329 	 * interframe delay will not be calculated correctly for the 1st frame.
1330 	 * On the other hand, this helps in avoiding an extra per packet check
1331 	 * of vdev->prev_rx_deliver_tstamp.
1332 	 */
1333 	dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
1334 			      CDP_DELAY_STATS_RX_INTERFRAME, ring_id);
1335 	vdev->prev_rx_deliver_tstamp = current_ts;
1336 }
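/*
 * Worked example (illustrative): if the previous frame on this vdev was
 * delivered at t = 1000 ms, the current frame is processed at
 * t = 1012 ms, and the nbuf timestamp delta is 3 ms, then
 *
 *	to_stack         = 3  -> CDP_DELAY_STATS_REAP_STACK bucket
 *	interframe_delay = 12 -> CDP_DELAY_STATS_RX_INTERFRAME bucket
 *
 * and prev_rx_deliver_tstamp advances to 1012 for the next frame.
 */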
1337 
1338 /**
1339  * dp_rx_drop_nbuf_list() - drop an nbuf list
1340  * @pdev: dp pdev reference
1341  * @buf_list: buffer list to be dropped
1342  *
1343  * Return: int (number of bufs dropped)
1344  */
1345 static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev,
1346 				       qdf_nbuf_t buf_list)
1347 {
1348 	struct cdp_tid_rx_stats *stats = NULL;
1349 	uint8_t tid = 0, ring_id = 0;
1350 	int num_dropped = 0;
1351 	qdf_nbuf_t buf, next_buf;
1352 
1353 	buf = buf_list;
1354 	while (buf) {
1355 		ring_id = QDF_NBUF_CB_RX_CTX_ID(buf);
1356 		next_buf = qdf_nbuf_queue_next(buf);
1357 		tid = qdf_nbuf_get_tid_val(buf);
1358 		if (qdf_likely(pdev)) {
1359 			stats = &pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
1360 			stats->fail_cnt[INVALID_PEER_VDEV]++;
1361 			stats->delivered_to_stack--;
1362 		}
1363 		qdf_nbuf_free(buf);
1364 		buf = next_buf;
1365 		num_dropped++;
1366 	}
1367 
1368 	return num_dropped;
1369 }
1370 
1371 #ifdef QCA_SUPPORT_WDS_EXTENDED
1372 /**
1373  * dp_rx_deliver_to_stack_ext() - Deliver to netdev per sta
1374  * @soc: core txrx main context
1375  * @vdev: vdev
1376  * @peer: peer
1377  * @nbuf_head: skb list head
1378  *
1379  * Return: true if packet is delivered to netdev per STA.
1380  */
1381 static inline bool
1382 dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
1383 			   struct dp_peer *peer, qdf_nbuf_t nbuf_head)
1384 {
1385 	/*
1386 	 * When extended WDS is disabled, frames are sent to AP netdevice.
1387 	 */
1388 	if (qdf_likely(!vdev->wds_ext_enabled))
1389 		return false;
1390 
1391 	/*
1392 	 * There can be 2 cases:
1393 	 * 1. Send frame to parent netdev if it's not for netdev per STA
1394 	 * 2. If frame is meant for netdev per STA:
1395 	 *    a. Send frame to appropriate netdev using registered fp.
1396 	 *    b. If fp is NULL, drop the frames.
1397 	 */
1398 	if (!peer->wds_ext.init)
1399 		return false;
1400 
1401 	if (peer->osif_rx)
1402 		peer->osif_rx(peer->wds_ext.osif_peer, nbuf_head);
1403 	else
1404 		dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);
1405 
1406 	return true;
1407 }
1408 
1409 #else
1410 static inline bool
1411 dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
1412 			   struct dp_peer *peer, qdf_nbuf_t nbuf_head)
1413 {
1414 	return false;
1415 }
1416 #endif
1417 
1418 #ifdef PEER_CACHE_RX_PKTS
1419 /**
1420  * dp_rx_flush_rx_cached() - flush cached rx frames
1421  * @peer: peer
1422  * @drop: flag to drop frames or forward to net stack
1423  *
1424  * Return: None
1425  */
1426 void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
1427 {
1428 	struct dp_peer_cached_bufq *bufqi;
1429 	struct dp_rx_cached_buf *cache_buf = NULL;
1430 	ol_txrx_rx_fp data_rx = NULL;
1431 	int num_buff_elem;
1432 	QDF_STATUS status;
1433 
1434 	if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
1435 		qdf_atomic_dec(&peer->flush_in_progress);
1436 		return;
1437 	}
1438 
1439 	qdf_spin_lock_bh(&peer->peer_info_lock);
1440 	if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx)
1441 		data_rx = peer->vdev->osif_rx;
1442 	else
1443 		drop = true;
1444 	qdf_spin_unlock_bh(&peer->peer_info_lock);
1445 
1446 	bufqi = &peer->bufq_info;
1447 
1448 	qdf_spin_lock_bh(&bufqi->bufq_lock);
1449 	qdf_list_remove_front(&bufqi->cached_bufq,
1450 			      (qdf_list_node_t **)&cache_buf);
1451 	while (cache_buf) {
1452 		num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(
1453 								cache_buf->buf);
1454 		bufqi->entries -= num_buff_elem;
1455 		qdf_spin_unlock_bh(&bufqi->bufq_lock);
1456 		if (drop) {
1457 			bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1458 							      cache_buf->buf);
1459 		} else {
1460 			/* Flush the cached frames to OSIF DEV */
1461 			status = data_rx(peer->vdev->osif_vdev, cache_buf->buf);
1462 			if (status != QDF_STATUS_SUCCESS)
1463 				bufqi->dropped = dp_rx_drop_nbuf_list(
1464 							peer->vdev->pdev,
1465 							cache_buf->buf);
1466 		}
1467 		qdf_mem_free(cache_buf);
1468 		cache_buf = NULL;
1469 		qdf_spin_lock_bh(&bufqi->bufq_lock);
1470 		qdf_list_remove_front(&bufqi->cached_bufq,
1471 				      (qdf_list_node_t **)&cache_buf);
1472 	}
1473 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
1474 	qdf_atomic_dec(&peer->flush_in_progress);
1475 }
1476 
1477 /**
1478  * dp_rx_enqueue_rx() - cache rx frames
1479  * @peer: peer
1480  * @rx_buf_list: cache buffer list
1481  *
1482  * Return: None
1483  */
1484 static QDF_STATUS
1485 dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
1486 {
1487 	struct dp_rx_cached_buf *cache_buf;
1488 	struct dp_peer_cached_bufq *bufqi = &peer->bufq_info;
1489 	int num_buff_elem;
1490 
1491 	dp_debug_rl("bufq->curr %d bufq->drops %d", bufqi->entries,
1492 		    bufqi->dropped);
1493 	if (!peer->valid) {
1494 		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1495 						      rx_buf_list);
1496 		return QDF_STATUS_E_INVAL;
1497 	}
1498 
1499 	qdf_spin_lock_bh(&bufqi->bufq_lock);
1500 	if (bufqi->entries >= bufqi->thresh) {
1501 		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1502 						      rx_buf_list);
1503 		qdf_spin_unlock_bh(&bufqi->bufq_lock);
1504 		return QDF_STATUS_E_RESOURCES;
1505 	}
1506 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
1507 
1508 	num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list);
1509 
1510 	cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf));
1511 	if (!cache_buf) {
1512 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1513 			  "Failed to allocate buf to cache rx frames");
1514 		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1515 						      rx_buf_list);
1516 		return QDF_STATUS_E_NOMEM;
1517 	}
1518 
1519 	cache_buf->buf = rx_buf_list;
1520 
1521 	qdf_spin_lock_bh(&bufqi->bufq_lock);
1522 	qdf_list_insert_back(&bufqi->cached_bufq,
1523 			     &cache_buf->node);
1524 	bufqi->entries += num_buff_elem;
1525 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
1526 
1527 	return QDF_STATUS_SUCCESS;
1528 }
1529 
1530 static inline
1531 bool dp_rx_is_peer_cache_bufq_supported(void)
1532 {
1533 	return true;
1534 }
1535 #else
1536 static inline
1537 bool dp_rx_is_peer_cache_bufq_supported(void)
1538 {
1539 	return false;
1540 }
1541 
1542 static inline QDF_STATUS
1543 dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
1544 {
1545 	return QDF_STATUS_SUCCESS;
1546 }
1547 #endif
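/*
 * Lifecycle sketch (assumption, based on the helpers above): while a peer
 * has no registered OSIF rx callback, the delivery path parks frames via
 * dp_rx_enqueue_rx(); once registration completes (or the peer is torn
 * down) the control path is expected to drain the queue with
 *
 *	dp_rx_flush_rx_cached(peer, false);	(deliver cached frames)
 *	dp_rx_flush_rx_cached(peer, true);	(or drop them on teardown)
 *
 * so that cached_bufq never outlives the peer object.
 */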
1548 
1549 #ifndef DELIVERY_TO_STACK_STATUS_CHECK
1550 /**
1551  * dp_rx_check_delivery_to_stack() - Deliver pkts to network
1552  * using the appropriate call back functions.
1553  * @soc: soc
1554  * @vdev: vdev
1555  * @peer: peer
1556  * @nbuf_head: skb list head
1557  * @nbuf_tail: skb list tail
1558  *
1559  * Return: None
1560  */
1561 static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
1562 					  struct dp_vdev *vdev,
1563 					  struct dp_peer *peer,
1564 					  qdf_nbuf_t nbuf_head)
1565 {
1566 	if (qdf_unlikely(dp_rx_deliver_to_stack_ext(soc, vdev,
1567 						    peer, nbuf_head)))
1568 		return;
1569 
1570 	/* Function pointer initialized only when FISA is enabled */
1571 	if (vdev->osif_fisa_rx)
1572 		/* on failure send it via regular path */
1573 		vdev->osif_fisa_rx(soc, vdev, nbuf_head);
1574 	else
1575 		vdev->osif_rx(vdev->osif_vdev, nbuf_head);
1576 }
1577 
1578 #else
1579 /**
1580  * dp_rx_check_delivery_to_stack() - Deliver pkts to network
1581  * using the appropriate call back functions.
1582  * @soc: soc
1583  * @vdev: vdev
1584  * @peer: peer
1585  * @nbuf_head: skb list head
1586  * @nbuf_tail: skb list tail
1587  *
1588  * Check the return status of the call back function and drop
1589  * the packets if the return status indicates a failure.
1590  *
1591  * Return: None
1592  */
1593 static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
1594 					  struct dp_vdev *vdev,
1595 					  struct dp_peer *peer,
1596 					  qdf_nbuf_t nbuf_head)
1597 {
1598 	int num_nbuf = 0;
1599 	QDF_STATUS ret_val = QDF_STATUS_E_FAILURE;
1600 
1601 	/* Function pointer initialized only when FISA is enabled */
1602 	if (vdev->osif_fisa_rx)
1603 		/* on failure send it via regular path */
1604 		ret_val = vdev->osif_fisa_rx(soc, vdev, nbuf_head);
1605 	else if (vdev->osif_rx)
1606 		ret_val = vdev->osif_rx(vdev->osif_vdev, nbuf_head);
1607 
1608 	if (!QDF_IS_STATUS_SUCCESS(ret_val)) {
1609 		num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);
1610 		DP_STATS_INC(soc, rx.err.rejected, num_nbuf);
1611 		if (peer)
1612 			DP_STATS_DEC(peer, rx.to_stack.num, num_nbuf);
1613 	}
1614 }
1615 #endif /* ifdef DELIVERY_TO_STACK_STATUS_CHECK */
1616 
1617 /*
1618  * dp_rx_validate_rx_callbacks() - validate rx callbacks
1619  * @soc: DP soc
1620  * @vdev: DP vdev handle
1621  * @peer: pointer to the peer object
1622  * @nbuf_head: skb list head
1623  *
1624  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
1625  *			QDF_STATUS_E_FAILURE
1626  */
1627 static inline QDF_STATUS
1628 dp_rx_validate_rx_callbacks(struct dp_soc *soc,
1629 			    struct dp_vdev *vdev,
1630 			    struct dp_peer *peer,
1631 			    qdf_nbuf_t nbuf_head)
1632 {
1633 	int num_nbuf;
1634 
1635 	if (qdf_unlikely(!vdev || vdev->delete.pending)) {
1636 		num_nbuf = dp_rx_drop_nbuf_list(NULL, nbuf_head);
1637 		/*
1638 		 * This is a special case where vdev is invalid,
1639 		 * so we cannot know the pdev to which this packet
1640 		 * belonged. Hence we update the soc rx error stats.
1641 		 */
1642 		DP_STATS_INC(soc, rx.err.invalid_vdev, num_nbuf);
1643 		return QDF_STATUS_E_FAILURE;
1644 	}
1645 
1646 	/*
1647 	 * highly unlikely to have a vdev without a registered rx
1648 	 * callback function. if so let us free the nbuf_list.
1649 	 */
1650 	if (qdf_unlikely(!vdev->osif_rx)) {
1651 		if (peer && dp_rx_is_peer_cache_bufq_supported()) {
1652 			dp_rx_enqueue_rx(peer, nbuf_head);
1653 		} else {
1654 			num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev,
1655 							nbuf_head);
1656 			DP_STATS_DEC(peer, rx.to_stack.num, num_nbuf);
1657 		}
1658 		return QDF_STATUS_E_FAILURE;
1659 	}
1660 
1661 	return QDF_STATUS_SUCCESS;
1662 }
1663 
1664 QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc,
1665 				  struct dp_vdev *vdev,
1666 				  struct dp_peer *peer,
1667 				  qdf_nbuf_t nbuf_head,
1668 				  qdf_nbuf_t nbuf_tail)
1669 {
1670 	if (dp_rx_validate_rx_callbacks(soc, vdev, peer, nbuf_head) !=
1671 					QDF_STATUS_SUCCESS)
1672 		return QDF_STATUS_E_FAILURE;
1673 
1674 	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
1675 			(vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
1676 		vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
1677 				&nbuf_tail, peer->mac_addr.raw);
1678 	}
1679 
1680 	dp_rx_check_delivery_to_stack(soc, vdev, peer, nbuf_head);
1681 
1682 	return QDF_STATUS_SUCCESS;
1683 }
1684 
1685 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
1686 QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc,
1687 					struct dp_vdev *vdev,
1688 					struct dp_peer *peer,
1689 					qdf_nbuf_t nbuf_head,
1690 					qdf_nbuf_t nbuf_tail)
1691 {
1692 	if (dp_rx_validate_rx_callbacks(soc, vdev, peer, nbuf_head) !=
1693 					QDF_STATUS_SUCCESS)
1694 		return QDF_STATUS_E_FAILURE;
1695 
1696 	vdev->osif_rx_eapol(vdev->osif_vdev, nbuf_head);
1697 
1698 	return QDF_STATUS_SUCCESS;
1699 }
1700 #endif
1701 
1702 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1703 #ifdef VDEV_PEER_PROTOCOL_COUNT
1704 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, peer) \
1705 { \
1706 	qdf_nbuf_t nbuf_local; \
1707 	struct dp_peer *peer_local; \
1708 	struct dp_vdev *vdev_local = vdev_hdl; \
1709 	do { \
1710 		if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \
1711 			break; \
1712 		nbuf_local = nbuf; \
1713 		peer_local = peer; \
1714 		if (qdf_unlikely(qdf_nbuf_is_frag((nbuf_local)))) \
1715 			break; \
1716 		else if (qdf_unlikely(qdf_nbuf_is_raw_frame((nbuf_local)))) \
1717 			break; \
1718 		dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \
1719 						       (nbuf_local), \
1720 						       (peer_local), 0, 1); \
1721 	} while (0); \
1722 }
1723 #else
1724 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, peer)
1725 #endif
1726 
1727 /**
1728  * dp_rx_msdu_stats_update() - update per msdu stats.
1729  * @soc: core txrx main context
1730  * @nbuf: pointer to the first msdu of an amsdu.
1731  * @rx_tlv_hdr: pointer to the start of RX TLV headers.
1732  * @peer: pointer to the peer object.
1733  * @ring_id: reo dest ring number on which pkt is reaped.
1734  * @tid_stats: per tid rx stats.
1735  *
1736  * update all the per msdu stats for that nbuf.
1737  * Return: void
1738  */
1739 void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
1740 			     uint8_t *rx_tlv_hdr, struct dp_peer *peer,
1741 			     uint8_t ring_id,
1742 			     struct cdp_tid_rx_stats *tid_stats)
1743 {
1744 	bool is_ampdu, is_not_amsdu;
1745 	uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
1746 	struct dp_vdev *vdev = peer->vdev;
1747 	qdf_ether_header_t *eh;
1748 	uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
1749 
1750 	dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, peer);
1751 	is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
1752 			qdf_nbuf_is_rx_chfrag_end(nbuf);
1753 
1754 	DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1, msdu_len);
1755 	DP_STATS_INCC(peer, rx.non_amsdu_cnt, 1, is_not_amsdu);
1756 	DP_STATS_INCC(peer, rx.amsdu_cnt, 1, !is_not_amsdu);
1757 	DP_STATS_INCC(peer, rx.rx_retries, 1, qdf_nbuf_is_rx_retry_flag(nbuf));
1758 
1759 	tid_stats->msdu_cnt++;
1760 	if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) &&
1761 			 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
1762 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1763 		DP_STATS_INC_PKT(peer, rx.multicast, 1, msdu_len);
1764 		tid_stats->mcast_msdu_cnt++;
1765 		if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
1766 			DP_STATS_INC_PKT(peer, rx.bcast, 1, msdu_len);
1767 			tid_stats->bcast_msdu_cnt++;
1768 		}
1769 	}
1770 
1771 	/*
1772 	 * Currently we can return from here, as similar stats are
1773 	 * updated at the per-PPDU level instead of the per-MSDU level.
1774 	 */
1775 	if (!soc->process_rx_status)
1776 		return;
1777 
1778 	/*
1779 	 * TODO - For WCN7850 this field is present in ring_desc
1780 	 * Try to use ring desc instead of tlv.
1781 	 */
1782 	is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(soc->hal_soc, rx_tlv_hdr);
1783 	DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu);
1784 	DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu));
1785 
1786 	sgi = hal_rx_tlv_sgi_get(soc->hal_soc, rx_tlv_hdr);
1787 	mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr);
1788 	tid = qdf_nbuf_get_tid_val(nbuf);
1789 	bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr);
1790 	reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc,
1791 							      rx_tlv_hdr);
1792 	nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
1793 	pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr);
1794 
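	/*
	 * Per-MCS counters below: an in-range MCS increments its own
	 * bucket, while an out-of-range MCS for the given preamble type
	 * is accounted in the last bucket (MAX_MCS - 1).
	 */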
1795 	DP_STATS_INCC(peer, rx.rx_mpdu_cnt[mcs], 1,
1796 		      ((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
1797 	DP_STATS_INCC(peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1,
1798 		      ((mcs >= MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
1799 	DP_STATS_INC(peer, rx.bw[bw], 1);
1800 	/*
1801 	 * Increment the [nss - 1] entry in the NSS counter array only if
1802 	 * nss > 0 and pkt_type is 11N/11AC/11AX.
1803 	 */
1804 	if (nss > 0 && (pkt_type == DOT11_N ||
1805 			pkt_type == DOT11_AC ||
1806 			pkt_type == DOT11_AX))
1807 		DP_STATS_INC(peer, rx.nss[nss - 1], 1);
1808 
1809 	DP_STATS_INC(peer, rx.sgi_count[sgi], 1);
1810 	DP_STATS_INCC(peer, rx.err.mic_err, 1,
1811 		      hal_rx_tlv_mic_err_get(soc->hal_soc, rx_tlv_hdr));
1812 	DP_STATS_INCC(peer, rx.err.decrypt_err, 1,
1813 		      hal_rx_tlv_decrypt_err_get(soc->hal_soc, rx_tlv_hdr));
1814 
1815 	DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
1816 	DP_STATS_INC(peer, rx.reception_type[reception_type], 1);
1817 
1818 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1819 		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
1820 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1821 		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A)));
1822 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1823 		      ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
1824 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1825 		      ((mcs <= MAX_MCS_11B) && (pkt_type == DOT11_B)));
1826 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1827 		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
1828 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1829 		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_N)));
1830 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1831 		      ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
1832 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1833 		      ((mcs <= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
1834 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1835 		      ((mcs >= MAX_MCS) && (pkt_type == DOT11_AX)));
1836 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1837 		      ((mcs < MAX_MCS) && (pkt_type == DOT11_AX)));
1838 }
1839 
1840 #ifndef WDS_VENDOR_EXTENSION
1841 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
1842 			   struct dp_vdev *vdev,
1843 			   struct dp_peer *peer)
1844 {
1845 	return 1;
1846 }
1847 #endif
1848 
1849 #ifdef RX_DESC_DEBUG_CHECK
1850 /**
1851  * dp_rx_desc_nbuf_sanity_check() - sanity check to catch REO rx_desc
1852  *				  paddr corruption
1853  * @soc: DP SOC handle
1854  * @ring_desc: REO ring descriptor
1855  * @rx_desc: Rx descriptor
1856  *
1857  * Return: QDF_STATUS_SUCCESS if the paddr is sane, else QDF_STATUS_E_FAILURE
1858  */
1859 QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
1860 					hal_ring_desc_t ring_desc,
1861 					struct dp_rx_desc *rx_desc)
1862 {
1863 	struct hal_buf_info hbi;
1864 
1865 	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
1866 	/* Sanity check for possible buffer paddr corruption */
1867 	if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr))
1868 		return QDF_STATUS_SUCCESS;
1869 
1870 	return QDF_STATUS_E_FAILURE;
1871 }
1872 
1873 /**
1874  * dp_rx_desc_nbuf_len_sanity_check() - sanity check to catch Rx buffer
1875  *				      out-of-bounds access from HW
1876  *
1877  * @soc: DP soc
1878  * @pkt_len: Packet length received from H.W
1879  *
1880  * Return: NONE
1881  */
1882 static inline void
1883 dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc,
1884 				 uint32_t pkt_len)
1885 {
1886 	struct rx_desc_pool *rx_desc_pool;
1887 
1888 	rx_desc_pool = &soc->rx_desc_buf[0];
1889 	qdf_assert_always(pkt_len <= rx_desc_pool->buf_size);
1890 }
1891 #else
1892 static inline void
1893 dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc, uint32_t pkt_len) { }
1894 #endif
1895 
1896 #ifdef DP_RX_PKT_NO_PEER_DELIVER
1897 /**
1898  * dp_rx_deliver_to_stack_no_peer() - try to deliver rx data even if
1899  *				      no corresponding peer is found
1900  * @soc: core txrx main context
1901  * @nbuf: pkt skb pointer
1902  *
1903  * This function tries to deliver certain RX special frames to the stack
1904  * even when no matching peer is found; for instance, in the LFR case,
1905  * some EAPOL data is sent to the host before peer_map is done.
1906  *
1907  * Return: None
1908  */
1909 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
1910 {
1911 	uint16_t peer_id;
1912 	uint8_t vdev_id;
1913 	struct dp_vdev *vdev = NULL;
1914 	uint32_t l2_hdr_offset = 0;
1915 	uint16_t msdu_len = 0;
1916 	uint32_t pkt_len = 0;
1917 	uint8_t *rx_tlv_hdr;
1918 	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
1919 				FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;
1920 
1921 	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
1922 	if (peer_id > soc->max_peers)
1923 		goto deliver_fail;
1924 
1925 	vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
1926 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_RX);
1927 	if (!vdev || vdev->delete.pending || !vdev->osif_rx)
1928 		goto deliver_fail;
1929 
1930 	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf)))
1931 		goto deliver_fail;
1932 
1933 	rx_tlv_hdr = qdf_nbuf_data(nbuf);
1934 	l2_hdr_offset =
1935 		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
1936 
1937 	msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
1938 	pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size;
1939 	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
1940 
1941 	qdf_nbuf_set_pktlen(nbuf, pkt_len);
1942 	qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size + l2_hdr_offset);
1943 
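	/*
	 * Only special frames (ARP, DHCPv4/v6 and EAPOL, as per frame_mask
	 * above) are delivered without a peer; they are marked as exception
	 * frames before being handed to osif_rx.
	 */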
1944 	if (dp_rx_is_special_frame(nbuf, frame_mask)) {
1945 		qdf_nbuf_set_exc_frame(nbuf, 1);
1946 		if (QDF_STATUS_SUCCESS !=
1947 		    vdev->osif_rx(vdev->osif_vdev, nbuf))
1948 			goto deliver_fail;
1949 		DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1);
1950 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
1951 		return;
1952 	}
1953 
1954 deliver_fail:
1955 	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1956 			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
1957 	qdf_nbuf_free(nbuf);
1958 	if (vdev)
1959 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
1960 }
1961 #else
1962 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
1963 {
1964 	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1965 			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
1966 	qdf_nbuf_free(nbuf);
1967 }
1968 #endif
1969 
1970 /**
1971  * dp_rx_srng_get_num_pending() - get number of pending entries
1972  * @hal_soc: hal soc opaque pointer
1973  * @hal_ring_hdl: opaque pointer to the HAL Rx ring
1974  * @num_entries: number of entries in the hal_ring.
1975  * @near_full: pointer to a boolean. This is set if ring is near full.
1976  *
1977  * The function returns the number of entries in a destination ring which are
1978  * yet to be reaped. The function also checks if the ring is near full.
1979  * If at least half of the ring is yet to be reaped, the ring is considered
1980  * to be approaching full.
1981  * The function uses hal_srng_dst_num_valid_locked() to get the number of
1982  * valid entries; it should not be called while holding the SRNG lock. The HW
1983  * pointer value is synced into cached_hp.
1984  *
1985  * Return: Number of pending entries if any
1986  */
1987 uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
1988 				    hal_ring_handle_t hal_ring_hdl,
1989 				    uint32_t num_entries,
1990 				    bool *near_full)
1991 {
1992 	uint32_t num_pending = 0;
1993 
1994 	num_pending = hal_srng_dst_num_valid_locked(hal_soc,
1995 						    hal_ring_hdl,
1996 						    true);
1997 
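	/*
	 * The ring is reported as near full when at least half of its
	 * entries are pending reap.
	 */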
1998 	if (num_entries && (num_pending >= num_entries >> 1))
1999 		*near_full = true;
2000 	else
2001 		*near_full = false;
2002 
2003 	return num_pending;
2004 }
2005 
2006 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2007 
2008 #ifdef WLAN_SUPPORT_RX_FISA
2009 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding)
2010 {
2011 	QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
2012 	qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size);
2013 }
2014 
2015 /**
2016  * dp_rx_set_hdr_pad() - set l3 padding in nbuf cb
2017  * @nbuf: pkt skb pointer
2018  * @l3_padding: l3 padding
2019  *
2020  * Return: None
2021  */
2022 static inline
2023 void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
2024 {
2025 	QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
2026 }
2027 #else
2028 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding)
2029 {
2030 	qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size);
2031 }
2032 
2033 static inline
2034 void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
2035 {
2036 }
2037 #endif
2038 
2039 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2040 
2041 #ifdef DP_RX_DROP_RAW_FRM
2042 /**
2043  * dp_rx_is_raw_frame_dropped() - if raw frame nbuf, free and drop
2044  * @nbuf: pkt skb pointer
2045  *
2046  * Return: true - raw frame, dropped
2047  *	   false - not raw frame, do nothing
2048  */
2049 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)
2050 {
2051 	if (qdf_nbuf_is_raw_frame(nbuf)) {
2052 		qdf_nbuf_free(nbuf);
2053 		return true;
2054 	}
2055 
2056 	return false;
2057 }
2058 #endif
2059 
2060 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
2061 /**
2062  * dp_rx_ring_record_entry() - Record an entry into the rx ring history.
2063  * @soc: Datapath soc structure
2064  * @ring_num: REO ring number
2065  * @ring_desc: REO ring descriptor
2066  *
2067  * Return: None
2068  */
2069 void
2070 dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
2071 			hal_ring_desc_t ring_desc)
2072 {
2073 	struct dp_buf_info_record *record;
2074 	struct hal_buf_info hbi;
2075 	uint32_t idx;
2076 
2077 	if (qdf_unlikely(!soc->rx_ring_history[ring_num]))
2078 		return;
2079 
2080 	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
2081 
2082 	/* buffer_addr_info is the first element of ring_desc */
2083 	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)ring_desc,
2084 				  &hbi);
2085 
2086 	idx = dp_history_get_next_index(&soc->rx_ring_history[ring_num]->index,
2087 					DP_RX_HIST_MAX);
2088 
2089 	/* No NULL check needed for record since it's an array */
2090 	record = &soc->rx_ring_history[ring_num]->entry[idx];
2091 
2092 	record->timestamp = qdf_get_log_timestamp();
2093 	record->hbi.paddr = hbi.paddr;
2094 	record->hbi.sw_cookie = hbi.sw_cookie;
2095 	record->hbi.rbm = hbi.rbm;
2096 }
2097 #endif
2098 
2099 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
2100 /**
2101  * dp_rx_update_stats() - Update soc level rx packet count
2102  * @soc: DP soc handle
2103  * @nbuf: nbuf received
2104  *
2105  * Return: None
2106  */
2107 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf)
2108 {
2109 	DP_STATS_INC_PKT(soc, rx.ingress, 1,
2110 			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
2111 }
2112 #endif
2113 
2114 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
2115 /**
2116  * dp_rx_deliver_to_pkt_capture() - deliver rx packet to packet capture
2117  * @soc: dp_soc handle
2118  * @pdev: dp_pdev handle
2119  * @peer_id: peer_id of the peer to which the received packet belongs
2120  * @is_offload: offload indication passed on to the WDI event handler
2121  * @netbuf: Buffer pointer
2122  *
2123  * This function is used to deliver the rx packet to packet capture
2124  */
2125 void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev,
2126 				  uint16_t peer_id, uint32_t is_offload,
2127 				  qdf_nbuf_t netbuf)
2128 {
2129 	if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx))
2130 		dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA, soc, netbuf,
2131 				     peer_id, is_offload, pdev->pdev_id);
2132 }
2133 
2134 void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
2135 					  uint32_t is_offload)
2136 {
2137 	if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx))
2138 		dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA_NO_PEER,
2139 				     soc, nbuf, HTT_INVALID_VDEV,
2140 				     is_offload, 0);
2141 }
2142 #endif
2143 
2144 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2145 
2146 QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev)
2147 {
2148 	QDF_STATUS ret;
2149 
2150 	if (vdev->osif_rx_flush) {
2151 		ret = vdev->osif_rx_flush(vdev->osif_vdev, vdev->vdev_id);
2152 		if (!QDF_IS_STATUS_SUCCESS(ret)) {
2153 			dp_err("Failed to flush rx pkts for vdev %d\n",
2154 			       vdev->vdev_id);
2155 			return ret;
2156 		}
2157 	}
2158 
2159 	return QDF_STATUS_SUCCESS;
2160 }
2161 
2162 static QDF_STATUS
2163 dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc,
2164 			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
2165 			   struct dp_pdev *dp_pdev,
2166 			   struct rx_desc_pool *rx_desc_pool)
2167 {
2168 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
2169 
2170 	(nbuf_frag_info_t->virt_addr).nbuf =
2171 		qdf_nbuf_alloc(dp_soc->osdev, rx_desc_pool->buf_size,
2172 			       RX_BUFFER_RESERVATION,
2173 			       rx_desc_pool->buf_alignment, FALSE);
2174 	if (!((nbuf_frag_info_t->virt_addr).nbuf)) {
2175 		dp_err("nbuf alloc failed");
2176 		DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
2177 		return ret;
2178 	}
2179 
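	/*
	 * DMA-map the entire buffer for device-to-host transfers; on
	 * failure the nbuf is freed and the map-error stat is updated.
	 */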
2180 	ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev,
2181 					 (nbuf_frag_info_t->virt_addr).nbuf,
2182 					 QDF_DMA_FROM_DEVICE,
2183 					 rx_desc_pool->buf_size);
2184 
2185 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
2186 		qdf_nbuf_free((nbuf_frag_info_t->virt_addr).nbuf);
2187 		dp_err("nbuf map failed");
2188 		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
2189 		return ret;
2190 	}
2191 
2192 	nbuf_frag_info_t->paddr =
2193 		qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);
2194 
2195 	ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
2196 			     &nbuf_frag_info_t->paddr,
2197 			     rx_desc_pool);
2198 	if (ret == QDF_STATUS_E_FAILURE) {
2199 		dp_err("nbuf check x86 failed");
2200 		DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
2201 		return ret;
2202 	}
2203 
2204 	return QDF_STATUS_SUCCESS;
2205 }
2206 
2207 QDF_STATUS
2208 dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
2209 			  struct dp_srng *dp_rxdma_srng,
2210 			  struct rx_desc_pool *rx_desc_pool,
2211 			  uint32_t num_req_buffers)
2212 {
2213 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
2214 	hal_ring_handle_t rxdma_srng = dp_rxdma_srng->hal_srng;
2215 	union dp_rx_desc_list_elem_t *next;
2216 	void *rxdma_ring_entry;
2217 	qdf_dma_addr_t paddr;
2218 	struct dp_rx_nbuf_frag_info *nf_info;
2219 	uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0;
2220 	uint32_t buffer_index, nbuf_ptrs_per_page;
2221 	qdf_nbuf_t nbuf;
2222 	QDF_STATUS ret;
2223 	int page_idx, total_pages;
2224 	union dp_rx_desc_list_elem_t *desc_list = NULL;
2225 	union dp_rx_desc_list_elem_t *tail = NULL;
2226 	int sync_hw_ptr = 1;
2227 	uint32_t num_entries_avail;
2228 
2229 	if (qdf_unlikely(!dp_pdev)) {
2230 		dp_rx_err("%pK: pdev is null for mac_id = %d",
2231 			  dp_soc, mac_id);
2232 		return QDF_STATUS_E_FAILURE;
2233 	}
2234 
2235 	if (qdf_unlikely(!rxdma_srng)) {
2236 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
2237 		return QDF_STATUS_E_FAILURE;
2238 	}
2239 
2240 	dp_debug("requested %u RX buffers for driver attach", num_req_buffers);
2241 
2242 	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
2243 	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
2244 						   rxdma_srng,
2245 						   sync_hw_ptr);
2246 	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
2247 
2248 	if (!num_entries_avail) {
2249 		dp_err("Num of available entries is zero, nothing to do");
2250 		return QDF_STATUS_E_NOMEM;
2251 	}
2252 
2253 	if (num_entries_avail < num_req_buffers)
2254 		num_req_buffers = num_entries_avail;
2255 
2256 	nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool,
2257 					    num_req_buffers, &desc_list, &tail);
2258 	if (!nr_descs) {
2259 		dp_err("no free rx_descs in freelist");
2260 		DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
2261 		return QDF_STATUS_E_NOMEM;
2262 	}
2263 
2264 	dp_debug("got %u RX descs for driver attach", nr_descs);
2265 
2266 	/*
2267 	 * Try to allocate pointers to the nbuf one page at a time.
2268 	 * Take pointers that can fit in one page of memory and
2269 	 * iterate through the total descriptors that need to be
2270 	 * allocated in order of pages. Reuse the pointers that
2271 	 * have been allocated to fit in one page across each
2272 	 * iteration to index into the nbuf.
2273 	 */
2274 	total_pages = (nr_descs * sizeof(*nf_info)) / PAGE_SIZE;
2275 
2276 	/*
2277 	 * Add an extra page to store the remainder if any
2278 	 */
2279 	if ((nr_descs * sizeof(*nf_info)) % PAGE_SIZE)
2280 		total_pages++;
2281 	nf_info = qdf_mem_malloc(PAGE_SIZE);
2282 	if (!nf_info) {
2283 		dp_err("failed to allocate nbuf array");
2284 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
2285 		QDF_BUG(0);
2286 		return QDF_STATUS_E_NOMEM;
2287 	}
2288 	nbuf_ptrs_per_page = PAGE_SIZE / sizeof(*nf_info);
2289 
2290 	for (page_idx = 0; page_idx < total_pages; page_idx++) {
2291 		qdf_mem_zero(nf_info, PAGE_SIZE);
2292 
2293 		for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) {
2294 			/*
2295 			 * The last page of buffer pointers may not be required
2296 			 * completely based on the number of descriptors. Below
2297 			 * check will ensure we are allocating only the
2298 			 * required number of descriptors.
2299 			 */
2300 			if (nr_nbuf_total >= nr_descs)
2301 				break;
2302 			/* Flag is set during pdev rx_desc_pool initialization */
2303 			if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
2304 				ret = dp_pdev_frag_alloc_and_map(dp_soc,
2305 						&nf_info[nr_nbuf], dp_pdev,
2306 						rx_desc_pool);
2307 			else
2308 				ret = dp_pdev_nbuf_alloc_and_map(dp_soc,
2309 						&nf_info[nr_nbuf], dp_pdev,
2310 						rx_desc_pool);
2311 			if (QDF_IS_STATUS_ERROR(ret))
2312 				break;
2313 
2314 			nr_nbuf_total++;
2315 		}
2316 
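		/*
		 * Program one RXDMA ring entry per nbuf allocated in this
		 * page batch: link each rx_desc to its buffer, write the
		 * buffer address info into the ring entry and notify the
		 * IPA SMMU-mapping helper for the new buffer.
		 */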
2317 		hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
2318 
2319 		for (buffer_index = 0; buffer_index < nr_nbuf; buffer_index++) {
2320 			rxdma_ring_entry =
2321 				hal_srng_src_get_next(dp_soc->hal_soc,
2322 						      rxdma_srng);
2323 			qdf_assert_always(rxdma_ring_entry);
2324 
2325 			next = desc_list->next;
2326 			paddr = nf_info[buffer_index].paddr;
2327 			nbuf = nf_info[buffer_index].virt_addr.nbuf;
2328 
2329 			/* Flag is set during pdev rx_desc_pool initialization */
2330 			if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
2331 				dp_rx_desc_frag_prep(&desc_list->rx_desc,
2332 						     &nf_info[buffer_index]);
2333 			else
2334 				dp_rx_desc_prep(&desc_list->rx_desc,
2335 						&nf_info[buffer_index]);
2336 			desc_list->rx_desc.in_use = 1;
2337 			dp_rx_desc_alloc_dbg_info(&desc_list->rx_desc);
2338 			dp_rx_desc_update_dbg_info(&desc_list->rx_desc,
2339 						   __func__,
2340 						   RX_DESC_REPLENISHED);
2341 
2342 			hal_rxdma_buff_addr_info_set(dp_soc->hal_soc, rxdma_ring_entry, paddr,
2343 						     desc_list->rx_desc.cookie,
2344 						     rx_desc_pool->owner);
2345 			dp_ipa_handle_rx_buf_smmu_mapping(
2346 						dp_soc, nbuf,
2347 						rx_desc_pool->buf_size,
2348 						true);
2349 
2350 			desc_list = next;
2351 		}
2352 
2353 		dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id,
2354 					       rxdma_srng, nr_nbuf, nr_nbuf);
2355 		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
2356 	}
2357 
2358 	dp_info("filled %u RX buffers for driver attach", nr_nbuf_total);
2359 	qdf_mem_free(nf_info);
2360 
2361 	if (!nr_nbuf_total) {
2362 		dp_err("No nbufs allocated");
2363 		QDF_BUG(0);
2364 		return QDF_STATUS_E_RESOURCES;
2365 	}
2366 
2367 	/* No need to count the number of bytes received during replenish.
2368 	 * Therefore set replenish.pkts.bytes as 0.
2369 	 */
2370 	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf, 0);
2371 
2372 	return QDF_STATUS_SUCCESS;
2373 }
2374 
2375 qdf_export_symbol(dp_pdev_rx_buffers_attach);
2376 
2377 /**
2378  * dp_rx_enable_mon_dest_frag() - Enable frag processing for
2379  *              monitor destination ring via frag.
2380  *
2381  * Enable this flag only for monitor destination buffer processing
2382  * if DP_RX_MON_MEM_FRAG feature is enabled.
2383  * If the flag is set, the frag-based functions are used for the alloc,
2384  * map, desc-prep and free ops on the desc buffer; otherwise the normal
2385  * nbuf-based functions are used.
2386  *
2387  * @rx_desc_pool: Rx desc pool
2388  * @is_mon_dest_desc: Is it for monitor dest buffer
2389  *
2390  * Return: None
2391  */
2392 #ifdef DP_RX_MON_MEM_FRAG
2393 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
2394 				bool is_mon_dest_desc)
2395 {
2396 	rx_desc_pool->rx_mon_dest_frag_enable = is_mon_dest_desc;
2397 	if (is_mon_dest_desc)
2398 		dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is enabled");
2399 }
2400 #else
2401 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
2402 				bool is_mon_dest_desc)
2403 {
2404 	rx_desc_pool->rx_mon_dest_frag_enable = false;
2405 	if (is_mon_dest_desc)
2406 		dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is disabled");
2407 }
2408 #endif
2409 
2410 qdf_export_symbol(dp_rx_enable_mon_dest_frag);
2411 
2412 /*
2413  * dp_rx_pdev_desc_pool_alloc() -  allocate memory for software rx descriptor
2414  *				   pool
2415  *
2416  * @pdev: core txrx pdev context
2417  *
2418  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
2419  *			QDF_STATUS_E_NOMEM
2420  */
2421 QDF_STATUS
2422 dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev)
2423 {
2424 	struct dp_soc *soc = pdev->soc;
2425 	uint32_t rxdma_entries;
2426 	uint32_t rx_sw_desc_num;
2427 	struct dp_srng *dp_rxdma_srng;
2428 	struct rx_desc_pool *rx_desc_pool;
2429 	uint32_t status = QDF_STATUS_SUCCESS;
2430 	int mac_for_pdev;
2431 
2432 	mac_for_pdev = pdev->lmac_id;
2433 	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
2434 		dp_rx_info("%pK: nss-wifi<4> skip Rx refill %d",
2435 			   soc, mac_for_pdev);
2436 		return status;
2437 	}
2438 
2439 	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
2440 	rxdma_entries = dp_rxdma_srng->num_entries;
2441 
2442 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
2443 	rx_sw_desc_num = wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx);
2444 
2445 	rx_desc_pool->desc_type = DP_RX_DESC_BUF_TYPE;
2446 	status = dp_rx_desc_pool_alloc(soc,
2447 				       rx_sw_desc_num,
2448 				       rx_desc_pool);
2449 	if (status != QDF_STATUS_SUCCESS)
2450 		return status;
2451 
2452 	return status;
2453 }
2454 
2455 /*
2456  * dp_rx_pdev_desc_pool_free() - free software rx descriptor pool
2457  *
2458  * @pdev: core txrx pdev context
2459  */
2460 void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev)
2461 {
2462 	int mac_for_pdev = pdev->lmac_id;
2463 	struct dp_soc *soc = pdev->soc;
2464 	struct rx_desc_pool *rx_desc_pool;
2465 
2466 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
2467 
2468 	dp_rx_desc_pool_free(soc, rx_desc_pool);
2469 }
2470 
2471 /*
2472  * dp_rx_pdev_desc_pool_init() - initialize software rx descriptors
2473  *
2474  * @pdev: core txrx pdev context
2475  *
2476  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
2477  *			QDF_STATUS_E_NOMEM
2478  */
2479 QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev)
2480 {
2481 	int mac_for_pdev = pdev->lmac_id;
2482 	struct dp_soc *soc = pdev->soc;
2483 	uint32_t rxdma_entries;
2484 	uint32_t rx_sw_desc_num;
2485 	struct dp_srng *dp_rxdma_srng;
2486 	struct rx_desc_pool *rx_desc_pool;
2487 
2488 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
2489 	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
2490 		/*
2491 		 * If NSS is enabled, rx_desc_pool is already filled.
2492 		 * Hence, just disable desc_pool frag flag.
2493 		 */
2494 		dp_rx_enable_mon_dest_frag(rx_desc_pool, false);
2495 
2496 		dp_rx_info("%pK: nss-wifi<4> skip Rx refill %d",
2497 			   soc, mac_for_pdev);
2498 		return QDF_STATUS_SUCCESS;
2499 	}
2500 
2501 	if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_E_NOMEM)
2502 		return QDF_STATUS_E_NOMEM;
2503 
2504 	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
2505 	rxdma_entries = dp_rxdma_srng->num_entries;
2506 
2507 	soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;
2508 
2509 	rx_sw_desc_num =
2510 	wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx);
2511 
2512 	rx_desc_pool->owner = DP_WBM2SW_RBM(soc->wbm_sw0_bm_id);
2513 	rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE;
2514 	rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;
2515 	/* Disable monitor dest processing via frag */
2516 	dp_rx_enable_mon_dest_frag(rx_desc_pool, false);
2517 
2518 	dp_rx_desc_pool_init(soc, mac_for_pdev,
2519 			     rx_sw_desc_num, rx_desc_pool);
2520 	return QDF_STATUS_SUCCESS;
2521 }
2522 
2523 /*
2524  * dp_rx_pdev_desc_pool_deinit() - de-initialize software rx descriptor pools
2525  * @pdev: core txrx pdev context
2526  *
2527  * This function resets the freelist of rx descriptors and destroys locks
2528  * associated with this list of descriptors.
2529  */
2530 void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev)
2531 {
2532 	int mac_for_pdev = pdev->lmac_id;
2533 	struct dp_soc *soc = pdev->soc;
2534 	struct rx_desc_pool *rx_desc_pool;
2535 
2536 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
2537 
2538 	dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_for_pdev);
2539 }
2540 
2541 /*
2542  * dp_rx_pdev_buffers_alloc() - Allocate nbufs (skbs) and replenish RxDMA ring
2543  *
2544  * @pdev: core txrx pdev context
2545  *
2546  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
2547  *			QDF_STATUS_E_NOMEM
2548  */
2549 QDF_STATUS
2550 dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev)
2551 {
2552 	int mac_for_pdev = pdev->lmac_id;
2553 	struct dp_soc *soc = pdev->soc;
2554 	struct dp_srng *dp_rxdma_srng;
2555 	struct rx_desc_pool *rx_desc_pool;
2556 	uint32_t rxdma_entries;
2557 
2558 	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
2559 	rxdma_entries = dp_rxdma_srng->num_entries;
2560 
2561 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
2562 
2563 	/* Initialize RX buffer pool which will be
2564 	 * used during low memory conditions
2565 	 */
2566 	dp_rx_buffer_pool_init(soc, mac_for_pdev);
2567 
2568 	return dp_pdev_rx_buffers_attach(soc, mac_for_pdev, dp_rxdma_srng,
2569 					 rx_desc_pool, rxdma_entries - 1);
2570 }
2571 
2572 /*
2573  * dp_rx_pdev_buffers_free - Free nbufs (skbs)
2574  *
2575  * @pdev: core txrx pdev context
2576  */
2577 void
2578 dp_rx_pdev_buffers_free(struct dp_pdev *pdev)
2579 {
2580 	int mac_for_pdev = pdev->lmac_id;
2581 	struct dp_soc *soc = pdev->soc;
2582 	struct rx_desc_pool *rx_desc_pool;
2583 
2584 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
2585 
2586 	dp_rx_desc_nbuf_free(soc, rx_desc_pool);
2587 	dp_rx_buffer_pool_deinit(soc, mac_for_pdev);
2588 }
2589 
2590 #ifdef DP_RX_SPECIAL_FRAME_NEED
2591 bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer,
2592 				 qdf_nbuf_t nbuf, uint32_t frame_mask,
2593 				 uint8_t *rx_tlv_hdr)
2594 {
2595 	uint32_t l2_hdr_offset = 0;
2596 	uint16_t msdu_len = 0;
2597 	uint32_t skip_len;
2598 
2599 	l2_hdr_offset =
2600 		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
2601 
2602 	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
2603 		skip_len = l2_hdr_offset;
2604 	} else {
2605 		msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
2606 		skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size;
2607 		qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);
2608 	}
2609 
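	/*
	 * Strip the RX TLVs and L3 header padding (only the padding for
	 * frag nbufs) so the frame starts at the Ethernet header before
	 * the special-frame check.
	 */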
2610 	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
2611 	dp_rx_set_hdr_pad(nbuf, l2_hdr_offset);
2612 	qdf_nbuf_pull_head(nbuf, skip_len);
2613 
2614 	if (dp_rx_is_special_frame(nbuf, frame_mask)) {
2615 		dp_info("special frame, mpdu sn 0x%x",
2616 			hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr));
2617 		qdf_nbuf_set_exc_frame(nbuf, 1);
2618 		dp_rx_deliver_to_stack(soc, peer->vdev, peer,
2619 				       nbuf, NULL);
2620 		return true;
2621 	}
2622 
2623 	return false;
2624 }
2625 #endif
2626