xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.c (revision 45a38684b07295822dc8eba39e293408f203eec8)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "hal_hw_headers.h"
20 #include "dp_types.h"
21 #include "dp_rx.h"
22 #include "dp_peer.h"
23 #include "hal_rx.h"
24 #include "hal_api.h"
25 #include "qdf_nbuf.h"
26 #ifdef MESH_MODE_SUPPORT
27 #include "if_meta_hdr.h"
28 #endif
29 #include "dp_internal.h"
30 #include "dp_rx_mon.h"
31 #include "dp_ipa.h"
32 #ifdef FEATURE_WDS
33 #include "dp_txrx_wds.h"
34 #endif
35 #include "dp_hist.h"
36 #include "dp_rx_buffer_pool.h"
37 
38 #ifdef ATH_RX_PRI_SAVE
39 #define DP_RX_TID_SAVE(_nbuf, _tid) \
40 	(qdf_nbuf_set_priority(_nbuf, _tid))
41 #else
42 #define DP_RX_TID_SAVE(_nbuf, _tid)
43 #endif
44 
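/**
 * dp_rx_check_ndi_mdns_fwding() - check whether an IPv6 mDNS pkt received
 *				   on an NDI vdev may be intra-bss forwarded
 * @ta_peer: transmitter (source) peer of the received frame
 * @nbuf: received frame
 *
 * Return: false if mDNS forwarding must be skipped (rx.intra_bss.mdns_no_fwd
 *	   is incremented), true otherwise
 */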
45 #ifdef DP_RX_DISABLE_NDI_MDNS_FORWARDING
46 static inline
47 bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
48 {
49 	if (ta_peer->vdev->opmode == wlan_op_mode_ndi &&
50 	    qdf_nbuf_is_ipv6_mdns_pkt(nbuf)) {
51 		DP_STATS_INC(ta_peer, rx.intra_bss.mdns_no_fwd, 1);
52 		return false;
53 	}
54 	return true;
55 }
56 #else
57 static inline
58 bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
59 {
60 	return true;
61 }
62 #endif
63 static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
64 {
65 	return vdev->ap_bridge_enabled;
66 }
67 
68 #ifdef DUP_RX_DESC_WAR
69 void dp_rx_dump_info_and_assert(struct dp_soc *soc,
70 				hal_ring_handle_t hal_ring,
71 				hal_ring_desc_t ring_desc,
72 				struct dp_rx_desc *rx_desc)
73 {
74 	void *hal_soc = soc->hal_soc;
75 
76 	hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc);
77 	dp_rx_desc_dump(rx_desc);
78 }
79 #else
80 void dp_rx_dump_info_and_assert(struct dp_soc *soc,
81 				hal_ring_handle_t hal_ring_hdl,
82 				hal_ring_desc_t ring_desc,
83 				struct dp_rx_desc *rx_desc)
84 {
85 	hal_soc_handle_t hal_soc = soc->hal_soc;
86 
87 	dp_rx_desc_dump(rx_desc);
88 	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
89 	hal_srng_dump_ring(hal_soc, hal_ring_hdl);
90 	qdf_assert_always(0);
91 }
92 #endif
93 
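/**
 * dp_rx_desc_sanity() - sanity check on the rx_desc obtained from ring_desc
 * @soc: core txrx main context
 * @hal_soc: HAL soc handle
 * @hal_ring_hdl: HAL ring handle
 * @ring_desc: ring descriptor from which the cookie was obtained
 * @rx_desc: rx descriptor looked up from the cookie
 *
 * With RX_DESC_SANITY_WAR enabled, a NULL rx_desc or an unexpected return
 * buffer manager in the ring descriptor is treated as an invalid cookie:
 * the ring descriptor is dumped and rx.err.invalid_cookie is incremented.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_NULL_VALUE on failure
 */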
94 #ifdef RX_DESC_SANITY_WAR
95 static inline
96 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
97 			     hal_ring_handle_t hal_ring_hdl,
98 			     hal_ring_desc_t ring_desc,
99 			     struct dp_rx_desc *rx_desc)
100 {
101 	uint8_t return_buffer_manager;
102 
103 	if (qdf_unlikely(!rx_desc)) {
104 		/*
105 		 * This is an unlikely case where the cookie obtained
106 		 * from the ring_desc is invalid and hence we are not
107 		 * able to find the corresponding rx_desc
108 		 */
109 		goto fail;
110 	}
111 
112 	return_buffer_manager = hal_rx_ret_buf_manager_get(ring_desc);
113 	if (qdf_unlikely(!(return_buffer_manager == HAL_RX_BUF_RBM_SW1_BM ||
114 			 return_buffer_manager == HAL_RX_BUF_RBM_SW3_BM))) {
115 		goto fail;
116 	}
117 
118 	return QDF_STATUS_SUCCESS;
119 
120 fail:
121 	DP_STATS_INC(soc, rx.err.invalid_cookie, 1);
122 	dp_err("Ring Desc:");
123 	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl,
124 				ring_desc);
125 	return QDF_STATUS_E_NULL_VALUE;
126 
127 }
128 #else
129 static inline
130 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
131 			     hal_ring_handle_t hal_ring_hdl,
132 			     hal_ring_desc_t ring_desc,
133 			     struct dp_rx_desc *rx_desc)
134 {
135 	return QDF_STATUS_SUCCESS;
136 }
137 #endif
138 
139 /**
140  * dp_pdev_frag_alloc_and_map() - Allocate frag for desc buffer and map
141  *
142  * @dp_soc: struct dp_soc *
143  * @nbuf_frag_info_t: nbuf frag info
144  * @dp_pdev: struct dp_pdev *
145  * @rx_desc_pool: Rx desc pool
146  *
147  * Return: QDF_STATUS
148  */
149 #ifdef DP_RX_MON_MEM_FRAG
150 static inline QDF_STATUS
151 dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
152 			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
153 			   struct dp_pdev *dp_pdev,
154 			   struct rx_desc_pool *rx_desc_pool)
155 {
156 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
157 
158 	(nbuf_frag_info_t->virt_addr).vaddr =
159 			qdf_frag_alloc(rx_desc_pool->buf_size);
160 
161 	if (!((nbuf_frag_info_t->virt_addr).vaddr)) {
162 		dp_err("Frag alloc failed");
163 		DP_STATS_INC(dp_pdev, replenish.frag_alloc_fail, 1);
164 		return QDF_STATUS_E_NOMEM;
165 	}
166 
167 	ret = qdf_mem_map_page(dp_soc->osdev,
168 			       (nbuf_frag_info_t->virt_addr).vaddr,
169 			       QDF_DMA_FROM_DEVICE,
170 			       rx_desc_pool->buf_size,
171 			       &nbuf_frag_info_t->paddr);
172 
173 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
174 		qdf_frag_free((nbuf_frag_info_t->virt_addr).vaddr);
175 		dp_err("Frag map failed");
176 		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
177 		return QDF_STATUS_E_FAULT;
178 	}
179 
180 	return QDF_STATUS_SUCCESS;
181 }
182 #else
183 static inline QDF_STATUS
184 dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
185 			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
186 			   struct dp_pdev *dp_pdev,
187 			   struct rx_desc_pool *rx_desc_pool)
188 {
189 	return QDF_STATUS_SUCCESS;
190 }
191 #endif /* DP_RX_MON_MEM_FRAG */
192 
193 /**
194  * dp_pdev_nbuf_alloc_and_map_replenish() - Allocate nbuf for desc buffer and map
195  *
196  * @dp_soc: struct dp_soc *
197  * @mac_id: Mac id
198  * @num_entries_avail: num_entries_avail
199  * @nbuf_frag_info_t: nbuf frag info
200  * @dp_pdev: struct dp_pdev *
201  * @rx_desc_pool: Rx desc pool
202  *
203  * Return: QDF_STATUS
204  */
205 static inline QDF_STATUS
206 dp_pdev_nbuf_alloc_and_map_replenish(struct dp_soc *dp_soc,
207 				     uint32_t mac_id,
208 				     uint32_t num_entries_avail,
209 				     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
210 				     struct dp_pdev *dp_pdev,
211 				     struct rx_desc_pool *rx_desc_pool)
212 {
213 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
214 
215 	(nbuf_frag_info_t->virt_addr).nbuf =
216 		dp_rx_buffer_pool_nbuf_alloc(dp_soc,
217 					     mac_id,
218 					     rx_desc_pool,
219 					     num_entries_avail);
220 	if (!((nbuf_frag_info_t->virt_addr).nbuf)) {
221 		dp_err("nbuf alloc failed");
222 		DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
223 		return QDF_STATUS_E_NOMEM;
224 	}
225 
226 	ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev,
227 					 (nbuf_frag_info_t->virt_addr).nbuf,
228 					 QDF_DMA_FROM_DEVICE,
229 					 rx_desc_pool->buf_size);
230 
231 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
232 		dp_rx_buffer_pool_nbuf_free(dp_soc,
233 			(nbuf_frag_info_t->virt_addr).nbuf, mac_id);
234 		dp_err("nbuf map failed");
235 		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
236 		return QDF_STATUS_E_FAULT;
237 	}
238 
239 	nbuf_frag_info_t->paddr =
240 		qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);
241 
242 	dp_ipa_handle_rx_buf_smmu_mapping(dp_soc,
243 			(qdf_nbuf_t)((nbuf_frag_info_t->virt_addr).nbuf),
244 					  rx_desc_pool->buf_size,
245 					  true);
246 
247 	ret = check_x86_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
248 			      &nbuf_frag_info_t->paddr,
249 			      rx_desc_pool);
250 	if (ret == QDF_STATUS_E_FAILURE) {
251 		qdf_nbuf_unmap_nbytes_single(dp_soc->osdev,
252 					     (nbuf_frag_info_t->virt_addr).nbuf,
253 					     QDF_DMA_FROM_DEVICE,
254 					     rx_desc_pool->buf_size);
255 		DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
256 		return QDF_STATUS_E_ADDRNOTAVAIL;
257 	}
258 
259 	return QDF_STATUS_SUCCESS;
260 }
261 
262 /*
263  * __dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
264  *			       called during dp rx initialization
265  *			       and at the end of dp_rx_process.
266  *
267  * @dp_soc: core txrx main context
268  * @mac_id: mac_id which is one of 3 mac_ids
269  * @dp_rxdma_srng: dp rxdma circular ring
270  * @rx_desc_pool: Pointer to free Rx descriptor pool
271  * @num_req_buffers: number of buffers to be replenished
272  * @desc_list: list of descs if called from dp_rx_process
273  *	       or NULL during dp rx initialization or out of buffer
274  *	       interrupt.
275  * @tail: tail of descs list
276  * @func_name: name of the caller function
277  * Return: return success or failure
278  */
279 QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
280 				struct dp_srng *dp_rxdma_srng,
281 				struct rx_desc_pool *rx_desc_pool,
282 				uint32_t num_req_buffers,
283 				union dp_rx_desc_list_elem_t **desc_list,
284 				union dp_rx_desc_list_elem_t **tail,
285 				const char *func_name)
286 {
287 	uint32_t num_alloc_desc;
288 	uint16_t num_desc_to_free = 0;
289 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
290 	uint32_t num_entries_avail;
291 	uint32_t count;
292 	int sync_hw_ptr = 1;
293 	struct dp_rx_nbuf_frag_info nbuf_frag_info = {0};
294 	void *rxdma_ring_entry;
295 	union dp_rx_desc_list_elem_t *next;
296 	QDF_STATUS ret;
297 	void *rxdma_srng;
298 
299 	rxdma_srng = dp_rxdma_srng->hal_srng;
300 
301 	if (!rxdma_srng) {
302 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
303 				  "rxdma srng not initialized");
304 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
305 		return QDF_STATUS_E_FAILURE;
306 	}
307 
308 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
309 		"requested %d buffers for replenish", num_req_buffers);
310 
311 	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
312 	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
313 						   rxdma_srng,
314 						   sync_hw_ptr);
315 
316 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
317 		"no of available entries in rxdma ring: %d",
318 		num_entries_avail);
319 
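	/*
	 * If no descriptor list was passed in and more than 3/4 of the ring
	 * entries are free, replenish the entire available space; otherwise
	 * cap the request at the available entries and queue the excess
	 * descriptors for return to the free list.
	 */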
320 	if (!(*desc_list) && (num_entries_avail >
321 		((dp_rxdma_srng->num_entries * 3) / 4))) {
322 		num_req_buffers = num_entries_avail;
323 	} else if (num_entries_avail < num_req_buffers) {
324 		num_desc_to_free = num_req_buffers - num_entries_avail;
325 		num_req_buffers = num_entries_avail;
326 	}
327 
328 	if (qdf_unlikely(!num_req_buffers)) {
329 		num_desc_to_free = num_req_buffers;
330 		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
331 		goto free_descs;
332 	}
333 
334 	/*
335 	 * if desc_list is NULL, allocate the descs from freelist
336 	 */
337 	if (!(*desc_list)) {
338 		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
339 							  rx_desc_pool,
340 							  num_req_buffers,
341 							  desc_list,
342 							  tail);
343 
344 		if (!num_alloc_desc) {
345 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
346 				"no free rx_descs in freelist");
347 			DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
348 					num_req_buffers);
349 			hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
350 			return QDF_STATUS_E_NOMEM;
351 		}
352 
353 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
354 			"%d rx desc allocated", num_alloc_desc);
355 		num_req_buffers = num_alloc_desc;
356 	}
357 
358 
359 	count = 0;
360 
361 	while (count < num_req_buffers) {
362 		/* Flag is set while pdev rx_desc_pool initialization */
363 		if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
364 			ret = dp_pdev_frag_alloc_and_map(dp_soc,
365 							 &nbuf_frag_info,
366 							 dp_pdev,
367 							 rx_desc_pool);
368 		else
369 			ret = dp_pdev_nbuf_alloc_and_map_replenish(dp_soc,
370 								   mac_id,
371 					num_entries_avail, &nbuf_frag_info,
372 					dp_pdev, rx_desc_pool);
373 
374 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
375 			if (qdf_unlikely(ret == QDF_STATUS_E_FAULT))
376 				continue;
377 			break;
378 		}
379 
380 		count++;
381 
382 		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
383 							 rxdma_srng);
384 		qdf_assert_always(rxdma_ring_entry);
385 
386 		next = (*desc_list)->next;
387 
388 		/* Flag is set while pdev rx_desc_pool initialization */
389 		if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
390 			dp_rx_desc_frag_prep(&((*desc_list)->rx_desc),
391 					     &nbuf_frag_info);
392 		else
393 			dp_rx_desc_prep(&((*desc_list)->rx_desc),
394 					&nbuf_frag_info);
395 
396 		/* rx_desc.in_use should be zero at this time */
397 		qdf_assert_always((*desc_list)->rx_desc.in_use == 0);
398 
399 		(*desc_list)->rx_desc.in_use = 1;
400 		(*desc_list)->rx_desc.in_err_state = 0;
401 		dp_rx_desc_update_dbg_info(&(*desc_list)->rx_desc,
402 					   func_name, RX_DESC_REPLENISHED);
403 		dp_verbose_debug("rx_netbuf=%pK, paddr=0x%llx, cookie=%d",
404 				 nbuf_frag_info.virt_addr.nbuf,
405 				 (unsigned long long)(nbuf_frag_info.paddr),
406 				 (*desc_list)->rx_desc.cookie);
407 
408 		hal_rxdma_buff_addr_info_set(rxdma_ring_entry,
409 					     nbuf_frag_info.paddr,
410 						(*desc_list)->rx_desc.cookie,
411 						rx_desc_pool->owner);
412 
413 		*desc_list = next;
414 
415 	}
416 
417 	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
418 
419 	dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u",
420 			 count, num_desc_to_free);
421 
422 	/* No need to count the number of bytes received during replenish.
423 	 * Therefore set replenish.pkts.bytes as 0.
424 	 */
425 	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
426 
427 free_descs:
428 	DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
429 	/*
430 	 * add any available free desc back to the free list
431 	 */
432 	if (*desc_list)
433 		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
434 			mac_id, rx_desc_pool);
435 
436 	return QDF_STATUS_SUCCESS;
437 }
438 
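/*
 * Illustrative usage sketch (not part of the driver code): during dp rx
 * initialization the ring is typically replenished with a NULL descriptor
 * list, letting the descriptors come from the pool freelist. The
 * dp_rx_buffers_replenish() wrapper is assumed here to supply __func__ as
 * func_name; soc, mac_id, rxdma_srng, rx_desc_pool and num_entries are
 * placeholders from the caller's context.
 *
 *	union dp_rx_desc_list_elem_t *desc_list = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *
 *	dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
 *				num_entries, &desc_list, &tail);
 */
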
439 /*
440  * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
441  *				pkts to RAW mode simulation to
442  *				decapsulate the pkt.
443  *
444  * @vdev: vdev on which RAW mode is enabled
445  * @nbuf_list: list of RAW pkts to process
446  * @peer: peer object from which the pkt is received
447  *
448  * Return: void
449  */
450 void
451 dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
452 					struct dp_peer *peer)
453 {
454 	qdf_nbuf_t deliver_list_head = NULL;
455 	qdf_nbuf_t deliver_list_tail = NULL;
456 	qdf_nbuf_t nbuf;
457 
458 	nbuf = nbuf_list;
459 	while (nbuf) {
460 		qdf_nbuf_t next = qdf_nbuf_next(nbuf);
461 
462 		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
463 
464 		DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
465 		DP_STATS_INC_PKT(peer, rx.raw, 1, qdf_nbuf_len(nbuf));
466 		/*
467 		 * Reset the chfrag_start and chfrag_end bits in nbuf cb
468 		 * as this is a non-amsdu pkt and RAW mode simulation expects
469 		 * these bits to be 0 for a non-amsdu pkt.
470 		 */
471 		if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
472 			 qdf_nbuf_is_rx_chfrag_end(nbuf)) {
473 			qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
474 			qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
475 		}
476 
477 		nbuf = next;
478 	}
479 
480 	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
481 				 &deliver_list_tail, peer->mac_addr.raw);
482 
483 	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
484 }
485 
486 
487 #ifdef DP_LFR
488 /*
489  * In case of LFR, data of a new peer might be sent up
490  * even before peer is added.
491  */
492 static inline struct dp_vdev *
493 dp_get_vdev_from_peer(struct dp_soc *soc,
494 			uint16_t peer_id,
495 			struct dp_peer *peer,
496 			struct hal_rx_mpdu_desc_info mpdu_desc_info)
497 {
498 	struct dp_vdev *vdev;
499 	uint8_t vdev_id;
500 
501 	if (unlikely(!peer)) {
502 		if (peer_id != HTT_INVALID_PEER) {
503 			vdev_id = DP_PEER_METADATA_VDEV_ID_GET(
504 					mpdu_desc_info.peer_meta_data);
505 			QDF_TRACE(QDF_MODULE_ID_DP,
506 				QDF_TRACE_LEVEL_DEBUG,
507 				FL("PeerID %d not found use vdevID %d"),
508 				peer_id, vdev_id);
509 			vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
510 								  vdev_id);
511 		} else {
512 			QDF_TRACE(QDF_MODULE_ID_DP,
513 				QDF_TRACE_LEVEL_DEBUG,
514 				FL("Invalid PeerID %d"),
515 				peer_id);
516 			return NULL;
517 		}
518 	} else {
519 		vdev = peer->vdev;
520 	}
521 	return vdev;
522 }
523 #else
524 static inline struct dp_vdev *
525 dp_get_vdev_from_peer(struct dp_soc *soc,
526 			uint16_t peer_id,
527 			struct dp_peer *peer,
528 			struct hal_rx_mpdu_desc_info mpdu_desc_info)
529 {
530 	if (unlikely(!peer)) {
531 		QDF_TRACE(QDF_MODULE_ID_DP,
532 			QDF_TRACE_LEVEL_DEBUG,
533 			FL("Peer not found for peerID %d"),
534 			peer_id);
535 		return NULL;
536 	} else {
537 		return peer->vdev;
538 	}
539 }
540 #endif
541 
542 #ifndef FEATURE_WDS
543 static void
544 dp_rx_da_learn(struct dp_soc *soc,
545 	       uint8_t *rx_tlv_hdr,
546 	       struct dp_peer *ta_peer,
547 	       qdf_nbuf_t nbuf)
548 {
549 }
550 #endif
551 /*
552  * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic
553  *
554  * @soc: core txrx main context
555  * @ta_peer: source peer entry
556  * @rx_tlv_hdr: start address of rx tlvs
557  * @nbuf: nbuf that has to be intrabss forwarded
558  *
559  * Return: bool: true if it is forwarded else false
560  */
561 static bool
562 dp_rx_intrabss_fwd(struct dp_soc *soc,
563 			struct dp_peer *ta_peer,
564 			uint8_t *rx_tlv_hdr,
565 			qdf_nbuf_t nbuf,
566 			struct hal_rx_msdu_metadata msdu_metadata)
567 {
568 	uint16_t len;
569 	uint8_t is_frag;
570 	struct dp_peer *da_peer;
571 	struct dp_ast_entry *ast_entry;
572 	qdf_nbuf_t nbuf_copy;
573 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
574 	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
575 	struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
576 					tid_stats.tid_rx_stats[ring_id][tid];
577 
578 	/* check if the destination peer is available in peer table
579 	 * and also check if the source peer and destination peer
580 	 * belong to the same vap and destination peer is not bss peer.
581 	 */
582 
583 	if ((qdf_nbuf_is_da_valid(nbuf) && !qdf_nbuf_is_da_mcbc(nbuf))) {
584 
585 		ast_entry = soc->ast_table[msdu_metadata.da_idx];
586 		if (!ast_entry)
587 			return false;
588 
589 		if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
590 			ast_entry->is_active = TRUE;
591 			return false;
592 		}
593 
594 		da_peer = ast_entry->peer;
595 
596 		if (!da_peer)
597 			return false;
598 		/* The TA peer cannot be the same as the peer (DA) on which the
599 		 * AST is present; this indicates a change in topology and that
600 		 * AST entries are yet to be updated.
601 		 */
602 		if (da_peer == ta_peer)
603 			return false;
604 
605 		if (da_peer->vdev == ta_peer->vdev && !da_peer->bss_peer) {
606 			len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
607 			is_frag = qdf_nbuf_is_frag(nbuf);
608 			memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
609 
610 			/* If the source or destination peer is in the isolation
611 			 * list, don't forward; instead push to the bridge stack.
612 			 */
613 			if (dp_get_peer_isolation(ta_peer) ||
614 			    dp_get_peer_isolation(da_peer))
615 				return false;
616 
617 			/* linearize the nbuf just before we send to
618 			 * dp_tx_send()
619 			 */
620 			if (qdf_unlikely(is_frag)) {
621 				if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
622 					return false;
623 
624 				nbuf = qdf_nbuf_unshare(nbuf);
625 				if (!nbuf) {
626 					DP_STATS_INC_PKT(ta_peer,
627 							 rx.intra_bss.fail,
628 							 1,
629 							 len);
630 					/* return true even though the pkt is
631 					 * not forwarded. Basically skb_unshare
632 					 * failed and we want to continue with
633 					 * next nbuf.
634 					 */
635 					tid_stats->fail_cnt[INTRABSS_DROP]++;
636 					return true;
637 				}
638 			}
639 
640 			if (!dp_tx_send((struct cdp_soc_t *)soc,
641 					ta_peer->vdev->vdev_id, nbuf)) {
642 				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
643 						 len);
644 				return true;
645 			} else {
646 				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
647 						len);
648 				tid_stats->fail_cnt[INTRABSS_DROP]++;
649 				return false;
650 			}
651 		}
652 	}
653 	/* If it is a broadcast pkt (eg: ARP) and it is not its own
654 	 * source, then clone the pkt and send the cloned pkt for
655 	 * intra BSS forwarding and the original pkt up the network stack.
656 	 * Note: it is still open how to handle multicast pkts - whether to
657 	 * forward all multicast pkts as is, or let a higher layer module
658 	 * like igmpsnoop decide whether to forward or not with
659 	 * Mcast enhancement.
660 	 */
661 	else if (qdf_unlikely((qdf_nbuf_is_da_mcbc(nbuf) &&
662 			       !ta_peer->bss_peer))) {
663 		if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf))
664 			goto end;
665 
666 		/* If the source peer is in the isolation list,
667 		 * don't forward; instead push to the bridge stack
668 		 */
669 		if (dp_get_peer_isolation(ta_peer))
670 			goto end;
671 
672 		nbuf_copy = qdf_nbuf_copy(nbuf);
673 		if (!nbuf_copy)
674 			goto end;
675 
676 		len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
677 		memset(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
678 
679 		/* Set cb->ftype to intrabss FWD */
680 		qdf_nbuf_set_tx_ftype(nbuf_copy, CB_FTYPE_INTRABSS_FWD);
681 		if (dp_tx_send((struct cdp_soc_t *)soc,
682 			       ta_peer->vdev->vdev_id, nbuf_copy)) {
683 			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, len);
684 			tid_stats->fail_cnt[INTRABSS_DROP]++;
685 			qdf_nbuf_free(nbuf_copy);
686 		} else {
687 			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, len);
688 			tid_stats->intrabss_cnt++;
689 		}
690 	}
691 
692 end:
693 	/* return false as we still have to send the original pkt
694 	 * up the stack
695 	 */
696 	return false;
697 }
698 
699 #ifdef MESH_MODE_SUPPORT
700 
701 /**
702  * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
703  *
704  * @vdev: DP Virtual device handle
705  * @nbuf: Buffer pointer
706  * @rx_tlv_hdr: start of rx tlv header
707  * @peer: pointer to peer
708  *
709  * This function allocates memory for mesh receive stats and fills the
710  * required stats. Stores the memory address in the skb cb.
711  *
712  * Return: void
713  */
714 
715 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
716 				uint8_t *rx_tlv_hdr, struct dp_peer *peer)
717 {
718 	struct mesh_recv_hdr_s *rx_info = NULL;
719 	uint32_t pkt_type;
720 	uint32_t nss;
721 	uint32_t rate_mcs;
722 	uint32_t bw;
723 	uint8_t primary_chan_num;
724 	uint32_t center_chan_freq;
725 	struct dp_soc *soc;
726 
727 	/* fill recv mesh stats */
728 	rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));
729 
730 	/* upper layers are responsible for freeing this memory */
731 
732 	if (!rx_info) {
733 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
734 			"Memory allocation failed for mesh rx stats");
735 		DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
736 		return;
737 	}
738 
739 	rx_info->rs_flags = MESH_RXHDR_VER1;
740 	if (qdf_nbuf_is_rx_chfrag_start(nbuf))
741 		rx_info->rs_flags |= MESH_RX_FIRST_MSDU;
742 
743 	if (qdf_nbuf_is_rx_chfrag_end(nbuf))
744 		rx_info->rs_flags |= MESH_RX_LAST_MSDU;
745 
746 	if (hal_rx_attn_msdu_get_is_decrypted(rx_tlv_hdr)) {
747 		rx_info->rs_flags |= MESH_RX_DECRYPTED;
748 		rx_info->rs_keyix = hal_rx_msdu_get_keyid(rx_tlv_hdr);
749 		if (vdev->osif_get_key)
750 			vdev->osif_get_key(vdev->osif_vdev,
751 					&rx_info->rs_decryptkey[0],
752 					&peer->mac_addr.raw[0],
753 					rx_info->rs_keyix);
754 	}
755 
756 	rx_info->rs_rssi = hal_rx_msdu_start_get_rssi(rx_tlv_hdr);
757 
758 	soc = vdev->pdev->soc;
759 	primary_chan_num = hal_rx_msdu_start_get_freq(rx_tlv_hdr);
760 	center_chan_freq = hal_rx_msdu_start_get_freq(rx_tlv_hdr) >> 16;
761 
762 	if (soc->cdp_soc.ol_ops && soc->cdp_soc.ol_ops->freq_to_band) {
763 		rx_info->rs_band = soc->cdp_soc.ol_ops->freq_to_band(
764 							soc->ctrl_psoc,
765 							vdev->pdev->pdev_id,
766 							center_chan_freq);
767 	}
768 	rx_info->rs_channel = primary_chan_num;
769 	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
770 	rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
771 	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
772 	nss = hal_rx_msdu_start_nss_get(vdev->pdev->soc->hal_soc, rx_tlv_hdr);
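	/* rs_ratephy1 packs, from LSB to MSB: rate_mcs in bits [7:0],
	 * nss in [15:8], pkt_type in [23:16] and bw in [31:24].
	 */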
773 	rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
774 				(bw << 24);
775 
776 	qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);
777 
778 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
779 		FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x"),
780 						rx_info->rs_flags,
781 						rx_info->rs_rssi,
782 						rx_info->rs_channel,
783 						rx_info->rs_ratephy1,
784 						rx_info->rs_keyix);
785 
786 }
787 
788 /**
789  * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
790  *
791  * @vdev: DP Virtual device handle
792  * @nbuf: Buffer pointer
793  * @rx_tlv_hdr: start of rx tlv header
794  *
795  * This checks if the received packet matches any filter-out
796  * category and drops the packet if it matches.
797  *
798  * Return: status (0 indicates drop, non-zero indicates no drop)
799  */
800 
801 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
802 					uint8_t *rx_tlv_hdr)
803 {
804 	union dp_align_mac_addr mac_addr;
805 	struct dp_soc *soc = vdev->pdev->soc;
806 
807 	if (qdf_unlikely(vdev->mesh_rx_filter)) {
808 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
809 			if (hal_rx_mpdu_get_fr_ds(soc->hal_soc,
810 						  rx_tlv_hdr))
811 				return  QDF_STATUS_SUCCESS;
812 
813 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
814 			if (hal_rx_mpdu_get_to_ds(soc->hal_soc,
815 						  rx_tlv_hdr))
816 				return  QDF_STATUS_SUCCESS;
817 
818 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
819 			if (!hal_rx_mpdu_get_fr_ds(soc->hal_soc,
820 						   rx_tlv_hdr) &&
821 			    !hal_rx_mpdu_get_to_ds(soc->hal_soc,
822 						   rx_tlv_hdr))
823 				return  QDF_STATUS_SUCCESS;
824 
825 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
826 			if (hal_rx_mpdu_get_addr1(soc->hal_soc,
827 						  rx_tlv_hdr,
828 					&mac_addr.raw[0]))
829 				return QDF_STATUS_E_FAILURE;
830 
831 			if (!qdf_mem_cmp(&mac_addr.raw[0],
832 					&vdev->mac_addr.raw[0],
833 					QDF_MAC_ADDR_SIZE))
834 				return  QDF_STATUS_SUCCESS;
835 		}
836 
837 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
838 			if (hal_rx_mpdu_get_addr2(soc->hal_soc,
839 						  rx_tlv_hdr,
840 						  &mac_addr.raw[0]))
841 				return QDF_STATUS_E_FAILURE;
842 
843 			if (!qdf_mem_cmp(&mac_addr.raw[0],
844 					&vdev->mac_addr.raw[0],
845 					QDF_MAC_ADDR_SIZE))
846 				return  QDF_STATUS_SUCCESS;
847 		}
848 	}
849 
850 	return QDF_STATUS_E_FAILURE;
851 }
852 
853 #else
854 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
855 				uint8_t *rx_tlv_hdr, struct dp_peer *peer)
856 {
857 }
858 
859 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
860 					uint8_t *rx_tlv_hdr)
861 {
862 	return QDF_STATUS_E_FAILURE;
863 }
864 
865 #endif
866 
867 #ifdef FEATURE_NAC_RSSI
868 /**
869  * dp_rx_nac_filter(): Function to perform filtering of non-associated
870  * clients
871  * @pdev: DP pdev handle
872  * @rx_pkt_hdr: Rx packet Header
873  *
874  * return: dp_vdev*
875  */
876 static
877 struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
878 		uint8_t *rx_pkt_hdr)
879 {
880 	struct ieee80211_frame *wh;
881 	struct dp_neighbour_peer *peer = NULL;
882 
883 	wh = (struct ieee80211_frame *)rx_pkt_hdr;
884 
885 	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
886 		return NULL;
887 
888 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
889 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
890 				neighbour_peer_list_elem) {
891 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
892 				wh->i_addr2, QDF_MAC_ADDR_SIZE) == 0) {
893 			QDF_TRACE(
894 				QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
895 				FL("NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x"),
896 				peer->neighbour_peers_macaddr.raw[0],
897 				peer->neighbour_peers_macaddr.raw[1],
898 				peer->neighbour_peers_macaddr.raw[2],
899 				peer->neighbour_peers_macaddr.raw[3],
900 				peer->neighbour_peers_macaddr.raw[4],
901 				peer->neighbour_peers_macaddr.raw[5]);
902 
903 				qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
904 
905 			return pdev->monitor_vdev;
906 		}
907 	}
908 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
909 
910 	return NULL;
911 }
912 
913 /**
914  * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
915  * @soc: DP SOC handle
916  * @mpdu: mpdu for which peer is invalid
917  * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
918  * pool_id have the same mapping)
919  *
920  * return: integer type
921  */
922 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
923 				   uint8_t mac_id)
924 {
925 	struct dp_invalid_peer_msg msg;
926 	struct dp_vdev *vdev = NULL;
927 	struct dp_pdev *pdev = NULL;
928 	struct ieee80211_frame *wh;
929 	qdf_nbuf_t curr_nbuf, next_nbuf;
930 	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
931 	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
932 
933 	rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
934 
935 	if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) {
936 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
937 			  "Drop decapped frames");
938 		goto free;
939 	}
940 
941 	wh = (struct ieee80211_frame *)rx_pkt_hdr;
942 
943 	if (!DP_FRAME_IS_DATA(wh)) {
944 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
945 			  "NAWDS valid only for data frames");
946 		goto free;
947 	}
948 
949 	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
950 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
951 			"Invalid nbuf length");
952 		goto free;
953 	}
954 
955 	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
956 
957 	if (!pdev || qdf_unlikely(pdev->is_pdev_down)) {
958 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
959 			  "PDEV %s", !pdev ? "not found" : "down");
960 		goto free;
961 	}
962 
963 	if (pdev->filter_neighbour_peers) {
964 		/* Next Hop scenario not yet handled */
965 		vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
966 		if (vdev) {
967 			dp_rx_mon_deliver(soc, pdev->pdev_id,
968 					  pdev->invalid_peer_head_msdu,
969 					  pdev->invalid_peer_tail_msdu);
970 
971 			pdev->invalid_peer_head_msdu = NULL;
972 			pdev->invalid_peer_tail_msdu = NULL;
973 
974 			return 0;
975 		}
976 	}
977 
978 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
979 
980 		if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
981 				QDF_MAC_ADDR_SIZE) == 0) {
982 			goto out;
983 		}
984 	}
985 
986 	if (!vdev) {
987 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
988 			"VDEV not found");
989 		goto free;
990 	}
991 
992 out:
993 	msg.wh = wh;
994 	qdf_nbuf_pull_head(mpdu, RX_PKT_TLVS_LEN);
995 	msg.nbuf = mpdu;
996 	msg.vdev_id = vdev->vdev_id;
997 	if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer)
998 		pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(
999 				(struct cdp_ctrl_objmgr_psoc *)soc->ctrl_psoc,
1000 				pdev->pdev_id, &msg);
1001 
1002 free:
1003 	/* Drop and free packet */
1004 	curr_nbuf = mpdu;
1005 	while (curr_nbuf) {
1006 		next_nbuf = qdf_nbuf_next(curr_nbuf);
1007 		qdf_nbuf_free(curr_nbuf);
1008 		curr_nbuf = next_nbuf;
1009 	}
1010 
1011 	return 0;
1012 }
1013 
1014 /**
1015  * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
1016  * @soc: DP SOC handle
1017  * @mpdu: mpdu for which peer is invalid
1018  * @mpdu_done: if an mpdu is completed
1019  * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
1020  * pool_id have the same mapping)
1021  *
1022  * Return: void
1023  */
1024 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
1025 					qdf_nbuf_t mpdu, bool mpdu_done,
1026 					uint8_t mac_id)
1027 {
1028 	/* Only trigger the process when mpdu is completed */
1029 	if (mpdu_done)
1030 		dp_rx_process_invalid_peer(soc, mpdu, mac_id);
1031 }
1032 #else
1033 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
1034 				   uint8_t mac_id)
1035 {
1036 	qdf_nbuf_t curr_nbuf, next_nbuf;
1037 	struct dp_pdev *pdev;
1038 	struct dp_vdev *vdev = NULL;
1039 	struct ieee80211_frame *wh;
1040 	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
1041 	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
1042 
1043 	wh = (struct ieee80211_frame *)rx_pkt_hdr;
1044 
1045 	if (!DP_FRAME_IS_DATA(wh)) {
1046 		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
1047 				   "only for data frames");
1048 		goto free;
1049 	}
1050 
1051 	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
1052 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1053 			  "Invalid nbuf length");
1054 		goto free;
1055 	}
1056 
1057 	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1058 	if (!pdev) {
1059 		QDF_TRACE(QDF_MODULE_ID_DP,
1060 			  QDF_TRACE_LEVEL_ERROR,
1061 			  "PDEV not found");
1062 		goto free;
1063 	}
1064 
1065 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
1066 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
1067 		if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
1068 				QDF_MAC_ADDR_SIZE) == 0) {
1069 			qdf_spin_unlock_bh(&pdev->vdev_list_lock);
1070 			goto out;
1071 		}
1072 	}
1073 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
1074 
1075 	if (!vdev) {
1076 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1077 			  "VDEV not found");
1078 		goto free;
1079 	}
1080 
1081 out:
1082 	if (soc->cdp_soc.ol_ops->rx_invalid_peer)
1083 		soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh);
1084 free:
1085 	/* reset the head and tail pointers */
1086 	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1087 	if (pdev) {
1088 		pdev->invalid_peer_head_msdu = NULL;
1089 		pdev->invalid_peer_tail_msdu = NULL;
1090 	}
1091 
1092 	/* Drop and free packet */
1093 	curr_nbuf = mpdu;
1094 	while (curr_nbuf) {
1095 		next_nbuf = qdf_nbuf_next(curr_nbuf);
1096 		qdf_nbuf_free(curr_nbuf);
1097 		curr_nbuf = next_nbuf;
1098 	}
1099 
1100 	/* Reset the head and tail pointers */
1101 	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1102 	if (pdev) {
1103 		pdev->invalid_peer_head_msdu = NULL;
1104 		pdev->invalid_peer_tail_msdu = NULL;
1105 	}
1106 
1107 	return 0;
1108 }
1109 
1110 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
1111 					qdf_nbuf_t mpdu, bool mpdu_done,
1112 					uint8_t mac_id)
1113 {
1114 	/* Process the nbuf */
1115 	dp_rx_process_invalid_peer(soc, mpdu, mac_id);
1116 }
1117 #endif
1118 
1119 #ifdef RECEIVE_OFFLOAD
1120 /**
1121  * dp_rx_print_offload_info() - Print offload info from RX TLV
1122  * @soc: dp soc handle
1123  * @rx_tlv: RX TLV for which offload information is to be printed
1124  *
1125  * Return: None
1126  */
1127 static void dp_rx_print_offload_info(struct dp_soc *soc, uint8_t *rx_tlv)
1128 {
1129 	dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------");
1130 	dp_verbose_debug("lro_eligible 0x%x", HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv));
1131 	dp_verbose_debug("pure_ack 0x%x", HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv));
1132 	dp_verbose_debug("chksum 0x%x", hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
1133 								  rx_tlv));
1134 	dp_verbose_debug("TCP seq num 0x%x", HAL_RX_TLV_GET_TCP_SEQ(rx_tlv));
1135 	dp_verbose_debug("TCP ack num 0x%x", HAL_RX_TLV_GET_TCP_ACK(rx_tlv));
1136 	dp_verbose_debug("TCP window 0x%x", HAL_RX_TLV_GET_TCP_WIN(rx_tlv));
1137 	dp_verbose_debug("TCP protocol 0x%x", HAL_RX_TLV_GET_TCP_PROTO(rx_tlv));
1138 	dp_verbose_debug("TCP offset 0x%x", HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv));
1139 	dp_verbose_debug("toeplitz 0x%x", HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv));
1140 	dp_verbose_debug("---------------------------------------------------------");
1141 }
1142 
1143 /**
1144  * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb
1145  * @soc: DP SOC handle
1146  * @rx_tlv: RX TLV received for the msdu
1147  * @msdu: msdu for which GRO info needs to be filled
1148  * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets
1149  *
1150  * Return: None
1151  */
1152 static
1153 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
1154 			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
1155 {
1156 	if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx))
1157 		return;
1158 
1159 	/* Filling up RX offload info only for TCP packets */
1160 	if (!HAL_RX_TLV_GET_TCP_PROTO(rx_tlv))
1161 		return;
1162 
1163 	*rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1;
1164 
1165 	QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) =
1166 		 HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv);
1167 	QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) =
1168 			HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv);
1169 	QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
1170 			hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
1171 						  rx_tlv);
1172 	QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) =
1173 			 HAL_RX_TLV_GET_TCP_SEQ(rx_tlv);
1174 	QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) =
1175 			 HAL_RX_TLV_GET_TCP_ACK(rx_tlv);
1176 	QDF_NBUF_CB_RX_TCP_WIN(msdu) =
1177 			 HAL_RX_TLV_GET_TCP_WIN(rx_tlv);
1178 	QDF_NBUF_CB_RX_TCP_PROTO(msdu) =
1179 			 HAL_RX_TLV_GET_TCP_PROTO(rx_tlv);
1180 	QDF_NBUF_CB_RX_IPV6_PROTO(msdu) =
1181 			 HAL_RX_TLV_GET_IPV6(rx_tlv);
1182 	QDF_NBUF_CB_RX_TCP_OFFSET(msdu) =
1183 			 HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv);
1184 	QDF_NBUF_CB_RX_FLOW_ID(msdu) =
1185 			 HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv);
1186 
1187 	dp_rx_print_offload_info(soc, rx_tlv);
1188 }
1189 #else
1190 static void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
1191 				qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
1192 {
1193 }
1194 #endif /* RECEIVE_OFFLOAD */
1195 
1196 /**
1197  * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
1198  *
1199  * @nbuf: pointer to msdu.
1200  * @mpdu_len: pointer to the remaining mpdu length, updated by this call
1201  *
1202  * Return: returns true if nbuf is the last msdu of the mpdu, else returns false.
1203  */
1204 static inline bool dp_rx_adjust_nbuf_len(qdf_nbuf_t nbuf, uint16_t *mpdu_len)
1205 {
1206 	bool last_nbuf;
1207 
1208 	if (*mpdu_len > (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN)) {
1209 		qdf_nbuf_set_pktlen(nbuf, RX_DATA_BUFFER_SIZE);
1210 		last_nbuf = false;
1211 	} else {
1212 		qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + RX_PKT_TLVS_LEN));
1213 		last_nbuf = true;
1214 	}
1215 
1216 	*mpdu_len -= (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN);
1217 
1218 	return last_nbuf;
1219 }
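/*
 * Worked example (illustrative only; the actual values of
 * RX_DATA_BUFFER_SIZE and RX_PKT_TLVS_LEN are configuration dependent):
 * assume a 2048-byte RX_DATA_BUFFER_SIZE and a 384-byte RX_PKT_TLVS_LEN,
 * i.e. 1664 usable payload bytes per nbuf, and an mpdu_len of 3000.
 *
 *	1st call: 3000 > 1664  -> pktlen = 2048, mpdu_len = 3000 - 1664 = 1336,
 *		  returns false (more nbufs follow)
 *	2nd call: 1336 <= 1664 -> pktlen = 1336 + 384 = 1720,
 *		  returns true (last nbuf of the mpdu)
 */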
1220 
1221 /**
1222  * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
1223  *		     multiple nbufs.
1224  * @nbuf: pointer to the first msdu of an amsdu.
1225  *
1226  * This function implements the creation of RX frag_list for cases
1227  * where an MSDU is spread across multiple nbufs.
1228  *
1229  * Return: returns the head nbuf which contains complete frag_list.
1230  */
1231 qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf)
1232 {
1233 	qdf_nbuf_t parent, frag_list, next = NULL;
1234 	uint16_t frag_list_len = 0;
1235 	uint16_t mpdu_len;
1236 	bool last_nbuf;
1237 
1238 	/*
1239 	 * Use the msdu len obtained from the REO entry descriptor instead,
1240 	 * since there are cases where the RX PKT TLV is corrupted while the
1241 	 * msdu_len from the REO descriptor is right for non-raw RX scatter msdus.
1242 	 */
1243 	mpdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
1244 	/*
1245 	 * this is a case where the complete msdu fits in one single nbuf.
1246 	 * in this case HW sets both start and end bit and we only need to
1247 	 * reset these bits for RAW mode simulator to decap the pkt
1248 	 */
1249 	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
1250 					qdf_nbuf_is_rx_chfrag_end(nbuf)) {
1251 		qdf_nbuf_set_pktlen(nbuf, mpdu_len + RX_PKT_TLVS_LEN);
1252 		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
1253 		return nbuf;
1254 	}
1255 
1256 	/*
1257 	 * This is a case where we have multiple msdus (A-MSDU) spread across
1258 	 * multiple nbufs. here we create a fraglist out of these nbufs.
1259 	 *
1260 	 * the moment we encounter a nbuf with continuation bit set we
1261 	 * know for sure we have an MSDU which is spread across multiple
1262 	 * nbufs. We loop through and reap nbufs till we reach last nbuf.
1263 	 */
1264 	parent = nbuf;
1265 	frag_list = nbuf->next;
1266 	nbuf = nbuf->next;
1267 
1268 	/*
1269 	 * set the start bit in the first nbuf we encounter with continuation
1270 	 * bit set. This has the proper mpdu length set as it is the first
1271 	 * msdu of the mpdu. this becomes the parent nbuf and the subsequent
1272 	 * nbufs will form the frag_list of the parent nbuf.
1273 	 */
1274 	qdf_nbuf_set_rx_chfrag_start(parent, 1);
1275 	last_nbuf = dp_rx_adjust_nbuf_len(parent, &mpdu_len);
1276 
1277 	/*
1278 	 * this is where we set the length of the fragments which are
1279 	 * associated to the parent nbuf. We iterate through the frag_list
1280 	 * till we hit the last_nbuf of the list.
1281 	 */
1282 	do {
1283 		last_nbuf = dp_rx_adjust_nbuf_len(nbuf, &mpdu_len);
1284 		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
1285 		frag_list_len += qdf_nbuf_len(nbuf);
1286 
1287 		if (last_nbuf) {
1288 			next = nbuf->next;
1289 			nbuf->next = NULL;
1290 			break;
1291 		}
1292 
1293 		nbuf = nbuf->next;
1294 	} while (!last_nbuf);
1295 
1296 	qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
1297 	qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
1298 	parent->next = next;
1299 
1300 	qdf_nbuf_pull_head(parent, RX_PKT_TLVS_LEN);
1301 	return parent;
1302 }
1303 
1304 #ifdef QCA_PEER_EXT_STATS
1305 /*
1306  * dp_rx_compute_tid_delay - Compute per TID delay stats
1307  * @stats: cdp_delay_tid_stats for the TID
1308  * @nbuf: NBuffer
1309  *
1310  * Return: Void
1311  */
1312 void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
1313 			     qdf_nbuf_t nbuf)
1314 {
1315 	struct cdp_delay_rx_stats  *rx_delay = &stats->rx_delay;
1316 	uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
1317 
1318 	dp_hist_update_stats(&rx_delay->to_stack_delay, to_stack);
1319 }
1320 #endif /* QCA_PEER_EXT_STATS */
1321 
1322 /**
1323  * dp_rx_compute_delay() - Compute and fill in all timestamps
1324  *				to pass in correct fields
1325  *
1326  * @vdev: vdev handle
1327  * @nbuf: rx buffer for which delay is computed
1329  * Return: none
1330  */
1331 void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1332 {
1333 	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
1334 	int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get());
1335 	uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
1336 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
1337 	uint32_t interframe_delay =
1338 		(uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp);
1339 
1340 	dp_update_delay_stats(vdev->pdev, to_stack, tid,
1341 			      CDP_DELAY_STATS_REAP_STACK, ring_id);
1342 	/*
1343 	 * Update interframe delay stats calculated at deliver_data_ol point.
1344 	 * Value of vdev->prev_rx_deliver_tstamp will be 0 for 1st frame, so
1345 	 * interframe delay will not be calculated correctly for the 1st frame.
1346 	 * On the other hand, this will help in avoiding an extra per packet check
1347 	 * of vdev->prev_rx_deliver_tstamp.
1348 	 */
1349 	dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
1350 			      CDP_DELAY_STATS_RX_INTERFRAME, ring_id);
1351 	vdev->prev_rx_deliver_tstamp = current_ts;
1352 }
1353 
1354 /**
1355  * dp_rx_drop_nbuf_list() - drop an nbuf list
1356  * @pdev: dp pdev reference
1357  * @buf_list: buffer list to be dropped
1358  *
1359  * Return: int (number of bufs dropped)
1360  */
1361 static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev,
1362 				       qdf_nbuf_t buf_list)
1363 {
1364 	struct cdp_tid_rx_stats *stats = NULL;
1365 	uint8_t tid = 0, ring_id = 0;
1366 	int num_dropped = 0;
1367 	qdf_nbuf_t buf, next_buf;
1368 
1369 	buf = buf_list;
1370 	while (buf) {
1371 		ring_id = QDF_NBUF_CB_RX_CTX_ID(buf);
1372 		next_buf = qdf_nbuf_queue_next(buf);
1373 		tid = qdf_nbuf_get_tid_val(buf);
1374 		if (qdf_likely(pdev)) {
1375 			stats = &pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
1376 			stats->fail_cnt[INVALID_PEER_VDEV]++;
1377 			stats->delivered_to_stack--;
1378 		}
1379 		qdf_nbuf_free(buf);
1380 		buf = next_buf;
1381 		num_dropped++;
1382 	}
1383 
1384 	return num_dropped;
1385 }
1386 
1387 #ifdef PEER_CACHE_RX_PKTS
1388 /**
1389  * dp_rx_flush_rx_cached() - flush cached rx frames
1390  * @peer: peer
1391  * @drop: flag to drop frames or forward to net stack
1392  *
1393  * Return: None
1394  */
1395 void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
1396 {
1397 	struct dp_peer_cached_bufq *bufqi;
1398 	struct dp_rx_cached_buf *cache_buf = NULL;
1399 	ol_txrx_rx_fp data_rx = NULL;
1400 	int num_buff_elem;
1401 	QDF_STATUS status;
1402 
1403 	if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
1404 		qdf_atomic_dec(&peer->flush_in_progress);
1405 		return;
1406 	}
1407 
1408 	qdf_spin_lock_bh(&peer->peer_info_lock);
1409 	if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx)
1410 		data_rx = peer->vdev->osif_rx;
1411 	else
1412 		drop = true;
1413 	qdf_spin_unlock_bh(&peer->peer_info_lock);
1414 
1415 	bufqi = &peer->bufq_info;
1416 
1417 	qdf_spin_lock_bh(&bufqi->bufq_lock);
1418 	qdf_list_remove_front(&bufqi->cached_bufq,
1419 			      (qdf_list_node_t **)&cache_buf);
1420 	while (cache_buf) {
1421 		num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(
1422 								cache_buf->buf);
1423 		bufqi->entries -= num_buff_elem;
1424 		qdf_spin_unlock_bh(&bufqi->bufq_lock);
1425 		if (drop) {
1426 			bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1427 							      cache_buf->buf);
1428 		} else {
1429 			/* Flush the cached frames to OSIF DEV */
1430 			status = data_rx(peer->vdev->osif_vdev, cache_buf->buf);
1431 			if (status != QDF_STATUS_SUCCESS)
1432 				bufqi->dropped = dp_rx_drop_nbuf_list(
1433 							peer->vdev->pdev,
1434 							cache_buf->buf);
1435 		}
1436 		qdf_mem_free(cache_buf);
1437 		cache_buf = NULL;
1438 		qdf_spin_lock_bh(&bufqi->bufq_lock);
1439 		qdf_list_remove_front(&bufqi->cached_bufq,
1440 				      (qdf_list_node_t **)&cache_buf);
1441 	}
1442 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
1443 	qdf_atomic_dec(&peer->flush_in_progress);
1444 }
1445 
1446 /**
1447  * dp_rx_enqueue_rx() - cache rx frames
1448  * @peer: peer
1449  * @rx_buf_list: cache buffer list
1450  *
1451  * Return: QDF_STATUS
1452  */
1453 static QDF_STATUS
1454 dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
1455 {
1456 	struct dp_rx_cached_buf *cache_buf;
1457 	struct dp_peer_cached_bufq *bufqi = &peer->bufq_info;
1458 	int num_buff_elem;
1459 
1460 	dp_debug_rl("bufq->curr %d bufq->drops %d", bufqi->entries,
1461 		    bufqi->dropped);
1462 	if (!peer->valid) {
1463 		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1464 						      rx_buf_list);
1465 		return QDF_STATUS_E_INVAL;
1466 	}
1467 
1468 	qdf_spin_lock_bh(&bufqi->bufq_lock);
1469 	if (bufqi->entries >= bufqi->thresh) {
1470 		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1471 						      rx_buf_list);
1472 		qdf_spin_unlock_bh(&bufqi->bufq_lock);
1473 		return QDF_STATUS_E_RESOURCES;
1474 	}
1475 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
1476 
1477 	num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list);
1478 
1479 	cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf));
1480 	if (!cache_buf) {
1481 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1482 			  "Failed to allocate buf to cache rx frames");
1483 		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1484 						      rx_buf_list);
1485 		return QDF_STATUS_E_NOMEM;
1486 	}
1487 
1488 	cache_buf->buf = rx_buf_list;
1489 
1490 	qdf_spin_lock_bh(&bufqi->bufq_lock);
1491 	qdf_list_insert_back(&bufqi->cached_bufq,
1492 			     &cache_buf->node);
1493 	bufqi->entries += num_buff_elem;
1494 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
1495 
1496 	return QDF_STATUS_SUCCESS;
1497 }
1498 
1499 static inline
1500 bool dp_rx_is_peer_cache_bufq_supported(void)
1501 {
1502 	return true;
1503 }
1504 #else
1505 static inline
1506 bool dp_rx_is_peer_cache_bufq_supported(void)
1507 {
1508 	return false;
1509 }
1510 
1511 static inline QDF_STATUS
1512 dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
1513 {
1514 	return QDF_STATUS_SUCCESS;
1515 }
1516 #endif
1517 
1518 #ifndef DELIVERY_TO_STACK_STATUS_CHECK
1519 /**
1520  * dp_rx_check_delivery_to_stack() - Deliver pkts to network
1521  * using the appropriate call back functions.
1522  * @soc: soc
1523  * @vdev: vdev
1524  * @peer: peer
1525  * @nbuf_head: skb list head
1527  *
1528  * Return: None
1529  */
1530 static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
1531 					  struct dp_vdev *vdev,
1532 					  struct dp_peer *peer,
1533 					  qdf_nbuf_t nbuf_head)
1534 {
1535 	/* Function pointer initialized only when FISA is enabled */
1536 	if (vdev->osif_fisa_rx)
1537 		/* on failure send it via regular path */
1538 		vdev->osif_fisa_rx(soc, vdev, nbuf_head);
1539 	else
1540 		vdev->osif_rx(vdev->osif_vdev, nbuf_head);
1541 }
1542 
1543 #else
1544 /**
1545  * dp_rx_check_delivery_to_stack() - Deliver pkts to network
1546  * using the appropriate call back functions.
1547  * @soc: soc
1548  * @vdev: vdev
1549  * @peer: peer
1550  * @nbuf_head: skb list head
1552  *
1553  * Check the return status of the call back function and drop
1554  * the packets if the return status indicates a failure.
1555  *
1556  * Return: None
1557  */
1558 static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
1559 					  struct dp_vdev *vdev,
1560 					  struct dp_peer *peer,
1561 					  qdf_nbuf_t nbuf_head)
1562 {
1563 	int num_nbuf = 0;
1564 	QDF_STATUS ret_val = QDF_STATUS_E_FAILURE;
1565 
1566 	/* Function pointer initialized only when FISA is enabled */
1567 	if (vdev->osif_fisa_rx)
1568 		/* on failure send it via regular path */
1569 		ret_val = vdev->osif_fisa_rx(soc, vdev, nbuf_head);
1570 	else if (vdev->osif_rx)
1571 		ret_val = vdev->osif_rx(vdev->osif_vdev, nbuf_head);
1572 
1573 	if (!QDF_IS_STATUS_SUCCESS(ret_val)) {
1574 		num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);
1575 		DP_STATS_INC(soc, rx.err.rejected, num_nbuf);
1576 		if (peer)
1577 			DP_STATS_DEC(peer, rx.to_stack.num, num_nbuf);
1578 	}
1579 }
1580 #endif /* ifdef DELIVERY_TO_STACK_STATUS_CHECK */
1581 
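/**
 * dp_rx_deliver_to_stack() - deliver an nbuf list to the OS interface
 * @soc: core txrx main context
 * @vdev: vdev on which the pkts were received
 * @peer: peer to which the pkts belong
 * @nbuf_head: head of the nbuf list
 * @nbuf_tail: tail of the nbuf list
 *
 * Drops the list if the vdev is invalid or pending deletion, caches or drops
 * it if no rx callback is registered, runs RAW/native-wifi decap simulation
 * where applicable, and finally hands the list to the stack via
 * dp_rx_check_delivery_to_stack().
 *
 * Return: None
 */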
1582 void dp_rx_deliver_to_stack(struct dp_soc *soc,
1583 			    struct dp_vdev *vdev,
1584 			    struct dp_peer *peer,
1585 			    qdf_nbuf_t nbuf_head,
1586 			    qdf_nbuf_t nbuf_tail)
1587 {
1588 	int num_nbuf = 0;
1589 
1590 	if (qdf_unlikely(!vdev || vdev->delete.pending)) {
1591 		num_nbuf = dp_rx_drop_nbuf_list(NULL, nbuf_head);
1592 		/*
1593 		 * This is a special case where vdev is invalid,
1594 		 * so we cannot know the pdev to which this packet
1595 		 * belonged. Hence we update the soc rx error stats.
1596 		 */
1597 		DP_STATS_INC(soc, rx.err.invalid_vdev, num_nbuf);
1598 		return;
1599 	}
1600 
1601 	/*
1602 	 * It is highly unlikely to have a vdev without a registered rx
1603 	 * callback function. If so, let us free the nbuf_list.
1604 	 */
1605 	if (qdf_unlikely(!vdev->osif_rx)) {
1606 		if (peer && dp_rx_is_peer_cache_bufq_supported()) {
1607 			dp_rx_enqueue_rx(peer, nbuf_head);
1608 		} else {
1609 			num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev,
1610 							nbuf_head);
1611 			DP_STATS_DEC(peer, rx.to_stack.num, num_nbuf);
1612 		}
1613 		return;
1614 	}
1615 
1616 	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
1617 			(vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
1618 		vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
1619 				&nbuf_tail, peer->mac_addr.raw);
1620 	}
1621 
1622 	dp_rx_check_delivery_to_stack(soc, vdev, peer, nbuf_head);
1623 }
1624 
1625 /**
1626  * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
1627  * @nbuf: pointer to the first msdu of an amsdu.
1628  * @rx_tlv_hdr: pointer to the start of RX TLV headers.
1629  *
1630  * The ipsumed field of the skb is set based on whether HW validated the
1631  * IP/TCP/UDP checksum.
1632  *
1633  * Return: void
1634  */
1635 static inline void dp_rx_cksum_offload(struct dp_pdev *pdev,
1636 				       qdf_nbuf_t nbuf,
1637 				       uint8_t *rx_tlv_hdr)
1638 {
1639 	qdf_nbuf_rx_cksum_t cksum = {0};
1640 	bool ip_csum_err = hal_rx_attn_ip_cksum_fail_get(rx_tlv_hdr);
1641 	bool tcp_udp_csum_er = hal_rx_attn_tcp_udp_cksum_fail_get(rx_tlv_hdr);
1642 
1643 	if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) {
1644 		cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
1645 		qdf_nbuf_set_rx_cksum(nbuf, &cksum);
1646 	} else {
1647 		DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err);
1648 		DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er);
1649 	}
1650 }
1651 
1652 #ifdef VDEV_PEER_PROTOCOL_COUNT
1653 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, peer) \
1654 { \
1655 	qdf_nbuf_t nbuf_local; \
1656 	struct dp_peer *peer_local; \
1657 	struct dp_vdev *vdev_local = vdev_hdl; \
1658 	do { \
1659 		if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \
1660 			break; \
1661 		nbuf_local = nbuf; \
1662 		peer_local = peer; \
1663 		if (qdf_unlikely(qdf_nbuf_is_frag((nbuf_local)))) \
1664 			break; \
1665 		else if (qdf_unlikely(qdf_nbuf_is_raw_frame((nbuf_local)))) \
1666 			break; \
1667 		dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \
1668 						       (nbuf_local), \
1669 						       (peer_local), 0, 1); \
1670 	} while (0); \
1671 }
1672 #else
1673 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, peer)
1674 #endif
1675 
1676 /**
1677  * dp_rx_msdu_stats_update() - update per msdu stats.
1678  * @soc: core txrx main context
1679  * @nbuf: pointer to the first msdu of an amsdu.
1680  * @rx_tlv_hdr: pointer to the start of RX TLV headers.
1681  * @peer: pointer to the peer object.
1682  * @ring_id: reo dest ring number on which pkt is reaped.
1683  * @tid_stats: per tid rx stats.
1684  *
1685  * update all the per msdu stats for that nbuf.
1686  * Return: void
1687  */
1688 static void dp_rx_msdu_stats_update(struct dp_soc *soc,
1689 				    qdf_nbuf_t nbuf,
1690 				    uint8_t *rx_tlv_hdr,
1691 				    struct dp_peer *peer,
1692 				    uint8_t ring_id,
1693 				    struct cdp_tid_rx_stats *tid_stats)
1694 {
1695 	bool is_ampdu, is_not_amsdu;
1696 	uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
1697 	struct dp_vdev *vdev = peer->vdev;
1698 	qdf_ether_header_t *eh;
1699 	uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
1700 
1701 	dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, peer);
1702 	is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
1703 			qdf_nbuf_is_rx_chfrag_end(nbuf);
1704 
1705 	DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1, msdu_len);
1706 	DP_STATS_INCC(peer, rx.non_amsdu_cnt, 1, is_not_amsdu);
1707 	DP_STATS_INCC(peer, rx.amsdu_cnt, 1, !is_not_amsdu);
1708 	DP_STATS_INCC(peer, rx.rx_retries, 1, qdf_nbuf_is_rx_retry_flag(nbuf));
1709 
1710 	tid_stats->msdu_cnt++;
1711 	if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) &&
1712 			 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
1713 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1714 		DP_STATS_INC_PKT(peer, rx.multicast, 1, msdu_len);
1715 		tid_stats->mcast_msdu_cnt++;
1716 		if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
1717 			DP_STATS_INC_PKT(peer, rx.bcast, 1, msdu_len);
1718 			tid_stats->bcast_msdu_cnt++;
1719 		}
1720 	}
1721 
1722 	/*
1723 	 * currently we can return from here as we have similar stats
1724 	 * updated at per ppdu level instead of msdu level
1725 	 */
1726 	if (!soc->process_rx_status)
1727 		return;
1728 
1729 	is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(rx_tlv_hdr);
1730 	DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu);
1731 	DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu));
1732 
1733 	sgi = hal_rx_msdu_start_sgi_get(rx_tlv_hdr);
1734 	mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
1735 	tid = qdf_nbuf_get_tid_val(nbuf);
1736 	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
1737 	reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc,
1738 							      rx_tlv_hdr);
1739 	nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
1740 	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
1741 
1742 	DP_STATS_INCC(peer, rx.rx_mpdu_cnt[mcs], 1,
1743 		      ((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
1744 	DP_STATS_INCC(peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1,
1745 		      ((mcs >= MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
1746 	DP_STATS_INC(peer, rx.bw[bw], 1);
1747 	/*
1748 	 * only if nss > 0 and pkt_type is 11N/AC/AX,
1749 	 * then increase index [nss - 1] in array counter.
1750 	 */
1751 	if (nss > 0 && (pkt_type == DOT11_N ||
1752 			pkt_type == DOT11_AC ||
1753 			pkt_type == DOT11_AX))
1754 		DP_STATS_INC(peer, rx.nss[nss - 1], 1);
1755 
1756 	DP_STATS_INC(peer, rx.sgi_count[sgi], 1);
1757 	DP_STATS_INCC(peer, rx.err.mic_err, 1,
1758 		      hal_rx_mpdu_end_mic_err_get(rx_tlv_hdr));
1759 	DP_STATS_INCC(peer, rx.err.decrypt_err, 1,
1760 		      hal_rx_mpdu_end_decrypt_err_get(rx_tlv_hdr));
1761 
1762 	DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
1763 	DP_STATS_INC(peer, rx.reception_type[reception_type], 1);
1764 
1765 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1766 		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
1767 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1768 		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A)));
1769 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1770 		      ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
1771 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1772 		      ((mcs <= MAX_MCS_11B) && (pkt_type == DOT11_B)));
1773 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1774 		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
1775 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1776 		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_N)));
1777 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1778 		      ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
1779 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1780 		      ((mcs <= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
1781 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1782 		      ((mcs >= MAX_MCS) && (pkt_type == DOT11_AX)));
1783 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1784 		      ((mcs < MAX_MCS) && (pkt_type == DOT11_AX)));
1785 
1786 	if ((soc->process_rx_status) &&
1787 	    hal_rx_attn_first_mpdu_get(rx_tlv_hdr)) {
1788 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
1789 		if (!vdev->pdev)
1790 			return;
1791 
1792 		dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
1793 				     &peer->stats, peer->peer_id,
1794 				     UPDATE_PEER_STATS,
1795 				     vdev->pdev->pdev_id);
1796 #endif
1797 
1798 	}
1799 }
1800 
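/**
 * is_sa_da_idx_valid() - check if the SA/DA AST indices reported by HW
 *			  are within the configured maximum AST index
 * @soc: core txrx main context
 * @rx_tlv_hdr: pointer to the start of RX TLV headers
 * @nbuf: pkt buffer
 * @msdu_info: msdu metadata parsed from the RX TLVs
 *
 * Return: false if sa_idx (or da_idx, for a valid non-mcbc DA) is out of
 *	   bounds, true otherwise
 */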
1801 static inline bool is_sa_da_idx_valid(struct dp_soc *soc,
1802 				      uint8_t *rx_tlv_hdr,
1803 				      qdf_nbuf_t nbuf,
1804 				      struct hal_rx_msdu_metadata msdu_info)
1805 {
1806 	if ((qdf_nbuf_is_sa_valid(nbuf) &&
1807 	    (msdu_info.sa_idx > wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) ||
1808 	    (!qdf_nbuf_is_da_mcbc(nbuf) &&
1809 	     qdf_nbuf_is_da_valid(nbuf) &&
1810 	     (msdu_info.da_idx > wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))))
1811 		return false;
1812 
1813 	return true;
1814 }
1815 
1816 #ifndef WDS_VENDOR_EXTENSION
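/*
 * Without the WDS vendor extension, the rx policy check is a no-op and
 * every frame is allowed through.
 */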
1817 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
1818 			   struct dp_vdev *vdev,
1819 			   struct dp_peer *peer)
1820 {
1821 	return 1;
1822 }
1823 #endif
1824 
1825 #ifdef RX_DESC_DEBUG_CHECK
1826 /**
1827  * dp_rx_desc_nbuf_sanity_check - Add sanity check to catch REO rx_desc paddr
1828  *				  corruption
1829  *
1830  * @ring_desc: REO ring descriptor
1831  * @rx_desc: Rx descriptor
1832  *
1833  * Return: QDF_STATUS_SUCCESS if paddr is sane, else QDF_STATUS_E_FAILURE
1834  */
1835 static inline
1836 QDF_STATUS dp_rx_desc_nbuf_sanity_check(hal_ring_desc_t ring_desc,
1837 					struct dp_rx_desc *rx_desc)
1838 {
1839 	struct hal_buf_info hbi;
1840 
1841 	hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
1842 	/* Sanity check for possible buffer paddr corruption */
1843 	if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr))
1844 		return QDF_STATUS_SUCCESS;
1845 
1846 	return QDF_STATUS_E_FAILURE;
1847 }
1848 #else
1849 static inline
1850 QDF_STATUS dp_rx_desc_nbuf_sanity_check(hal_ring_desc_t ring_desc,
1851 					struct dp_rx_desc *rx_desc)
1852 {
1853 	return QDF_STATUS_SUCCESS;
1854 }
1855 #endif
1856 
1857 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
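/*
 * dp_rx_reap_loop_pkt_limit_hit() - returns true once the number of buffers
 * reaped in the current loop reaches the configured rx_reap_loop_pkt_limit,
 * so that dp_rx_process() can break out and bound the time spent in one
 * softirq invocation.
 */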
1858 static inline
1859 bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
1860 {
1861 	bool limit_hit = false;
1862 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
1863 
1864 	limit_hit =
1865 		(num_reaped >= cfg->rx_reap_loop_pkt_limit) ? true : false;
1866 
1867 	if (limit_hit)
1868 		DP_STATS_INC(soc, rx.reap_loop_pkt_limit_hit, 1)
1869 
1870 	return limit_hit;
1871 }
1872 
1873 static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
1874 {
1875 	return soc->wlan_cfg_ctx->rx_enable_eol_data_check;
1876 }
1877 
1878 #else
1879 static inline
1880 bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
1881 {
1882 	return false;
1883 }
1884 
1885 static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
1886 {
1887 	return false;
1888 }
1889 
1890 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
1891 
1892 #ifdef DP_RX_PKT_NO_PEER_DELIVER
1893 /**
1894  * dp_rx_deliver_to_stack_no_peer() - try to deliver rx data even if
1895  *				      no corresponding peer is found
1896  * @soc: core txrx main context
1897  * @nbuf: pkt skb pointer
1898  *
1899  * This function tries to deliver some special RX frames to the stack
1900  * even when no matching peer is found. For instance, in the LFR case,
1901  * some EAPOL data may be sent to the host before peer_map is done.
1902  *
1903  * Return: None
1904  */
1905 static
1906 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
1907 {
1908 	uint16_t peer_id;
1909 	uint8_t vdev_id;
1910 	struct dp_vdev *vdev;
1911 	uint32_t l2_hdr_offset = 0;
1912 	uint16_t msdu_len = 0;
1913 	uint32_t pkt_len = 0;
1914 	uint8_t *rx_tlv_hdr;
1915 	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
1916 				FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;
1917 
1918 	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
1919 	if (peer_id > soc->max_peers)
1920 		goto deliver_fail;
1921 
1922 	vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
1923 	vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
1924 	if (!vdev || vdev->delete.pending || !vdev->osif_rx)
1925 		goto deliver_fail;
1926 
1927 	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf)))
1928 		goto deliver_fail;
1929 
1930 	rx_tlv_hdr = qdf_nbuf_data(nbuf);
1931 	l2_hdr_offset =
1932 		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
1933 
1934 	msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
1935 	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;
1936 	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
1937 
1938 	qdf_nbuf_set_pktlen(nbuf, pkt_len);
1939 	qdf_nbuf_pull_head(nbuf,
1940 			   RX_PKT_TLVS_LEN +
1941 			   l2_hdr_offset);
1942 
1943 	if (dp_rx_is_special_frame(nbuf, frame_mask)) {
1944 		qdf_nbuf_set_exc_frame(nbuf, 1);
1945 		if (QDF_STATUS_SUCCESS !=
1946 		    vdev->osif_rx(vdev->osif_vdev, nbuf))
1947 			goto deliver_fail;
1948 		DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1);
1949 		return;
1950 	}
1951 
1952 deliver_fail:
1953 	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1954 			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
1955 	qdf_nbuf_free(nbuf);
1956 }
1957 #else
1958 static inline
1959 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
1960 {
1961 	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1962 			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
1963 	qdf_nbuf_free(nbuf);
1964 }
1965 #endif
1966 
1967 /**
1968  * dp_rx_srng_get_num_pending() - get number of pending entries
1969  * @hal_soc: hal soc opaque pointer
1970  * @hal_ring_hdl: opaque pointer to the HAL Rx Ring
1971  * @num_entries: number of entries in the hal_ring.
1972  * @near_full: pointer to a boolean. This is set if ring is near full.
1973  *
1974  * The function returns the number of entries in a destination ring which are
1975  * yet to be reaped. The function also checks if the ring is near full.
1976  * If more than half of the ring needs to be reaped, the ring is considered
1977  * approaching full.
1978  * The function uses hal_srng_dst_num_valid_locked to get the number of valid
1979  * entries. It should not be called within an SRNG lock. The HW pointer value is
1980  * synced into cached_hp.
1981  *
1982  * Return: Number of pending entries if any
1983  */
1984 static
1985 uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
1986 				    hal_ring_handle_t hal_ring_hdl,
1987 				    uint32_t num_entries,
1988 				    bool *near_full)
1989 {
1990 	uint32_t num_pending = 0;
1991 
1992 	num_pending = hal_srng_dst_num_valid_locked(hal_soc,
1993 						    hal_ring_hdl,
1994 						    true);
1995 
1996 	if (num_entries && (num_pending >= num_entries >> 1))
1997 		*near_full = true;
1998 	else
1999 		*near_full = false;
2000 
2001 	return num_pending;
2002 }
2003 
2004 #ifdef WLAN_SUPPORT_RX_FISA
2005 void dp_rx_skip_tlvs(qdf_nbuf_t nbuf, uint32_t l3_padding)
2006 {
2007 	QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
2008 	qdf_nbuf_pull_head(nbuf, l3_padding + RX_PKT_TLVS_LEN);
2009 }
2010 
2011 /**
2012  * dp_rx_set_hdr_pad() - set l3 padding in nbuf cb
2013  * @nbuf: pkt skb pointer
2014  * @l3_padding: l3 padding
2015  *
2016  * Return: None
2017  */
2018 static inline
2019 void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
2020 {
2021 	QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
2022 }
2023 #else
2024 void dp_rx_skip_tlvs(qdf_nbuf_t nbuf, uint32_t l3_padding)
2025 {
2026 	qdf_nbuf_pull_head(nbuf, l3_padding + RX_PKT_TLVS_LEN);
2027 }
2028 
2029 static inline
2030 void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
2031 {
2032 }
2033 #endif
2034 
2035 #ifdef DP_RX_DROP_RAW_FRM
2036 /**
2037  * dp_rx_is_raw_frame_dropped() - if raw frame nbuf, free and drop
2038  * @nbuf: pkt skb pointer
2039  *
2040  * Return: true - raw frame, dropped
2041  *	   false - not raw frame, do nothing
2042  */
2043 static inline
2044 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)
2045 {
2046 	if (qdf_nbuf_is_raw_frame(nbuf)) {
2047 		qdf_nbuf_free(nbuf);
2048 		return true;
2049 	}
2050 
2051 	return false;
2052 }
2053 #else
2054 static inline
2055 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)
2056 {
2057 	return false;
2058 }
2059 #endif
2060 
2061 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
2062 /**
2063  * dp_rx_ring_record_entry() - Record an entry into the rx ring history.
2064  * @soc: Datapath soc structure
2065  * @ring_num: REO ring number
2066  * @ring_desc: REO ring descriptor
2067  *
2068  * Return: None
2069  */
2070 static inline void
2071 dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
2072 			hal_ring_desc_t ring_desc)
2073 {
2074 	struct dp_buf_info_record *record;
2075 	uint8_t rbm;
2076 	struct hal_buf_info hbi;
2077 	uint32_t idx;
2078 
2079 	if (qdf_unlikely(!soc->rx_ring_history[ring_num]))
2080 		return;
2081 
2082 	hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
2083 	rbm = hal_rx_ret_buf_manager_get(ring_desc);
2084 
2085 	idx = dp_history_get_next_index(&soc->rx_ring_history[ring_num]->index,
2086 					DP_RX_HIST_MAX);
2087 
2088 	/* No NULL check needed for record since it's an array */
2089 	record = &soc->rx_ring_history[ring_num]->entry[idx];
2090 
2091 	record->timestamp = qdf_get_log_timestamp();
2092 	record->hbi.paddr = hbi.paddr;
2093 	record->hbi.sw_cookie = hbi.sw_cookie;
2094 	record->hbi.rbm = rbm;
2095 }
2096 #else
2097 static inline void
2098 dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
2099 			hal_ring_desc_t ring_desc)
2100 {
2101 }
2102 #endif
2103 
2104 /**
2105  * dp_rx_process() - Brain of the Rx processing functionality
2106  *		     Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
2107  * @int_ctx: per interrupt context
2108  * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
2109  * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring.
2110  * @quota: No. of units (packets) that can be serviced in one shot.
2111  *
2112  * This function implements the core of Rx functionality. This is
2113  * expected to handle only non-error frames.
2114  *
2115  * Return: uint32_t: No. of elements processed
2116  */
2117 uint32_t dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
2118 			    uint8_t reo_ring_num, uint32_t quota)
2119 {
2120 	hal_ring_desc_t ring_desc;
2121 	hal_soc_handle_t hal_soc;
2122 	struct dp_rx_desc *rx_desc = NULL;
2123 	qdf_nbuf_t nbuf, next;
2124 	bool near_full;
2125 	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT];
2126 	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT];
2127 	uint32_t num_pending;
2128 	uint32_t rx_bufs_used = 0, rx_buf_cookie;
2129 	uint16_t msdu_len = 0;
2130 	uint16_t peer_id;
2131 	uint8_t vdev_id;
2132 	struct dp_peer *peer;
2133 	struct dp_vdev *vdev;
2134 	uint32_t pkt_len = 0;
2135 	struct hal_rx_mpdu_desc_info mpdu_desc_info;
2136 	struct hal_rx_msdu_desc_info msdu_desc_info;
2137 	enum hal_reo_error_status error;
2138 	uint32_t peer_mdata;
2139 	uint8_t *rx_tlv_hdr;
2140 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT];
2141 	uint8_t mac_id = 0;
2142 	struct dp_pdev *rx_pdev;
2143 	struct dp_srng *dp_rxdma_srng;
2144 	struct rx_desc_pool *rx_desc_pool;
2145 	struct dp_soc *soc = int_ctx->soc;
2146 	uint8_t ring_id = 0;
2147 	uint8_t core_id = 0;
2148 	struct cdp_tid_rx_stats *tid_stats;
2149 	qdf_nbuf_t nbuf_head;
2150 	qdf_nbuf_t nbuf_tail;
2151 	qdf_nbuf_t deliver_list_head;
2152 	qdf_nbuf_t deliver_list_tail;
2153 	uint32_t num_rx_bufs_reaped = 0;
2154 	uint32_t intr_id;
2155 	struct hif_opaque_softc *scn;
2156 	int32_t tid = 0;
2157 	bool is_prev_msdu_last = true;
2158 	uint32_t num_entries_avail = 0;
2159 	uint32_t rx_ol_pkt_cnt = 0;
2160 	uint32_t num_entries = 0;
2161 	struct hal_rx_msdu_metadata msdu_metadata;
2162 	QDF_STATUS status;
2163 	qdf_nbuf_t ebuf_head;
2164 	qdf_nbuf_t ebuf_tail;
2165 
2166 	DP_HIST_INIT();
2167 
2168 	qdf_assert_always(soc && hal_ring_hdl);
2169 	hal_soc = soc->hal_soc;
2170 	qdf_assert_always(hal_soc);
2171 
2172 	scn = soc->hif_handle;
2173 	hif_pm_runtime_mark_dp_rx_busy(scn);
2174 	intr_id = int_ctx->dp_intr_id;
2175 	num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);
2176 
2177 more_data:
2178 	/* reset local variables here to be re-used in the function */
2179 	nbuf_head = NULL;
2180 	nbuf_tail = NULL;
2181 	deliver_list_head = NULL;
2182 	deliver_list_tail = NULL;
2183 	peer = NULL;
2184 	vdev = NULL;
2185 	num_rx_bufs_reaped = 0;
2186 	ebuf_head = NULL;
2187 	ebuf_tail = NULL;
2188 
2189 	qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
2190 	qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
2191 	qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
2192 	qdf_mem_zero(head, sizeof(head));
2193 	qdf_mem_zero(tail, sizeof(tail));
2194 
2195 	if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
2196 
2197 		/*
2198 		 * Need API to convert from hal_ring pointer to
2199 		 * Ring Type / Ring Id combo
2200 		 */
2201 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
2202 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2203 			FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
2204 		goto done;
2205 	}
2206 
2207 	/*
2208 	 * start reaping the buffers from reo ring and queue
2209 	 * them in per vdev queue.
2210 	 * Process the received pkts in a different per vdev loop.
2211 	 */
2212 	while (qdf_likely(quota &&
2213 			  (ring_desc = hal_srng_dst_peek(hal_soc,
2214 							 hal_ring_hdl)))) {
2215 
2216 		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
2217 		ring_id = hal_srng_ring_id_get(hal_ring_hdl);
2218 
2219 		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
2220 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2221 			FL("HAL RING 0x%pK:error %d"), hal_ring_hdl, error);
2222 			DP_STATS_INC(soc, rx.err.hal_reo_error[ring_id], 1);
2223 			/* Don't know how to deal with this -- assert */
2224 			qdf_assert(0);
2225 		}
2226 
2227 		dp_rx_ring_record_entry(soc, reo_ring_num, ring_desc);
2228 		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
2229 		status = dp_rx_cookie_check_and_invalidate(ring_desc);
2230 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
2231 			DP_STATS_INC(soc, rx.err.stale_cookie, 1);
2232 			break;
2233 		}
2234 
2235 		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
2236 		status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl,
2237 					   ring_desc, rx_desc);
2238 		if (QDF_IS_STATUS_ERROR(status)) {
2239 			if (qdf_unlikely(rx_desc && rx_desc->nbuf)) {
2240 				qdf_assert_always(rx_desc->unmapped);
2241 				dp_ipa_handle_rx_buf_smmu_mapping(
2242 							soc,
2243 							rx_desc->nbuf,
2244 							RX_DATA_BUFFER_SIZE,
2245 							false);
2246 				qdf_nbuf_unmap_nbytes_single(
2247 							soc->osdev,
2248 							rx_desc->nbuf,
2249 							QDF_DMA_FROM_DEVICE,
2250 							RX_DATA_BUFFER_SIZE);
2251 				rx_desc->unmapped = 1;
2252 				dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
2253 							    rx_desc->pool_id);
2254 				dp_rx_add_to_free_desc_list(
2255 							&head[rx_desc->pool_id],
2256 							&tail[rx_desc->pool_id],
2257 							rx_desc);
2258 			}
2259 			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
2260 			continue;
2261 		}
2262 
2263 		/*
2264 		 * This is an unlikely scenario where the host is reaping
2265 		 * a descriptor which it already reaped just a while ago
2266 		 * but is yet to replenish it back to HW.
2267 		 * In this case the host will dump the last 128 descriptors
2268 		 * including the software descriptor rx_desc and assert.
2269 		 */
2270 
2271 		if (qdf_unlikely(!rx_desc->in_use)) {
2272 			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
2273 			dp_info_rl("Reaping rx_desc not in use!");
2274 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
2275 						   ring_desc, rx_desc);
2276 			/* ignore duplicate RX desc and continue to process */
2277 			/* Pop out the descriptor */
2278 			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
2279 			continue;
2280 		}
2281 
2282 		status = dp_rx_desc_nbuf_sanity_check(ring_desc, rx_desc);
2283 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
2284 			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
2285 			rx_desc->in_err_state = 1;
2286 			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
2287 			continue;
2288 		}
2289 
2290 		if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
2291 			dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
2292 			DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
2293 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
2294 						   ring_desc, rx_desc);
2295 		}
2296 
2297 		/* Get MPDU DESC info */
2298 		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);
2299 
2300 		/* Get MSDU DESC info */
2301 		hal_rx_msdu_desc_info_get(ring_desc, &msdu_desc_info);
2302 
2303 		if (qdf_unlikely(msdu_desc_info.msdu_flags &
2304 				 HAL_MSDU_F_MSDU_CONTINUATION)) {
2305 			/* previous msdu has end bit set, so current one is
2306 			 * the new MPDU
2307 			 */
2308 			if (is_prev_msdu_last) {
2309 				/* Get number of entries available in HW ring */
2310 				num_entries_avail =
2311 				hal_srng_dst_num_valid(hal_soc,
2312 						       hal_ring_hdl, 1);
2313 
2314 				/* For new MPDU check if we can read complete
2315 				 * MPDU by comparing the number of buffers
2316 				 * available and number of buffers needed to
2317 				 * reap this MPDU
2318 				 */
2319 				if (((msdu_desc_info.msdu_len /
2320 				     (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN) +
2321 				     1)) > num_entries_avail) {
2322 					DP_STATS_INC(
2323 						soc,
2324 						rx.msdu_scatter_wait_break,
2325 						1);
2326 					break;
2327 				}
2328 				is_prev_msdu_last = false;
2329 			}
2330 
2331 		}
2332 
2333 		core_id = smp_processor_id();
2334 		DP_STATS_INC(soc, rx.ring_packets[core_id][ring_id], 1);
2335 
2336 		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
2337 			qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1);
2338 
2339 		if (qdf_unlikely(mpdu_desc_info.mpdu_flags &
2340 				 HAL_MPDU_F_RAW_AMPDU))
2341 			qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);
2342 
2343 		if (!is_prev_msdu_last &&
2344 		    msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
2345 			is_prev_msdu_last = true;
2346 
2347 		/* Pop out the descriptor*/
2348 		hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
2349 
2350 		rx_bufs_reaped[rx_desc->pool_id]++;
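		/*
		 * Stash the peer id and vdev id extracted from the REO
		 * peer metadata into nbuf->cb; the per-nbuf loop below
		 * uses them to look up the peer and vdev.
		 */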
2351 		peer_mdata = mpdu_desc_info.peer_meta_data;
2352 		QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
2353 			DP_PEER_METADATA_PEER_ID_GET(peer_mdata);
2354 		QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) =
2355 			DP_PEER_METADATA_VDEV_ID_GET(peer_mdata);
2356 
2357 		/*
2358 		 * Save the first, last and continuation msdu flags in
2359 		 * nbuf->cb; also save mcbc, is_da_valid, is_sa_valid and
2360 		 * the length in nbuf->cb. This ensures the info required
2361 		 * for per-pkt processing is always in the same cache line,
2362 		 * which helps improve throughput for smaller pkt
2363 		 * sizes.
2364 		 */
2365 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
2366 			qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);
2367 
2368 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
2369 			qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);
2370 
2371 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
2372 			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);
2373 
2374 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
2375 			qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1);
2376 
2377 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
2378 			qdf_nbuf_set_da_valid(rx_desc->nbuf, 1);
2379 
2380 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
2381 			qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1);
2382 
2383 		qdf_nbuf_set_tid_val(rx_desc->nbuf,
2384 				     HAL_RX_REO_QUEUE_NUMBER_GET(ring_desc));
2385 
2386 		QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len;
2387 
2388 		QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;
2389 
2390 		/*
2391 		 * Unmap is done after the scattered-msdu wait/break logic
2392 		 * above, to avoid a double skb unmap.
2393 		 */
2394 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
2395 		dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf,
2396 						  rx_desc_pool->buf_size,
2397 						  false);
2398 		qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
2399 					     QDF_DMA_FROM_DEVICE,
2400 					     rx_desc_pool->buf_size);
2401 		rx_desc->unmapped = 1;
2402 		DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head,
2403 				   ebuf_tail, rx_desc);
2404 		/*
2405 		 * if continuation bit is set then we have MSDU spread
2406 		 * across multiple buffers, let us not decrement quota
2407 		 * till we reap all buffers of that MSDU.
2408 		 */
2409 		if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)))
2410 			quota -= 1;
2411 
2412 		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
2413 						&tail[rx_desc->pool_id],
2414 						rx_desc);
2415 
2416 		num_rx_bufs_reaped++;
2417 		/*
2418 		 * Allow break only after a complete msdu has been received
2419 		 * in the scatter case.
2420 		 */
2421 		if (is_prev_msdu_last &&
2422 		    dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped))
2423 			break;
2424 	}
2425 done:
2426 	dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);
2427 
2428 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
2429 		/*
2430 		 * continue with next mac_id if no pkts were reaped
2431 		 * from that pool
2432 		 */
2433 		if (!rx_bufs_reaped[mac_id])
2434 			continue;
2435 
2436 		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
2437 
2438 		rx_desc_pool = &soc->rx_desc_buf[mac_id];
2439 
2440 		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
2441 					rx_desc_pool, rx_bufs_reaped[mac_id],
2442 					&head[mac_id], &tail[mac_id]);
2443 	}
2444 
2445 	dp_verbose_debug("replenished %u\n", rx_bufs_reaped[0]);
2446 	/* Peer can be NULL in case of LFR */
2447 	if (qdf_likely(peer))
2448 		vdev = NULL;
2449 
2450 	/*
2451 	 * BIG loop where each nbuf is dequeued from the global queue,
2452 	 * processed and queued back on a per-vdev basis. These nbufs
2453 	 * are sent to the stack as and when we run out of nbufs,
2454 	 * or when a newly dequeued nbuf belongs to a different vdev
2455 	 * than the previous one.
2456 	 */
2457 	nbuf = nbuf_head;
2458 	while (nbuf) {
2459 		next = nbuf->next;
2460 		if (qdf_unlikely(dp_rx_is_raw_frame_dropped(nbuf))) {
2461 			nbuf = next;
2462 			DP_STATS_INC(soc, rx.err.raw_frm_drop, 1);
2463 			continue;
2464 		}
2465 
2466 		rx_tlv_hdr = qdf_nbuf_data(nbuf);
2467 		vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
2468 
2469 		if (deliver_list_head && vdev && (vdev->vdev_id != vdev_id)) {
2470 			dp_rx_deliver_to_stack(soc, vdev, peer,
2471 					       deliver_list_head,
2472 					       deliver_list_tail);
2473 			deliver_list_head = NULL;
2474 			deliver_list_tail = NULL;
2475 		}
2476 
2477 		/* Get TID from struct cb->tid_val, save to tid */
2478 		if (qdf_nbuf_is_rx_chfrag_start(nbuf))
2479 			tid = qdf_nbuf_get_tid_val(nbuf);
2480 
2481 		peer_id =  QDF_NBUF_CB_RX_PEER_ID(nbuf);
2482 
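		/*
		 * Re-use the previously looked-up peer when consecutive
		 * nbufs carry the same peer id; otherwise drop the old
		 * reference and look up the new peer.
		 */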
2483 		if (qdf_unlikely(!peer)) {
2484 			peer = dp_peer_find_by_id(soc, peer_id);
2485 		} else if (peer && peer->peer_id != peer_id) {
2486 			dp_peer_unref_del_find_by_id(peer);
2487 			peer = dp_peer_find_by_id(soc, peer_id);
2488 		}
2489 
2490 		if (peer) {
2491 			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
2492 			qdf_dp_trace_set_track(nbuf, QDF_RX);
2493 			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
2494 			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
2495 				QDF_NBUF_RX_PKT_DATA_TRACK;
2496 		}
2497 
2498 		rx_bufs_used++;
2499 
2500 		if (qdf_likely(peer)) {
2501 			vdev = peer->vdev;
2502 		} else {
2503 			nbuf->next = NULL;
2504 			dp_rx_deliver_to_stack_no_peer(soc, nbuf);
2505 			nbuf = next;
2506 			continue;
2507 		}
2508 
2509 		if (qdf_unlikely(!vdev)) {
2510 			qdf_nbuf_free(nbuf);
2511 			nbuf = next;
2512 			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
2513 			continue;
2514 		}
2515 
2516 		rx_pdev = vdev->pdev;
2517 		DP_RX_TID_SAVE(nbuf, tid);
2518 		if (qdf_unlikely(rx_pdev->delay_stats_flag) ||
2519 		    qdf_unlikely(wlan_cfg_is_peer_ext_stats_enabled(
2520 				 soc->wlan_cfg_ctx)))
2521 			qdf_nbuf_set_timestamp(nbuf);
2522 
2523 		ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
2524 		tid_stats =
2525 			&rx_pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
2526 
2527 		/*
2528 		 * Check if DMA completed -- msdu_done is the last bit
2529 		 * to be written
2530 		 */
2531 		if (qdf_unlikely(!qdf_nbuf_is_rx_chfrag_cont(nbuf) &&
2532 				 !hal_rx_attn_msdu_done_get(rx_tlv_hdr))) {
2533 			dp_err("MSDU DONE failure");
2534 			DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
2535 			hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
2536 					     QDF_TRACE_LEVEL_INFO);
2537 			tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
2538 			qdf_nbuf_free(nbuf);
2539 			qdf_assert(0);
2540 			nbuf = next;
2541 			continue;
2542 		}
2543 
2544 		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
2545 		/*
2546 		 * First IF condition:
2547 		 * 802.11 Fragmented pkts are reinjected to REO
2548 		 * HW block as SG pkts and for these pkts we only
2549 		 * need to pull the RX TLVS header length.
2550 		 * Second IF condition:
2551 		 * The below condition happens when an MSDU is spread
2552 		 * across multiple buffers. This can happen in two cases
2553 		 * 1. The nbuf size is smaller than the received msdu.
2554 		 *    ex: we have set the nbuf size to 2048 during
2555 		 *        nbuf_alloc, but we received an msdu which is
2556 		 *        2304 bytes in size; then this msdu is spread
2557 		 *        across 2 nbufs.
2558 		 *
2559 		 * 2. AMSDUs when RAW mode is enabled.
2560 		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
2561 		 *        across 1st nbuf and 2nd nbuf and last MSDU is
2562 		 *        spread across 2nd nbuf and 3rd nbuf.
2563 		 *
2564 		 * for these scenarios let us create a skb frag_list and
2565 		 * append these buffers till the last MSDU of the AMSDU
2566 		 * Third condition:
2567 		 * This is the most likely case, we receive 802.3 pkts
2568 		 * decapsulated by HW, here we need to set the pkt length.
2569 		 */
2570 		hal_rx_msdu_metadata_get(hal_soc, rx_tlv_hdr, &msdu_metadata);
2571 		if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
2572 			bool is_mcbc, is_sa_vld, is_da_vld;
2573 
2574 			is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
2575 								 rx_tlv_hdr);
2576 			is_sa_vld =
2577 				hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
2578 								rx_tlv_hdr);
2579 			is_da_vld =
2580 				hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
2581 								rx_tlv_hdr);
2582 
2583 			qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
2584 			qdf_nbuf_set_da_valid(nbuf, is_da_vld);
2585 			qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);
2586 
2587 			qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
2588 		} else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
2589 			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
2590 			nbuf = dp_rx_sg_create(nbuf);
2591 			next = nbuf->next;
2592 
2593 			if (qdf_nbuf_is_raw_frame(nbuf)) {
2594 				DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
2595 				DP_STATS_INC_PKT(peer, rx.raw, 1, msdu_len);
2596 			} else {
2597 				qdf_nbuf_free(nbuf);
2598 				DP_STATS_INC(soc, rx.err.scatter_msdu, 1);
2599 				dp_info_rl("scatter msdu len %d, dropped",
2600 					   msdu_len);
2601 				nbuf = next;
2602 				continue;
2603 			}
2604 		} else {
2605 
2606 			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
2607 			pkt_len = msdu_len +
2608 				  msdu_metadata.l3_hdr_pad +
2609 				  RX_PKT_TLVS_LEN;
2610 
2611 			qdf_nbuf_set_pktlen(nbuf, pkt_len);
2612 			dp_rx_skip_tlvs(nbuf, msdu_metadata.l3_hdr_pad);
2613 		}
2614 
2615 		/*
2616 		 * Process the frame for multipass phase processing
2617 		 */
2618 		if (qdf_unlikely(vdev->multipass_en)) {
2619 			if (dp_rx_multipass_process(peer, nbuf, tid) == false) {
2620 				DP_STATS_INC(peer, rx.multipass_rx_pkt_drop, 1);
2621 				qdf_nbuf_free(nbuf);
2622 				nbuf = next;
2623 				continue;
2624 			}
2625 		}
2626 
2627 		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
2628 			QDF_TRACE(QDF_MODULE_ID_DP,
2629 					QDF_TRACE_LEVEL_ERROR,
2630 					FL("Policy Check Drop pkt"));
2631 			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
2632 			/* Drop & free packet */
2633 			qdf_nbuf_free(nbuf);
2634 			/* Statistics */
2635 			nbuf = next;
2636 			continue;
2637 		}
2638 
2639 		if (qdf_unlikely(peer && (peer->nawds_enabled) &&
2640 				 (qdf_nbuf_is_da_mcbc(nbuf)) &&
2641 				 (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc,
2642 								rx_tlv_hdr) ==
2643 				  false))) {
2644 			tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
2645 			DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
2646 			qdf_nbuf_free(nbuf);
2647 			nbuf = next;
2648 			continue;
2649 		}
2650 
2651 		if (soc->process_rx_status)
2652 			dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);
2653 
2654 		/* Update the protocol tag in SKB based on CCE metadata */
2655 		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
2656 					  reo_ring_num, false, true);
2657 
2658 		/* Update the flow tag in SKB based on FSE metadata */
2659 		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
2660 
2661 		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer,
2662 					ring_id, tid_stats);
2663 
2664 		if (qdf_unlikely(vdev->mesh_vdev)) {
2665 			if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
2666 					== QDF_STATUS_SUCCESS) {
2667 				QDF_TRACE(QDF_MODULE_ID_DP,
2668 						QDF_TRACE_LEVEL_INFO_MED,
2669 						FL("mesh pkt filtered"));
2670 				tid_stats->fail_cnt[MESH_FILTER_DROP]++;
2671 				DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
2672 					     1);
2673 
2674 				qdf_nbuf_free(nbuf);
2675 				nbuf = next;
2676 				continue;
2677 			}
2678 			dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
2679 		}
2680 
2681 		if (qdf_likely(vdev->rx_decap_type ==
2682 			       htt_cmn_pkt_type_ethernet) &&
2683 		    qdf_likely(!vdev->mesh_vdev)) {
2684 			/* WDS Destination Address Learning */
2685 			dp_rx_da_learn(soc, rx_tlv_hdr, peer, nbuf);
2686 
2687 			/* Due to HW issue, sometimes we see that the sa_idx
2688 			 * and da_idx are invalid with sa_valid and da_valid
2689 			 * bits set
2690 			 *
2691 			 * In this case we also see that the value of
2692 			 * sa_sw_peer_id is set to 0.
2693 			 *
2694 			 * Drop the packet if sa_idx or da_idx is OOB, or
2695 			 * sa_sw_peer_id is 0.
2696 			 */
2697 			if (!is_sa_da_idx_valid(soc, rx_tlv_hdr, nbuf,
2698 						msdu_metadata)) {
2699 				qdf_nbuf_free(nbuf);
2700 				nbuf = next;
2701 				DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
2702 				continue;
2703 			}
2704 			/* WDS Source Port Learning */
2705 			if (qdf_likely(vdev->wds_enabled))
2706 				dp_rx_wds_srcport_learn(soc,
2707 							rx_tlv_hdr,
2708 							peer,
2709 							nbuf,
2710 							msdu_metadata);
2711 
2712 			/* Intrabss-fwd */
2713 			if (dp_rx_check_ap_bridge(vdev))
2714 				if (dp_rx_intrabss_fwd(soc,
2715 							peer,
2716 							rx_tlv_hdr,
2717 							nbuf,
2718 							msdu_metadata)) {
2719 					nbuf = next;
2720 					tid_stats->intrabss_cnt++;
2721 					continue; /* Get next desc */
2722 				}
2723 		}
2724 
2725 		dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt);
2726 
2727 		DP_RX_LIST_APPEND(deliver_list_head,
2728 				  deliver_list_tail,
2729 				  nbuf);
2730 		DP_STATS_INC_PKT(peer, rx.to_stack, 1,
2731 				 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
2732 		if (qdf_unlikely(peer->in_twt))
2733 			DP_STATS_INC_PKT(peer, rx.to_stack_twt, 1,
2734 					 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
2735 
2736 		tid_stats->delivered_to_stack++;
2737 		nbuf = next;
2738 	}
2739 
2740 	if (qdf_likely(deliver_list_head)) {
2741 		if (qdf_likely(peer))
2742 			dp_rx_deliver_to_stack(soc, vdev, peer,
2743 					       deliver_list_head,
2744 					       deliver_list_tail);
2745 		else {
2746 			nbuf = deliver_list_head;
2747 			while (nbuf) {
2748 				next = nbuf->next;
2749 				nbuf->next = NULL;
2750 				dp_rx_deliver_to_stack_no_peer(soc, nbuf);
2751 				nbuf = next;
2752 			}
2753 		}
2754 	}
2755 
2756 	if (qdf_likely(peer))
2757 		dp_peer_unref_del_find_by_id(peer);
2758 
2759 	if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
2760 		if (quota) {
2761 			num_pending =
2762 				dp_rx_srng_get_num_pending(hal_soc,
2763 							   hal_ring_hdl,
2764 							   num_entries,
2765 							   &near_full);
2766 			if (num_pending) {
2767 				DP_STATS_INC(soc, rx.hp_oos2, 1);
2768 
2769 				if (!hif_exec_should_yield(scn, intr_id))
2770 					goto more_data;
2771 
2772 				if (qdf_unlikely(near_full)) {
2773 					DP_STATS_INC(soc, rx.near_full, 1);
2774 					goto more_data;
2775 				}
2776 			}
2777 		}
2778 
2779 		if (vdev && vdev->osif_fisa_flush)
2780 			vdev->osif_fisa_flush(soc, reo_ring_num);
2781 
2782 		if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
2783 			vdev->osif_gro_flush(vdev->osif_vdev,
2784 					     reo_ring_num);
2785 		}
2786 	}
2787 
2788 	/* Update histogram statistics by looping through pdev's */
2789 	DP_RX_HIST_STATS_PER_PDEV();
2790 
2791 	return rx_bufs_used; /* Assume no scale factor for now */
2792 }
2793 
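/**
 * dp_rx_vdev_detach() - flush pending rx packets for a vdev being detached
 * @vdev: DP vdev handle
 *
 * Invokes the OS interface rx flush callback, if registered, so that any
 * packets still queued for this vdev are flushed before detach completes.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status returned by the
 *	   flush callback otherwise
 */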
2794 QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev)
2795 {
2796 	QDF_STATUS ret;
2797 
2798 	if (vdev->osif_rx_flush) {
2799 		ret = vdev->osif_rx_flush(vdev->osif_vdev, vdev->vdev_id);
2800 		if (!QDF_IS_STATUS_SUCCESS(ret)) {
2801 			dp_err("Failed to flush rx pkts for vdev %d\n",
2802 			       vdev->vdev_id);
2803 			return ret;
2804 		}
2805 	}
2806 
2807 	return QDF_STATUS_SUCCESS;
2808 }
2809 
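/**
 * dp_pdev_nbuf_alloc_and_map() - allocate an rx nbuf and DMA-map it
 * @dp_soc: DP soc handle
 * @nbuf_frag_info_t: nbuf/frag info to be filled in
 * @dp_pdev: DP pdev handle (for replenish error stats)
 * @rx_desc_pool: rx descriptor pool the buffer belongs to
 *
 * Allocates an nbuf of rx_desc_pool->buf_size, maps it for DMA, records
 * its physical address and validates the address via check_x86_paddr().
 * On failure the nbuf is unmapped/freed as needed and an error status
 * is returned.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */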
2810 static QDF_STATUS
2811 dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc,
2812 			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
2813 			   struct dp_pdev *dp_pdev,
2814 			   struct rx_desc_pool *rx_desc_pool)
2815 {
2816 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
2817 
2818 	(nbuf_frag_info_t->virt_addr).nbuf =
2819 		qdf_nbuf_alloc(dp_soc->osdev, rx_desc_pool->buf_size,
2820 			       RX_BUFFER_RESERVATION,
2821 			       rx_desc_pool->buf_alignment, FALSE);
2822 	if (!((nbuf_frag_info_t->virt_addr).nbuf)) {
2823 		dp_err("nbuf alloc failed");
2824 		DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
2825 		return ret;
2826 	}
2827 
2828 	ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev,
2829 					 (nbuf_frag_info_t->virt_addr).nbuf,
2830 					 QDF_DMA_FROM_DEVICE,
2831 					 rx_desc_pool->buf_size);
2832 
2833 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
2834 		qdf_nbuf_free((nbuf_frag_info_t->virt_addr).nbuf);
2835 		dp_err("nbuf map failed");
2836 		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
2837 		return ret;
2838 	}
2839 
2840 	nbuf_frag_info_t->paddr =
2841 		qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);
2842 
2843 	ret = check_x86_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
2844 			      &nbuf_frag_info_t->paddr,
2845 			      rx_desc_pool);
2846 	if (ret == QDF_STATUS_E_FAILURE) {
2847 		qdf_nbuf_unmap_nbytes_single(dp_soc->osdev,
2848 					     (nbuf_frag_info_t->virt_addr).nbuf,
2849 					     QDF_DMA_FROM_DEVICE,
2850 					     rx_desc_pool->buf_size);
2851 		qdf_nbuf_free((nbuf_frag_info_t->virt_addr).nbuf);
2852 		dp_err("nbuf check x86 failed");
2853 		DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
2854 		return ret;
2855 	}
2856 
2857 	return QDF_STATUS_SUCCESS;
2858 }
2859 
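/**
 * dp_pdev_rx_buffers_attach() - allocate rx buffers and fill the RxDMA ring
 *				 at driver attach time
 * @dp_soc: DP soc handle
 * @mac_id: mac (lmac) id
 * @dp_rxdma_srng: RxDMA refill ring
 * @rx_desc_pool: rx descriptor pool to draw sw descriptors from
 * @num_req_buffers: number of buffers requested
 *
 * Allocates nbuf (or frag) buffers one page of pointers at a time, maps
 * them, and programs the RxDMA ring entries with the buffer addresses
 * and sw descriptor cookies.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */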
2860 QDF_STATUS
2861 dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
2862 			  struct dp_srng *dp_rxdma_srng,
2863 			  struct rx_desc_pool *rx_desc_pool,
2864 			  uint32_t num_req_buffers)
2865 {
2866 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
2867 	hal_ring_handle_t rxdma_srng = dp_rxdma_srng->hal_srng;
2868 	union dp_rx_desc_list_elem_t *next;
2869 	void *rxdma_ring_entry;
2870 	qdf_dma_addr_t paddr;
2871 	struct dp_rx_nbuf_frag_info *nf_info;
2872 	uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0;
2873 	uint32_t buffer_index, nbuf_ptrs_per_page;
2874 	qdf_nbuf_t nbuf;
2875 	QDF_STATUS ret;
2876 	int page_idx, total_pages;
2877 	union dp_rx_desc_list_elem_t *desc_list = NULL;
2878 	union dp_rx_desc_list_elem_t *tail = NULL;
2879 	int sync_hw_ptr = 1;
2880 	uint32_t num_entries_avail;
2881 
2882 	if (qdf_unlikely(!rxdma_srng)) {
2883 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
2884 		return QDF_STATUS_E_FAILURE;
2885 	}
2886 
2887 	dp_debug("requested %u RX buffers for driver attach", num_req_buffers);
2888 
2889 	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
2890 	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
2891 						   rxdma_srng,
2892 						   sync_hw_ptr);
2893 	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
2894 
2895 	if (!num_entries_avail) {
2896 		dp_err("Num of available entries is zero, nothing to do");
2897 		return QDF_STATUS_E_NOMEM;
2898 	}
2899 
2900 	if (num_entries_avail < num_req_buffers)
2901 		num_req_buffers = num_entries_avail;
2902 
2903 	nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool,
2904 					    num_req_buffers, &desc_list, &tail);
2905 	if (!nr_descs) {
2906 		dp_err("no free rx_descs in freelist");
2907 		DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
2908 		return QDF_STATUS_E_NOMEM;
2909 	}
2910 
2911 	dp_debug("got %u RX descs for driver attach", nr_descs);
2912 
2913 	/*
2914 	 * Try to allocate pointers to the nbuf one page at a time.
2915 	 * Take pointers that can fit in one page of memory and
2916 	 * iterate through the total descriptors that need to be
2917 	 * allocated in order of pages. Reuse the pointers that
2918 	 * have been allocated to fit in one page across each
2919 	 * iteration to index into the nbuf.
2920 	 */
2921 	total_pages = (nr_descs * sizeof(*nf_info)) / PAGE_SIZE;
2922 
2923 	/*
2924 	 * Add an extra page to store the remainder if any
2925 	 */
2926 	if ((nr_descs * sizeof(*nf_info)) % PAGE_SIZE)
2927 		total_pages++;
2928 	nf_info = qdf_mem_malloc(PAGE_SIZE);
2929 	if (!nf_info) {
2930 		dp_err("failed to allocate nbuf array");
2931 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
2932 		QDF_BUG(0);
2933 		return QDF_STATUS_E_NOMEM;
2934 	}
2935 	nbuf_ptrs_per_page = PAGE_SIZE / sizeof(*nf_info);
2936 
2937 	for (page_idx = 0; page_idx < total_pages; page_idx++) {
2938 		qdf_mem_zero(nf_info, PAGE_SIZE);
2939 
2940 		for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) {
2941 			/*
2942 			 * The last page of buffer pointers may not be required
2943 			 * completely based on the number of descriptors. Below
2944 			 * check will ensure we are allocating only the
2945 			 * required number of descriptors.
2946 			 */
2947 			if (nr_nbuf_total >= nr_descs)
2948 				break;
2949 			/* Flag is set while pdev rx_desc_pool initialization */
2950 			if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
2951 				ret = dp_pdev_frag_alloc_and_map(dp_soc,
2952 						&nf_info[nr_nbuf], dp_pdev,
2953 						rx_desc_pool);
2954 			else
2955 				ret = dp_pdev_nbuf_alloc_and_map(dp_soc,
2956 						&nf_info[nr_nbuf], dp_pdev,
2957 						rx_desc_pool);
2958 			if (QDF_IS_STATUS_ERROR(ret))
2959 				break;
2960 
2961 			nr_nbuf_total++;
2962 		}
2963 
2964 		hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
2965 
2966 		for (buffer_index = 0; buffer_index < nr_nbuf; buffer_index++) {
2967 			rxdma_ring_entry =
2968 				hal_srng_src_get_next(dp_soc->hal_soc,
2969 						      rxdma_srng);
2970 			qdf_assert_always(rxdma_ring_entry);
2971 
2972 			next = desc_list->next;
2973 			paddr = nf_info[buffer_index].paddr;
2974 			nbuf = nf_info[buffer_index].virt_addr.nbuf;
2975 
2976 			/* Flag is set while pdev rx_desc_pool initialization */
2977 			if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
2978 				dp_rx_desc_frag_prep(&desc_list->rx_desc,
2979 						     &nf_info[buffer_index]);
2980 			else
2981 				dp_rx_desc_prep(&desc_list->rx_desc,
2982 						&nf_info[buffer_index]);
2983 			desc_list->rx_desc.in_use = 1;
2984 			dp_rx_desc_alloc_dbg_info(&desc_list->rx_desc);
2985 			dp_rx_desc_update_dbg_info(&desc_list->rx_desc,
2986 						   __func__,
2987 						   RX_DESC_REPLENISHED);
2988 
2989 			hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
2990 						     desc_list->rx_desc.cookie,
2991 						     rx_desc_pool->owner);
2992 			dp_ipa_handle_rx_buf_smmu_mapping(
2993 						dp_soc, nbuf,
2994 						rx_desc_pool->buf_size,
2995 						true);
2996 
2997 			desc_list = next;
2998 		}
2999 
3000 		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
3001 	}
3002 
3003 	dp_info("filled %u RX buffers for driver attach", nr_nbuf_total);
3004 	qdf_mem_free(nf_info);
3005 
3006 	if (!nr_nbuf_total) {
3007 		dp_err("No nbufs allocated");
3008 		QDF_BUG(0);
3009 		return QDF_STATUS_E_RESOURCES;
3010 	}
3011 
3012 	/* No need to count the number of bytes received during replenish.
3013 	 * Therefore set replenish.pkts.bytes as 0.
3014 	 */
3015 	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf, 0);
3016 
3017 	return QDF_STATUS_SUCCESS;
3018 }
3019 
3020 /**
3021  * dp_rx_enable_mon_dest_frag() - Enable frag processing for
3022  *              monitor destination ring via frag.
3023  *
3024  * Enable this flag only for monitor destination buffer processing
3025  * if DP_RX_MON_MEM_FRAG feature is enabled.
3026  * If the flag is set, frag based functions will be called for the
3027  * alloc, map, prep desc and free ops on the desc buffer; otherwise
3028  * the normal nbuf based functions will be called.
3029  *
3030  * @rx_desc_pool: Rx desc pool
3031  * @is_mon_dest_desc: Is it for monitor dest buffer
3032  *
3033  * Return: None
3034  */
3035 #ifdef DP_RX_MON_MEM_FRAG
3036 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
3037 				bool is_mon_dest_desc)
3038 {
3039 	rx_desc_pool->rx_mon_dest_frag_enable = is_mon_dest_desc;
3040 }
3041 #else
3042 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
3043 				bool is_mon_dest_desc)
3044 {
3045 	rx_desc_pool->rx_mon_dest_frag_enable = false;
3046 }
3047 #endif
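/*
 * Illustrative usage (a sketch only; the actual call sites live in the
 * monitor-mode and pdev init paths, and mon_desc_pool below is a
 * hypothetical name for a monitor destination descriptor pool):
 *
 *	dp_rx_enable_mon_dest_frag(&mon_desc_pool, true);
 *
 * dp_rx_pdev_desc_pool_init() below shows the non-monitor case, where
 * the flag is explicitly disabled.
 */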
3048 
3049 /*
3050  * dp_rx_pdev_desc_pool_alloc() -  allocate memory for software rx descriptor
3051  *				   pool
3052  *
3053  * @pdev: core txrx pdev context
3054  *
3055  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
3056  *			QDF_STATUS_E_NOMEM
3057  */
3058 QDF_STATUS
3059 dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev)
3060 {
3061 	struct dp_soc *soc = pdev->soc;
3062 	uint32_t rxdma_entries;
3063 	uint32_t rx_sw_desc_num;
3064 	struct dp_srng *dp_rxdma_srng;
3065 	struct rx_desc_pool *rx_desc_pool;
3066 	uint32_t status = QDF_STATUS_SUCCESS;
3067 	int mac_for_pdev;
3068 
3069 	mac_for_pdev = pdev->lmac_id;
3070 	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
3071 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3072 			  "nss-wifi<4> skip Rx refill %d", mac_for_pdev);
3073 		return status;
3074 	}
3075 
3076 	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
3077 	rxdma_entries = dp_rxdma_srng->num_entries;
3078 
3079 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
3080 	rx_sw_desc_num = wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx);
3081 
3082 	status = dp_rx_desc_pool_alloc(soc,
3083 				       rx_sw_desc_num,
3084 				       rx_desc_pool);
3085 	if (status != QDF_STATUS_SUCCESS)
3086 		return status;
3087 
3088 	return status;
3089 }
3090 
3091 /*
3092  * dp_rx_pdev_desc_pool_free() - free software rx descriptor pool
3093  *
3094  * @pdev: core txrx pdev context
3095  */
3096 void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev)
3097 {
3098 	int mac_for_pdev = pdev->lmac_id;
3099 	struct dp_soc *soc = pdev->soc;
3100 	struct rx_desc_pool *rx_desc_pool;
3101 
3102 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
3103 
3104 	dp_rx_desc_pool_free(soc, rx_desc_pool);
3105 }
3106 
3107 /*
3108  * dp_rx_pdev_desc_pool_init() - initialize software rx descriptors
3109  *
3110  * @pdev: core txrx pdev context
3111  *
3112  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
3113  *			QDF_STATUS_E_NOMEM
3114  */
3115 QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev)
3116 {
3117 	int mac_for_pdev = pdev->lmac_id;
3118 	struct dp_soc *soc = pdev->soc;
3119 	uint32_t rxdma_entries;
3120 	uint32_t rx_sw_desc_num;
3121 	struct dp_srng *dp_rxdma_srng;
3122 	struct rx_desc_pool *rx_desc_pool;
3123 
3124 	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
3125 		/*
3126 		 * If NSS is enabled, rx_desc_pool is already filled.
3127 		 * Hence, just disable desc_pool frag flag.
3128 		 */
3129 		rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
3130 		dp_rx_enable_mon_dest_frag(rx_desc_pool, false);
3131 
3132 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3133 			  "nss-wifi<4> skip Rx refill %d", mac_for_pdev);
3134 		return QDF_STATUS_SUCCESS;
3135 	}
3136 
3137 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
3138 	if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_E_NOMEM)
3139 		return QDF_STATUS_E_NOMEM;
3140 
3141 	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
3142 	rxdma_entries = dp_rxdma_srng->num_entries;
3143 
3144 	soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;
3145 
3146 	rx_sw_desc_num =
3147 	wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx);
3148 
3149 	rx_desc_pool->owner = DP_WBM2SW_RBM;
3150 	rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE;
3151 	rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;
3152 	/* Disable monitor dest processing via frag */
3153 	dp_rx_enable_mon_dest_frag(rx_desc_pool, false);
3154 
3155 	dp_rx_desc_pool_init(soc, mac_for_pdev,
3156 			     rx_sw_desc_num, rx_desc_pool);
3157 	return QDF_STATUS_SUCCESS;
3158 }
3159 
3160 /*
3161  * dp_rx_pdev_desc_pool_deinit() - de-initialize software rx descriptor pools
3162  * @pdev: core txrx pdev context
3163  *
3164  * This function resets the freelist of rx descriptors and destroys locks
3165  * associated with this list of descriptors.
3166  */
3167 void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev)
3168 {
3169 	int mac_for_pdev = pdev->lmac_id;
3170 	struct dp_soc *soc = pdev->soc;
3171 	struct rx_desc_pool *rx_desc_pool;
3172 
3173 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
3174 
3175 	dp_rx_desc_pool_deinit(soc, rx_desc_pool);
3176 }
3177 
3178 /*
3179  * dp_rx_pdev_buffers_alloc() - Allocate nbufs (skbs) and replenish RxDMA ring
3180  *
3181  * @pdev: core txrx pdev context
3182  *
3183  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
3184  *			QDF_STATUS_E_NOMEM
3185  */
3186 QDF_STATUS
3187 dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev)
3188 {
3189 	int mac_for_pdev = pdev->lmac_id;
3190 	struct dp_soc *soc = pdev->soc;
3191 	struct dp_srng *dp_rxdma_srng;
3192 	struct rx_desc_pool *rx_desc_pool;
3193 	uint32_t rxdma_entries;
3194 
3195 	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
3196 	rxdma_entries = dp_rxdma_srng->num_entries;
3197 
3198 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
3199 
3200 	/* Initialize RX buffer pool which will be
3201 	 * used during low memory conditions
3202 	 */
3203 	dp_rx_buffer_pool_init(soc, mac_for_pdev);
3204 
3205 	return dp_pdev_rx_buffers_attach(soc, mac_for_pdev, dp_rxdma_srng,
3206 					 rx_desc_pool, rxdma_entries - 1);
3207 }
3208 
3209 /*
3210  * dp_rx_pdev_buffers_free - Free nbufs (skbs)
3211  *
3212  * @pdev: core txrx pdev context
3213  */
3214 void
3215 dp_rx_pdev_buffers_free(struct dp_pdev *pdev)
3216 {
3217 	int mac_for_pdev = pdev->lmac_id;
3218 	struct dp_soc *soc = pdev->soc;
3219 	struct rx_desc_pool *rx_desc_pool;
3220 
3221 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
3222 
3223 	dp_rx_desc_nbuf_free(soc, rx_desc_pool);
3224 	dp_rx_buffer_pool_deinit(soc, mac_for_pdev);
3225 }
3226 
3227 #ifdef DP_RX_SPECIAL_FRAME_NEED
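/*
 * dp_rx_deliver_special_frame() - deliver selected special frames (as
 * chosen by frame_mask, e.g. ARP/DHCP/EAPOL) straight to the stack as
 * exception frames after stripping the RX TLVs and L3 header padding.
 * Returns true if the frame was delivered, false if the caller should
 * handle (typically drop) it.
 */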
3228 bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer,
3229 				 qdf_nbuf_t nbuf, uint32_t frame_mask,
3230 				 uint8_t *rx_tlv_hdr)
3231 {
3232 	uint32_t l2_hdr_offset = 0;
3233 	uint16_t msdu_len = 0;
3234 	uint32_t skip_len;
3235 
3236 	l2_hdr_offset =
3237 		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
3238 
3239 	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
3240 		skip_len = l2_hdr_offset;
3241 	} else {
3242 		msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
3243 		skip_len = l2_hdr_offset + RX_PKT_TLVS_LEN;
3244 		qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);
3245 	}
3246 
3247 	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
3248 	dp_rx_set_hdr_pad(nbuf, l2_hdr_offset);
3249 	qdf_nbuf_pull_head(nbuf, skip_len);
3250 
3251 	if (dp_rx_is_special_frame(nbuf, frame_mask)) {
3252 		qdf_nbuf_set_exc_frame(nbuf, 1);
3253 		dp_rx_deliver_to_stack(soc, peer->vdev, peer,
3254 				       nbuf, NULL);
3255 		return true;
3256 	}
3257 
3258 	return false;
3259 }
3260 #endif
3261