xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/be/dp_be_rx.c (revision 2888b71da71bce103343119fa1b31f4a0cee07c8)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "cdp_txrx_cmn_struct.h"
21 #include "hal_hw_headers.h"
22 #include "dp_types.h"
23 #include "dp_rx.h"
24 #include "dp_tx.h"
25 #include "dp_be_rx.h"
26 #include "dp_peer.h"
27 #include "hal_rx.h"
28 #include "hal_be_rx.h"
29 #include "hal_api.h"
30 #include "hal_be_api.h"
31 #include "qdf_nbuf.h"
32 #include "hal_be_rx_tlv.h"
33 #ifdef MESH_MODE_SUPPORT
34 #include "if_meta_hdr.h"
35 #endif
36 #include "dp_internal.h"
37 #include "dp_ipa.h"
38 #ifdef FEATURE_WDS
39 #include "dp_txrx_wds.h"
40 #endif
41 #include "dp_hist.h"
42 #include "dp_rx_buffer_pool.h"
43 
44 #ifndef AST_OFFLOAD_ENABLE
45 static void
46 dp_rx_wds_learn(struct dp_soc *soc,
47 		struct dp_vdev *vdev,
48 		uint8_t *rx_tlv_hdr,
49 		struct dp_txrx_peer *txrx_peer,
50 		qdf_nbuf_t nbuf,
51 		struct hal_rx_msdu_metadata msdu_metadata)
52 {
53 	/* WDS Source Port Learning */
54 	if (qdf_likely(vdev->wds_enabled))
55 		dp_rx_wds_srcport_learn(soc,
56 				rx_tlv_hdr,
57 				txrx_peer,
58 				nbuf,
59 				msdu_metadata);
60 }
61 #else
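/*
 * With AST offload enabled, the host skips per-packet WDS source-port
 * learning; the dp_rx_wds_learn() variant below only raises the WDS-ext
 * peer-learn event towards the control path.
 */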
62 #ifdef QCA_SUPPORT_WDS_EXTENDED
63 /**
64  * dp_wds_ext_peer_learn_be() - function to send event to control
65  * path on receiving 1st 4-address frame from backhaul.
66  * @soc: DP soc
67  * @ta_txrx_peer: WDS repeater txrx peer
68  * @rx_tlv_hdr: start address of rx tlvs
69  * @nbuf: RX packet buffer
70  *
71  * Return: void
72  */
73 static inline void dp_wds_ext_peer_learn_be(struct dp_soc *soc,
74 					    struct dp_txrx_peer *ta_txrx_peer,
75 					    uint8_t *rx_tlv_hdr,
76 					    qdf_nbuf_t nbuf)
77 {
78 	uint8_t wds_ext_src_mac[QDF_MAC_ADDR_SIZE];
79 	struct dp_peer *ta_base_peer;
80 
81 	/* Instead of checking whether addr4 is valid in the per-packet
82 	 * path, check the init bit, which is set on reception of the
83 	 * first addr4-valid packet.
84 	 */
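	/* The qdf_atomic_test_bit() check below is the cheap fast path taken
	 * for every frame; qdf_atomic_test_and_set_bit() further down only
	 * runs for the first chfrag-start frame carrying a valid addr4.
	 */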
85 	if (!ta_txrx_peer->vdev->wds_ext_enabled ||
86 	    qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT,
87 				&ta_txrx_peer->wds_ext.init))
88 		return;
89 
90 	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
91 	    hal_rx_get_mpdu_mac_ad4_valid_be(rx_tlv_hdr)) {
92 		qdf_atomic_test_and_set_bit(WDS_EXT_PEER_INIT_BIT,
93 					    &ta_txrx_peer->wds_ext.init);
94 
95 		ta_base_peer = dp_peer_get_ref_by_id(soc, ta_txrx_peer->peer_id,
96 						     DP_MOD_ID_RX);
97 
98 		if (!ta_base_peer)
99 			return;
100 
101 		qdf_mem_copy(wds_ext_src_mac, &ta_base_peer->mac_addr.raw[0],
102 			     QDF_MAC_ADDR_SIZE);
103 		dp_peer_unref_delete(ta_base_peer, DP_MOD_ID_RX);
104 
105 		soc->cdp_soc.ol_ops->rx_wds_ext_peer_learn(
106 						soc->ctrl_psoc,
107 						ta_txrx_peer->peer_id,
108 						ta_txrx_peer->vdev->vdev_id,
109 						wds_ext_src_mac);
110 	}
111 }
112 #else
113 static inline void dp_wds_ext_peer_learn_be(struct dp_soc *soc,
114 					    struct dp_txrx_peer *ta_txrx_peer,
115 					    uint8_t *rx_tlv_hdr,
116 					    qdf_nbuf_t nbuf)
117 {
118 }
119 #endif
120 static void
121 dp_rx_wds_learn(struct dp_soc *soc,
122 		struct dp_vdev *vdev,
123 		uint8_t *rx_tlv_hdr,
124 		struct dp_txrx_peer *ta_txrx_peer,
125 		qdf_nbuf_t nbuf,
126 		struct hal_rx_msdu_metadata msdu_metadata)
127 {
128 	dp_wds_ext_peer_learn_be(soc, ta_txrx_peer, rx_tlv_hdr, nbuf);
129 }
130 #endif
131 
132 #if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO)
133 static inline void
134 dp_rx_set_msdu_lmac_id(qdf_nbuf_t nbuf, uint32_t peer_mdata)
135 {
136 	uint8_t lmac_id;
137 
138 	lmac_id = dp_rx_peer_metadata_lmac_id_get_be(peer_mdata);
139 	qdf_nbuf_set_lmac_id(nbuf, lmac_id);
140 }
141 #else
142 static inline void
143 dp_rx_set_msdu_lmac_id(qdf_nbuf_t nbuf, uint32_t peer_mdata)
144 {
145 }
146 #endif
147 
148 /**
149  * dp_rx_process_be() - Brain of the Rx processing functionality
150  *		     Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
151  * @int_ctx: per interrupt context
152  * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
153  * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring.
154  * @quota: No. of units (packets) that can be serviced in one shot.
155  *
156  * This function implements the core of Rx functionality. This is
157  * expected to handle only non-error frames.
158  *
159  * Return: uint32_t: No. of elements processed
160  */
161 uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
162 			  hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
163 			  uint32_t quota)
164 {
165 	hal_ring_desc_t ring_desc;
166 	hal_ring_desc_t last_prefetched_hw_desc;
167 	hal_soc_handle_t hal_soc;
168 	struct dp_rx_desc *rx_desc = NULL;
169 	struct dp_rx_desc *last_prefetched_sw_desc = NULL;
170 	qdf_nbuf_t nbuf, next;
171 	bool near_full;
172 	union dp_rx_desc_list_elem_t *head[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
173 	union dp_rx_desc_list_elem_t *tail[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
174 	uint32_t num_pending = 0;
175 	uint32_t rx_bufs_used = 0, rx_buf_cookie;
176 	uint16_t msdu_len = 0;
177 	uint16_t peer_id;
178 	uint8_t vdev_id;
179 	struct dp_txrx_peer *txrx_peer;
180 	dp_txrx_ref_handle txrx_ref_handle = NULL;
181 	struct dp_vdev *vdev;
182 	uint32_t pkt_len = 0;
183 	struct hal_rx_mpdu_desc_info mpdu_desc_info;
184 	struct hal_rx_msdu_desc_info msdu_desc_info;
185 	enum hal_reo_error_status error;
186 	uint32_t peer_mdata;
187 	uint8_t *rx_tlv_hdr;
188 	uint32_t rx_bufs_reaped[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
189 	uint8_t mac_id = 0;
190 	struct dp_pdev *rx_pdev;
191 	bool enh_flag;
192 	struct dp_srng *dp_rxdma_srng;
193 	struct rx_desc_pool *rx_desc_pool;
194 	struct dp_soc *soc = int_ctx->soc;
195 	struct cdp_tid_rx_stats *tid_stats;
196 	qdf_nbuf_t nbuf_head;
197 	qdf_nbuf_t nbuf_tail;
198 	qdf_nbuf_t deliver_list_head;
199 	qdf_nbuf_t deliver_list_tail;
200 	uint32_t num_rx_bufs_reaped = 0;
201 	uint32_t intr_id;
202 	struct hif_opaque_softc *scn;
203 	int32_t tid = 0;
204 	bool is_prev_msdu_last = true;
205 	uint32_t num_entries_avail = 0;
206 	uint32_t rx_ol_pkt_cnt = 0;
207 	uint32_t num_entries = 0;
208 	struct hal_rx_msdu_metadata msdu_metadata;
209 	QDF_STATUS status;
210 	qdf_nbuf_t ebuf_head;
211 	qdf_nbuf_t ebuf_tail;
212 	uint8_t pkt_capture_offload = 0;
213 	struct dp_srng *rx_ring = &soc->reo_dest_ring[reo_ring_num];
214 	int max_reap_limit, ring_near_full;
215 	struct dp_soc *replenish_soc;
216 	uint8_t chip_id;
217 	uint64_t current_time = 0;
218 	uint32_t old_tid;
219 	uint32_t peer_ext_stats;
220 	uint32_t dsf;
221 
222 	DP_HIST_INIT();
223 
224 	qdf_assert_always(soc && hal_ring_hdl);
225 	hal_soc = soc->hal_soc;
226 	qdf_assert_always(hal_soc);
227 
228 	scn = soc->hif_handle;
229 	intr_id = int_ctx->dp_intr_id;
230 	num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);
231 	dp_runtime_pm_mark_last_busy(soc);
232 
233 more_data:
234 	/* reset local variables here to be re-used in the function */
235 	nbuf_head = NULL;
236 	nbuf_tail = NULL;
237 	deliver_list_head = NULL;
238 	deliver_list_tail = NULL;
239 	txrx_peer = NULL;
240 	vdev = NULL;
241 	num_rx_bufs_reaped = 0;
242 	ebuf_head = NULL;
243 	ebuf_tail = NULL;
244 	ring_near_full = 0;
245 	max_reap_limit = dp_rx_get_loop_pkt_limit(soc);
246 
247 	qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
248 	qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
249 	qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
250 	qdf_mem_zero(head, sizeof(head));
251 	qdf_mem_zero(tail, sizeof(tail));
252 	old_tid = 0xff;
253 	dsf = 0;
254 	peer_ext_stats = 0;
255 	rx_pdev = NULL;
256 	tid_stats = NULL;
257 
258 	dp_pkt_get_timestamp(&current_time);
259 
260 	ring_near_full = _dp_srng_test_and_update_nf_params(soc, rx_ring,
261 							    &max_reap_limit);
262 
263 	peer_ext_stats = wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
264 	if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
265 		/*
266 		 * Need API to convert from hal_ring pointer to
267 		 * Ring Type / Ring Id combo
268 		 */
269 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
270 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
271 			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
272 		goto done;
273 	}
274 
275 	hal_srng_update_ring_usage_wm_no_lock(soc->hal_soc, hal_ring_hdl);
276 
277 	if (!num_pending)
278 		num_pending = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0);
279 
280 	if (num_pending > quota)
281 		num_pending = quota;
282 
283 	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_pending);
284 	last_prefetched_hw_desc = dp_srng_dst_prefetch_32_byte_desc(hal_soc,
285 							    hal_ring_hdl,
286 							    num_pending);
287 	/*
288 	 * Start reaping the buffers from the REO ring and queue
289 	 * them in a per-vdev queue.
290 	 * The received pkts are processed in a separate per-vdev loop.
291 	 */
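	/* Phase 1 (this loop): pop REO entries, unmap the nbufs and collect
	 * them (via DP_RX_PROCESS_NBUF) on the nbuf_head/nbuf_tail list,
	 * while the consumed SW descriptors are gathered per chip/pool in
	 * head[]/tail[] for replenish after "done:". Phase 2 (the loop after
	 * replenish) walks nbuf_head and delivers the frames per vdev.
	 */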
292 	while (qdf_likely(num_pending)) {
293 		ring_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);
294 
295 		if (qdf_unlikely(!ring_desc))
296 			break;
297 
298 		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
299 
300 		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
301 			dp_rx_err("%pK: HAL RING 0x%pK:error %d",
302 				  soc, hal_ring_hdl, error);
303 			DP_STATS_INC(soc, rx.err.hal_reo_error[reo_ring_num],
304 				     1);
305 			/* Don't know how to deal with this -- assert */
306 			qdf_assert(0);
307 		}
308 
309 		dp_rx_ring_record_entry(soc, reo_ring_num, ring_desc);
310 		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
311 		status = dp_rx_cookie_check_and_invalidate(ring_desc);
312 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
313 			DP_STATS_INC(soc, rx.err.stale_cookie, 1);
314 			break;
315 		}
316 
317 		rx_desc = (struct dp_rx_desc *)
318 				hal_rx_get_reo_desc_va(ring_desc);
319 		dp_rx_desc_sw_cc_check(soc, rx_buf_cookie, &rx_desc);
320 
321 		status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl,
322 					   ring_desc, rx_desc);
323 		if (QDF_IS_STATUS_ERROR(status)) {
324 			if (qdf_unlikely(rx_desc && rx_desc->nbuf)) {
325 				qdf_assert_always(!rx_desc->unmapped);
326 				dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
327 				rx_desc->unmapped = 1;
328 				dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
329 							    rx_desc->pool_id);
330 				dp_rx_add_to_free_desc_list(
331 					&head[rx_desc->chip_id][rx_desc->pool_id],
332 					&tail[rx_desc->chip_id][rx_desc->pool_id],
333 					rx_desc);
334 			}
335 			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
336 			continue;
337 		}
338 
339 		/*
340 		 * This is an unlikely scenario where the host is reaping
341 		 * a descriptor which it already reaped a while ago
342 		 * but has not yet replenished back to HW.
343 		 * In this case the host will dump the last 128 descriptors,
344 		 * including the software descriptor rx_desc, and assert.
345 		 */
346 
347 		if (qdf_unlikely(!rx_desc->in_use)) {
348 			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
349 			dp_info_rl("Reaping rx_desc not in use!");
350 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
351 						   ring_desc, rx_desc);
352 			/* ignore duplicate RX desc and continue to process */
353 			/* Pop out the descriptor */
354 			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
355 			continue;
356 		}
357 
358 		status = dp_rx_desc_nbuf_sanity_check(soc, ring_desc, rx_desc);
359 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
360 			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
361 			dp_info_rl("Nbuf sanity check failure!");
362 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
363 						   ring_desc, rx_desc);
364 			rx_desc->in_err_state = 1;
365 			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
366 			continue;
367 		}
368 
369 		if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
370 			dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
371 			DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
372 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
373 						   ring_desc, rx_desc);
374 		}
375 
376 		/* Get MPDU DESC info */
377 		hal_rx_mpdu_desc_info_get_be(ring_desc, &mpdu_desc_info);
378 
379 		/* Get MSDU DESC info */
380 		hal_rx_msdu_desc_info_get_be(ring_desc, &msdu_desc_info);
381 
382 		if (qdf_unlikely(msdu_desc_info.msdu_flags &
383 				 HAL_MSDU_F_MSDU_CONTINUATION)) {
384 			/* previous msdu had its last bit set, so the current
385 			 * one starts a new MPDU
386 			 */
387 			if (is_prev_msdu_last) {
388 				/* Get number of entries available in HW ring */
389 				num_entries_avail =
390 				hal_srng_dst_num_valid(hal_soc,
391 						       hal_ring_hdl, 1);
392 
393 				/* For a new MPDU, check if the complete MPDU
394 				 * can be read by comparing the number of
395 				 * buffers available against the number of
396 				 * buffers needed to reap this MPDU.
397 				 */
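				/* For example, with illustrative values of
				 * RX_DATA_BUFFER_SIZE = 2048 and
				 * rx_pkt_tlv_size = 128, a 7000-byte msdu_len
				 * needs 7000 / (2048 - 128) + 1 = 4 buffers
				 * (integer division).
				 */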
398 				if ((msdu_desc_info.msdu_len /
399 				     (RX_DATA_BUFFER_SIZE -
400 				      soc->rx_pkt_tlv_size) + 1) >
401 				    num_pending) {
402 					DP_STATS_INC(soc,
403 						     rx.msdu_scatter_wait_break,
404 						     1);
405 					dp_rx_cookie_reset_invalid_bit(
406 								     ring_desc);
407 					/* As we are going to break out of the
408 					 * loop because of unavailability of
409 					 * descs to form complete SG, we need to
410 					 * reset the TP in the REO destination
411 					 * ring.
412 					 */
413 					hal_srng_dst_dec_tp(hal_soc,
414 							    hal_ring_hdl);
415 					break;
416 				}
417 				is_prev_msdu_last = false;
418 			}
419 		}
420 
421 		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
422 			qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1);
423 
424 		if (qdf_unlikely(mpdu_desc_info.mpdu_flags &
425 				 HAL_MPDU_F_RAW_AMPDU))
426 			qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);
427 
428 		if (!is_prev_msdu_last &&
429 		    msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
430 			is_prev_msdu_last = true;
431 
432 		rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;
433 
434 		peer_mdata = mpdu_desc_info.peer_meta_data;
435 		QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
436 			dp_rx_peer_metadata_peer_id_get_be(soc, peer_mdata);
437 		QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) =
438 			dp_rx_peer_metadata_vdev_id_get_be(soc, peer_mdata);
439 		dp_rx_set_msdu_lmac_id(rx_desc->nbuf, peer_mdata);
440 
441 		/* to indicate whether this msdu is rx offload */
442 		pkt_capture_offload =
443 			DP_PEER_METADATA_OFFLOAD_GET_BE(peer_mdata);
444 
445 		/*
446 		 * Save the msdu first, last and continuation flags in
447 		 * nbuf->cb, and also save mcbc, is_da_valid, is_sa_valid and
448 		 * the length there. This ensures the info required for
449 		 * per-pkt processing is always in the same cache line,
450 		 * which helps improve throughput for smaller pkt
451 		 * sizes.
452 		 */
453 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
454 			qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);
455 
456 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
457 			qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);
458 
459 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
460 			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);
461 
462 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
463 			qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1);
464 
465 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
466 			qdf_nbuf_set_da_valid(rx_desc->nbuf, 1);
467 
468 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
469 			qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1);
470 
471 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_INTRA_BSS)
472 			qdf_nbuf_set_intra_bss(rx_desc->nbuf, 1);
473 
474 		if (qdf_likely(mpdu_desc_info.mpdu_flags &
475 			       HAL_MPDU_F_QOS_CONTROL_VALID))
476 			qdf_nbuf_set_tid_val(rx_desc->nbuf, mpdu_desc_info.tid);
477 
478 		/* set sw exception */
479 		qdf_nbuf_set_rx_reo_dest_ind_or_sw_excpt(
480 				rx_desc->nbuf,
481 				hal_rx_sw_exception_get_be(ring_desc));
482 
483 		QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len;
484 
485 		QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;
486 
487 		/*
488 		 * unmap only after the scattered-msdu wait/break logic
489 		 * above, so that a double skb unmap cannot happen.
490 		 */
491 		dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
492 		rx_desc->unmapped = 1;
493 		DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head,
494 				   ebuf_tail, rx_desc);
495 
496 		quota -= 1;
497 		num_pending -= 1;
498 
499 		dp_rx_add_to_free_desc_list
500 			(&head[rx_desc->chip_id][rx_desc->pool_id],
501 			 &tail[rx_desc->chip_id][rx_desc->pool_id], rx_desc);
502 		num_rx_bufs_reaped++;
503 
504 		dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(soc, hal_soc,
505 					       num_pending,
506 					       hal_ring_hdl,
507 					       &last_prefetched_hw_desc,
508 					       &last_prefetched_sw_desc);
509 
510 		/*
511 		 * for the scatter case, allow a break only once the
512 		 * complete msdu has been received.
513 		 */
514 		if (is_prev_msdu_last &&
515 		    dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped,
516 						  max_reap_limit))
517 			break;
518 	}
519 done:
520 	dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);
521 	qdf_dsb();
522 
523 	dp_rx_per_core_stats_update(soc, reo_ring_num, num_rx_bufs_reaped);
524 
525 	for (chip_id = 0; chip_id < WLAN_MAX_MLO_CHIPS; chip_id++) {
526 		for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
527 			/*
528 			 * continue with next mac_id if no pkts were reaped
529 			 * from that pool
530 			 */
531 			if (!rx_bufs_reaped[chip_id][mac_id])
532 				continue;
533 
534 			replenish_soc = dp_rx_replensih_soc_get(soc, chip_id);
535 
536 			dp_rxdma_srng =
537 				&replenish_soc->rx_refill_buf_ring[mac_id];
538 
539 			rx_desc_pool = &replenish_soc->rx_desc_buf[mac_id];
540 
541 			dp_rx_buffers_replenish_simple(replenish_soc, mac_id,
542 					       dp_rxdma_srng,
543 					       rx_desc_pool,
544 					       rx_bufs_reaped[chip_id][mac_id],
545 					       &head[chip_id][mac_id],
546 					       &tail[chip_id][mac_id]);
547 		}
548 	}
549 
550 	/* Peer can be NULL in case of LFR */
551 	if (qdf_likely(txrx_peer))
552 		vdev = NULL;
553 
554 	/*
555 	 * BIG loop where each nbuf is dequeued from the global queue,
556 	 * processed and queued back on a per-vdev basis. These nbufs
557 	 * are sent to the stack when we run out of nbufs
558 	 * or when a newly dequeued nbuf has a different
559 	 * vdev than the previous nbuf.
560 	 */
561 	nbuf = nbuf_head;
562 	while (nbuf) {
563 		next = nbuf->next;
564 		dp_rx_prefetch_nbuf_data_be(nbuf, next);
565 		if (qdf_unlikely(dp_rx_is_raw_frame_dropped(nbuf))) {
566 			nbuf = next;
567 			DP_STATS_INC(soc, rx.err.raw_frm_drop, 1);
568 			continue;
569 		}
570 
571 		rx_tlv_hdr = qdf_nbuf_data(nbuf);
572 		vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
573 		peer_id =  QDF_NBUF_CB_RX_PEER_ID(nbuf);
574 
575 		if (dp_rx_is_list_ready(deliver_list_head, vdev, txrx_peer,
576 					peer_id, vdev_id)) {
577 			dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
578 					       deliver_list_head,
579 					       deliver_list_tail);
580 			deliver_list_head = NULL;
581 			deliver_list_tail = NULL;
582 		}
583 
584 		/* Get TID from struct cb->tid_val, save to tid */
585 		tid = qdf_nbuf_get_tid_val(nbuf);
586 
587 		if (qdf_unlikely(!txrx_peer)) {
588 			txrx_peer = dp_rx_get_txrx_peer_and_vdev(soc, nbuf,
589 								 peer_id,
590 								 &txrx_ref_handle,
591 								 pkt_capture_offload,
592 								 &vdev,
593 								 &rx_pdev, &dsf,
594 								 &old_tid);
595 			if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
596 				nbuf = next;
597 				continue;
598 			}
599 			enh_flag = rx_pdev->enhanced_stats_en;
600 		} else if (txrx_peer && txrx_peer->peer_id != peer_id) {
601 			dp_txrx_peer_unref_delete(txrx_ref_handle,
602 						  DP_MOD_ID_RX);
603 
604 			txrx_peer = dp_rx_get_txrx_peer_and_vdev(soc, nbuf,
605 								 peer_id,
606 								 &txrx_ref_handle,
607 								 pkt_capture_offload,
608 								 &vdev,
609 								 &rx_pdev, &dsf,
610 								 &old_tid);
611 			if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
612 				nbuf = next;
613 				continue;
614 			}
615 			enh_flag = rx_pdev->enhanced_stats_en;
616 		}
617 
618 		if (txrx_peer) {
619 			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
620 			qdf_dp_trace_set_track(nbuf, QDF_RX);
621 			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
622 			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
623 				QDF_NBUF_RX_PKT_DATA_TRACK;
624 		}
625 
626 		rx_bufs_used++;
627 
628 		/* when hlos tid override is enabled, save tid in
629 		 * skb->priority
630 		 */
631 		if (qdf_unlikely(vdev->skip_sw_tid_classification &
632 					DP_TXRX_HLOS_TID_OVERRIDE_ENABLED))
633 			qdf_nbuf_set_priority(nbuf, tid);
634 
635 		DP_RX_TID_SAVE(nbuf, tid);
636 		if (qdf_unlikely(dsf) || qdf_unlikely(peer_ext_stats) ||
637 		    dp_rx_pkt_tracepoints_enabled())
638 			qdf_nbuf_set_timestamp(nbuf);
639 
640 		if (qdf_likely(old_tid != tid)) {
641 			tid_stats =
642 		&rx_pdev->stats.tid_stats.tid_rx_stats[reo_ring_num][tid];
643 			old_tid = tid;
644 		}
645 
646 		/*
647 		 * Check if DMA completed -- msdu_done is the last bit
648 		 * to be written
649 		 */
650 		if (qdf_unlikely(!qdf_nbuf_is_rx_chfrag_cont(nbuf) &&
651 				 !hal_rx_tlv_msdu_done_get_be(rx_tlv_hdr))) {
652 			dp_err("MSDU DONE failure");
653 			DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
654 			hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
655 					     QDF_TRACE_LEVEL_INFO);
656 			tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
657 			dp_rx_nbuf_free(nbuf);
658 			qdf_assert(0);
659 			nbuf = next;
660 			continue;
661 		}
662 
663 		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
664 		/*
665 		 * First IF condition:
666 		 * 802.11 Fragmented pkts are reinjected to REO
667 		 * HW block as SG pkts and for these pkts we only
668 		 * need to pull the RX TLVS header length.
669 		 * Second IF condition:
670 		 * The below condition happens when an MSDU is spread
671 		 * across multiple buffers. This can happen in two cases
672 		 * 1. The nbuf size is smaller than the received msdu.
673 		 *    ex: the nbuf size was set to 2048 during
674 		 *        nbuf_alloc, but an msdu of 2304 bytes was
675 		 *        received, so the msdu is spread
676 		 *        across 2 nbufs.
677 		 *
678 		 * 2. AMSDUs when RAW mode is enabled.
679 		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
680 		 *        across 1st nbuf and 2nd nbuf and last MSDU is
681 		 *        spread across 2nd nbuf and 3rd nbuf.
682 		 *
683 		 * for these scenarios let us create a skb frag_list and
684 		 * append these buffers till the last MSDU of the AMSDU
685 		 * Third condition:
686 		 * This is the most likely case: we receive 802.3 pkts
687 		 * decapsulated by HW, and here we only need to set the pkt length.
688 		 */
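		/* In the common third case the full frame length is simply
		 * msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;
		 * e.g. a 1400-byte MSDU with a 2-byte L3 pad and a 128-byte
		 * TLV area (illustrative sizes) gives pkt_len = 1530 before
		 * the TLVs are skipped.
		 */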
689 		hal_rx_msdu_packet_metadata_get_generic_be(rx_tlv_hdr,
690 							   &msdu_metadata);
691 		if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
692 			bool is_mcbc, is_sa_vld, is_da_vld;
693 
694 			is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
695 								 rx_tlv_hdr);
696 			is_sa_vld =
697 				hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
698 								rx_tlv_hdr);
699 			is_da_vld =
700 				hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
701 								rx_tlv_hdr);
702 
703 			qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
704 			qdf_nbuf_set_da_valid(nbuf, is_da_vld);
705 			qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);
706 
707 			qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
708 		} else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
709 			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
710 			nbuf = dp_rx_sg_create(soc, nbuf);
711 			next = nbuf->next;
712 
713 			if (qdf_nbuf_is_raw_frame(nbuf)) {
714 				DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
715 				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
716 							      rx.raw, 1,
717 							      msdu_len);
718 			} else {
719 				dp_rx_nbuf_free(nbuf);
720 				DP_STATS_INC(soc, rx.err.scatter_msdu, 1);
721 				dp_info_rl("scatter msdu len %d, dropped",
722 					   msdu_len);
723 				nbuf = next;
724 				continue;
725 			}
726 		} else {
727 			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
728 			pkt_len = msdu_len +
729 				  msdu_metadata.l3_hdr_pad +
730 				  soc->rx_pkt_tlv_size;
731 
732 			qdf_nbuf_set_pktlen(nbuf, pkt_len);
733 			dp_rx_skip_tlvs(soc, nbuf, msdu_metadata.l3_hdr_pad);
734 		}
735 
736 		dp_rx_send_pktlog(soc, rx_pdev, nbuf, QDF_TX_RX_STATUS_OK);
737 
738 		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
739 			dp_rx_err("%pK: Policy Check Drop pkt", soc);
740 			DP_PEER_PER_PKT_STATS_INC(txrx_peer,
741 						  rx.policy_check_drop, 1);
742 			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
743 			/* Drop & free packet */
744 			dp_rx_nbuf_free(nbuf);
745 			/* Statistics */
746 			nbuf = next;
747 			continue;
748 		}
749 
750 		/*
751 		 * Drop non-EAPOL frames from unauthorized peer.
752 		 */
753 		if (qdf_likely(txrx_peer) &&
754 		    qdf_unlikely(!txrx_peer->authorize) &&
755 		    !qdf_nbuf_is_raw_frame(nbuf)) {
756 			bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
757 					qdf_nbuf_is_ipv4_wapi_pkt(nbuf);
758 
759 			if (!is_eapol) {
760 				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
761 							  rx.peer_unauth_rx_pkt_drop,
762 							  1);
763 				dp_rx_nbuf_free(nbuf);
764 				nbuf = next;
765 				continue;
766 			}
767 		}
768 
769 		dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);
770 
771 		if (qdf_unlikely(!rx_pdev->rx_fast_flag)) {
772 			/*
773 			 * process frame for multipass passphrase processing
774 			 */
775 			if (qdf_unlikely(vdev->multipass_en)) {
776 				if (dp_rx_multipass_process(txrx_peer, nbuf,
777 							    tid) == false) {
778 					DP_PEER_PER_PKT_STATS_INC
779 						(txrx_peer,
780 						 rx.multipass_rx_pkt_drop, 1);
781 					dp_rx_nbuf_free(nbuf);
782 					nbuf = next;
783 					continue;
784 				}
785 			}
786 			if (qdf_unlikely(txrx_peer &&
787 					 (txrx_peer->nawds_enabled) &&
788 					 (qdf_nbuf_is_da_mcbc(nbuf)) &&
789 					 (hal_rx_get_mpdu_mac_ad4_valid_be
790 						(rx_tlv_hdr) == false))) {
791 				tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
792 				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
793 							  rx.nawds_mcast_drop,
794 							  1);
795 				dp_rx_nbuf_free(nbuf);
796 				nbuf = next;
797 				continue;
798 			}
799 
800 			/* Update the protocol tag in SKB based on CCE metadata
801 			 */
802 			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
803 						  reo_ring_num, false, true);
804 
805 			/* Update the flow tag in SKB based on FSE metadata */
806 			dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr,
807 					      true);
808 
809 			if (qdf_likely(vdev->rx_decap_type ==
810 				       htt_cmn_pkt_type_ethernet) &&
811 			    qdf_likely(!vdev->mesh_vdev)) {
812 				dp_rx_wds_learn(soc, vdev,
813 						rx_tlv_hdr,
814 						txrx_peer,
815 						nbuf,
816 						msdu_metadata);
817 			}
818 
819 			if (qdf_unlikely(vdev->mesh_vdev)) {
820 				if (dp_rx_filter_mesh_packets(vdev, nbuf,
821 							      rx_tlv_hdr)
822 						== QDF_STATUS_SUCCESS) {
823 					dp_rx_info("%pK: mesh pkt filtered",
824 						   soc);
825 					tid_stats->fail_cnt[MESH_FILTER_DROP]++;
826 					DP_STATS_INC(vdev->pdev,
827 						     dropped.mesh_filter, 1);
828 
829 					dp_rx_nbuf_free(nbuf);
830 					nbuf = next;
831 					continue;
832 				}
833 				dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr,
834 						      txrx_peer);
835 			}
836 		}
837 
838 		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
839 					reo_ring_num, tid_stats);
840 
841 		if (qdf_likely(vdev->rx_decap_type ==
842 			       htt_cmn_pkt_type_ethernet) &&
843 		    qdf_likely(!vdev->mesh_vdev)) {
844 			/* Intrabss-fwd */
845 			if (dp_rx_check_ap_bridge(vdev))
846 				if (dp_rx_intrabss_fwd_be(soc, txrx_peer,
847 							  rx_tlv_hdr,
848 							  nbuf,
849 							  msdu_metadata)) {
850 					nbuf = next;
851 					tid_stats->intrabss_cnt++;
852 					continue; /* Get next desc */
853 				}
854 		}
855 
856 		dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt);
857 
858 		dp_rx_mark_first_packet_after_wow_wakeup(vdev->pdev, rx_tlv_hdr,
859 							 nbuf);
860 
861 		dp_rx_update_stats(soc, nbuf);
862 
863 		dp_pkt_add_timestamp(txrx_peer->vdev, QDF_PKT_RX_DRIVER_ENTRY,
864 				     current_time, nbuf);
865 
866 		DP_RX_LIST_APPEND(deliver_list_head,
867 				  deliver_list_tail,
868 				  nbuf);
869 
870 		DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1,
871 					  QDF_NBUF_CB_RX_PKT_LEN(nbuf),
872 					  enh_flag);
873 		if (qdf_unlikely(txrx_peer->in_twt))
874 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
875 						      rx.to_stack_twt, 1,
876 						      QDF_NBUF_CB_RX_PKT_LEN(nbuf));
877 
878 		tid_stats->delivered_to_stack++;
879 		nbuf = next;
880 	}
881 
882 	DP_RX_DELIVER_TO_STACK(soc, vdev, txrx_peer, peer_id,
883 			       pkt_capture_offload,
884 			       deliver_list_head,
885 			       deliver_list_tail);
886 
887 	if (qdf_likely(txrx_peer))
888 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
889 
890 	/*
891 	 * If we are processing in the near-full condition, there are 3 scenarios:
892 	 * 1) Ring entries have reached the critical state
893 	 * 2) Ring entries are still near the high threshold
894 	 * 3) Ring entries are below the safe level
895 	 *
896 	 * One more loop will move the state to normal processing and yield.
897 	 */
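	/* ring_near_full was sampled before the reap via
	 * _dp_srng_test_and_update_nf_params(), which may also have adjusted
	 * max_reap_limit for this pass.
	 */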
898 	if (ring_near_full && quota)
899 		goto more_data;
900 
901 	if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
902 		if (quota) {
903 			num_pending =
904 				dp_rx_srng_get_num_pending(hal_soc,
905 							   hal_ring_hdl,
906 							   num_entries,
907 							   &near_full);
908 			if (num_pending) {
909 				DP_STATS_INC(soc, rx.hp_oos2, 1);
910 
911 				if (!hif_exec_should_yield(scn, intr_id))
912 					goto more_data;
913 
914 				if (qdf_unlikely(near_full)) {
915 					DP_STATS_INC(soc, rx.near_full, 1);
916 					goto more_data;
917 				}
918 			}
919 		}
920 
921 		if (vdev && vdev->osif_fisa_flush)
922 			vdev->osif_fisa_flush(soc, reo_ring_num);
923 
924 		if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
925 			vdev->osif_gro_flush(vdev->osif_vdev,
926 					     reo_ring_num);
927 		}
928 	}
929 
930 	/* Update histogram statistics by looping through pdevs */
931 	DP_RX_HIST_STATS_PER_PDEV();
932 
933 	return rx_bufs_used; /* Assume no scale factor for now */
934 }
935 
936 #ifdef RX_DESC_MULTI_PAGE_ALLOC
937 /**
938  * dp_rx_desc_pool_init_be_cc() - initialize RX desc pool for cookie conversion
939  * @soc: Handle to DP Soc structure
940  * @rx_desc_pool: Rx descriptor pool handle
941  * @pool_id: Rx descriptor pool ID
942  *
943  * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
944  */
945 static QDF_STATUS
946 dp_rx_desc_pool_init_be_cc(struct dp_soc *soc,
947 			   struct rx_desc_pool *rx_desc_pool,
948 			   uint32_t pool_id)
949 {
950 	struct dp_hw_cookie_conversion_t *cc_ctx;
951 	struct dp_soc_be *be_soc;
952 	union dp_rx_desc_list_elem_t *rx_desc_elem;
953 	struct dp_spt_page_desc *page_desc;
954 	uint32_t ppt_idx = 0;
955 	uint32_t avail_entry_index = 0;
956 
957 	if (!rx_desc_pool->pool_size) {
958 		dp_err("desc_num 0 !!");
959 		return QDF_STATUS_E_FAILURE;
960 	}
961 
962 	be_soc = dp_get_be_soc_from_dp_soc(soc);
963 	cc_ctx  = &be_soc->rx_cc_ctx[pool_id];
964 
965 	page_desc = &cc_ctx->page_desc_base[0];
966 	rx_desc_elem = rx_desc_pool->freelist;
967 	while (rx_desc_elem) {
968 		if (avail_entry_index == 0) {
969 			if (ppt_idx >= cc_ctx->total_page_num) {
970 				dp_alert("insufficient secondary page tables");
971 				qdf_assert_always(0);
972 			}
973 			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
974 		}
975 
976 		/* put each RX Desc VA to SPT pages and
977 		 * get corresponding ID
978 		 */
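		/* The generated cookie is effectively a (secondary page table
		 * index, entry index within the page) pair; dp_cc_desc_find()
		 * performs the reverse lookup when only the cookie is
		 * available (see dp_rx_desc_cookie_2_va_be() further down).
		 */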
979 		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
980 					 avail_entry_index,
981 					 &rx_desc_elem->rx_desc);
982 		rx_desc_elem->rx_desc.cookie =
983 			dp_cc_desc_id_generate(page_desc->ppt_index,
984 					       avail_entry_index);
985 		rx_desc_elem->rx_desc.chip_id = dp_mlo_get_chip_id(soc);
986 		rx_desc_elem->rx_desc.pool_id = pool_id;
987 		rx_desc_elem->rx_desc.in_use = 0;
988 		rx_desc_elem = rx_desc_elem->next;
989 
990 		avail_entry_index = (avail_entry_index + 1) &
991 					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
992 	}
993 
994 	return QDF_STATUS_SUCCESS;
995 }
996 #else
997 static QDF_STATUS
998 dp_rx_desc_pool_init_be_cc(struct dp_soc *soc,
999 			   struct rx_desc_pool *rx_desc_pool,
1000 			   uint32_t pool_id)
1001 {
1002 	struct dp_hw_cookie_conversion_t *cc_ctx;
1003 	struct dp_soc_be *be_soc;
1004 	struct dp_spt_page_desc *page_desc;
1005 	uint32_t ppt_idx = 0;
1006 	uint32_t avail_entry_index = 0;
1007 	int i = 0;
1008 
1009 	if (!rx_desc_pool->pool_size) {
1010 		dp_err("desc_num 0 !!");
1011 		return QDF_STATUS_E_FAILURE;
1012 	}
1013 
1014 	be_soc = dp_get_be_soc_from_dp_soc(soc);
1015 	cc_ctx  = &be_soc->rx_cc_ctx[pool_id];
1016 
1017 	page_desc = &cc_ctx->page_desc_base[0];
1018 	for (i = 0; i <= rx_desc_pool->pool_size - 1; i++) {
1019 		if (i == rx_desc_pool->pool_size - 1)
1020 			rx_desc_pool->array[i].next = NULL;
1021 		else
1022 			rx_desc_pool->array[i].next =
1023 				&rx_desc_pool->array[i + 1];
1024 
1025 		if (avail_entry_index == 0) {
1026 			if (ppt_idx >= cc_ctx->total_page_num) {
1027 				dp_alert("insufficient secondary page tables");
1028 				qdf_assert_always(0);
1029 			}
1030 			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
1031 		}
1032 
1033 		/* put each RX Desc VA to SPT pages and
1034 		 * get corresponding ID
1035 		 */
1036 		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
1037 					 avail_entry_index,
1038 					 &rx_desc_pool->array[i].rx_desc);
1039 		rx_desc_pool->array[i].rx_desc.cookie =
1040 			dp_cc_desc_id_generate(page_desc->ppt_index,
1041 					       avail_entry_index);
1042 		rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
1043 		rx_desc_pool->array[i].rx_desc.in_use = 0;
1044 		rx_desc_pool->array[i].rx_desc.chip_id =
1045 					dp_mlo_get_chip_id(soc);
1046 
1047 		avail_entry_index = (avail_entry_index + 1) &
1048 					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
1049 	}
1050 	return QDF_STATUS_SUCCESS;
1051 }
1052 #endif
1053 
1054 static void
1055 dp_rx_desc_pool_deinit_be_cc(struct dp_soc *soc,
1056 			     struct rx_desc_pool *rx_desc_pool,
1057 			     uint32_t pool_id)
1058 {
1059 	struct dp_spt_page_desc *page_desc;
1060 	struct dp_soc_be *be_soc;
1061 	int i = 0;
1062 	struct dp_hw_cookie_conversion_t *cc_ctx;
1063 
1064 	be_soc = dp_get_be_soc_from_dp_soc(soc);
1065 	cc_ctx  = &be_soc->rx_cc_ctx[pool_id];
1066 
1067 	for (i = 0; i < cc_ctx->total_page_num; i++) {
1068 		page_desc = &cc_ctx->page_desc_base[i];
1069 		qdf_mem_zero(page_desc->page_v_addr, qdf_page_size);
1070 	}
1071 }
1072 
1073 QDF_STATUS dp_rx_desc_pool_init_be(struct dp_soc *soc,
1074 				   struct rx_desc_pool *rx_desc_pool,
1075 				   uint32_t pool_id)
1076 {
1077 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1078 
1079 	/* Only the regular RX buffer desc pool uses HW cookie conversion */
1080 	if (rx_desc_pool->desc_type == DP_RX_DESC_BUF_TYPE) {
1081 		dp_info("rx_desc_buf pool init");
1082 		status = dp_rx_desc_pool_init_be_cc(soc,
1083 						    rx_desc_pool,
1084 						    pool_id);
1085 	} else {
1086 		dp_info("non_rx_desc_buf_pool init");
1087 		status = dp_rx_desc_pool_init_generic(soc, rx_desc_pool,
1088 						      pool_id);
1089 	}
1090 
1091 	return status;
1092 }
1093 
1094 void dp_rx_desc_pool_deinit_be(struct dp_soc *soc,
1095 			       struct rx_desc_pool *rx_desc_pool,
1096 			       uint32_t pool_id)
1097 {
1098 	if (rx_desc_pool->desc_type == DP_RX_DESC_BUF_TYPE)
1099 		dp_rx_desc_pool_deinit_be_cc(soc, rx_desc_pool, pool_id);
1100 }
1101 
1102 #ifdef DP_FEATURE_HW_COOKIE_CONVERSION
1103 #ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
1104 QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
1105 					       void *ring_desc,
1106 					       struct dp_rx_desc **r_rx_desc)
1107 {
1108 	if (hal_rx_wbm_get_cookie_convert_done(ring_desc)) {
1109 		/* HW cookie conversion done */
1110 		*r_rx_desc = (struct dp_rx_desc *)
1111 				hal_rx_wbm_get_desc_va(ring_desc);
1112 	} else {
1113 		/* SW does the cookie conversion */
1114 		uint32_t cookie = HAL_RX_BUF_COOKIE_GET(ring_desc);
1115 
1116 		*r_rx_desc = (struct dp_rx_desc *)
1117 				dp_cc_desc_find(soc, cookie);
1118 	}
1119 
1120 	return QDF_STATUS_SUCCESS;
1121 }
1122 #else
1123 QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
1124 					       void *ring_desc,
1125 					       struct dp_rx_desc **r_rx_desc)
1126 {
1127 	 *r_rx_desc = (struct dp_rx_desc *)
1128 			hal_rx_wbm_get_desc_va(ring_desc);
1129 
1130 	return QDF_STATUS_SUCCESS;
1131 }
1132 #endif /* DP_HW_COOKIE_CONVERT_EXCEPTION */
1133 #else
1134 QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
1135 					       void *ring_desc,
1136 					       struct dp_rx_desc **r_rx_desc)
1137 {
1138 	/* SW does the cookie conversion */
1139 	uint32_t cookie = HAL_RX_BUF_COOKIE_GET(ring_desc);
1140 
1141 	*r_rx_desc = (struct dp_rx_desc *)
1142 			dp_cc_desc_find(soc, cookie);
1143 
1144 	return QDF_STATUS_SUCCESS;
1145 }
1146 #endif /* DP_FEATURE_HW_COOKIE_CONVERSION */
1147 
1148 struct dp_rx_desc *dp_rx_desc_cookie_2_va_be(struct dp_soc *soc,
1149 					     uint32_t cookie)
1150 {
1151 	return (struct dp_rx_desc *)dp_cc_desc_find(soc, cookie);
1152 }
1153 
1154 #if defined(WLAN_FEATURE_11BE_MLO)
1155 #if defined(WLAN_MLO_MULTI_CHIP) && defined(WLAN_MCAST_MLO)
1156 #define DP_RANDOM_MAC_ID_BIT_MASK	0xC0
1157 #define DP_RANDOM_MAC_OFFSET	1
1158 #define DP_MAC_LOCAL_ADMBIT_MASK	0x2
1159 #define DP_MAC_LOCAL_ADMBIT_OFFSET	0
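/*
 * Example (hypothetical MLD MAC): 00:11:22:33:44:55 becomes
 * 02:d1:22:33:44:55 -- byte 0 gets the locally-administered bit set
 * (0x00 | 0x02) and byte 1 is XORed with the random-MAC mask
 * (0x11 ^ 0xc0 = 0xd1).
 */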
1160 static inline void dp_rx_dummy_src_mac(struct dp_vdev *vdev,
1161 				       qdf_nbuf_t nbuf)
1162 {
1163 	uint8_t random_mac[QDF_MAC_ADDR_SIZE] = {0};
1164 	qdf_ether_header_t *eh =
1165 			(qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1166 
1167 	qdf_mem_copy(random_mac, &vdev->mld_mac_addr.raw[0], QDF_MAC_ADDR_SIZE);
1168 	random_mac[DP_MAC_LOCAL_ADMBIT_OFFSET] =
1169 					random_mac[DP_MAC_LOCAL_ADMBIT_OFFSET] |
1170 					DP_MAC_LOCAL_ADMBIT_MASK;
1171 	random_mac[DP_RANDOM_MAC_OFFSET] =
1172 		random_mac[DP_RANDOM_MAC_OFFSET] ^ DP_RANDOM_MAC_ID_BIT_MASK;
1173 
1174 	qdf_mem_copy(&eh->ether_shost[0], random_mac,  QDF_MAC_ADDR_SIZE);
1175 }
1176 
1177 #ifdef QCA_SUPPORT_WDS_EXTENDED
1178 static inline bool dp_rx_mlo_igmp_wds_ext_handler(struct dp_txrx_peer *peer)
1179 {
1180 	return qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT, &peer->wds_ext.init);
1181 }
1182 #else
1183 static inline bool dp_rx_mlo_igmp_wds_ext_handler(struct dp_txrx_peer *peer)
1184 {
1185 	return false;
1186 }
1187 #endif
1188 
1189 bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
1190 			    struct dp_vdev *vdev,
1191 			    struct dp_txrx_peer *peer,
1192 			    qdf_nbuf_t nbuf)
1193 {
1194 	struct dp_vdev *mcast_primary_vdev = NULL;
1195 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
1196 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1197 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
1198 	struct cdp_tid_rx_stats *tid_stats = &peer->vdev->pdev->stats.
1199 					tid_stats.tid_rx_wbm_stats[0][tid];
1200 
1201 	if (!(qdf_nbuf_is_ipv4_igmp_pkt(nbuf) ||
1202 	      qdf_nbuf_is_ipv6_igmp_pkt(nbuf)))
1203 		return false;
1204 
1205 	if (!peer->bss_peer) {
1206 		if (dp_rx_intrabss_mcbc_fwd(soc, peer, NULL, nbuf, tid_stats))
1207 			dp_rx_err("forwarding failed");
1208 	}
1209 
1210 	/*
1211 	 * In the case of ME6, backhaul WDS and NAWDS,
1212 	 * send the igmp pkt on the same link where it was received,
1213 	 * as these features use peer-based tcl metadata.
1214 	 */
1215 
1216 	qdf_nbuf_set_next(nbuf, NULL);
1217 
1218 	if (vdev->mcast_enhancement_en || be_vdev->mcast_primary ||
1219 	    peer->nawds_enabled)
1220 		goto send_pkt;
1221 
1222 	if (qdf_unlikely(dp_rx_mlo_igmp_wds_ext_handler(peer)))
1223 		goto send_pkt;
1224 
1225 	mcast_primary_vdev = dp_mlo_get_mcast_primary_vdev(be_soc, be_vdev,
1226 							   DP_MOD_ID_RX);
1227 	if (!mcast_primary_vdev) {
1228 		dp_rx_debug("Non mlo vdev");
1229 		goto send_pkt;
1230 	}
1231 
1232 	if (qdf_unlikely(vdev->wrap_vdev)) {
1233 		/* In the case of a qwrap repeater, send the original
1234 		 * packet on the interface where it was received and a copy
1235 		 * with a dummy src MAC on the mcast primary interface.
1236 		 */
1237 		qdf_nbuf_t nbuf_copy;
1238 
1239 		nbuf_copy = qdf_nbuf_copy(nbuf);
1240 		if (qdf_likely(nbuf_copy))
1241 			dp_rx_deliver_to_stack(soc, vdev, peer, nbuf_copy,
1242 					       NULL);
1243 	}
1244 
1245 	dp_rx_dummy_src_mac(vdev, nbuf);
1246 	dp_rx_deliver_to_stack(mcast_primary_vdev->pdev->soc,
1247 			       mcast_primary_vdev,
1248 			       peer,
1249 			       nbuf,
1250 			       NULL);
1251 	dp_vdev_unref_delete(mcast_primary_vdev->pdev->soc,
1252 			     mcast_primary_vdev,
1253 			     DP_MOD_ID_RX);
1254 	return true;
1255 send_pkt:
1256 	dp_rx_deliver_to_stack(be_vdev->vdev.pdev->soc,
1257 			       &be_vdev->vdev,
1258 			       peer,
1259 			       nbuf,
1260 			       NULL);
1261 	return true;
1262 }
1263 #else
1264 bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
1265 			    struct dp_vdev *vdev,
1266 			    struct dp_txrx_peer *peer,
1267 			    qdf_nbuf_t nbuf)
1268 {
1269 	return false;
1270 }
1271 #endif
1272 #endif
1273 
1274 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1275 uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
1276 			  hal_ring_handle_t hal_ring_hdl,
1277 			  uint8_t reo_ring_num,
1278 			  uint32_t quota)
1279 {
1280 	struct dp_soc *soc = int_ctx->soc;
1281 	struct dp_srng *rx_ring = &soc->reo_dest_ring[reo_ring_num];
1282 	uint32_t work_done = 0;
1283 
1284 	if (dp_srng_get_near_full_level(soc, rx_ring) <
1285 			DP_SRNG_THRESH_NEAR_FULL)
1286 		return 0;
1287 
1288 	qdf_atomic_set(&rx_ring->near_full, 1);
1289 	work_done++;
1290 
1291 	return work_done;
1292 }
1293 #endif
1294 
1295 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1296 #ifdef WLAN_FEATURE_11BE_MLO
1297 /**
1298  * dp_rx_intrabss_fwd_mlo_allow() - check if MLO forwarding is allowed
1299  * @ta_peer: transmitter peer handle
1300  * @da_peer: destination peer handle
1301  *
1302  * Return: true - MLO forwarding case, false: not
1303  */
1304 static inline bool
1305 dp_rx_intrabss_fwd_mlo_allow(struct dp_txrx_peer *ta_peer,
1306 			     struct dp_txrx_peer *da_peer)
1307 {
1308 	/* at least one of the TA/DA peers should be an MLO connection
1309 	 * peer; only the MLD peer type is expected here
1310 	 */
1311 	if (!IS_MLO_DP_MLD_TXRX_PEER(ta_peer) &&
1312 	    !IS_MLO_DP_MLD_TXRX_PEER(da_peer))
1313 		return false;
1314 
1315 	/* TA peer and DA peer's vdev should be partner MLO vdevs */
1316 	if (dp_peer_find_mac_addr_cmp(&ta_peer->vdev->mld_mac_addr,
1317 				      &da_peer->vdev->mld_mac_addr))
1318 		return false;
1319 
1320 	return true;
1321 }
1322 #else
1323 static inline bool
1324 dp_rx_intrabss_fwd_mlo_allow(struct dp_txrx_peer *ta_peer,
1325 			     struct dp_txrx_peer *da_peer)
1326 {
1327 	return false;
1328 }
1329 #endif
1330 
1331 #ifdef INTRA_BSS_FWD_OFFLOAD
1332 /**
1333  * dp_rx_intrabss_ucast_check_be() - Check if intrabss is allowed
1334  *				      for unicast frame
1335  * @nbuf: RX packet buffer
1336  * @ta_peer: transmitter DP peer handle
1337  * @msdu_metadata: MSDU meta data info
1338  * @params: Intra-BSS parameters; holds the destination soc and
1339  *	    returns the vdev id for Intra-BSS TX
1340  *
1341  * Return: true - intrabss allowed
1342  *	   false - not allowed
1343  */
1344 static bool
1345 dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
1346 			      struct dp_txrx_peer *ta_peer,
1347 			      struct hal_rx_msdu_metadata *msdu_metadata,
1348 			      struct dp_be_intrabss_params *params)
1349 {
1350 	uint16_t da_peer_id;
1351 	struct dp_txrx_peer *da_peer;
1352 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1353 
1354 	if (!qdf_nbuf_is_intra_bss(nbuf))
1355 		return false;
1356 
1357 	da_peer_id = dp_rx_peer_metadata_peer_id_get_be(
1358 						params->dest_soc,
1359 						msdu_metadata->da_idx);
1360 	da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc, da_peer_id,
1361 					     &txrx_ref_handle, DP_MOD_ID_RX);
1362 	if (!da_peer)
1363 		return false;
1364 	params->tx_vdev_id = da_peer->vdev->vdev_id;
1365 	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
1366 
1367 	return true;
1368 }
1369 #else
1370 #ifdef WLAN_MLO_MULTI_CHIP
1371 static bool
1372 dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
1373 			      struct dp_txrx_peer *ta_peer,
1374 			      struct hal_rx_msdu_metadata *msdu_metadata,
1375 			      struct dp_be_intrabss_params *params)
1376 {
1377 	uint16_t da_peer_id;
1378 	struct dp_txrx_peer *da_peer;
1379 	bool ret = false;
1380 	uint8_t dest_chip_id;
1381 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1382 	struct dp_vdev_be *be_vdev =
1383 		dp_get_be_vdev_from_dp_vdev(ta_peer->vdev);
1384 	struct dp_soc_be *be_soc =
1385 		dp_get_be_soc_from_dp_soc(params->dest_soc);
1386 
1387 	if (!(qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf)))
1388 		return false;
1389 
1390 	dest_chip_id = HAL_RX_DEST_CHIP_ID_GET(msdu_metadata);
1391 	qdf_assert_always(dest_chip_id <= (DP_MLO_MAX_DEST_CHIP_ID - 1));
1392 	da_peer_id = HAL_RX_PEER_ID_GET(msdu_metadata);
1393 
1394 	/* use dest chip id when TA is MLD peer and DA is legacy */
1395 	if (be_soc->mlo_enabled &&
1396 	    ta_peer->mld_peer &&
1397 	    !(da_peer_id & HAL_RX_DA_IDX_ML_PEER_MASK)) {
1398 		/* validate chip_id, get a ref, and re-assign soc */
1399 		params->dest_soc =
1400 			dp_mlo_get_soc_ref_by_chip_id(be_soc->ml_ctxt,
1401 						      dest_chip_id);
1402 		if (!params->dest_soc)
1403 			return false;
1404 
1405 		da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc,
1406 						     da_peer_id,
1407 						     &txrx_ref_handle,
1408 						     DP_MOD_ID_RX);
1409 		if (!da_peer)
1410 			return false;
1411 
1412 	} else {
1413 		da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc,
1414 						     da_peer_id,
1415 						     &txrx_ref_handle,
1416 						     DP_MOD_ID_RX);
1417 		if (!da_peer)
1418 			return false;
1419 
1420 		params->dest_soc = da_peer->vdev->pdev->soc;
1421 		if (!params->dest_soc)
1422 			goto rel_da_peer;
1423 
1424 	}
1425 
1426 	params->tx_vdev_id = da_peer->vdev->vdev_id;
1427 
1428 	/* If the source or destination peer is in the isolation
1429 	 * list, then don't forward; instead push to the bridge stack.
1430 	 */
1431 	if (dp_get_peer_isolation(ta_peer) ||
1432 	    dp_get_peer_isolation(da_peer)) {
1433 		ret = false;
1434 		goto rel_da_peer;
1435 	}
1436 
1437 	if (da_peer->bss_peer || (da_peer == ta_peer)) {
1438 		ret = false;
1439 		goto rel_da_peer;
1440 	}
1441 
1442 	/* Same vdev, support Intra-BSS */
1443 	if (da_peer->vdev == ta_peer->vdev) {
1444 		ret = true;
1445 		goto rel_da_peer;
1446 	}
1447 
1448 	/* MLO specific Intra-BSS check */
1449 	if (dp_rx_intrabss_fwd_mlo_allow(ta_peer, da_peer)) {
1450 		/* use dest chip id for legacy dest peer */
1451 		if (!(da_peer_id & HAL_RX_DA_IDX_ML_PEER_MASK)) {
1452 			if (!(be_vdev->partner_vdev_list[dest_chip_id][0] ==
1453 			      params->tx_vdev_id) &&
1454 			    !(be_vdev->partner_vdev_list[dest_chip_id][1] ==
1455 			      params->tx_vdev_id)) {
1456 				/*dp_soc_unref_delete(soc);*/
1457 				goto rel_da_peer;
1458 			}
1459 		}
1460 		ret = true;
1461 	}
1462 
1463 rel_da_peer:
1464 	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
1465 	return ret;
1466 }
1467 #else
1468 static bool
1469 dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
1470 			      struct dp_txrx_peer *ta_peer,
1471 			      struct hal_rx_msdu_metadata *msdu_metadata,
1472 			      struct dp_be_intrabss_params *params)
1473 {
1474 	uint16_t da_peer_id;
1475 	struct dp_txrx_peer *da_peer;
1476 	bool ret = false;
1477 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1478 
1479 	if (!qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf))
1480 		return false;
1481 
1482 	da_peer_id = dp_rx_peer_metadata_peer_id_get_be(
1483 						params->dest_soc,
1484 						msdu_metadata->da_idx);
1485 
1486 	da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc, da_peer_id,
1487 					     &txrx_ref_handle, DP_MOD_ID_RX);
1488 	if (!da_peer)
1489 		return false;
1490 
1491 	params->tx_vdev_id = da_peer->vdev->vdev_id;
1492 	/* If the source or destination peer is in the isolation
1493 	 * list, then don't forward; instead push to the bridge stack.
1494 	 */
1495 	if (dp_get_peer_isolation(ta_peer) ||
1496 	    dp_get_peer_isolation(da_peer))
1497 		goto rel_da_peer;
1498 
1499 	if (da_peer->bss_peer || da_peer == ta_peer)
1500 		goto rel_da_peer;
1501 
1502 	/* Same vdev, support Intra-BSS */
1503 	if (da_peer->vdev == ta_peer->vdev) {
1504 		ret = true;
1505 		goto rel_da_peer;
1506 	}
1507 
1508 	/* MLO specific Intra-BSS check */
1509 	if (dp_rx_intrabss_fwd_mlo_allow(ta_peer, da_peer)) {
1510 		ret = true;
1511 		goto rel_da_peer;
1512 	}
1513 
1514 rel_da_peer:
1515 	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
1516 	return ret;
1517 }
1518 #endif /* WLAN_MLO_MULTI_CHIP */
1519 #endif /* INTRA_BSS_FWD_OFFLOAD */
1520 
1521 /*
1522  * dp_rx_intrabss_handle_nawds_be() - Forward mcbc intrabss pkts in nawds case
1523  * @soc: core txrx main context
1524  * @ta_txrx_peer: source txrx_peer entry
1525  * @nbuf_copy: nbuf that has to be intrabss forwarded
1526  * @tid_stats: tid_stats structure
1527  *
1528  * Return: true if it is forwarded else false
1529  */
1530 bool
1531 dp_rx_intrabss_handle_nawds_be(struct dp_soc *soc,
1532 			       struct dp_txrx_peer *ta_txrx_peer,
1533 			       qdf_nbuf_t nbuf_copy,
1534 			       struct cdp_tid_rx_stats *tid_stats)
1535 {
1536 	if (qdf_unlikely(ta_txrx_peer->vdev->nawds_enabled)) {
1537 		struct cdp_tx_exception_metadata tx_exc_metadata = {0};
1538 		uint16_t len = QDF_NBUF_CB_RX_PKT_LEN(nbuf_copy);
1539 
1540 		tx_exc_metadata.peer_id = ta_txrx_peer->peer_id;
1541 		tx_exc_metadata.is_intrabss_fwd = 1;
1542 		tx_exc_metadata.tid = HTT_TX_EXT_TID_INVALID;
1543 		if (dp_tx_send_exception((struct cdp_soc_t *)soc,
1544 					 ta_txrx_peer->vdev->vdev_id,
1545 					 nbuf_copy,
1546 					 &tx_exc_metadata)) {
1547 			DP_PEER_PER_PKT_STATS_INC_PKT(ta_txrx_peer,
1548 						      rx.intra_bss.fail, 1,
1549 						      len);
1550 			tid_stats->fail_cnt[INTRABSS_DROP]++;
1551 			qdf_nbuf_free(nbuf_copy);
1552 		} else {
1553 			DP_PEER_PER_PKT_STATS_INC_PKT(ta_txrx_peer,
1554 						      rx.intra_bss.pkts, 1,
1555 						      len);
1556 			tid_stats->intrabss_cnt++;
1557 		}
1558 		return true;
1559 	}
1560 	return false;
1561 }
1562 
1563 /*
1564  * dp_rx_intrabss_fwd_be() - API for intrabss fwd. For EAPOL
1565  *  pkt with DA not equal to vdev mac addr, fwd is not allowed.
1566  * @soc: core txrx main context
1567  * @ta_peer: source peer entry
1568  * @rx_tlv_hdr: start address of rx tlvs
1569  * @nbuf: nbuf that has to be intrabss forwarded
1570  * @msdu_metadata: msdu metadata
1571  *
1572  * Return: true if it is forwarded else false
1573  */
1574 bool dp_rx_intrabss_fwd_be(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
1575 			   uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
1576 			   struct hal_rx_msdu_metadata msdu_metadata)
1577 {
1578 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
1579 	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
1580 	struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
1581 					tid_stats.tid_rx_stats[ring_id][tid];
1582 	bool ret = false;
1583 	struct dp_be_intrabss_params params;
1584 
1585 	/* If it is a broadcast pkt (e.g. ARP) and it is not from its own
1586 	 * source, then clone the pkt, send the cloned pkt for
1587 	 * intra-BSS forwarding and pass the original pkt up the network stack.
1588 	 * Note: how do we handle multicast pkts? Do we forward
1589 	 * all multicast pkts as-is, or let a higher-layer module
1590 	 * like igmpsnoop decide whether to forward or not, with
1591 	 * Mcast enhancement?
1592 	 */
1593 	if (qdf_nbuf_is_da_mcbc(nbuf) && !ta_peer->bss_peer) {
1594 		return dp_rx_intrabss_mcbc_fwd(soc, ta_peer, rx_tlv_hdr,
1595 					       nbuf, tid_stats);
1596 	}
1597 
1598 	if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
1599 					    nbuf))
1600 		return true;
1601 
1602 	params.dest_soc = soc;
1603 	if (dp_rx_intrabss_ucast_check_be(nbuf, ta_peer,
1604 					  &msdu_metadata, &params)) {
1605 		ret = dp_rx_intrabss_ucast_fwd(params.dest_soc, ta_peer,
1606 					       params.tx_vdev_id,
1607 					       rx_tlv_hdr, nbuf, tid_stats);
1608 	}
1609 
1610 	return ret;
1611 }
1612 #endif
1613