xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/be/dp_be_rx.c (revision d3be64a66deb873bac895fb0ecea12cbfca02017)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "cdp_txrx_cmn_struct.h"
21 #include "hal_hw_headers.h"
22 #include "dp_types.h"
23 #include "dp_rx.h"
24 #include "dp_tx.h"
25 #include "dp_be_rx.h"
26 #include "dp_peer.h"
27 #include "hal_rx.h"
28 #include "hal_be_rx.h"
29 #include "hal_api.h"
30 #include "hal_be_api.h"
31 #include "qdf_nbuf.h"
32 #ifdef MESH_MODE_SUPPORT
33 #include "if_meta_hdr.h"
34 #endif
35 #include "dp_internal.h"
36 #include "dp_ipa.h"
37 #ifdef FEATURE_WDS
38 #include "dp_txrx_wds.h"
39 #endif
40 #include "dp_hist.h"
41 #include "dp_rx_buffer_pool.h"
42 
43 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
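/**
 * dp_rx_update_flow_info() - update the rx flow-idx-valid flag in the nbuf
 * @nbuf: RX packet buffer
 * @rx_tlv_hdr: start address of rx tlvs
 *
 * The flag is not set when the flow index has timed out, or when FSE
 * reports a valid SFE flow match (such frames go via the stack, not VP).
 *
 * Return: void
 */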
44 static inline void
45 dp_rx_update_flow_info(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
46 {
47 	uint32_t fse_metadata;
48 
49 	/* Set the flow idx valid flag only when there is no timeout */
50 	if (hal_rx_msdu_flow_idx_timeout_be(rx_tlv_hdr))
51 		return;
52 
53 	/*
54 	 * If invalid bit is not set and the fse metadata indicates that it is
55 	 * a valid SFE flow match in FSE, do not set the rx flow tag and let it
56 	 * go via stack instead of VP.
57 	 */
58 	fse_metadata = hal_rx_msdu_fse_metadata_get_be(rx_tlv_hdr);
59 	if (!hal_rx_msdu_flow_idx_invalid_be(rx_tlv_hdr) && (fse_metadata == DP_RX_FSE_FLOW_MATCH_SFE))
60 		return;
61 
62 	qdf_nbuf_set_rx_flow_idx_valid(nbuf,
63 				 !hal_rx_msdu_flow_idx_invalid_be(rx_tlv_hdr));
64 }
65 #else
66 static inline void
67 dp_rx_update_flow_info(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
68 {
69 }
70 #endif
71 
72 #ifndef AST_OFFLOAD_ENABLE
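/**
 * dp_rx_wds_learn() - WDS source port learning on the rx path
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @rx_tlv_hdr: start address of rx tlvs
 * @txrx_peer: txrx peer handle
 * @nbuf: RX packet buffer
 *
 * Return: void
 */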
73 static void
74 dp_rx_wds_learn(struct dp_soc *soc,
75 		struct dp_vdev *vdev,
76 		uint8_t *rx_tlv_hdr,
77 		struct dp_txrx_peer *txrx_peer,
78 		qdf_nbuf_t nbuf)
79 {
80 	struct hal_rx_msdu_metadata msdu_metadata;
81 
82 	hal_rx_msdu_packet_metadata_get_generic_be(rx_tlv_hdr, &msdu_metadata);
83 	/* WDS Source Port Learning */
84 	if (qdf_likely(vdev->wds_enabled))
85 		dp_rx_wds_srcport_learn(soc,
86 				rx_tlv_hdr,
87 				txrx_peer,
88 				nbuf,
89 				msdu_metadata);
90 }
91 #else
92 #ifdef QCA_SUPPORT_WDS_EXTENDED
93 /**
94  * dp_wds_ext_peer_learn_be() - function to send event to control
95  * path on receiving 1st 4-address frame from backhaul.
96  * @soc: DP soc
97  * @ta_txrx_peer: WDS repeater txrx peer
98  * @rx_tlv_hdr: start address of rx tlvs
99  * @nbuf: RX packet buffer
100  *
101  * Return: void
102  */
103 static inline void dp_wds_ext_peer_learn_be(struct dp_soc *soc,
104 					    struct dp_txrx_peer *ta_txrx_peer,
105 					    uint8_t *rx_tlv_hdr,
106 					    qdf_nbuf_t nbuf)
107 {
108 	uint8_t wds_ext_src_mac[QDF_MAC_ADDR_SIZE];
109 	struct dp_peer *ta_base_peer;
110 
111 	/* Instead of checking whether addr4 is valid in the per-packet
112 	 * path, check the init bit, which is set on reception of the
113 	 * first addr4-valid packet.
114 	 */
115 	if (!ta_txrx_peer->vdev->wds_ext_enabled ||
116 	    qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT,
117 				&ta_txrx_peer->wds_ext.init))
118 		return;
119 
120 	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
121 	    (qdf_nbuf_is_fr_ds_set(nbuf) && qdf_nbuf_is_to_ds_set(nbuf))) {
122 		qdf_atomic_test_and_set_bit(WDS_EXT_PEER_INIT_BIT,
123 					    &ta_txrx_peer->wds_ext.init);
124 
125 		if (qdf_unlikely(ta_txrx_peer->nawds_enabled &&
126 				 ta_txrx_peer->is_mld_peer)) {
127 			ta_base_peer = dp_get_primary_link_peer_by_id(
128 							soc,
129 							ta_txrx_peer->peer_id,
130 							DP_MOD_ID_RX);
131 		} else {
132 			ta_base_peer = dp_peer_get_ref_by_id(
133 							soc,
134 							ta_txrx_peer->peer_id,
135 							DP_MOD_ID_RX);
136 		}
137 
138 		if (!ta_base_peer)
139 			return;
140 
141 		qdf_mem_copy(wds_ext_src_mac, &ta_base_peer->mac_addr.raw[0],
142 			     QDF_MAC_ADDR_SIZE);
143 		dp_peer_unref_delete(ta_base_peer, DP_MOD_ID_RX);
144 
145 		soc->cdp_soc.ol_ops->rx_wds_ext_peer_learn(
146 						soc->ctrl_psoc,
147 						ta_txrx_peer->peer_id,
148 						ta_txrx_peer->vdev->vdev_id,
149 						wds_ext_src_mac);
150 	}
151 }
152 #else
153 static inline void dp_wds_ext_peer_learn_be(struct dp_soc *soc,
154 					    struct dp_txrx_peer *ta_txrx_peer,
155 					    uint8_t *rx_tlv_hdr,
156 					    qdf_nbuf_t nbuf)
157 {
158 }
159 #endif
160 static void
161 dp_rx_wds_learn(struct dp_soc *soc,
162 		struct dp_vdev *vdev,
163 		uint8_t *rx_tlv_hdr,
164 		struct dp_txrx_peer *ta_txrx_peer,
165 		qdf_nbuf_t nbuf)
166 {
167 	dp_wds_ext_peer_learn_be(soc, ta_txrx_peer, rx_tlv_hdr, nbuf);
168 }
169 #endif
170 
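/**
 * dp_rx_process_be() - BE specific core RX processing for a REO dest ring
 * @int_ctx: per interrupt context
 * @hal_ring_hdl: HAL handle of the REO destination ring
 * @reo_ring_num: REO destination ring number
 * @quota: upper limit on the number of frames to process in this call
 *
 * Reaps RX buffers from the REO destination ring, replenishes the reaped
 * descriptors, then runs per-frame processing (WDS learning, policy and
 * auth checks, intra-BSS forwarding, stats) and delivers the frames to
 * the stack on a per-vdev basis.
 *
 * Return: number of rx buffers processed
 */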
171 uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
172 			  hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
173 			  uint32_t quota)
174 {
175 	hal_ring_desc_t ring_desc;
176 	hal_ring_desc_t last_prefetched_hw_desc;
177 	hal_soc_handle_t hal_soc;
178 	struct dp_rx_desc *rx_desc = NULL;
179 	struct dp_rx_desc *last_prefetched_sw_desc = NULL;
180 	qdf_nbuf_t nbuf, next;
181 	bool near_full;
182 	union dp_rx_desc_list_elem_t *head[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
183 	union dp_rx_desc_list_elem_t *tail[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
184 	uint32_t num_pending = 0;
185 	uint32_t rx_bufs_used = 0, rx_buf_cookie;
186 	uint16_t msdu_len = 0;
187 	uint16_t peer_id;
188 	uint8_t vdev_id;
189 	struct dp_txrx_peer *txrx_peer;
190 	dp_txrx_ref_handle txrx_ref_handle = NULL;
191 	struct dp_vdev *vdev;
192 	uint32_t pkt_len = 0;
193 	enum hal_reo_error_status error;
194 	uint8_t *rx_tlv_hdr;
195 	uint32_t rx_bufs_reaped[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
196 	uint8_t mac_id = 0;
197 	struct dp_pdev *rx_pdev;
198 	uint8_t enh_flag;
199 	struct dp_srng *dp_rxdma_srng;
200 	struct rx_desc_pool *rx_desc_pool;
201 	struct dp_soc *soc = int_ctx->soc;
202 	struct cdp_tid_rx_stats *tid_stats;
203 	qdf_nbuf_t nbuf_head;
204 	qdf_nbuf_t nbuf_tail;
205 	qdf_nbuf_t deliver_list_head;
206 	qdf_nbuf_t deliver_list_tail;
207 	uint32_t num_rx_bufs_reaped = 0;
208 	uint32_t intr_id;
209 	struct hif_opaque_softc *scn;
210 	int32_t tid = 0;
211 	bool is_prev_msdu_last = true;
212 	uint32_t num_entries_avail = 0;
213 	uint32_t rx_ol_pkt_cnt = 0;
214 	uint32_t num_entries = 0;
215 	QDF_STATUS status;
216 	qdf_nbuf_t ebuf_head;
217 	qdf_nbuf_t ebuf_tail;
218 	uint8_t pkt_capture_offload = 0;
219 	struct dp_srng *rx_ring = &soc->reo_dest_ring[reo_ring_num];
220 	int max_reap_limit, ring_near_full;
221 	struct dp_soc *replenish_soc;
222 	uint8_t chip_id;
223 	uint64_t current_time = 0;
224 	uint32_t old_tid;
225 	uint32_t peer_ext_stats;
226 	uint32_t dsf;
227 	uint32_t l3_pad;
228 	uint8_t link_id = 0;
229 
230 	DP_HIST_INIT();
231 
232 	qdf_assert_always(soc && hal_ring_hdl);
233 	hal_soc = soc->hal_soc;
234 	qdf_assert_always(hal_soc);
235 
236 	scn = soc->hif_handle;
237 	intr_id = int_ctx->dp_intr_id;
238 	num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);
239 	dp_runtime_pm_mark_last_busy(soc);
240 
241 more_data:
242 	/* reset local variables here to be re-used in the function */
243 	nbuf_head = NULL;
244 	nbuf_tail = NULL;
245 	deliver_list_head = NULL;
246 	deliver_list_tail = NULL;
247 	txrx_peer = NULL;
248 	vdev = NULL;
249 	num_rx_bufs_reaped = 0;
250 	ebuf_head = NULL;
251 	ebuf_tail = NULL;
252 	ring_near_full = 0;
253 	max_reap_limit = dp_rx_get_loop_pkt_limit(soc);
254 
255 	qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
256 	qdf_mem_zero(head, sizeof(head));
257 	qdf_mem_zero(tail, sizeof(tail));
258 	old_tid = 0xff;
259 	dsf = 0;
260 	peer_ext_stats = 0;
261 	rx_pdev = NULL;
262 	tid_stats = NULL;
263 
264 	dp_pkt_get_timestamp(&current_time);
265 
266 	ring_near_full = _dp_srng_test_and_update_nf_params(soc, rx_ring,
267 							    &max_reap_limit);
268 
269 	peer_ext_stats = wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
270 	if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
271 		/*
272 		 * Need API to convert from hal_ring pointer to
273 		 * Ring Type / Ring Id combo
274 		 */
275 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
276 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
277 			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
278 		goto done;
279 	}
280 
281 	hal_srng_update_ring_usage_wm_no_lock(soc->hal_soc, hal_ring_hdl);
282 
283 	if (!num_pending)
284 		num_pending = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0);
285 
286 	if (num_pending > quota)
287 		num_pending = quota;
288 
289 	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_pending);
290 	last_prefetched_hw_desc = dp_srng_dst_prefetch_32_byte_desc(hal_soc,
291 							    hal_ring_hdl,
292 							    num_pending);
293 	/*
294 	 * Start reaping the buffers from the REO ring and queue
295 	 * them in a per-vdev queue.
296 	 * The received pkts are processed in a separate per-vdev loop below.
297 	 */
298 	while (qdf_likely(num_pending)) {
299 		ring_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);
300 
301 		if (qdf_unlikely(!ring_desc))
302 			break;
303 
304 		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
305 
306 		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
307 			dp_rx_err("%pK: HAL RING 0x%pK:error %d",
308 				  soc, hal_ring_hdl, error);
309 			DP_STATS_INC(soc, rx.err.hal_reo_error[reo_ring_num],
310 				     1);
311 			/* Don't know how to deal with this -- assert */
312 			qdf_assert(0);
313 		}
314 
315 		dp_rx_ring_record_entry(soc, reo_ring_num, ring_desc);
316 		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
317 		status = dp_rx_cookie_check_and_invalidate(ring_desc);
318 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
319 			DP_STATS_INC(soc, rx.err.stale_cookie, 1);
320 			break;
321 		}
322 
323 		rx_desc = (struct dp_rx_desc *)
324 				hal_rx_get_reo_desc_va(ring_desc);
325 		dp_rx_desc_sw_cc_check(soc, rx_buf_cookie, &rx_desc);
326 
327 		status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl,
328 					   ring_desc, rx_desc);
329 		if (QDF_IS_STATUS_ERROR(status)) {
330 			if (qdf_unlikely(rx_desc && rx_desc->nbuf)) {
331 				qdf_assert_always(!rx_desc->unmapped);
332 				dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
333 				rx_desc->unmapped = 1;
334 				dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
335 							    rx_desc->pool_id);
336 				dp_rx_add_to_free_desc_list(
337 					&head[rx_desc->chip_id][rx_desc->pool_id],
338 					&tail[rx_desc->chip_id][rx_desc->pool_id],
339 					rx_desc);
340 			}
341 			continue;
342 		}
343 
344 		/*
345 		 * This is an unlikely scenario where the host is reaping
346 		 * a descriptor which it already reaped just a while ago
347 		 * but is yet to replenish it back to HW.
348 		 * In this case the host will dump the last 128 descriptors,
349 		 * including the software descriptor rx_desc, and assert.
350 		 */
351 
352 		if (qdf_unlikely(!rx_desc->in_use)) {
353 			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
354 			dp_info_rl("Reaping rx_desc not in use!");
355 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
356 						   ring_desc, rx_desc);
357 			continue;
358 		}
359 
360 		status = dp_rx_desc_nbuf_sanity_check(soc, ring_desc, rx_desc);
361 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
362 			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
363 			dp_info_rl("Nbuf sanity check failure!");
364 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
365 						   ring_desc, rx_desc);
366 			rx_desc->in_err_state = 1;
367 			continue;
368 		}
369 
370 		if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
371 			dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
372 			DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
373 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
374 						   ring_desc, rx_desc);
375 		}
376 
377 		pkt_capture_offload =
378 			dp_rx_copy_desc_info_in_nbuf_cb(soc, ring_desc,
379 							rx_desc->nbuf,
380 							reo_ring_num);
381 
382 		if (qdf_unlikely(qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf))) {
383 			/* In dp_rx_sg_create(), the end bit should not be set
384 			 * until the last buffer. As the continuation bit is
385 			 * set, this is not the last buffer.
386 			 */
387 			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 0);
388 
389 			/* The previous msdu has the end bit set, so the current
390 			 * one is the start of a new MPDU
391 			 */
392 			if (is_prev_msdu_last) {
393 				/* Get number of entries available in HW ring */
394 				num_entries_avail =
395 				hal_srng_dst_num_valid(hal_soc,
396 						       hal_ring_hdl, 1);
397 
398 				/* For a new MPDU, check if the complete MPDU
399 				 * can be read by comparing the number of
400 				 * buffers available with the number of buffers
401 				 * needed to reap this MPDU
402 				 */
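				/* Worked example (illustrative values only):
				 * with RX_DATA_BUFFER_SIZE = 2048 and
				 * rx_pkt_tlv_size = 128, each buffer carries
				 * 1920 payload bytes, so a 6000-byte MPDU
				 * needs 6000 / 1920 + 1 = 4 buffers; if fewer
				 * entries are pending, defer and retry later.
				 */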
403 				if ((QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) /
404 				     (RX_DATA_BUFFER_SIZE -
405 				      soc->rx_pkt_tlv_size) + 1) >
406 				    num_pending) {
407 					DP_STATS_INC(soc,
408 						     rx.msdu_scatter_wait_break,
409 						     1);
410 					dp_rx_cookie_reset_invalid_bit(
411 								     ring_desc);
412 					/* As we are going to break out of the
413 					 * loop because of unavailability of
414 					 * descs to form complete SG, we need to
415 					 * reset the TP in the REO destination
416 					 * ring.
417 					 */
418 					hal_srng_dst_dec_tp(hal_soc,
419 							    hal_ring_hdl);
420 					break;
421 				}
422 				is_prev_msdu_last = false;
423 			}
424 		}
425 
426 		if (!is_prev_msdu_last &&
427 		    !(qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)))
428 			is_prev_msdu_last = true;
429 
430 		rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;
431 
432 		/*
433 		 * The unmap is done after the scattered-msdu wait/break logic
434 		 * above to avoid a double skb unmap.
435 		 */
436 		dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
437 		rx_desc->unmapped = 1;
438 		DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head,
439 				   ebuf_tail, rx_desc);
440 
441 		quota -= 1;
442 		num_pending -= 1;
443 
444 		dp_rx_add_to_free_desc_list
445 			(&head[rx_desc->chip_id][rx_desc->pool_id],
446 			 &tail[rx_desc->chip_id][rx_desc->pool_id], rx_desc);
447 		num_rx_bufs_reaped++;
448 
449 		dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(soc, hal_soc,
450 					       num_pending,
451 					       hal_ring_hdl,
452 					       &last_prefetched_hw_desc,
453 					       &last_prefetched_sw_desc);
454 
455 		/*
456 		 * For the scatter case, allow the break only after the
457 		 * complete msdu has been received.
458 		 */
459 		if (is_prev_msdu_last &&
460 		    dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped,
461 						  max_reap_limit))
462 			break;
463 	}
464 done:
465 	dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);
466 	qdf_dsb();
467 
468 	dp_rx_per_core_stats_update(soc, reo_ring_num, num_rx_bufs_reaped);
469 
470 	for (chip_id = 0; chip_id < WLAN_MAX_MLO_CHIPS; chip_id++) {
471 		for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
472 			/*
473 			 * continue with next mac_id if no pkts were reaped
474 			 * from that pool
475 			 */
476 			if (!rx_bufs_reaped[chip_id][mac_id])
477 				continue;
478 
479 			replenish_soc = dp_rx_replenish_soc_get(soc, chip_id);
480 
481 			dp_rxdma_srng =
482 				&replenish_soc->rx_refill_buf_ring[mac_id];
483 
484 			rx_desc_pool = &replenish_soc->rx_desc_buf[mac_id];
485 
486 			dp_rx_buffers_replenish_simple(replenish_soc, mac_id,
487 					       dp_rxdma_srng,
488 					       rx_desc_pool,
489 					       rx_bufs_reaped[chip_id][mac_id],
490 					       &head[chip_id][mac_id],
491 					       &tail[chip_id][mac_id]);
492 		}
493 	}
494 
495 	/* Peer can be NULL in case of LFR */
496 	if (qdf_likely(txrx_peer))
497 		vdev = NULL;
498 
499 	/*
500 	 * BIG loop where each nbuf is dequeued from the global queue,
501 	 * processed and queued back on a per-vdev basis. These nbufs
502 	 * are sent to the stack as and when we run out of nbufs
503 	 * or a new nbuf dequeued from the global queue has a different
504 	 * vdev compared to the previous nbuf.
505 	 */
506 	nbuf = nbuf_head;
507 	while (nbuf) {
508 		next = nbuf->next;
509 		dp_rx_prefetch_nbuf_data_be(nbuf, next);
510 		if (qdf_unlikely(dp_rx_is_raw_frame_dropped(nbuf))) {
511 			nbuf = next;
512 			DP_STATS_INC(soc, rx.err.raw_frm_drop, 1);
513 			continue;
514 		}
515 
516 		rx_tlv_hdr = qdf_nbuf_data(nbuf);
517 		vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
518 		peer_id = dp_rx_get_peer_id_be(nbuf);
519 
520 		if (dp_rx_is_list_ready(deliver_list_head, vdev, txrx_peer,
521 					peer_id, vdev_id)) {
522 			dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
523 					       deliver_list_head,
524 					       deliver_list_tail);
525 			deliver_list_head = NULL;
526 			deliver_list_tail = NULL;
527 		}
528 
529 		/* Get TID from struct cb->tid_val, save to tid */
530 		tid = qdf_nbuf_get_tid_val(nbuf);
531 		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS)) {
532 			DP_STATS_INC(soc, rx.err.rx_invalid_tid_err, 1);
533 			dp_rx_nbuf_free(nbuf);
534 			nbuf = next;
535 			continue;
536 		}
537 
538 		if (qdf_unlikely(!txrx_peer)) {
539 			txrx_peer = dp_rx_get_txrx_peer_and_vdev(soc, nbuf,
540 								 peer_id,
541 								 &txrx_ref_handle,
542 								 pkt_capture_offload,
543 								 &vdev,
544 								 &rx_pdev, &dsf,
545 								 &old_tid);
546 			if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
547 				nbuf = next;
548 				continue;
549 			}
550 			enh_flag = rx_pdev->enhanced_stats_en;
551 		} else if (txrx_peer && txrx_peer->peer_id != peer_id) {
552 			dp_txrx_peer_unref_delete(txrx_ref_handle,
553 						  DP_MOD_ID_RX);
554 
555 			txrx_peer = dp_rx_get_txrx_peer_and_vdev(soc, nbuf,
556 								 peer_id,
557 								 &txrx_ref_handle,
558 								 pkt_capture_offload,
559 								 &vdev,
560 								 &rx_pdev, &dsf,
561 								 &old_tid);
562 			if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
563 				nbuf = next;
564 				continue;
565 			}
566 			enh_flag = rx_pdev->enhanced_stats_en;
567 		}
568 
569 		if (txrx_peer) {
570 			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
571 			qdf_dp_trace_set_track(nbuf, QDF_RX);
572 			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
573 			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
574 				QDF_NBUF_RX_PKT_DATA_TRACK;
575 		}
576 
577 		rx_bufs_used++;
578 
579 		/* MLD Link Peer Statistics support */
580 		if (txrx_peer->is_mld_peer && rx_pdev->link_peer_stats) {
581 			link_id = dp_rx_get_stats_arr_idx_from_link_id(
582 								nbuf,
583 								txrx_peer);
584 		} else {
585 			link_id = 0;
586 		}
587 
588 		/* when hlos tid override is enabled, save tid in
589 		 * skb->priority
590 		 */
591 		if (qdf_unlikely(vdev->skip_sw_tid_classification &
592 					DP_TXRX_HLOS_TID_OVERRIDE_ENABLED))
593 			qdf_nbuf_set_priority(nbuf, tid);
594 
595 		DP_RX_TID_SAVE(nbuf, tid);
596 		if (qdf_unlikely(dsf) || qdf_unlikely(peer_ext_stats) ||
597 		    dp_rx_pkt_tracepoints_enabled())
598 			qdf_nbuf_set_timestamp(nbuf);
599 
600 		if (qdf_likely(old_tid != tid)) {
601 			tid_stats =
602 		&rx_pdev->stats.tid_stats.tid_rx_stats[reo_ring_num][tid];
603 			old_tid = tid;
604 		}
605 
606 		/*
607 		 * Check if DMA completed -- msdu_done is the last bit
608 		 * to be written
609 		 */
610 		if (qdf_unlikely(!qdf_nbuf_is_rx_chfrag_cont(nbuf) &&
611 				 !hal_rx_tlv_msdu_done_get_be(rx_tlv_hdr))) {
612 			dp_err("MSDU DONE failure");
613 			DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
614 			hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
615 					     QDF_TRACE_LEVEL_INFO);
616 			tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
617 			dp_rx_nbuf_free(nbuf);
618 			qdf_assert(0);
619 			nbuf = next;
620 			continue;
621 		}
622 
623 		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
624 		/*
625 		 * First IF condition:
626 		 * 802.11 Fragmented pkts are reinjected to REO
627 		 * HW block as SG pkts and for these pkts we only
628 		 * need to pull the RX TLVS header length.
629 		 * Second IF condition:
630 		 * The below condition happens when an MSDU is spread
631 		 * across multiple buffers. This can happen in two cases
632 		 * 1. The nbuf size is smaller than the received msdu.
633 		 *    ex: we have set the nbuf size to 2048 during
634 		 *        nbuf_alloc, but we received an msdu which is
635 		 *        2304 bytes in size; then this msdu is spread
636 		 *        across 2 nbufs.
637 		 *
638 		 * 2. AMSDUs when RAW mode is enabled.
639 		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
640 		 *        across 1st nbuf and 2nd nbuf and last MSDU is
641 		 *        spread across 2nd nbuf and 3rd nbuf.
642 		 *
643 		 * for these scenarios let us create a skb frag_list and
644 		 * append these buffers till the last MSDU of the AMSDU
645 		 * Third condition:
646 		 * This is the most likely case: we receive 802.3 pkts
647 		 * decapsulated by HW; here we need to set the pkt length.
648 		 */
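		/* Illustrative numbers for the third condition: a 1400-byte
		 * msdu with 2 bytes of L3 padding and a 128-byte rx TLV area
		 * gives pkt_len = 1400 + 2 + 128 = 1530; the TLVs and padding
		 * are then stripped via dp_rx_skip_tlvs() before delivery.
		 */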
649 		if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
650 			bool is_mcbc, is_sa_vld, is_da_vld;
651 
652 			is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
653 								 rx_tlv_hdr);
654 			is_sa_vld =
655 				hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
656 								rx_tlv_hdr);
657 			is_da_vld =
658 				hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
659 								rx_tlv_hdr);
660 
661 			qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
662 			qdf_nbuf_set_da_valid(nbuf, is_da_vld);
663 			qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);
664 
665 			qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
666 		} else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
667 			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
668 			nbuf = dp_rx_sg_create(soc, nbuf);
669 			next = nbuf->next;
670 
671 			if (qdf_nbuf_is_raw_frame(nbuf)) {
672 				DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
673 				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
674 							      rx.raw, 1,
675 							      msdu_len,
676 							      link_id);
677 			} else {
678 				DP_STATS_INC(soc, rx.err.scatter_msdu, 1);
679 
680 				if (!dp_rx_is_sg_supported()) {
681 					dp_rx_nbuf_free(nbuf);
682 					dp_info_rl("sg msdu len %d, dropped",
683 						   msdu_len);
684 					nbuf = next;
685 					continue;
686 				}
687 			}
688 		} else {
689 			l3_pad = hal_rx_get_l3_pad_bytes_be(nbuf, rx_tlv_hdr);
690 			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
691 			pkt_len = msdu_len + l3_pad + soc->rx_pkt_tlv_size;
692 
693 			qdf_nbuf_set_pktlen(nbuf, pkt_len);
694 			dp_rx_skip_tlvs(soc, nbuf, l3_pad);
695 		}
696 
697 		dp_rx_send_pktlog(soc, rx_pdev, nbuf, QDF_TX_RX_STATUS_OK);
698 
699 		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
700 			dp_rx_err("%pK: Policy Check Drop pkt", soc);
701 			DP_PEER_PER_PKT_STATS_INC(txrx_peer,
702 						  rx.policy_check_drop,
703 						  1, link_id);
704 			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
705 			/* Drop & free packet */
706 			dp_rx_nbuf_free(nbuf);
707 			/* Statistics */
708 			nbuf = next;
709 			continue;
710 		}
711 
712 		/*
713 		 * Drop non-EAPOL frames from unauthorized peer.
714 		 */
715 		if (qdf_likely(txrx_peer) &&
716 		    qdf_unlikely(!txrx_peer->authorize) &&
717 		    !qdf_nbuf_is_raw_frame(nbuf)) {
718 			bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
719 					qdf_nbuf_is_ipv4_wapi_pkt(nbuf);
720 
721 			if (!is_eapol) {
722 				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
723 							  rx.peer_unauth_rx_pkt_drop,
724 							  1, link_id);
725 				dp_rx_nbuf_free(nbuf);
726 				nbuf = next;
727 				continue;
728 			}
729 		}
730 
731 		dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);
732 		dp_rx_update_flow_info(nbuf, rx_tlv_hdr);
733 
734 		if (qdf_unlikely(!rx_pdev->rx_fast_flag)) {
735 			/*
736 			 * process frame for multipass phrase processing
737 			 */
738 			if (qdf_unlikely(vdev->multipass_en)) {
739 				if (dp_rx_multipass_process(txrx_peer, nbuf,
740 							    tid) == false) {
741 					DP_PEER_PER_PKT_STATS_INC
742 						(txrx_peer,
743 						 rx.multipass_rx_pkt_drop,
744 						 1, link_id);
745 					dp_rx_nbuf_free(nbuf);
746 					nbuf = next;
747 					continue;
748 				}
749 			}
750 			if (qdf_unlikely(txrx_peer &&
751 					 (txrx_peer->nawds_enabled) &&
752 					 (qdf_nbuf_is_da_mcbc(nbuf)) &&
753 					 (hal_rx_get_mpdu_mac_ad4_valid_be
754 						(rx_tlv_hdr) == false))) {
755 				tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
756 				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
757 							  rx.nawds_mcast_drop,
758 							  1, link_id);
759 				dp_rx_nbuf_free(nbuf);
760 				nbuf = next;
761 				continue;
762 			}
763 
764 			/* Update the protocol tag in SKB based on CCE metadata
765 			 */
766 			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
767 						  reo_ring_num, false, true);
768 
769 			/* Update the flow tag in SKB based on FSE metadata */
770 			dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr,
771 					      true);
772 
773 			if (qdf_unlikely(vdev->mesh_vdev)) {
774 				if (dp_rx_filter_mesh_packets(vdev, nbuf,
775 							      rx_tlv_hdr)
776 						== QDF_STATUS_SUCCESS) {
777 					dp_rx_info("%pK: mesh pkt filtered",
778 						   soc);
779 					tid_stats->fail_cnt[MESH_FILTER_DROP]++;
780 					DP_STATS_INC(vdev->pdev,
781 						     dropped.mesh_filter, 1);
782 
783 					dp_rx_nbuf_free(nbuf);
784 					nbuf = next;
785 					continue;
786 				}
787 				dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr,
788 						      txrx_peer);
789 			}
790 		}
791 
792 		if (qdf_likely(vdev->rx_decap_type ==
793 			       htt_cmn_pkt_type_ethernet) &&
794 		    qdf_likely(!vdev->mesh_vdev)) {
795 			dp_rx_wds_learn(soc, vdev,
796 					rx_tlv_hdr,
797 					txrx_peer,
798 					nbuf);
799 		}
800 
801 		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
802 					reo_ring_num, tid_stats, link_id);
803 
804 		if (qdf_likely(vdev->rx_decap_type ==
805 			       htt_cmn_pkt_type_ethernet) &&
806 		    qdf_likely(!vdev->mesh_vdev)) {
807 			/* Intrabss-fwd */
808 			if (dp_rx_check_ap_bridge(vdev))
809 				if (dp_rx_intrabss_fwd_be(soc, txrx_peer,
810 							  rx_tlv_hdr,
811 							  nbuf,
812 							  link_id)) {
813 					nbuf = next;
814 					tid_stats->intrabss_cnt++;
815 					continue; /* Get next desc */
816 				}
817 		}
818 
819 		dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt);
820 
821 		dp_rx_mark_first_packet_after_wow_wakeup(vdev->pdev, rx_tlv_hdr,
822 							 nbuf);
823 
824 		dp_rx_update_stats(soc, nbuf);
825 
826 		dp_pkt_add_timestamp(txrx_peer->vdev, QDF_PKT_RX_DRIVER_ENTRY,
827 				     current_time, nbuf);
828 
829 		DP_RX_LIST_APPEND(deliver_list_head,
830 				  deliver_list_tail,
831 				  nbuf);
832 
833 		DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1,
834 					  QDF_NBUF_CB_RX_PKT_LEN(nbuf),
835 					  enh_flag);
836 		if (qdf_unlikely(txrx_peer->in_twt))
837 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
838 						      rx.to_stack_twt, 1,
839 						      QDF_NBUF_CB_RX_PKT_LEN(nbuf),
840 						      link_id);
841 
842 		tid_stats->delivered_to_stack++;
843 		nbuf = next;
844 	}
845 
846 	DP_RX_DELIVER_TO_STACK(soc, vdev, txrx_peer, peer_id,
847 			       pkt_capture_offload,
848 			       deliver_list_head,
849 			       deliver_list_tail);
850 
851 	if (qdf_likely(txrx_peer))
852 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
853 
854 	/*
855 	 * If we are processing in a near-full condition, there are 3 scenarios:
856 	 * 1) Ring entries have reached a critical state
857 	 * 2) Ring entries are still near high threshold
858 	 * 3) Ring entries are below the safe level
859 	 *
860 	 * One more loop will move the state to normal processing and yield
861 	 */
862 	if (ring_near_full && quota)
863 		goto more_data;
864 
865 	if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
866 		if (quota) {
867 			num_pending =
868 				dp_rx_srng_get_num_pending(hal_soc,
869 							   hal_ring_hdl,
870 							   num_entries,
871 							   &near_full);
872 			if (num_pending) {
873 				DP_STATS_INC(soc, rx.hp_oos2, 1);
874 
875 				if (!hif_exec_should_yield(scn, intr_id))
876 					goto more_data;
877 
878 				if (qdf_unlikely(near_full)) {
879 					DP_STATS_INC(soc, rx.near_full, 1);
880 					goto more_data;
881 				}
882 			}
883 		}
884 
885 		if (vdev && vdev->osif_fisa_flush)
886 			vdev->osif_fisa_flush(soc, reo_ring_num);
887 
888 		if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
889 			vdev->osif_gro_flush(vdev->osif_vdev,
890 					     reo_ring_num);
891 		}
892 	}
893 
894 	/* Update histogram statistics by looping through pdevs */
895 	DP_RX_HIST_STATS_PER_PDEV();
896 
897 	return rx_bufs_used; /* Assume no scale factor for now */
898 }
899 
900 #ifdef RX_DESC_MULTI_PAGE_ALLOC
901 /**
902  * dp_rx_desc_pool_init_be_cc() - initialize RX desc pool for cookie conversion
903  * @soc: Handle to DP Soc structure
904  * @rx_desc_pool: Rx descriptor pool handler
905  * @pool_id: Rx descriptor pool ID
906  *
907  * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
908  */
909 static QDF_STATUS
910 dp_rx_desc_pool_init_be_cc(struct dp_soc *soc,
911 			   struct rx_desc_pool *rx_desc_pool,
912 			   uint32_t pool_id)
913 {
914 	struct dp_hw_cookie_conversion_t *cc_ctx;
915 	struct dp_soc_be *be_soc;
916 	union dp_rx_desc_list_elem_t *rx_desc_elem;
917 	struct dp_spt_page_desc *page_desc;
918 	uint32_t ppt_idx = 0;
919 	uint32_t avail_entry_index = 0;
920 
921 	if (!rx_desc_pool->pool_size) {
922 		dp_err("desc_num 0 !!");
923 		return QDF_STATUS_E_FAILURE;
924 	}
925 
926 	be_soc = dp_get_be_soc_from_dp_soc(soc);
927 	cc_ctx  = &be_soc->rx_cc_ctx[pool_id];
928 
929 	page_desc = &cc_ctx->page_desc_base[0];
930 	rx_desc_elem = rx_desc_pool->freelist;
931 	while (rx_desc_elem) {
932 		if (avail_entry_index == 0) {
933 			if (ppt_idx >= cc_ctx->total_page_num) {
934 				dp_alert("insufficient secondary page tables");
935 				qdf_assert_always(0);
936 			}
937 			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
938 		}
939 
940 		/* put each RX Desc VA to SPT pages and
941 		 * get corresponding ID
942 		 */
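		/* The generated cookie effectively packs the SPT page index
		 * (ppt_index) in its upper bits and the entry index within
		 * that page in its lower bits, so it can later be converted
		 * back to the descriptor VA.
		 */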
943 		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
944 					 avail_entry_index,
945 					 &rx_desc_elem->rx_desc);
946 		rx_desc_elem->rx_desc.cookie =
947 			dp_cc_desc_id_generate(page_desc->ppt_index,
948 					       avail_entry_index);
949 		rx_desc_elem->rx_desc.chip_id = dp_mlo_get_chip_id(soc);
950 		rx_desc_elem->rx_desc.pool_id = pool_id;
951 		rx_desc_elem->rx_desc.in_use = 0;
952 		rx_desc_elem = rx_desc_elem->next;
953 
954 		avail_entry_index = (avail_entry_index + 1) &
955 					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
956 	}
957 
958 	return QDF_STATUS_SUCCESS;
959 }
960 #else
961 static QDF_STATUS
962 dp_rx_desc_pool_init_be_cc(struct dp_soc *soc,
963 			   struct rx_desc_pool *rx_desc_pool,
964 			   uint32_t pool_id)
965 {
966 	struct dp_hw_cookie_conversion_t *cc_ctx;
967 	struct dp_soc_be *be_soc;
968 	struct dp_spt_page_desc *page_desc;
969 	uint32_t ppt_idx = 0;
970 	uint32_t avail_entry_index = 0;
971 	int i = 0;
972 
973 	if (!rx_desc_pool->pool_size) {
974 		dp_err("desc_num 0 !!");
975 		return QDF_STATUS_E_FAILURE;
976 	}
977 
978 	be_soc = dp_get_be_soc_from_dp_soc(soc);
979 	cc_ctx  = &be_soc->rx_cc_ctx[pool_id];
980 
981 	page_desc = &cc_ctx->page_desc_base[0];
982 	for (i = 0; i <= rx_desc_pool->pool_size - 1; i++) {
983 		if (i == rx_desc_pool->pool_size - 1)
984 			rx_desc_pool->array[i].next = NULL;
985 		else
986 			rx_desc_pool->array[i].next =
987 				&rx_desc_pool->array[i + 1];
988 
989 		if (avail_entry_index == 0) {
990 			if (ppt_idx >= cc_ctx->total_page_num) {
991 				dp_alert("insufficient secondary page tables");
992 				qdf_assert_always(0);
993 			}
994 			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
995 		}
996 
997 		/* put each RX Desc VA to SPT pages and
998 		 * get corresponding ID
999 		 */
1000 		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
1001 					 avail_entry_index,
1002 					 &rx_desc_pool->array[i].rx_desc);
1003 		rx_desc_pool->array[i].rx_desc.cookie =
1004 			dp_cc_desc_id_generate(page_desc->ppt_index,
1005 					       avail_entry_index);
1006 		rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
1007 		rx_desc_pool->array[i].rx_desc.in_use = 0;
1008 		rx_desc_pool->array[i].rx_desc.chip_id =
1009 					dp_mlo_get_chip_id(soc);
1010 
1011 		avail_entry_index = (avail_entry_index + 1) &
1012 					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
1013 	}
1014 	return QDF_STATUS_SUCCESS;
1015 }
1016 #endif
1017 
1018 static void
1019 dp_rx_desc_pool_deinit_be_cc(struct dp_soc *soc,
1020 			     struct rx_desc_pool *rx_desc_pool,
1021 			     uint32_t pool_id)
1022 {
1023 	struct dp_spt_page_desc *page_desc;
1024 	struct dp_soc_be *be_soc;
1025 	int i = 0;
1026 	struct dp_hw_cookie_conversion_t *cc_ctx;
1027 
1028 	be_soc = dp_get_be_soc_from_dp_soc(soc);
1029 	cc_ctx  = &be_soc->rx_cc_ctx[pool_id];
1030 
1031 	for (i = 0; i < cc_ctx->total_page_num; i++) {
1032 		page_desc = &cc_ctx->page_desc_base[i];
1033 		qdf_mem_zero(page_desc->page_v_addr, qdf_page_size);
1034 	}
1035 }
1036 
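/**
 * dp_rx_desc_pool_init_be() - Initialize an RX descriptor pool
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Regular RX buffer descriptor pools are initialized for HW cookie
 * conversion; all other pools use the generic initialization.
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */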
1037 QDF_STATUS dp_rx_desc_pool_init_be(struct dp_soc *soc,
1038 				   struct rx_desc_pool *rx_desc_pool,
1039 				   uint32_t pool_id)
1040 {
1041 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1042 
1043 	/* Only the regular RX buffer desc pool uses HW cookie conversion */
1044 	if (rx_desc_pool->desc_type == QDF_DP_RX_DESC_BUF_TYPE) {
1045 		dp_info("rx_desc_buf pool init");
1046 		status = dp_rx_desc_pool_init_be_cc(soc,
1047 						    rx_desc_pool,
1048 						    pool_id);
1049 	} else {
1050 		dp_info("non_rx_desc_buf_pool init");
1051 		status = dp_rx_desc_pool_init_generic(soc, rx_desc_pool,
1052 						      pool_id);
1053 	}
1054 
1055 	return status;
1056 }
1057 
1058 void dp_rx_desc_pool_deinit_be(struct dp_soc *soc,
1059 			       struct rx_desc_pool *rx_desc_pool,
1060 			       uint32_t pool_id)
1061 {
1062 	if (rx_desc_pool->desc_type == QDF_DP_RX_DESC_BUF_TYPE)
1063 		dp_rx_desc_pool_deinit_be_cc(soc, rx_desc_pool, pool_id);
1064 }
1065 
1066 #ifdef DP_FEATURE_HW_COOKIE_CONVERSION
1067 #ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
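/**
 * dp_wbm_get_rx_desc_from_hal_desc_be() - Get the SW rx descriptor
 *					   corresponding to a WBM ring descriptor
 * @soc: Handle to DP Soc structure
 * @ring_desc: WBM release ring descriptor
 * @r_rx_desc: pointer to be filled with the SW rx descriptor
 *
 * Uses the HW-converted VA when cookie conversion has already been done
 * by HW, otherwise falls back to SW cookie conversion.
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */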
1068 QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
1069 					       void *ring_desc,
1070 					       struct dp_rx_desc **r_rx_desc)
1071 {
1072 	if (hal_rx_wbm_get_cookie_convert_done(ring_desc)) {
1073 		/* HW cookie conversion done */
1074 		*r_rx_desc = (struct dp_rx_desc *)
1075 				hal_rx_wbm_get_desc_va(ring_desc);
1076 	} else {
1077 		/* SW does the cookie conversion */
1078 		uint32_t cookie = HAL_RX_BUF_COOKIE_GET(ring_desc);
1079 
1080 		*r_rx_desc = (struct dp_rx_desc *)
1081 				dp_cc_desc_find(soc, cookie);
1082 	}
1083 
1084 	return QDF_STATUS_SUCCESS;
1085 }
1086 #else
1087 QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
1088 					       void *ring_desc,
1089 					       struct dp_rx_desc **r_rx_desc)
1090 {
1091 	 *r_rx_desc = (struct dp_rx_desc *)
1092 			hal_rx_wbm_get_desc_va(ring_desc);
1093 
1094 	return QDF_STATUS_SUCCESS;
1095 }
1096 #endif /* DP_HW_COOKIE_CONVERT_EXCEPTION */
1097 #else
1098 QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
1099 					       void *ring_desc,
1100 					       struct dp_rx_desc **r_rx_desc)
1101 {
1102 	/* SW does the cookie conversion */
1103 	uint32_t cookie = HAL_RX_BUF_COOKIE_GET(ring_desc);
1104 
1105 	*r_rx_desc = (struct dp_rx_desc *)
1106 			dp_cc_desc_find(soc, cookie);
1107 
1108 	return QDF_STATUS_SUCCESS;
1109 }
1110 #endif /* DP_FEATURE_HW_COOKIE_CONVERSION */
1111 
1112 struct dp_rx_desc *dp_rx_desc_cookie_2_va_be(struct dp_soc *soc,
1113 					     uint32_t cookie)
1114 {
1115 	return (struct dp_rx_desc *)dp_cc_desc_find(soc, cookie);
1116 }
1117 
1118 #if defined(WLAN_FEATURE_11BE_MLO)
1119 #if defined(WLAN_MLO_MULTI_CHIP) && defined(WLAN_MCAST_MLO)
1120 #define DP_RANDOM_MAC_ID_BIT_MASK	0xC0
1121 #define DP_RANDOM_MAC_OFFSET	1
1122 #define DP_MAC_LOCAL_ADMBIT_MASK	0x2
1123 #define DP_MAC_LOCAL_ADMBIT_OFFSET	0
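/**
 * dp_rx_dummy_src_mac() - convert the source MAC into a dummy address
 * @vdev: DP vdev handle
 * @nbuf: IGMP packet whose SA is to be mangled
 *
 * Sets the locally administered bit in the ethernet source address so
 * that the copy delivered on the mld netdev carries a dummy SA.
 *
 * Return: void
 */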
1124 static inline void dp_rx_dummy_src_mac(struct dp_vdev *vdev,
1125 				       qdf_nbuf_t nbuf)
1126 {
1127 	qdf_ether_header_t *eh =
1128 			(qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1129 
1130 	eh->ether_shost[DP_MAC_LOCAL_ADMBIT_OFFSET] =
1131 				eh->ether_shost[DP_MAC_LOCAL_ADMBIT_OFFSET] |
1132 				DP_MAC_LOCAL_ADMBIT_MASK;
1133 }
1134 
1135 #ifdef QCA_SUPPORT_WDS_EXTENDED
1136 static inline bool dp_rx_mlo_igmp_wds_ext_handler(struct dp_txrx_peer *peer)
1137 {
1138 	return qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT, &peer->wds_ext.init);
1139 }
1140 #else
1141 static inline bool dp_rx_mlo_igmp_wds_ext_handler(struct dp_txrx_peer *peer)
1142 {
1143 	return false;
1144 }
1145 #endif
1146 
1147 #ifdef EXT_HYBRID_MLO_MODE
1148 static inline
1149 bool dp_rx_check_ext_hybrid_mode(struct dp_soc *soc, struct dp_vdev *vdev)
1150 {
1151 	return ((DP_MLD_MODE_HYBRID_NONBOND == soc->mld_mode_ap) &&
1152 		(wlan_op_mode_ap == vdev->opmode));
1153 }
1154 #else
1155 static inline
1156 bool dp_rx_check_ext_hybrid_mode(struct dp_soc *soc, struct dp_vdev *vdev)
1157 {
1158 	return false;
1159 }
1160 #endif
1161 
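/**
 * dp_rx_mlo_igmp_handler() - handle IGMP packets received on an MLO AP vdev
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @peer: txrx peer handle
 * @nbuf: IGMP packet buffer
 * @link_id: link id on which the packet was received
 *
 * Return: true if the packet was consumed (delivered here), false if it is
 *	   not an IGMP packet or was dropped by multipass processing
 */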
1162 bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
1163 			    struct dp_vdev *vdev,
1164 			    struct dp_txrx_peer *peer,
1165 			    qdf_nbuf_t nbuf,
1166 			    uint8_t link_id)
1167 {
1168 	qdf_nbuf_t nbuf_copy;
1169 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
1170 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
1171 	struct cdp_tid_rx_stats *tid_stats = &peer->vdev->pdev->stats.
1172 					tid_stats.tid_rx_wbm_stats[0][tid];
1173 
1174 	if (!(qdf_nbuf_is_ipv4_igmp_pkt(nbuf) ||
1175 	      qdf_nbuf_is_ipv6_igmp_pkt(nbuf)))
1176 		return false;
1177 
1178 	if (qdf_unlikely(vdev->multipass_en)) {
1179 		if (dp_rx_multipass_process(peer, nbuf, tid) == false) {
1180 			DP_PEER_PER_PKT_STATS_INC(peer,
1181 						  rx.multipass_rx_pkt_drop,
1182 						  1, link_id);
1183 			return false;
1184 		}
1185 	}
1186 
1187 	if (!peer->bss_peer) {
1188 		if (dp_rx_intrabss_mcbc_fwd(soc, peer, NULL, nbuf,
1189 					    tid_stats, link_id))
1190 			dp_rx_err("forwarding failed");
1191 	}
1192 
1193 	qdf_nbuf_set_next(nbuf, NULL);
1194 
1195 	/* REO sends IGMP to driver only if AP is operating in hybrid
1196 	 *  mld mode.
1197 	 */
1198 
1199 	if (qdf_unlikely(dp_rx_mlo_igmp_wds_ext_handler(peer))) {
1200 		/* send the IGMP to the netdev corresponding to the interface
1201 		 * it was received on
1202 		 */
1203 		goto send_pkt;
1204 	}
1205 
1206 	if (dp_rx_check_ext_hybrid_mode(soc, vdev)) {
1207 		/* send the IGMP to the netdev corresponding to the interface
1208 		 * it was received on
1209 		 */
1210 		goto send_pkt;
1211 	}
1212 
1213 	/*
1214 	 * In the case of ME5/ME6, Backhaul WDS for a mld peer, NAWDS,
1215 	 * legacy non-mlo AP vdev & non-AP vdev (which is very unlikely),
1216 	 * send the igmp pkt on the same link where it was received, as these
1217 	 * features will use peer-based tcl metadata.
1218 	 */
1219 	if (vdev->mcast_enhancement_en ||
1220 	    peer->is_mld_peer ||
1221 	    peer->nawds_enabled ||
1222 	    !vdev->mlo_vdev ||
1223 	    qdf_unlikely(wlan_op_mode_ap != vdev->opmode)) {
1224 		/* send the IGMP to the netdev corresponding to the interface
1225 		 * its received on
1226 		 * it was received on
1227 		goto send_pkt;
1228 	}
1229 
1230 	/* If we are here, it means a legacy non-wds sta is connected
1231 	 * to a hybrid mld ap, so send a clone of the IGMP packet
1232 	 * on the interface where it was received.
1233 	 */
1234 	nbuf_copy = qdf_nbuf_copy(nbuf);
1235 	if (qdf_likely(nbuf_copy))
1236 		dp_rx_deliver_to_stack(soc, vdev, peer, nbuf_copy, NULL);
1237 
1238 	dp_rx_dummy_src_mac(vdev, nbuf);
1239 	/* Set the ml peer valid bit in skb peer metadata, so that osif
1240 	 * can deliver the SA mangled IGMP packet to mld netdev.
1241 	 */
1242 	QDF_NBUF_CB_RX_PEER_ID(nbuf) |= CDP_RX_ML_PEER_VALID_MASK;
1243 	/* Deliver the original IGMP with dummy src on the mld netdev */
1244 send_pkt:
1245 	dp_rx_deliver_to_stack(be_vdev->vdev.pdev->soc,
1246 			       &be_vdev->vdev,
1247 			       peer,
1248 			       nbuf,
1249 			       NULL);
1250 	return true;
1251 }
1252 #else
1253 bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
1254 			    struct dp_vdev *vdev,
1255 			    struct dp_txrx_peer *peer,
1256 			    qdf_nbuf_t nbuf,
1257 			    uint8_t link_id)
1258 {
1259 	return false;
1260 }
1261 #endif
1262 #endif
1263 
1264 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
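/**
 * dp_rx_nf_process() - near-full condition handler for a REO dest ring
 * @int_ctx: per interrupt context
 * @hal_ring_hdl: HAL handle of the REO destination ring
 * @reo_ring_num: REO destination ring number
 * @quota: work budget for this call
 *
 * Marks the ring as near-full once its fill level crosses
 * DP_SRNG_THRESH_NEAR_FULL so the regular processing path can react.
 *
 * Return: work done (0 or 1)
 */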
1265 uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
1266 			  hal_ring_handle_t hal_ring_hdl,
1267 			  uint8_t reo_ring_num,
1268 			  uint32_t quota)
1269 {
1270 	struct dp_soc *soc = int_ctx->soc;
1271 	struct dp_srng *rx_ring = &soc->reo_dest_ring[reo_ring_num];
1272 	uint32_t work_done = 0;
1273 
1274 	if (dp_srng_get_near_full_level(soc, rx_ring) <
1275 			DP_SRNG_THRESH_NEAR_FULL)
1276 		return 0;
1277 
1278 	qdf_atomic_set(&rx_ring->near_full, 1);
1279 	work_done++;
1280 
1281 	return work_done;
1282 }
1283 #endif
1284 
1285 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1286 #ifdef WLAN_FEATURE_11BE_MLO
1287 /**
1288  * dp_rx_intrabss_fwd_mlo_allow() - check if MLO forwarding is allowed
1289  * @ta_peer: transmitter peer handle
1290  * @da_peer: destination peer handle
1291  *
1292  * Return: true - MLO forwarding case, false: not
1293  */
1294 static inline bool
1295 dp_rx_intrabss_fwd_mlo_allow(struct dp_txrx_peer *ta_peer,
1296 			     struct dp_txrx_peer *da_peer)
1297 {
1298 	/* TA peer and DA peer's vdev should be partner MLO vdevs */
1299 	if (dp_peer_find_mac_addr_cmp(&ta_peer->vdev->mld_mac_addr,
1300 				      &da_peer->vdev->mld_mac_addr))
1301 		return false;
1302 
1303 	return true;
1304 }
1305 #else
1306 static inline bool
1307 dp_rx_intrabss_fwd_mlo_allow(struct dp_txrx_peer *ta_peer,
1308 			     struct dp_txrx_peer *da_peer)
1309 {
1310 	return false;
1311 }
1312 #endif
1313 
1314 #ifdef INTRA_BSS_FWD_OFFLOAD
1315 /**
1316  * dp_rx_intrabss_ucast_check_be() - Check if intrabss is allowed
1317  *				     for unicast frame
1318  * @nbuf: RX packet buffer
1319  * @ta_peer: transmitter DP peer handle
1320  * @rx_tlv_hdr: Rx TLV header
1321  * @msdu_metadata: MSDU meta data info
1322  * @params: params to be filled in
1323  *
1324  * Return: true - intrabss allowed
1325  *	   false - not allow
1326  */
1327 static bool
1328 dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
1329 			      struct dp_txrx_peer *ta_peer,
1330 			      uint8_t *rx_tlv_hdr,
1331 			      struct hal_rx_msdu_metadata *msdu_metadata,
1332 			      struct dp_be_intrabss_params *params)
1333 {
1334 	uint8_t dest_chip_id, dest_chip_pmac_id;
1335 	struct dp_vdev_be *be_vdev =
1336 		dp_get_be_vdev_from_dp_vdev(ta_peer->vdev);
1337 	struct dp_soc_be *be_soc =
1338 		dp_get_be_soc_from_dp_soc(params->dest_soc);
1339 	uint16_t da_peer_id;
1340 	struct dp_peer *da_peer = NULL;
1341 
1342 	if (!qdf_nbuf_is_intra_bss(nbuf))
1343 		return false;
1344 
1345 	hal_rx_tlv_get_dest_chip_pmac_id(rx_tlv_hdr,
1346 					 &dest_chip_id,
1347 					 &dest_chip_pmac_id);
1348 
1349 	params->dest_soc =
1350 		dp_mlo_get_soc_ref_by_chip_id(be_soc->ml_ctxt,
1351 					      dest_chip_id);
1352 	if (!params->dest_soc)
1353 		return false;
1354 
1355 	da_peer_id = HAL_RX_PEER_ID_GET(msdu_metadata);
1356 
1357 	da_peer = dp_peer_get_tgt_peer_by_id(params->dest_soc, da_peer_id,
1358 					     DP_MOD_ID_RX);
1359 	if (da_peer) {
1360 		if (da_peer->bss_peer || (da_peer->txrx_peer == ta_peer)) {
1361 			dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
1362 			return false;
1363 		}
1364 		dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
1365 	}
1366 
1367 	qdf_assert_always(dest_chip_id <= (DP_MLO_MAX_DEST_CHIP_ID - 1));
1368 
1369 	if (dest_chip_id == be_soc->mlo_chip_id) {
1370 		if (dest_chip_pmac_id == ta_peer->vdev->pdev->pdev_id)
1371 			params->tx_vdev_id = ta_peer->vdev->vdev_id;
1372 		else
1373 			params->tx_vdev_id =
1374 				be_vdev->partner_vdev_list[dest_chip_id]
1375 							  [dest_chip_pmac_id];
1376 		return true;
1377 	}
1378 
1379 	params->tx_vdev_id =
1380 		be_vdev->partner_vdev_list[dest_chip_id][dest_chip_pmac_id];
1381 
1382 	return true;
1383 }
1384 #else
1385 #ifdef WLAN_MLO_MULTI_CHIP
1386 static bool
1387 dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
1388 			      struct dp_txrx_peer *ta_peer,
1389 			      uint8_t *rx_tlv_hdr,
1390 			      struct hal_rx_msdu_metadata *msdu_metadata,
1391 			      struct dp_be_intrabss_params *params)
1392 {
1393 	uint16_t da_peer_id;
1394 	struct dp_txrx_peer *da_peer;
1395 	bool ret = false;
1396 	uint8_t dest_chip_id;
1397 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1398 	struct dp_vdev_be *be_vdev =
1399 		dp_get_be_vdev_from_dp_vdev(ta_peer->vdev);
1400 	struct dp_soc_be *be_soc =
1401 		dp_get_be_soc_from_dp_soc(params->dest_soc);
1402 
1403 	if (!(qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf)))
1404 		return false;
1405 
1406 	dest_chip_id = HAL_RX_DEST_CHIP_ID_GET(msdu_metadata);
1407 	qdf_assert_always(dest_chip_id <= (DP_MLO_MAX_DEST_CHIP_ID - 1));
1408 	da_peer_id = HAL_RX_PEER_ID_GET(msdu_metadata);
1409 
1410 	/* use dest chip id when TA is MLD peer and DA is legacy */
1411 	if (be_soc->mlo_enabled &&
1412 	    ta_peer->mld_peer &&
1413 	    !(da_peer_id & HAL_RX_DA_IDX_ML_PEER_MASK)) {
1414 		/* validate chip_id, get a ref, and re-assign soc */
1415 		params->dest_soc =
1416 			dp_mlo_get_soc_ref_by_chip_id(be_soc->ml_ctxt,
1417 						      dest_chip_id);
1418 		if (!params->dest_soc)
1419 			return false;
1420 
1421 		da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc,
1422 						     da_peer_id,
1423 						     &txrx_ref_handle,
1424 						     DP_MOD_ID_RX);
1425 		if (!da_peer)
1426 			return false;
1427 
1428 	} else {
1429 		da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc,
1430 						     da_peer_id,
1431 						     &txrx_ref_handle,
1432 						     DP_MOD_ID_RX);
1433 		if (!da_peer)
1434 			return false;
1435 
1436 		params->dest_soc = da_peer->vdev->pdev->soc;
1437 		if (!params->dest_soc)
1438 			goto rel_da_peer;
1439 
1440 	}
1441 
1442 	params->tx_vdev_id = da_peer->vdev->vdev_id;
1443 
1444 	/* If the source or destination peer is in the isolation
1445 	 * list, then don't forward; instead push to the bridge stack.
1446 	 */
1447 	if (dp_get_peer_isolation(ta_peer) ||
1448 	    dp_get_peer_isolation(da_peer)) {
1449 		ret = false;
1450 		goto rel_da_peer;
1451 	}
1452 
1453 	if (da_peer->bss_peer || (da_peer == ta_peer)) {
1454 		ret = false;
1455 		goto rel_da_peer;
1456 	}
1457 
1458 	/* Same vdev, support Intra-BSS */
1459 	if (da_peer->vdev == ta_peer->vdev) {
1460 		ret = true;
1461 		goto rel_da_peer;
1462 	}
1463 
1464 	/* MLO specific Intra-BSS check */
1465 	if (dp_rx_intrabss_fwd_mlo_allow(ta_peer, da_peer)) {
1466 		/* use dest chip id for legacy dest peer */
1467 		if (!(da_peer_id & HAL_RX_DA_IDX_ML_PEER_MASK)) {
1468 			if (!(be_vdev->partner_vdev_list[dest_chip_id][0] ==
1469 			      params->tx_vdev_id) &&
1470 			    !(be_vdev->partner_vdev_list[dest_chip_id][1] ==
1471 			      params->tx_vdev_id)) {
1472 				/*dp_soc_unref_delete(soc);*/
1473 				goto rel_da_peer;
1474 			}
1475 		}
1476 		ret = true;
1477 	}
1478 
1479 rel_da_peer:
1480 	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
1481 	return ret;
1482 }
1483 #else
1484 static bool
1485 dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
1486 			      struct dp_txrx_peer *ta_peer,
1487 			      uint8_t *rx_tlv_hdr,
1488 			      struct hal_rx_msdu_metadata *msdu_metadata,
1489 			      struct dp_be_intrabss_params *params)
1490 {
1491 	uint16_t da_peer_id;
1492 	struct dp_txrx_peer *da_peer;
1493 	bool ret = false;
1494 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1495 
1496 	if (!qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf))
1497 		return false;
1498 
1499 	da_peer_id = dp_rx_peer_metadata_peer_id_get_be(
1500 						params->dest_soc,
1501 						msdu_metadata->da_idx);
1502 
1503 	da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc, da_peer_id,
1504 					     &txrx_ref_handle, DP_MOD_ID_RX);
1505 	if (!da_peer)
1506 		return false;
1507 
1508 	params->tx_vdev_id = da_peer->vdev->vdev_id;
1509 	/* If the source or destination peer is in the isolation
1510 	 * list, then don't forward; instead push to the bridge stack.
1511 	 */
1512 	if (dp_get_peer_isolation(ta_peer) ||
1513 	    dp_get_peer_isolation(da_peer))
1514 		goto rel_da_peer;
1515 
1516 	if (da_peer->bss_peer || da_peer == ta_peer)
1517 		goto rel_da_peer;
1518 
1519 	/* Same vdev, support Intra-BSS */
1520 	if (da_peer->vdev == ta_peer->vdev) {
1521 		ret = true;
1522 		goto rel_da_peer;
1523 	}
1524 
1525 	/* MLO specific Intra-BSS check */
1526 	if (dp_rx_intrabss_fwd_mlo_allow(ta_peer, da_peer)) {
1527 		ret = true;
1528 		goto rel_da_peer;
1529 	}
1530 
1531 rel_da_peer:
1532 	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
1533 	return ret;
1534 }
1535 #endif /* WLAN_MLO_MULTI_CHIP */
1536 #endif /* INTRA_BSS_FWD_OFFLOAD */
1537 
1538 #if defined(WLAN_PKT_CAPTURE_RX_2_0) || defined(CONFIG_WORD_BASED_TLV)
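/**
 * dp_rx_word_mask_subscribe_be() - program the rx_mpdu_start/rx_msdu_end
 *				    word masks into the HTT ring selection cfg
 * @soc: DP soc handle
 * @msg_word: HTT message words to update
 * @rx_filter: opaque pointer to struct htt_rx_ring_tlv_filter
 *
 * Word mask compaction is skipped for monitor rings and when no word mask
 * is configured (FW then applies its defaults).
 *
 * Return: void
 */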
1539 void dp_rx_word_mask_subscribe_be(struct dp_soc *soc,
1540 				  uint32_t *msg_word,
1541 				  void *rx_filter)
1542 {
1543 	struct htt_rx_ring_tlv_filter *tlv_filter =
1544 				(struct htt_rx_ring_tlv_filter *)rx_filter;
1545 
1546 	if (!msg_word || !tlv_filter)
1547 		return;
1548 
1549 	/* tlv_filter->enable is set to 1 for monitor rings */
1550 	if (tlv_filter->enable)
1551 		return;
1552 
1553 	/* if word mask is zero, FW will set the default values */
1554 	if (!(tlv_filter->rx_mpdu_start_wmask > 0 &&
1555 	      tlv_filter->rx_msdu_end_wmask > 0)) {
1556 		return;
1557 	}
1558 
1559 	HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACTION_ENABLE_SET(*msg_word, 1);
1560 
1561 	/* word 14 */
1562 	msg_word += 3;
1563 	*msg_word = 0;
1564 
1565 	HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_WORD_MASK_SET(
1566 				*msg_word,
1567 				tlv_filter->rx_mpdu_start_wmask);
1568 
1569 	/* word 15 */
1570 	msg_word++;
1571 	*msg_word = 0;
1572 	HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_WORD_MASK_SET(
1573 				*msg_word,
1574 				tlv_filter->rx_msdu_end_wmask);
1575 }
1576 #else
1577 void dp_rx_word_mask_subscribe_be(struct dp_soc *soc,
1578 				  uint32_t *msg_word,
1579 				  void *rx_filter)
1580 {
1581 }
1582 #endif
1583 
1584 #if defined(WLAN_MCAST_MLO) && defined(CONFIG_MLO_SINGLE_DEV)
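/**
 * dp_rx_intrabss_mlo_mcbc_fwd() - transmit an intra-BSS mcast/bcast copy on
 *				   the MLO mcast primary vdev
 * @soc: DP soc handle
 * @vdev: vdev on which the frame was received
 * @nbuf_copy: cloned mcast/bcast packet to transmit
 *
 * Return: true if a mcast primary vdev exists and the copy was handed to
 *	   the TX exception path (freed on failure), false otherwise
 */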
1585 static inline
1586 bool dp_rx_intrabss_mlo_mcbc_fwd(struct dp_soc *soc, struct dp_vdev *vdev,
1587 				 qdf_nbuf_t nbuf_copy)
1588 {
1589 	struct dp_vdev *mcast_primary_vdev = NULL;
1590 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
1591 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1592 	struct cdp_tx_exception_metadata tx_exc_metadata = {0};
1593 
1594 	tx_exc_metadata.is_mlo_mcast = 1;
1595 	tx_exc_metadata.tx_encap_type = CDP_INVALID_TX_ENCAP_TYPE;
1596 	tx_exc_metadata.sec_type = CDP_INVALID_SEC_TYPE;
1597 	tx_exc_metadata.peer_id = CDP_INVALID_PEER;
1598 	tx_exc_metadata.tid = CDP_INVALID_TID;
1599 
1600 	mcast_primary_vdev = dp_mlo_get_mcast_primary_vdev(be_soc,
1601 							   be_vdev,
1602 							   DP_MOD_ID_RX);
1603 
1604 	if (!mcast_primary_vdev)
1605 		return false;
1606 
1607 	nbuf_copy = dp_tx_send_exception((struct cdp_soc_t *)
1608 					 mcast_primary_vdev->pdev->soc,
1609 					 mcast_primary_vdev->vdev_id,
1610 					 nbuf_copy, &tx_exc_metadata);
1611 
1612 	if (nbuf_copy)
1613 		qdf_nbuf_free(nbuf_copy);
1614 
1615 	dp_vdev_unref_delete(mcast_primary_vdev->pdev->soc,
1616 			     mcast_primary_vdev, DP_MOD_ID_RX);
1617 	return true;
1618 }
1619 #else
1620 static inline
1621 bool dp_rx_intrabss_mlo_mcbc_fwd(struct dp_soc *soc, struct dp_vdev *vdev,
1622 				 qdf_nbuf_t nbuf_copy)
1623 {
1624 	return false;
1625 }
1626 #endif
1627 
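/**
 * dp_rx_intrabss_mcast_handler_be() - intra-BSS handling for rx mcast frames
 * @soc: DP soc handle
 * @ta_txrx_peer: transmitter txrx peer handle
 * @nbuf_copy: cloned mcast packet
 * @tid_stats: per-TID rx stats of the ring the frame arrived on
 * @link_id: link id on which the packet was received
 *
 * For NAWDS enabled vdevs the copy is sent via the TX exception path,
 * otherwise MLO mcast forwarding is attempted.
 *
 * Return: true if the copy was consumed here, false otherwise
 */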
1628 bool
1629 dp_rx_intrabss_mcast_handler_be(struct dp_soc *soc,
1630 				struct dp_txrx_peer *ta_txrx_peer,
1631 				qdf_nbuf_t nbuf_copy,
1632 				struct cdp_tid_rx_stats *tid_stats,
1633 				uint8_t link_id)
1634 {
1635 	if (qdf_unlikely(ta_txrx_peer->vdev->nawds_enabled)) {
1636 		struct cdp_tx_exception_metadata tx_exc_metadata = {0};
1637 		uint16_t len = QDF_NBUF_CB_RX_PKT_LEN(nbuf_copy);
1638 
1639 		tx_exc_metadata.peer_id = ta_txrx_peer->peer_id;
1640 		tx_exc_metadata.is_intrabss_fwd = 1;
1641 		tx_exc_metadata.tid = HTT_TX_EXT_TID_INVALID;
1642 
1643 		if (dp_tx_send_exception((struct cdp_soc_t *)soc,
1644 					  ta_txrx_peer->vdev->vdev_id,
1645 					  nbuf_copy,
1646 					  &tx_exc_metadata)) {
1647 			DP_PEER_PER_PKT_STATS_INC_PKT(ta_txrx_peer,
1648 						      rx.intra_bss.fail, 1,
1649 						      len, link_id);
1650 			tid_stats->fail_cnt[INTRABSS_DROP]++;
1651 			qdf_nbuf_free(nbuf_copy);
1652 		} else {
1653 			DP_PEER_PER_PKT_STATS_INC_PKT(ta_txrx_peer,
1654 						      rx.intra_bss.pkts, 1,
1655 						      len, link_id);
1656 			tid_stats->intrabss_cnt++;
1657 		}
1658 		return true;
1659 	}
1660 
1661 	if (dp_rx_intrabss_mlo_mcbc_fwd(soc, ta_txrx_peer->vdev,
1662 					nbuf_copy))
1663 		return true;
1664 
1665 	return false;
1666 }
1667 
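/**
 * dp_rx_intrabss_fwd_be() - BE specific intra-BSS forwarding check and action
 * @soc: DP soc handle
 * @ta_peer: transmitter txrx peer handle
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: RX packet buffer
 * @link_id: link id on which the packet was received
 *
 * Return: true if the frame was consumed by intra-BSS forwarding,
 *	   false if it should continue up the regular rx path
 */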
1668 bool dp_rx_intrabss_fwd_be(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
1669 			   uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
1670 			   uint8_t link_id)
1671 {
1672 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
1673 	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
1674 	struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
1675 					tid_stats.tid_rx_stats[ring_id][tid];
1676 	bool ret = false;
1677 	struct dp_be_intrabss_params params;
1678 	struct hal_rx_msdu_metadata msdu_metadata;
1679 
1680 	/* If it is a broadcast pkt (e.g. ARP) and it is not from its own
1681 	 * source, then clone the pkt and send the cloned pkt for
1682 	 * intra-BSS forwarding and the original pkt up the network stack.
1683 	 * Note: how do we handle multicast pkts? Do we forward
1684 	 * all multicast pkts as-is, or let a higher layer module
1685 	 * like igmpsnoop decide whether to forward or not, with
1686 	 * Mcast enhancement?
1687 	 */
1688 	if (qdf_nbuf_is_da_mcbc(nbuf) && !ta_peer->bss_peer) {
1689 		return dp_rx_intrabss_mcbc_fwd(soc, ta_peer, rx_tlv_hdr,
1690 					       nbuf, tid_stats, link_id);
1691 	}
1692 
1693 	if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
1694 					    nbuf))
1695 		return true;
1696 
1697 	hal_rx_msdu_packet_metadata_get_generic_be(rx_tlv_hdr, &msdu_metadata);
1698 	params.dest_soc = soc;
1699 	if (dp_rx_intrabss_ucast_check_be(nbuf, ta_peer, rx_tlv_hdr,
1700 					  &msdu_metadata, &params)) {
1701 		ret = dp_rx_intrabss_ucast_fwd(params.dest_soc, ta_peer,
1702 					       params.tx_vdev_id,
1703 					       rx_tlv_hdr, nbuf, tid_stats,
1704 					       link_id);
1705 	}
1706 
1707 	return ret;
1708 }
1709 #endif
1710 
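/**
 * dp_rx_chain_msdus_be() - chain the msdus of an invalid-peer MPDU
 * @soc: DP soc handle
 * @nbuf: current RX buffer
 * @rx_tlv_hdr: start address of rx tlvs
 * @mac_id: lmac id of the pdev on which the frame arrived
 *
 * Accumulates the msdus belonging to one MPDU received for an invalid
 * peer on the pdev's invalid_peer msdu list.
 *
 * Return: true once the complete MPDU has been chained, false otherwise
 */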
1711 bool dp_rx_chain_msdus_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
1712 			  uint8_t *rx_tlv_hdr, uint8_t mac_id)
1713 {
1714 	bool mpdu_done = false;
1715 	qdf_nbuf_t curr_nbuf = NULL;
1716 	qdf_nbuf_t tmp_nbuf = NULL;
1717 
1718 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1719 
1720 	if (!dp_pdev) {
1721 		dp_rx_debug("%pK: pdev is null for mac_id = %d", soc, mac_id);
1722 		return mpdu_done;
1723 	}
1724 	/* If the invalid peer SG list has reached the max entries, free the
1725 	 * buffers in the list and treat the current buffer as the start of
1726 	 * a new list.
1727 	 * The current logic to detect the last buffer from attn_tlv is not
1728 	 * reliable in the OFDMA UL scenario, hence the max-buffers check
1729 	 * to avoid the list piling up.
1730 	 */
1731 	if (!dp_pdev->first_nbuf ||
1732 	    (dp_pdev->invalid_peer_head_msdu &&
1733 	    QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST
1734 	    (dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS)) {
1735 		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
1736 		dp_pdev->first_nbuf = true;
1737 
1738 		/* If the new nbuf received is the first msdu of the
1739 		 * amsdu and there are msdus in the invalid peer msdu
1740 		 * list, then let us free all the msdus of the invalid
1741 		 * peer msdu list.
1742 		 * This scenario can happen when we start receiving a
1743 		 * new a-msdu even before the previous a-msdu is completely
1744 		 * received.
1745 		 */
1746 		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
1747 		while (curr_nbuf) {
1748 			tmp_nbuf = curr_nbuf->next;
1749 			dp_rx_nbuf_free(curr_nbuf);
1750 			curr_nbuf = tmp_nbuf;
1751 		}
1752 
1753 		dp_pdev->invalid_peer_head_msdu = NULL;
1754 		dp_pdev->invalid_peer_tail_msdu = NULL;
1755 
1756 		dp_monitor_get_mpdu_status(dp_pdev, soc, rx_tlv_hdr);
1757 	}
1758 
1759 	if (qdf_nbuf_is_rx_chfrag_end(nbuf) &&
1760 	    hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
1761 		qdf_assert_always(dp_pdev->first_nbuf);
1762 		dp_pdev->first_nbuf = false;
1763 		mpdu_done = true;
1764 	}
1765 
1766 	/*
1767 	 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
1768 	 * should be NULL here; the check is added for debugging
1769 	 * purposes to catch any corner case.
1770 	 */
1771 	DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
1772 					dp_pdev->invalid_peer_tail_msdu);
1773 	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
1774 			  dp_pdev->invalid_peer_tail_msdu,
1775 			  nbuf);
1776 
1777 	return mpdu_done;
1778 }
1779 
1780 qdf_nbuf_t
1781 dp_rx_wbm_err_reap_desc_be(struct dp_intr *int_ctx, struct dp_soc *soc,
1782 			   hal_ring_handle_t hal_ring_hdl, uint32_t quota,
1783 			   uint32_t *rx_bufs_used)
1784 {
1785 	hal_ring_desc_t ring_desc;
1786 	hal_soc_handle_t hal_soc;
1787 	struct dp_rx_desc *rx_desc;
1788 	union dp_rx_desc_list_elem_t
1789 		*head[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { NULL } };
1790 	union dp_rx_desc_list_elem_t
1791 		*tail[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { NULL } };
1792 	uint32_t rx_bufs_reaped[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { 0 } };
1793 	uint8_t mac_id;
1794 	struct dp_srng *dp_rxdma_srng;
1795 	struct rx_desc_pool *rx_desc_pool;
1796 	qdf_nbuf_t nbuf_head = NULL;
1797 	qdf_nbuf_t nbuf_tail = NULL;
1798 	qdf_nbuf_t nbuf;
1799 	uint8_t msdu_continuation = 0;
1800 	bool process_sg_buf = false;
1801 	QDF_STATUS status;
1802 	struct dp_soc *replenish_soc;
1803 	uint8_t chip_id;
1804 	union hal_wbm_err_info_u wbm_err = { 0 };
1805 
1806 	qdf_assert(soc && hal_ring_hdl);
1807 	hal_soc = soc->hal_soc;
1808 	qdf_assert(hal_soc);
1809 
1810 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
1811 		/* TODO */
1812 		/*
1813 		 * Need API to convert from hal_ring pointer to
1814 		 * Ring Type / Ring Id combo
1815 		 */
1816 		dp_rx_err_err("%pK: HAL RING Access Failed -- %pK",
1817 			      soc, hal_ring_hdl);
1818 		goto done;
1819 	}
1820 
1821 	while (qdf_likely(quota)) {
1822 		ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
1823 
1824 		if (qdf_unlikely(!ring_desc))
1825 			break;
1826 
1827 		/* Get SW Desc from HAL desc */
1828 		if (dp_wbm_get_rx_desc_from_hal_desc_be(soc,
1829 							ring_desc,
1830 							&rx_desc)) {
1831 			dp_rx_err_err("get rx sw desc from hal_desc failed");
1832 			continue;
1833 		}
1834 
1835 		qdf_assert_always(rx_desc);
1836 
1837 		if (!dp_rx_desc_check_magic(rx_desc)) {
1838 			dp_rx_err_err("%pK: Invalid rx_desc %pK",
1839 				      soc, rx_desc);
1840 			continue;
1841 		}
1842 
1843 		/*
1844 		 * This is an unlikely scenario where the host is reaping
1845 		 * a descriptor which it already reaped just a while ago
1846 		 * but has not yet replenished back to HW.
1847 		 * In this case the host will dump the last 128 descriptors,
1848 		 * including the software descriptor rx_desc, and assert.
1849 		 */
1850 		if (qdf_unlikely(!rx_desc->in_use)) {
1851 			DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
1852 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
1853 						   ring_desc, rx_desc);
1854 			continue;
1855 		}
1856 
1857 		status = dp_rx_wbm_desc_nbuf_sanity_check(soc, hal_ring_hdl,
1858 							  ring_desc, rx_desc);
1859 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
1860 			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
1861 			dp_info_rl("Rx error Nbuf %pK sanity check failure!",
1862 				   rx_desc->nbuf);
1863 			rx_desc->in_err_state = 1;
1864 			rx_desc->unmapped = 1;
1865 			rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;
1866 
1867 			dp_rx_add_to_free_desc_list(
1868 				&head[rx_desc->chip_id][rx_desc->pool_id],
1869 				&tail[rx_desc->chip_id][rx_desc->pool_id],
1870 				rx_desc);
1871 			continue;
1872 		}
1873 
1874 		nbuf = rx_desc->nbuf;
1875 
1876 		/*
1877 		 * Read the wbm err info, MSDU info, MPDU info and peer
1878 		 * metadata from the desc and save them in the nbuf CB/TLV.
1879 		 * This info is needed later for the actual nbuf processing.
1880 		 */
1881 		wbm_err.info = dp_rx_wbm_err_copy_desc_info_in_nbuf(
1882 							soc,
1883 							ring_desc,
1884 							nbuf,
1885 							rx_desc->pool_id);
1886 		/*
1887 		 * For WBM ring, expect only MSDU buffers
1888 		 */
1889 		qdf_assert_always(wbm_err.info_bit.buffer_or_desc_type ==
1890 				  HAL_RX_WBM_BUF_TYPE_REL_BUF);
1891 		/*
1892 		 * Errors are handled only if the source is RXDMA or REO
1893 		 */
1894 		qdf_assert((wbm_err.info_bit.wbm_err_src ==
1895 			    HAL_RX_WBM_ERR_SRC_RXDMA) ||
1896 			   (wbm_err.info_bit.wbm_err_src ==
1897 			    HAL_RX_WBM_ERR_SRC_REO));
1898 
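		/*
		 * Unmap the reaped nbuf from its descriptor pool; the IPA SMMU
		 * mapping lock prevents this unmap from racing with IPA SMMU
		 * map/unmap on the same buffer.
		 */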
1899 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
1900 		dp_ipa_rx_buf_smmu_mapping_lock(soc);
1901 		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
1902 		rx_desc->unmapped = 1;
1903 		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
1904 
1905 		if (qdf_unlikely(
1906 			soc->wbm_release_desc_rx_sg_support &&
1907 			dp_rx_is_sg_formation_required(&wbm_err.info_bit))) {
1908 			/* SG is detected from continuation bit */
1909 			msdu_continuation =
1910 				dp_rx_wbm_err_msdu_continuation_get(soc,
1911 								    ring_desc,
1912 								    nbuf);
1913 			if (msdu_continuation &&
1914 			    !(soc->wbm_sg_param.wbm_is_first_msdu_in_sg)) {
1915 				/* Update length from first buffer in SG */
1916 				soc->wbm_sg_param.wbm_sg_desc_msdu_len =
1917 					hal_rx_msdu_start_msdu_len_get(
1918 						soc->hal_soc,
1919 						qdf_nbuf_data(nbuf));
1920 				soc->wbm_sg_param.wbm_is_first_msdu_in_sg =
1921 									true;
1922 			}
1923 
1924 			if (msdu_continuation) {
1925 				/* MSDU continued packets */
1926 				qdf_nbuf_set_rx_chfrag_cont(nbuf, 1);
1927 				QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
1928 					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
1929 			} else {
1930 				/* This is the terminal packet in SG */
1931 				qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
1932 				qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
1933 				QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
1934 					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
1935 				process_sg_buf = true;
1936 			}
1937 		} else {
1938 			qdf_nbuf_set_rx_chfrag_cont(nbuf, 0);
1939 		}
1940 
1941 		rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;
1942 
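		/*
		 * For SG, msdus are accumulated on the per-soc wbm_sg list and
		 * the whole chain is merged into the delivery list once the
		 * terminal buffer arrives, unless the buffers are absorbed by
		 * dp_rx_buffer_pool_refill(); non-SG buffers are appended
		 * directly.
		 */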
1943 		if (qdf_nbuf_is_rx_chfrag_cont(nbuf) || process_sg_buf) {
1944 			DP_RX_LIST_APPEND(soc->wbm_sg_param.wbm_sg_nbuf_head,
1945 					  soc->wbm_sg_param.wbm_sg_nbuf_tail,
1946 					  nbuf);
1947 			if (process_sg_buf) {
1948 				if (!dp_rx_buffer_pool_refill(
1949 					soc,
1950 					soc->wbm_sg_param.wbm_sg_nbuf_head,
1951 					rx_desc->pool_id))
1952 					DP_RX_MERGE_TWO_LIST(
1953 					  nbuf_head, nbuf_tail,
1954 					  soc->wbm_sg_param.wbm_sg_nbuf_head,
1955 					  soc->wbm_sg_param.wbm_sg_nbuf_tail);
1956 				dp_rx_wbm_sg_list_last_msdu_war(soc);
1957 				dp_rx_wbm_sg_list_reset(soc);
1958 				process_sg_buf = false;
1959 			}
1960 		} else if (!dp_rx_buffer_pool_refill(soc, nbuf,
1961 						     rx_desc->pool_id)) {
1962 			DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf);
1963 		}
1964 
1965 		dp_rx_add_to_free_desc_list
1966 			(&head[rx_desc->chip_id][rx_desc->pool_id],
1967 			 &tail[rx_desc->chip_id][rx_desc->pool_id], rx_desc);
1968 
1969 		/*
1970 		 * If the continuation bit is set, the MSDU is spread
1971 		 * across multiple buffers; do not decrement the quota
1972 		 * until all buffers of that MSDU are reaped.
1973 		 */
1974 		if (qdf_likely(!msdu_continuation))
1975 			quota -= 1;
1976 	}
1977 done:
1978 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
1979 
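	/*
	 * Replenish the reaped buffers back to the refill ring of the soc
	 * (per chip) and mac (per pdev) that each buffer came from.
	 */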
1980 	for (chip_id = 0; chip_id < WLAN_MAX_MLO_CHIPS; chip_id++) {
1981 		for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
1982 			/*
1983 			 * continue with next mac_id if no pkts were reaped
1984 			 * from that pool
1985 			 */
1986 			if (!rx_bufs_reaped[chip_id][mac_id])
1987 				continue;
1988 
1989 			replenish_soc = dp_rx_replenish_soc_get(soc, chip_id);
1990 
1991 			dp_rxdma_srng =
1992 				&replenish_soc->rx_refill_buf_ring[mac_id];
1993 
1994 			rx_desc_pool = &replenish_soc->rx_desc_buf[mac_id];
1995 
1996 			dp_rx_buffers_replenish_simple(replenish_soc, mac_id,
1997 						dp_rxdma_srng,
1998 						rx_desc_pool,
1999 						rx_bufs_reaped[chip_id][mac_id],
2000 						&head[chip_id][mac_id],
2001 						&tail[chip_id][mac_id]);
2002 			*rx_bufs_used += rx_bufs_reaped[chip_id][mac_id];
2003 		}
2004 	}
2005 	return nbuf_head;
2006 }
2007 
2008 #ifdef WLAN_FEATURE_11BE_MLO
2009 /**
2010  * check_extap_multicast_loopback() - Check if rx packet is a loopback packet
2011  * @vdev: vdev on which rx packet is received
2012  * @addr: src address of the received packet
2013  *
2014  * Return: true if @addr matches the vdev or MLD mac address, else false
2015  */
2016 static bool check_extap_multicast_loopback(struct dp_vdev *vdev, uint8_t *addr)
2017 {
2018 	/* if the src mac addr matches the vdev mac address, drop the pkt */
2019 	if (!(qdf_mem_cmp(addr, vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE)))
2020 		return true;
2021 
2022 	/* if the src mac addr matches the mld mac address, drop the pkt */
2023 	if (!(qdf_mem_cmp(addr, vdev->mld_mac_addr.raw, QDF_MAC_ADDR_SIZE)))
2024 		return true;
2025 
2026 	return false;
2027 }
2028 #else
2029 static bool check_extap_multicast_loopback(struct dp_vdev *vdev, uint8_t *addr)
2030 {
2031 	return false;
2032 }
2033 #endif
2034 
2035 QDF_STATUS
2036 dp_rx_null_q_desc_handle_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
2037 			    uint8_t *rx_tlv_hdr, uint8_t pool_id,
2038 			    struct dp_txrx_peer *txrx_peer,
2039 			    bool is_reo_exception,
2040 			    uint8_t link_id)
2041 {
2042 	uint32_t pkt_len;
2043 	uint16_t msdu_len;
2044 	struct dp_vdev *vdev;
2045 	uint8_t tid;
2046 	qdf_ether_header_t *eh;
2047 	struct hal_rx_msdu_metadata msdu_metadata;
2048 	uint16_t sa_idx = 0;
2049 	bool is_eapol = 0;
2050 	bool enh_flag;
2051 
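	/* Populate the nbuf CB with per-msdu flags parsed from the rx TLVs */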
2052 	qdf_nbuf_set_rx_chfrag_start(
2053 				nbuf,
2054 				hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
2055 							       rx_tlv_hdr));
2056 	qdf_nbuf_set_rx_chfrag_end(nbuf,
2057 				   hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
2058 								 rx_tlv_hdr));
2059 	qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
2060 								  rx_tlv_hdr));
2061 	qdf_nbuf_set_da_valid(nbuf,
2062 			      hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
2063 							      rx_tlv_hdr));
2064 	qdf_nbuf_set_sa_valid(nbuf,
2065 			      hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
2066 							      rx_tlv_hdr));
2067 
2068 	tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr);
2069 	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
2070 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
2071 	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;
2072 
2073 	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
2074 		if (dp_rx_check_pkt_len(soc, pkt_len))
2075 			goto drop_nbuf;
2076 
2077 		/* Set length in nbuf */
2078 		qdf_nbuf_set_pktlen(
2079 			nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
2080 		qdf_assert_always(nbuf->data == rx_tlv_hdr);
2081 	}
2082 
2083 	/*
2084 	 * Check if DMA completed -- msdu_done is the last bit
2085 	 * to be written
2086 	 */
2087 	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
2088 		dp_err_rl("MSDU DONE failure");
2089 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
2090 				     QDF_TRACE_LEVEL_INFO);
2091 		qdf_assert(0);
2092 	}
2093 
2094 	if (!txrx_peer &&
2095 	    dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
2096 							  rx_tlv_hdr, nbuf))
2097 		return QDF_STATUS_E_FAILURE;
2098 
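	/*
	 * No txrx peer for this frame: chain the msdus on the pdev invalid
	 * peer list (or hand over the single msdu directly when WBM SG
	 * support is enabled) and trigger the invalid peer handler.
	 */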
2099 	if (!txrx_peer) {
2100 		bool mpdu_done = false;
2101 		struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
2102 
2103 		if (!pdev) {
2104 			dp_err_rl("pdev is null for pool_id = %d", pool_id);
2105 			return QDF_STATUS_E_FAILURE;
2106 		}
2107 
2108 		dp_err_rl("txrx_peer is NULL");
2109 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
2110 				 qdf_nbuf_len(nbuf));
2111 
2112 		/* QCN9000 has WBM release ring SG support enabled */
2113 		if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support)) {
2114 			mpdu_done = true;
2115 			nbuf->next = NULL;
2116 			/* Trigger invalid peer handler wrapper */
2117 			dp_rx_process_invalid_peer_wrapper(soc,
2118 							   nbuf,
2119 							   mpdu_done,
2120 							   pool_id);
2121 		} else {
2122 			mpdu_done = soc->arch_ops.dp_rx_chain_msdus(soc, nbuf,
2123 								    rx_tlv_hdr,
2124 								    pool_id);
2125 			/* Trigger invalid peer handler wrapper */
2126 			dp_rx_process_invalid_peer_wrapper(
2127 					soc,
2128 					pdev->invalid_peer_head_msdu,
2129 					mpdu_done, pool_id);
2130 		}
2131 
2132 		if (mpdu_done) {
2133 			pdev->invalid_peer_head_msdu = NULL;
2134 			pdev->invalid_peer_tail_msdu = NULL;
2135 		}
2136 
2137 		return QDF_STATUS_E_FAILURE;
2138 	}
2139 
2140 	vdev = txrx_peer->vdev;
2141 	if (!vdev) {
2142 		dp_err_rl("Null vdev!");
2143 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
2144 		goto drop_nbuf;
2145 	}
2146 
2147 	/*
2148 	 * Advance the packet start pointer past the total size of the
2149 	 * pre-header TLVs
2150 	 */
2151 	if (qdf_nbuf_is_frag(nbuf))
2152 		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
2153 	else
2154 		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
2155 				   soc->rx_pkt_tlv_size));
2156 
2157 	DP_STATS_INC_PKT(vdev, rx_i.null_q_desc_pkt, 1, qdf_nbuf_len(nbuf));
2158 
2159 	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);
2160 
2161 	if (dp_rx_err_drop_3addr_mcast(vdev, rx_tlv_hdr)) {
2162 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.mcast_3addr_drop, 1,
2163 					  link_id);
2164 		goto drop_nbuf;
2165 	}
2166 
2167 	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
2168 		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);
2169 
2170 		if ((sa_idx < 0) ||
2171 		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
2172 			DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
2173 			goto drop_nbuf;
2174 		}
2175 	}
2176 
2177 	if ((!soc->mec_fw_offload) &&
2178 	    dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf)) {
2179 		/* this is a looped back MCBC pkt, drop it */
2180 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
2181 					      qdf_nbuf_len(nbuf), link_id);
2182 		goto drop_nbuf;
2183 	}
2184 
2185 	/*
2186 	 * In qwrap mode, if the received packet matches any of the vdev
2187 	 * mac addresses, drop it. Do not receive multicast packets
2188 	 * originating from any proxy STA.
2189 	 */
2190 	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
2191 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
2192 					      qdf_nbuf_len(nbuf), link_id);
2193 		goto drop_nbuf;
2194 	}
2195 
2196 	if (qdf_unlikely(txrx_peer->nawds_enabled &&
2197 			 hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
2198 							rx_tlv_hdr))) {
2199 		dp_err_rl("free buffer for multicast packet");
2200 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.nawds_mcast_drop, 1,
2201 					  link_id);
2202 		goto drop_nbuf;
2203 	}
2204 
2205 	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
2206 		dp_err_rl("mcast Policy Check Drop pkt");
2207 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.policy_check_drop, 1,
2208 					  link_id);
2209 		goto drop_nbuf;
2210 	}
2211 	/* WDS Source Port Learning */
2212 	if (!soc->ast_offload_support &&
2213 	    qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
2214 		       vdev->wds_enabled))
2215 		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, txrx_peer, nbuf,
2216 					msdu_metadata);
2217 
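	/*
	 * For a unicast frame, set up the rx TID queue if it has not been
	 * created yet; a missing REO queue is what typically lands frames in
	 * this NULL-queue exception path.
	 */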
2218 	if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) {
2219 		struct dp_peer *peer;
2220 		struct dp_rx_tid *rx_tid;
2221 
2222 		peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id,
2223 					     DP_MOD_ID_RX_ERR);
2224 		if (peer) {
2225 			rx_tid = &peer->rx_tid[tid];
2226 			qdf_spin_lock_bh(&rx_tid->tid_lock);
2227 			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
2228 			if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned)
2229 				dp_rx_tid_setup_wifi3(peer, tid, 1,
2230 						      IEEE80211_SEQ_MAX);
2231 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2232 			dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
2233 		}
2234 	}
2235 
2236 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2237 
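	/*
	 * For an unauthorized peer, only EAPOL/WAPI frames destined to this
	 * vdev are allowed further; everything else is dropped.
	 */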
2238 	if (!txrx_peer->authorize) {
2239 		is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf);
2240 
2241 		if (is_eapol || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
2242 			if (!dp_rx_err_match_dhost(eh, vdev))
2243 				goto drop_nbuf;
2244 		} else {
2245 			goto drop_nbuf;
2246 		}
2247 	}
2248 
2249 	/*
2250 	 * Drop packets in this path if cce_match is found. Packets come
2251 	 * via the following paths depending on whether the tidQ is set up:
2252 	 * 1. tidQ is set up: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE and
2253 	 *    cce_match = 1
2254 	 *    Packets with WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE are already
2255 	 *    dropped.
2256 	 * 2. tidQ is not set up: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ERROR and
2257 	 *    cce_match = 1
2258 	 *    These packets need to be dropped and should not be delivered
2259 	 *    to the stack.
2260 	 */
2261 	if (qdf_unlikely(dp_rx_err_cce_drop(soc, vdev, nbuf, rx_tlv_hdr)))
2262 		goto drop_nbuf;
2263 
2264 	/*
2265 	 * In extap mode, if the received packet matches the mld mac address,
2266 	 * drop it. For non-IP packets, address conversion might not be
2267 	 * possible, so the MEC entry is not updated, resulting in a loopback.
2268 	 */
2269 	if (qdf_unlikely(check_extap_multicast_loopback(vdev,
2270 							eh->ether_shost))) {
2271 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
2272 					      qdf_nbuf_len(nbuf), link_id);
2273 		goto drop_nbuf;
2274 	}
2275 
2276 	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
2277 		qdf_nbuf_set_next(nbuf, NULL);
2278 		dp_rx_deliver_raw(vdev, nbuf, txrx_peer, link_id);
2279 	} else {
2280 		enh_flag = vdev->pdev->enhanced_stats_en;
2281 		qdf_nbuf_set_next(nbuf, NULL);
2282 		DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
2283 					  enh_flag);
2284 		/*
2285 		 * Update the protocol tag in SKB based on
2286 		 * CCE metadata
2287 		 */
2288 		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
2289 					  EXCEPTION_DEST_RING_ID,
2290 					  true, true);
2291 
2292 		/* Update the flow tag in SKB based on FSE metadata */
2293 		dp_rx_update_flow_tag(soc, vdev, nbuf,
2294 				      rx_tlv_hdr, true);
2295 
2296 		if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
2297 				 soc->hal_soc, rx_tlv_hdr) &&
2298 				 (vdev->rx_decap_type ==
2299 				  htt_cmn_pkt_type_ethernet))) {
2300 			DP_PEER_MC_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
2301 					    enh_flag, link_id);
2302 
2303 			if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost))
2304 				DP_PEER_BC_INCC_PKT(txrx_peer, 1,
2305 						    qdf_nbuf_len(nbuf),
2306 						    enh_flag,
2307 						    link_id);
2308 		} else {
2309 			DP_PEER_UC_INCC_PKT(txrx_peer, 1,
2310 					    qdf_nbuf_len(nbuf),
2311 					    enh_flag,
2312 					    link_id);
2313 		}
2314 
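		/* Mark as an exception frame for the stack delivery below */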
2315 		qdf_nbuf_set_exc_frame(nbuf, 1);
2316 
2317 		if (qdf_unlikely(vdev->multipass_en)) {
2318 			if (dp_rx_multipass_process(txrx_peer, nbuf,
2319 						    tid) == false) {
2320 				DP_PEER_PER_PKT_STATS_INC
2321 					(txrx_peer,
2322 					 rx.multipass_rx_pkt_drop,
2323 					 1, link_id);
2324 				goto drop_nbuf;
2325 			}
2326 		}
2327 
2328 		dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf, NULL,
2329 					    is_eapol);
2330 	}
2331 	return QDF_STATUS_SUCCESS;
2332 
2333 drop_nbuf:
2334 	dp_rx_nbuf_free(nbuf);
2335 	return QDF_STATUS_E_FAILURE;
2336 }
2337