1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "cdp_txrx_cmn_struct.h"
21 #include "hal_hw_headers.h"
22 #include "dp_types.h"
23 #include "dp_rx.h"
24 #include "dp_tx.h"
25 #include "dp_be_rx.h"
26 #include "dp_peer.h"
27 #include "hal_rx.h"
28 #include "hal_be_rx.h"
29 #include "hal_api.h"
30 #include "hal_be_api.h"
31 #include "qdf_nbuf.h"
32 #ifdef MESH_MODE_SUPPORT
33 #include "if_meta_hdr.h"
34 #endif
35 #include "dp_internal.h"
36 #include "dp_ipa.h"
37 #ifdef FEATURE_WDS
38 #include "dp_txrx_wds.h"
39 #endif
40 #include "dp_hist.h"
41 #include "dp_rx_buffer_pool.h"
42 
43 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
44 static inline void
45 dp_rx_update_flow_info(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
46 {
47 	uint32_t fse_metadata;
48 
49 	/* Set the flow idx valid flag only when there is no timeout */
50 	if (hal_rx_msdu_flow_idx_timeout_be(rx_tlv_hdr))
51 		return;
52 
53 	/*
54 	 * If invalid bit is not set and the fse metadata indicates that it is
55 	 * a valid SFE flow match in FSE, do not set the rx flow tag and let it
56 	 * go via stack instead of VP.
57 	 */
58 	fse_metadata = hal_rx_msdu_fse_metadata_get_be(rx_tlv_hdr);
59 	if (!hal_rx_msdu_flow_idx_invalid_be(rx_tlv_hdr) && (fse_metadata == DP_RX_FSE_FLOW_MATCH_SFE))
60 		return;
61 
62 	qdf_nbuf_set_rx_flow_idx_valid(nbuf,
63 				 !hal_rx_msdu_flow_idx_invalid_be(rx_tlv_hdr));
64 }
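/*
 * Illustrative summary of the checks above (a reading aid derived from the
 * code, not an exhaustive truth table):
 *  - flow idx timeout set            -> leave the valid flag untouched
 *  - flow idx valid + SFE metadata   -> leave untouched, deliver via stack
 *  - flow idx valid, non-SFE match   -> mark rx flow idx valid on the nbuf
 *  - flow idx invalid                -> mark rx flow idx not valid
 */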
65 #else
66 static inline void
67 dp_rx_update_flow_info(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
68 {
69 }
70 #endif
71 
72 #ifdef DP_RX_MSDU_DONE_FAIL_HISTORY
73 static inline void
74 dp_rx_msdu_done_fail_event_record(struct dp_soc *soc,
75 				  struct dp_rx_desc *rx_desc,
76 				  qdf_nbuf_t nbuf)
77 {
78 	struct dp_msdu_done_fail_entry *entry;
79 	uint32_t idx;
80 
81 	if (qdf_unlikely(!soc->msdu_done_fail_hist))
82 		return;
83 
84 	idx = dp_history_get_next_index(&soc->msdu_done_fail_hist->index,
85 					DP_MSDU_DONE_FAIL_HIST_MAX);
86 	entry = &soc->msdu_done_fail_hist->entry[idx];
87 	entry->paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
88 
89 	if (rx_desc)
90 		entry->sw_cookie = rx_desc->cookie;
91 	else
92 		entry->sw_cookie = 0xDEAD;
93 }
94 #else
95 static inline void
96 dp_rx_msdu_done_fail_event_record(struct dp_soc *soc,
97 				  struct dp_rx_desc *rx_desc,
98 				  qdf_nbuf_t nbuf)
99 {
100 }
101 #endif
102 
103 #ifndef AST_OFFLOAD_ENABLE
104 static void
105 dp_rx_wds_learn(struct dp_soc *soc,
106 		struct dp_vdev *vdev,
107 		uint8_t *rx_tlv_hdr,
108 		struct dp_txrx_peer *txrx_peer,
109 		qdf_nbuf_t nbuf)
110 {
111 	struct hal_rx_msdu_metadata msdu_metadata;
112 
113 	hal_rx_msdu_packet_metadata_get_generic_be(rx_tlv_hdr, &msdu_metadata);
114 	/* WDS Source Port Learning */
115 	if (qdf_likely(vdev->wds_enabled))
116 		dp_rx_wds_srcport_learn(soc,
117 				rx_tlv_hdr,
118 				txrx_peer,
119 				nbuf,
120 				msdu_metadata);
121 }
122 #else
123 #ifdef QCA_SUPPORT_WDS_EXTENDED
124 /**
125  * dp_wds_ext_peer_learn_be() - function to send event to control
126  * path on receiving 1st 4-address frame from backhaul.
127  * @soc: DP soc
128  * @ta_txrx_peer: WDS repeater txrx peer
129  * @rx_tlv_hdr: start address of rx tlvs
130  * @nbuf: RX packet buffer
131  *
132  * Return: void
133  */
134 static inline void dp_wds_ext_peer_learn_be(struct dp_soc *soc,
135 					    struct dp_txrx_peer *ta_txrx_peer,
136 					    uint8_t *rx_tlv_hdr,
137 					    qdf_nbuf_t nbuf)
138 {
139 	uint8_t wds_ext_src_mac[QDF_MAC_ADDR_SIZE];
140 	struct dp_peer *ta_base_peer;
141 
142 	/* Instead of checking whether addr4 is valid in the per-packet
143 	 * path, check the init bit, which is set on reception of the
144 	 * first addr4-valid packet.
145 	 */
146 	if (!ta_txrx_peer->vdev->wds_ext_enabled ||
147 	    qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT,
148 				&ta_txrx_peer->wds_ext.init))
149 		return;
150 
151 	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
152 	    (qdf_nbuf_is_fr_ds_set(nbuf) && qdf_nbuf_is_to_ds_set(nbuf))) {
153 		qdf_atomic_test_and_set_bit(WDS_EXT_PEER_INIT_BIT,
154 					    &ta_txrx_peer->wds_ext.init);
155 
156 		if (qdf_unlikely(ta_txrx_peer->nawds_enabled &&
157 				 ta_txrx_peer->is_mld_peer)) {
158 			ta_base_peer = dp_get_primary_link_peer_by_id(
159 							soc,
160 							ta_txrx_peer->peer_id,
161 							DP_MOD_ID_RX);
162 		} else {
163 			ta_base_peer = dp_peer_get_ref_by_id(
164 							soc,
165 							ta_txrx_peer->peer_id,
166 							DP_MOD_ID_RX);
167 		}
168 
169 		if (!ta_base_peer)
170 			return;
171 
172 		qdf_mem_copy(wds_ext_src_mac, &ta_base_peer->mac_addr.raw[0],
173 			     QDF_MAC_ADDR_SIZE);
174 		dp_peer_unref_delete(ta_base_peer, DP_MOD_ID_RX);
175 
176 		soc->cdp_soc.ol_ops->rx_wds_ext_peer_learn(
177 						soc->ctrl_psoc,
178 						ta_txrx_peer->peer_id,
179 						ta_txrx_peer->vdev->vdev_id,
180 						wds_ext_src_mac);
181 	}
182 }
183 #else
184 static inline void dp_wds_ext_peer_learn_be(struct dp_soc *soc,
185 					    struct dp_txrx_peer *ta_txrx_peer,
186 					    uint8_t *rx_tlv_hdr,
187 					    qdf_nbuf_t nbuf)
188 {
189 }
190 #endif
191 static void
192 dp_rx_wds_learn(struct dp_soc *soc,
193 		struct dp_vdev *vdev,
194 		uint8_t *rx_tlv_hdr,
195 		struct dp_txrx_peer *ta_txrx_peer,
196 		qdf_nbuf_t nbuf)
197 {
198 	dp_wds_ext_peer_learn_be(soc, ta_txrx_peer, rx_tlv_hdr, nbuf);
199 }
200 #endif
201 
202 #ifdef DP_RX_PEEK_MSDU_DONE_WAR
203 static inline int dp_rx_war_peek_msdu_done(struct dp_soc *soc,
204 					   struct dp_rx_desc *rx_desc)
205 {
206 	uint8_t *rx_tlv_hdr;
207 
208 	qdf_nbuf_sync_for_cpu(soc->osdev, rx_desc->nbuf, QDF_DMA_FROM_DEVICE);
209 	rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
210 
211 	return hal_rx_tlv_msdu_done_get_be(rx_tlv_hdr);
212 }
213 
214 /**
215  * dp_rx_delink_n_rel_rx_desc() - unmap & free the nbuf in the rx_desc
216  * @soc: DP SoC handle
217  * @rx_desc: rx_desc handle of the nbuf to be unmapped & freed
218  * @reo_ring_num: REO_RING_NUM corresponding to the REO for which the
219  *		  bottom half is being serviced.
220  *
221  * Return: None
222  */
223 static inline void
224 dp_rx_delink_n_rel_rx_desc(struct dp_soc *soc, struct dp_rx_desc *rx_desc,
225 			   uint8_t reo_ring_num)
226 {
227 	if (!rx_desc)
228 		return;
229 
230 	dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
231 	dp_rx_nbuf_free(rx_desc->nbuf);
232 	/*
233 	 * RX_DESC flags:
234 	 * in_use = 0 will be set when this rx_desc is added to local freelist
235 	 * unmapped = 1 will be set by dp_rx_nbuf_unmap
236 	 * in_err_state = 0 will be set during replenish
237 	 * has_reuse_nbuf need not be touched.
238 	 * msdu_done_fail = 0 should be set here ..!!
239 	 */
240 	rx_desc->msdu_done_fail = 0;
241 }
242 
243 static inline struct dp_rx_desc *
244 dp_rx_war_store_msdu_done_fail_desc(struct dp_soc *soc,
245 				    struct dp_rx_desc *rx_desc,
246 				    uint8_t reo_ring_num)
247 {
248 	struct dp_rx_msdu_done_fail_desc_list *msdu_done_fail_desc_list =
249 						&soc->msdu_done_fail_desc_list;
250 	struct dp_rx_desc *old_rx_desc;
251 	uint32_t idx;
252 
253 	idx = dp_get_next_index(&msdu_done_fail_desc_list->index,
254 				DP_MSDU_DONE_FAIL_DESCS_MAX);
255 
256 	old_rx_desc = msdu_done_fail_desc_list->msdu_done_fail_descs[idx];
257 	dp_rx_delink_n_rel_rx_desc(soc, old_rx_desc, reo_ring_num);
258 
259 	msdu_done_fail_desc_list->msdu_done_fail_descs[idx] = rx_desc;
260 
261 	return old_rx_desc;
262 }
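/*
 * Illustrative sketch of the WAR above (slot numbering and the list depth
 * are hypothetical): a descriptor whose MSDU-done bit never asserted is
 * parked in a small circular list instead of being recycled immediately;
 * the entry it displaces is the one that gets unmapped, freed and handed
 * back to the caller for the free-desc list.  E.g. with a depth of 4:
 *
 *   store(descA) -> slot 0, returns NULL   (nothing evicted yet)
 *   store(descB) -> slot 1, returns NULL
 *   ...
 *   store(descE) -> slot 0, returns descA  (descA delinked and recycled)
 */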
263 
264 #else
265 static inline int dp_rx_war_peek_msdu_done(struct dp_soc *soc,
266 					   struct dp_rx_desc *rx_desc)
267 {
268 	return 1;
269 }
270 
271 static inline struct dp_rx_desc *
272 dp_rx_war_store_msdu_done_fail_desc(struct dp_soc *soc,
273 				    struct dp_rx_desc *rx_desc,
274 				    uint8_t reo_ring_num)
275 {
276 	return NULL;
277 }
278 #endif
279 
280 uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
281 			  hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
282 			  uint32_t quota)
283 {
284 	hal_ring_desc_t ring_desc;
285 	hal_ring_desc_t last_prefetched_hw_desc;
286 	hal_soc_handle_t hal_soc;
287 	struct dp_rx_desc *rx_desc = NULL;
288 	struct dp_rx_desc *last_prefetched_sw_desc = NULL;
289 	qdf_nbuf_t nbuf, next;
290 	bool near_full;
291 	union dp_rx_desc_list_elem_t *head[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
292 	union dp_rx_desc_list_elem_t *tail[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
293 	uint32_t num_pending = 0;
294 	uint32_t rx_bufs_used = 0, rx_buf_cookie;
295 	uint16_t msdu_len = 0;
296 	uint16_t peer_id;
297 	uint8_t vdev_id;
298 	struct dp_txrx_peer *txrx_peer;
299 	dp_txrx_ref_handle txrx_ref_handle = NULL;
300 	struct dp_vdev *vdev;
301 	uint32_t pkt_len = 0;
302 	enum hal_reo_error_status error;
303 	uint8_t *rx_tlv_hdr;
304 	uint32_t rx_bufs_reaped[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
305 	uint8_t mac_id = 0;
306 	struct dp_pdev *rx_pdev;
307 	uint8_t enh_flag;
308 	struct dp_srng *dp_rxdma_srng;
309 	struct rx_desc_pool *rx_desc_pool;
310 	struct dp_soc *soc = int_ctx->soc;
311 	struct cdp_tid_rx_stats *tid_stats;
312 	qdf_nbuf_t nbuf_head;
313 	qdf_nbuf_t nbuf_tail;
314 	qdf_nbuf_t deliver_list_head;
315 	qdf_nbuf_t deliver_list_tail;
316 	uint32_t num_rx_bufs_reaped = 0;
317 	uint32_t intr_id;
318 	struct hif_opaque_softc *scn;
319 	int32_t tid = 0;
320 	bool is_prev_msdu_last = true;
321 	uint32_t num_entries_avail = 0;
322 	uint32_t rx_ol_pkt_cnt = 0;
323 	uint32_t num_entries = 0;
324 	QDF_STATUS status;
325 	qdf_nbuf_t ebuf_head;
326 	qdf_nbuf_t ebuf_tail;
327 	uint8_t pkt_capture_offload = 0;
328 	struct dp_srng *rx_ring = &soc->reo_dest_ring[reo_ring_num];
329 	int max_reap_limit, ring_near_full;
330 	struct dp_soc *replenish_soc;
331 	uint8_t chip_id;
332 	uint64_t current_time = 0;
333 	uint32_t old_tid;
334 	uint32_t peer_ext_stats;
335 	uint32_t dsf;
336 	uint32_t l3_pad;
337 	uint8_t link_id = 0;
338 	uint16_t buf_size;
339 
340 	DP_HIST_INIT();
341 
342 	qdf_assert_always(soc && hal_ring_hdl);
343 	hal_soc = soc->hal_soc;
344 	qdf_assert_always(hal_soc);
345 
346 	scn = soc->hif_handle;
347 	intr_id = int_ctx->dp_intr_id;
348 	num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);
349 	dp_runtime_pm_mark_last_busy(soc);
350 	buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx);
351 
352 more_data:
353 	/* reset local variables here to be re-used in the function */
354 	nbuf_head = NULL;
355 	nbuf_tail = NULL;
356 	deliver_list_head = NULL;
357 	deliver_list_tail = NULL;
358 	txrx_peer = NULL;
359 	vdev = NULL;
360 	num_rx_bufs_reaped = 0;
361 	ebuf_head = NULL;
362 	ebuf_tail = NULL;
363 	ring_near_full = 0;
364 	max_reap_limit = dp_rx_get_loop_pkt_limit(soc);
365 
366 	qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
367 	qdf_mem_zero(head, sizeof(head));
368 	qdf_mem_zero(tail, sizeof(tail));
369 	old_tid = 0xff;
370 	dsf = 0;
371 	peer_ext_stats = 0;
372 	rx_pdev = NULL;
373 	tid_stats = NULL;
374 
375 	dp_pkt_get_timestamp(&current_time);
376 
377 	ring_near_full = _dp_srng_test_and_update_nf_params(soc, rx_ring,
378 							    &max_reap_limit);
379 
380 	peer_ext_stats = wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
381 	if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
382 		/*
383 		 * Need API to convert from hal_ring pointer to
384 		 * Ring Type / Ring Id combo
385 		 */
386 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
387 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
388 			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
389 		goto done;
390 	}
391 
392 	hal_srng_update_ring_usage_wm_no_lock(soc->hal_soc, hal_ring_hdl);
393 
394 	if (!num_pending)
395 		num_pending = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0);
396 
397 	if (num_pending > quota)
398 		num_pending = quota;
399 
400 	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_pending);
401 	last_prefetched_hw_desc = dp_srng_dst_prefetch_32_byte_desc(hal_soc,
402 							    hal_ring_hdl,
403 							    num_pending);
404 	/*
405 	 * Start reaping the buffers from the REO ring and queue
406 	 * them in the per-vdev queue.
407 	 * Process the received pkts in a separate per-vdev loop.
408 	 */
409 	while (qdf_likely(num_pending)) {
410 		ring_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);
411 
412 		if (qdf_unlikely(!ring_desc))
413 			break;
414 
415 		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
416 
417 		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
418 			dp_rx_err("%pK: HAL RING 0x%pK:error %d",
419 				  soc, hal_ring_hdl, error);
420 			DP_STATS_INC(soc, rx.err.hal_reo_error[reo_ring_num],
421 				     1);
422 			/* Don't know how to deal with this -- assert */
423 			qdf_assert(0);
424 		}
425 
426 		dp_rx_ring_record_entry(soc, reo_ring_num, ring_desc);
427 		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
428 		status = dp_rx_cookie_check_and_invalidate(ring_desc);
429 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
430 			DP_STATS_INC(soc, rx.err.stale_cookie, 1);
431 			break;
432 		}
433 
434 		rx_desc = (struct dp_rx_desc *)
435 				hal_rx_get_reo_desc_va(ring_desc);
436 		dp_rx_desc_sw_cc_check(soc, rx_buf_cookie, &rx_desc);
437 
438 		status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl,
439 					   ring_desc, rx_desc);
440 		if (QDF_IS_STATUS_ERROR(status)) {
441 			if (qdf_unlikely(rx_desc && rx_desc->nbuf)) {
442 				qdf_assert_always(!rx_desc->unmapped);
443 				dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
444 				dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
445 							    rx_desc->pool_id);
446 				dp_rx_add_to_free_desc_list(
447 					&head[rx_desc->chip_id][rx_desc->pool_id],
448 					&tail[rx_desc->chip_id][rx_desc->pool_id],
449 					rx_desc);
450 			}
451 			continue;
452 		}
453 
454 		/*
455 		 * This is an unlikely scenario where the host is reaping
456 		 * a descriptor which it already reaped just a while ago
457 		 * but has yet to replenish back to HW.
458 		 * In this case the host will dump the last 128 descriptors
459 		 * including the software descriptor rx_desc and assert.
460 		 */
461 
462 		if (qdf_unlikely(!rx_desc->in_use)) {
463 			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
464 			dp_info_rl("Reaping rx_desc not in use!");
465 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
466 						   ring_desc, rx_desc);
467 			continue;
468 		}
469 
470 		status = dp_rx_desc_nbuf_sanity_check(soc, ring_desc, rx_desc);
471 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
472 			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
473 			dp_info_rl("Nbuf sanity check failure!");
474 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
475 						   ring_desc, rx_desc);
476 			rx_desc->in_err_state = 1;
477 			continue;
478 		}
479 
480 		if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
481 			dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
482 			DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
483 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
484 						   ring_desc, rx_desc);
485 		}
486 
487 		pkt_capture_offload =
488 			dp_rx_copy_desc_info_in_nbuf_cb(soc, ring_desc,
489 							rx_desc->nbuf,
490 							reo_ring_num);
491 
492 		if (qdf_unlikely(qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf))) {
493 			/* In dp_rx_sg_create() the end bit should not be set
494 			 * until the last buffer. Since the continuation bit
495 			 * is set, this is not the last buffer.
496 			 */
497 			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 0);
498 
499 			/* The previous msdu has the end bit set, so the
500 			 * current one starts a new MPDU.
501 			 */
502 			if (is_prev_msdu_last) {
503 				/* Get number of entries available in HW ring */
504 				num_entries_avail =
505 				hal_srng_dst_num_valid(hal_soc,
506 						       hal_ring_hdl, 1);
507 
508 				/* For a new MPDU, check whether the complete
509 				 * MPDU can be read by comparing the number of
510 				 * buffers available with the number of buffers
511 				 * needed to reap this MPDU.
512 				 */
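				/* Worked example (hypothetical sizes): with
				 * buf_size = 2048 and rx_pkt_tlv_size = 128,
				 * a 3000 byte MPDU needs
				 * 3000 / (2048 - 128) + 1 = 2 buffers, so at
				 * least 2 entries must still be pending before
				 * the complete scattered MPDU can be reaped.
				 */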
513 				if ((QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) /
514 				     (buf_size -
515 				      soc->rx_pkt_tlv_size) + 1) >
516 				    num_pending) {
517 					DP_STATS_INC(soc,
518 						     rx.msdu_scatter_wait_break,
519 						     1);
520 					dp_rx_cookie_reset_invalid_bit(
521 								     ring_desc);
522 					/* As we are going to break out of the
523 					 * loop because of unavailability of
524 					 * descs to form complete SG, we need to
525 					 * reset the TP in the REO destination
526 					 * ring.
527 					 */
528 					hal_srng_dst_dec_tp(hal_soc,
529 							    hal_ring_hdl);
530 					break;
531 				}
532 				is_prev_msdu_last = false;
533 			}
534 		} else if (qdf_unlikely(!dp_rx_war_peek_msdu_done(soc,
535 								  rx_desc))) {
536 			struct dp_rx_desc *old_rx_desc =
537 					dp_rx_war_store_msdu_done_fail_desc(
538 								soc, rx_desc,
539 								reo_ring_num);
540 			if (qdf_likely(old_rx_desc)) {
541 				rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;
542 				dp_rx_add_to_free_desc_list
543 					(&head[rx_desc->chip_id][rx_desc->pool_id],
544 					 &tail[rx_desc->chip_id][rx_desc->pool_id],
545 					 old_rx_desc);
546 				quota -= 1;
547 				num_pending -= 1;
548 				num_rx_bufs_reaped++;
549 			}
550 			rx_desc->msdu_done_fail = 1;
551 			DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
552 			dp_err("MSDU DONE failure %d",
553 			       soc->stats.rx.err.msdu_done_fail);
554 			dp_rx_msdu_done_fail_event_record(soc, rx_desc,
555 							  rx_desc->nbuf);
556 			continue;
557 		}
558 
559 		if (!is_prev_msdu_last &&
560 		    !(qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)))
561 			is_prev_msdu_last = true;
562 
563 		rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;
564 
565 		/*
566 		 * Keep the unmap after the scattered-msdu wait/break logic
567 		 * above to avoid a double skb unmap.
568 		 */
569 		dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
570 		DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head,
571 				   ebuf_tail, rx_desc);
572 
573 		quota -= 1;
574 		num_pending -= 1;
575 
576 		dp_rx_add_to_free_desc_list
577 			(&head[rx_desc->chip_id][rx_desc->pool_id],
578 			 &tail[rx_desc->chip_id][rx_desc->pool_id], rx_desc);
579 		num_rx_bufs_reaped++;
580 
581 		dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(soc, hal_soc,
582 					       num_pending,
583 					       hal_ring_hdl,
584 					       &last_prefetched_hw_desc,
585 					       &last_prefetched_sw_desc);
586 
587 		/*
588 		 * For the scatter case, allow the break only after the
589 		 * complete msdu has been received.
590 		 */
591 		if (is_prev_msdu_last &&
592 		    dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped,
593 						  max_reap_limit))
594 			break;
595 	}
596 done:
597 	dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);
598 	qdf_dsb();
599 
600 	dp_rx_per_core_stats_update(soc, reo_ring_num, num_rx_bufs_reaped);
601 
602 	for (chip_id = 0; chip_id < WLAN_MAX_MLO_CHIPS; chip_id++) {
603 		for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
604 			/*
605 			 * continue with next mac_id if no pkts were reaped
606 			 * from that pool
607 			 */
608 			if (!rx_bufs_reaped[chip_id][mac_id])
609 				continue;
610 
611 			replenish_soc = dp_rx_replenish_soc_get(soc, chip_id);
612 
613 			dp_rxdma_srng =
614 				&replenish_soc->rx_refill_buf_ring[mac_id];
615 
616 			rx_desc_pool = &replenish_soc->rx_desc_buf[mac_id];
617 
618 			dp_rx_buffers_replenish_simple(replenish_soc, mac_id,
619 					       dp_rxdma_srng,
620 					       rx_desc_pool,
621 					       rx_bufs_reaped[chip_id][mac_id],
622 					       &head[chip_id][mac_id],
623 					       &tail[chip_id][mac_id]);
624 		}
625 	}
626 
627 	/* Peer can be NULL in case of LFR */
628 	if (qdf_likely(txrx_peer))
629 		vdev = NULL;
630 
631 	/*
632 	 * BIG loop where each nbuf is dequeued from the global queue,
633 	 * processed and queued back on a per-vdev basis. These nbufs
634 	 * are sent to the stack when we run out of nbufs or when a
635 	 * newly dequeued nbuf belongs to a different vdev than the
636 	 * previous one.
637 	 */
638 	nbuf = nbuf_head;
639 	while (nbuf) {
640 		next = nbuf->next;
641 		dp_rx_prefetch_nbuf_data_be(nbuf, next);
642 		if (qdf_unlikely(dp_rx_is_raw_frame_dropped(nbuf))) {
643 			nbuf = next;
644 			dp_verbose_debug("drop raw frame");
645 			DP_STATS_INC(soc, rx.err.raw_frm_drop, 1);
646 			continue;
647 		}
648 
649 		rx_tlv_hdr = qdf_nbuf_data(nbuf);
650 		vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
651 		peer_id = dp_rx_get_peer_id_be(nbuf);
652 		dp_rx_set_mpdu_seq_number_be(nbuf, rx_tlv_hdr);
653 
654 		if (dp_rx_is_list_ready(deliver_list_head, vdev, txrx_peer,
655 					peer_id, vdev_id)) {
656 			dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
657 					       deliver_list_head,
658 					       deliver_list_tail);
659 			deliver_list_head = NULL;
660 			deliver_list_tail = NULL;
661 		}
662 
663 		/* Get TID from struct cb->tid_val, save to tid */
664 		tid = qdf_nbuf_get_tid_val(nbuf);
665 		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS)) {
666 			DP_STATS_INC(soc, rx.err.rx_invalid_tid_err, 1);
667 			dp_verbose_debug("drop invalid tid");
668 			dp_rx_nbuf_free(nbuf);
669 			nbuf = next;
670 			continue;
671 		}
672 
673 		if (qdf_unlikely(!txrx_peer)) {
674 			txrx_peer = dp_rx_get_txrx_peer_and_vdev(soc, nbuf,
675 								 peer_id,
676 								 &txrx_ref_handle,
677 								 pkt_capture_offload,
678 								 &vdev,
679 								 &rx_pdev, &dsf,
680 								 &old_tid);
681 			if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
682 				dp_verbose_debug("drop no peer frame");
683 				nbuf = next;
684 				continue;
685 			}
686 			enh_flag = rx_pdev->enhanced_stats_en;
687 		} else if (txrx_peer && txrx_peer->peer_id != peer_id) {
688 			dp_txrx_peer_unref_delete(txrx_ref_handle,
689 						  DP_MOD_ID_RX);
690 
691 			txrx_peer = dp_rx_get_txrx_peer_and_vdev(soc, nbuf,
692 								 peer_id,
693 								 &txrx_ref_handle,
694 								 pkt_capture_offload,
695 								 &vdev,
696 								 &rx_pdev, &dsf,
697 								 &old_tid);
698 			if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
699 				dp_verbose_debug("drop by unmatch peer_id");
700 				nbuf = next;
701 				continue;
702 			}
703 			enh_flag = rx_pdev->enhanced_stats_en;
704 		}
705 
706 		if (txrx_peer) {
707 			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
708 			qdf_dp_trace_set_track(nbuf, QDF_RX);
709 			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
710 			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
711 				QDF_NBUF_RX_PKT_DATA_TRACK;
712 		}
713 
714 		rx_bufs_used++;
715 
716 		/* MLD Link Peer Statistics support */
717 		if (txrx_peer->is_mld_peer && rx_pdev->link_peer_stats) {
718 			link_id = dp_rx_get_stats_arr_idx_from_link_id(
719 								nbuf,
720 								txrx_peer);
721 		} else {
722 			link_id = 0;
723 		}
724 
725 		dp_rx_set_nbuf_band(nbuf, txrx_peer, link_id);
726 
727 		/* when hlos tid override is enabled, save tid in
728 		 * skb->priority
729 		 */
730 		if (qdf_unlikely(vdev->skip_sw_tid_classification &
731 					DP_TXRX_HLOS_TID_OVERRIDE_ENABLED))
732 			qdf_nbuf_set_priority(nbuf, tid);
733 
734 		DP_RX_TID_SAVE(nbuf, tid);
735 		if (qdf_unlikely(dsf) || qdf_unlikely(peer_ext_stats) ||
736 		    dp_rx_pkt_tracepoints_enabled())
737 			qdf_nbuf_set_timestamp(nbuf);
738 
739 		if (qdf_likely(old_tid != tid)) {
740 			tid_stats =
741 		&rx_pdev->stats.tid_stats.tid_rx_stats[reo_ring_num][tid];
742 			old_tid = tid;
743 		}
744 
745 		/*
746 		 * Check if DMA completed -- msdu_done is the last bit
747 		 * to be written
748 		 */
749 		if (qdf_unlikely(!qdf_nbuf_is_rx_chfrag_cont(nbuf) &&
750 				 !hal_rx_tlv_msdu_done_get_be(rx_tlv_hdr))) {
751 			DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
752 			dp_err("MSDU DONE failure %d",
753 			       soc->stats.rx.err.msdu_done_fail);
754 			hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
755 					     QDF_TRACE_LEVEL_INFO);
756 			dp_rx_msdu_done_fail_event_record(soc, NULL, nbuf);
757 			tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
758 			dp_rx_nbuf_free(nbuf);
759 			qdf_assert(0);
760 			nbuf = next;
761 			continue;
762 		}
763 
764 		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
765 		/*
766 		 * First IF condition:
767 		 * 802.11 Fragmented pkts are reinjected to REO
768 		 * HW block as SG pkts and for these pkts we only
769 		 * need to pull the RX TLVS header length.
770 		 * Second IF condition:
771 		 * The below condition happens when an MSDU is spread
772 		 * across multiple buffers. This can happen in two cases
773 		 * 1. The nbuf size is smaller than the received msdu.
774 		 *    ex: we have set the nbuf size to 2048 during
775 		 *        nbuf_alloc, but we received an msdu which is
776 		 *        2304 bytes in size, so this msdu is spread
777 		 *        across 2 nbufs.
778 		 *
779 		 * 2. AMSDUs when RAW mode is enabled.
780 		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
781 		 *        across 1st nbuf and 2nd nbuf and last MSDU is
782 		 *        spread across 2nd nbuf and 3rd nbuf.
783 		 *
784 		 * for these scenarios let us create a skb frag_list and
785 		 * append these buffers till the last MSDU of the AMSDU
786 		 * Third condition:
787 		 * This is the most likely case: we receive 802.3 pkts
788 		 * decapsulated by HW, and here we need to set the pkt length.
789 		 */
790 		if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
791 			bool is_mcbc, is_sa_vld, is_da_vld;
792 
793 			is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
794 								 rx_tlv_hdr);
795 			is_sa_vld =
796 				hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
797 								rx_tlv_hdr);
798 			is_da_vld =
799 				hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
800 								rx_tlv_hdr);
801 
802 			qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
803 			qdf_nbuf_set_da_valid(nbuf, is_da_vld);
804 			qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);
805 
806 			qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
807 		} else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
808 			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
809 			nbuf = dp_rx_sg_create(soc, nbuf);
810 			next = nbuf->next;
811 
812 			if (qdf_nbuf_is_raw_frame(nbuf)) {
813 				DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
814 				DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
815 							      rx.raw, 1,
816 							      msdu_len,
817 							      link_id);
818 			} else {
819 				DP_STATS_INC(soc, rx.err.scatter_msdu, 1);
820 
821 				if (!dp_rx_is_sg_supported()) {
822 					dp_rx_nbuf_free(nbuf);
823 					dp_info_rl("sg msdu len %d, dropped",
824 						   msdu_len);
825 					nbuf = next;
826 					continue;
827 				}
828 			}
829 		} else {
830 			l3_pad = hal_rx_get_l3_pad_bytes_be(nbuf, rx_tlv_hdr);
831 			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
832 			pkt_len = msdu_len + l3_pad + soc->rx_pkt_tlv_size;
833 
834 			qdf_nbuf_set_pktlen(nbuf, pkt_len);
835 			dp_rx_skip_tlvs(soc, nbuf, l3_pad);
836 		}
837 
838 		dp_rx_send_pktlog(soc, rx_pdev, nbuf, QDF_TX_RX_STATUS_OK);
839 
840 		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
841 			dp_rx_err("%pK: Policy Check Drop pkt", soc);
842 			DP_PEER_PER_PKT_STATS_INC(txrx_peer,
843 						  rx.policy_check_drop,
844 						  1, link_id);
845 			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
846 			/* Drop & free packet */
847 			dp_rx_nbuf_free(nbuf);
848 			/* Statistics */
849 			nbuf = next;
850 			continue;
851 		}
852 
853 		/*
854 		 * Drop non-EAPOL frames from unauthorized peer.
855 		 */
856 		if (qdf_likely(txrx_peer) &&
857 		    qdf_unlikely(!txrx_peer->authorize) &&
858 		    !qdf_nbuf_is_raw_frame(nbuf)) {
859 			bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
860 					qdf_nbuf_is_ipv4_wapi_pkt(nbuf);
861 
862 			if (!is_eapol) {
863 				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
864 							  rx.peer_unauth_rx_pkt_drop,
865 							  1, link_id);
866 				dp_verbose_debug("drop by unauthorized peer");
867 				dp_rx_nbuf_free(nbuf);
868 				nbuf = next;
869 				continue;
870 			}
871 		}
872 
873 		dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);
874 		dp_rx_update_flow_info(nbuf, rx_tlv_hdr);
875 
876 		if (qdf_unlikely(!rx_pdev->rx_fast_flag)) {
877 			/*
878 			 * Process frame for multipass phase processing.
879 			 */
880 			if (qdf_unlikely(vdev->multipass_en)) {
881 				if (dp_rx_multipass_process(txrx_peer, nbuf,
882 							    tid) == false) {
883 					DP_PEER_PER_PKT_STATS_INC
884 						(txrx_peer,
885 						 rx.multipass_rx_pkt_drop,
886 						 1, link_id);
887 					dp_verbose_debug("drop multi pass");
888 					dp_rx_nbuf_free(nbuf);
889 					nbuf = next;
890 					continue;
891 				}
892 			}
893 			if (qdf_unlikely(txrx_peer &&
894 					 (txrx_peer->nawds_enabled) &&
895 					 (qdf_nbuf_is_da_mcbc(nbuf)) &&
896 					 (hal_rx_get_mpdu_mac_ad4_valid_be
897 						(rx_tlv_hdr) == false))) {
898 				tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
899 				DP_PEER_PER_PKT_STATS_INC(txrx_peer,
900 							  rx.nawds_mcast_drop,
901 							  1, link_id);
902 				dp_verbose_debug("drop nawds");
903 				dp_rx_nbuf_free(nbuf);
904 				nbuf = next;
905 				continue;
906 			}
907 
908 			/* Update the protocol tag in SKB based on CCE metadata
909 			 */
910 			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
911 						  reo_ring_num, false, true);
912 
913 			/* Update the flow tag in SKB based on FSE metadata */
914 			dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr,
915 					      true);
916 
917 			if (qdf_unlikely(vdev->mesh_vdev)) {
918 				if (dp_rx_filter_mesh_packets(vdev, nbuf,
919 							      rx_tlv_hdr)
920 						== QDF_STATUS_SUCCESS) {
921 					dp_rx_info("%pK: mesh pkt filtered",
922 						   soc);
923 					tid_stats->fail_cnt[MESH_FILTER_DROP]++;
924 					DP_STATS_INC(vdev->pdev,
925 						     dropped.mesh_filter, 1);
926 
927 					dp_rx_nbuf_free(nbuf);
928 					nbuf = next;
929 					continue;
930 				}
931 				dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr,
932 						      txrx_peer);
933 			}
934 		}
935 
936 		if (qdf_likely(vdev->rx_decap_type ==
937 			       htt_cmn_pkt_type_ethernet) &&
938 		    qdf_likely(!vdev->mesh_vdev)) {
939 			dp_rx_wds_learn(soc, vdev,
940 					rx_tlv_hdr,
941 					txrx_peer,
942 					nbuf);
943 		}
944 
945 		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
946 					reo_ring_num, tid_stats, link_id);
947 
948 		if (qdf_likely(vdev->rx_decap_type ==
949 			       htt_cmn_pkt_type_ethernet) &&
950 		    qdf_likely(!vdev->mesh_vdev)) {
951 			/* Intrabss-fwd */
952 			if (dp_rx_check_ap_bridge(vdev))
953 				if (dp_rx_intrabss_fwd_be(soc, txrx_peer,
954 							  rx_tlv_hdr,
955 							  nbuf,
956 							  link_id)) {
957 					nbuf = next;
958 					tid_stats->intrabss_cnt++;
959 					continue; /* Get next desc */
960 				}
961 		}
962 
963 		dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt);
964 
965 		dp_rx_mark_first_packet_after_wow_wakeup(vdev->pdev, rx_tlv_hdr,
966 							 nbuf);
967 
968 		dp_rx_update_stats(soc, nbuf);
969 
970 		dp_pkt_add_timestamp(txrx_peer->vdev, QDF_PKT_RX_DRIVER_ENTRY,
971 				     current_time, nbuf);
972 
973 		DP_RX_LIST_APPEND(deliver_list_head,
974 				  deliver_list_tail,
975 				  nbuf);
976 
977 		DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1,
978 					  QDF_NBUF_CB_RX_PKT_LEN(nbuf),
979 					  enh_flag);
980 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
981 					      rx.rx_success, 1,
982 					      QDF_NBUF_CB_RX_PKT_LEN(nbuf),
983 					      link_id);
984 
985 		if (qdf_unlikely(txrx_peer->in_twt))
986 			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
987 						      rx.to_stack_twt, 1,
988 						      QDF_NBUF_CB_RX_PKT_LEN(nbuf),
989 						      link_id);
990 
991 		tid_stats->delivered_to_stack++;
992 		nbuf = next;
993 	}
994 
995 	DP_RX_DELIVER_TO_STACK(soc, vdev, txrx_peer, peer_id,
996 			       pkt_capture_offload,
997 			       deliver_list_head,
998 			       deliver_list_tail);
999 
1000 	if (qdf_likely(txrx_peer))
1001 		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
1002 
1003 	/*
1004 	 * If we are processing in near-full condition, there are 3 scenarios:
1005 	 * 1) Ring entries have reached the critical state
1006 	 * 2) Ring entries are still near the high threshold
1007 	 * 3) Ring entries are below the safe level
1008 	 *
1009 	 * One more loop will move the state to normal processing and yield
1010 	 */
1011 	if (ring_near_full && quota)
1012 		goto more_data;
1013 
1014 	if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
1015 		if (quota) {
1016 			num_pending =
1017 				dp_rx_srng_get_num_pending(hal_soc,
1018 							   hal_ring_hdl,
1019 							   num_entries,
1020 							   &near_full);
1021 			if (num_pending) {
1022 				DP_STATS_INC(soc, rx.hp_oos2, 1);
1023 
1024 				if (!hif_exec_should_yield(scn, intr_id))
1025 					goto more_data;
1026 
1027 				if (qdf_unlikely(near_full)) {
1028 					DP_STATS_INC(soc, rx.near_full, 1);
1029 					goto more_data;
1030 				}
1031 			}
1032 		}
1033 
1034 		if (vdev && vdev->osif_fisa_flush)
1035 			vdev->osif_fisa_flush(soc, reo_ring_num);
1036 
1037 		if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
1038 			vdev->osif_gro_flush(vdev->osif_vdev,
1039 					     reo_ring_num);
1040 		}
1041 	}
1042 
1043 	/* Update histogram statistics by looping through pdevs */
1044 	DP_RX_HIST_STATS_PER_PDEV();
1045 
1046 	return rx_bufs_used; /* Assume no scale factor for now */
1047 }
1048 
1049 #ifdef RX_DESC_MULTI_PAGE_ALLOC
1050 /**
1051  * dp_rx_desc_pool_init_be_cc() - initialize RX desc pool for cookie conversion
1052  * @soc: Handle to DP Soc structure
1053  * @rx_desc_pool: Rx descriptor pool handler
1054  * @pool_id: Rx descriptor pool ID
1055  *
1056  * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
1057  */
1058 static QDF_STATUS
1059 dp_rx_desc_pool_init_be_cc(struct dp_soc *soc,
1060 			   struct rx_desc_pool *rx_desc_pool,
1061 			   uint32_t pool_id)
1062 {
1063 	struct dp_hw_cookie_conversion_t *cc_ctx;
1064 	struct dp_soc_be *be_soc;
1065 	union dp_rx_desc_list_elem_t *rx_desc_elem;
1066 	struct dp_spt_page_desc *page_desc;
1067 	uint32_t ppt_idx = 0;
1068 	uint32_t avail_entry_index = 0;
1069 
1070 	if (!rx_desc_pool->pool_size) {
1071 		dp_err("desc_num 0 !!");
1072 		return QDF_STATUS_E_FAILURE;
1073 	}
1074 
1075 	be_soc = dp_get_be_soc_from_dp_soc(soc);
1076 	cc_ctx  = &be_soc->rx_cc_ctx[pool_id];
1077 
1078 	page_desc = &cc_ctx->page_desc_base[0];
1079 	rx_desc_elem = rx_desc_pool->freelist;
1080 	while (rx_desc_elem) {
1081 		if (avail_entry_index == 0) {
1082 			if (ppt_idx >= cc_ctx->total_page_num) {
1083 				dp_alert("insufficient secondary page tables");
1084 				qdf_assert_always(0);
1085 			}
1086 			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
1087 		}
1088 
1089 		/* Put each RX Desc VA into the SPT pages and
1090 		 * get the corresponding ID
1091 		 */
1092 		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
1093 					 avail_entry_index,
1094 					 &rx_desc_elem->rx_desc);
1095 		rx_desc_elem->rx_desc.cookie =
1096 			dp_cc_desc_id_generate(page_desc->ppt_index,
1097 					       avail_entry_index);
1098 		rx_desc_elem->rx_desc.chip_id = dp_mlo_get_chip_id(soc);
1099 		rx_desc_elem->rx_desc.pool_id = pool_id;
1100 		rx_desc_elem->rx_desc.in_use = 0;
1101 		rx_desc_elem = rx_desc_elem->next;
1102 
1103 		avail_entry_index = (avail_entry_index + 1) &
1104 					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
1105 	}
1106 
1107 	return QDF_STATUS_SUCCESS;
1108 }
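/*
 * Illustrative note on the cookie produced above (macro names and field
 * widths here are hypothetical): dp_cc_desc_id_generate() packs the
 * secondary page table index and the entry index within that page into a
 * single value, conceptually
 *
 *   cookie = (ppt_index << ENTRY_INDEX_BITS) | avail_entry_index;
 *
 * which lets dp_cc_desc_find() translate the cookie back to the rx_desc
 * VA stored in the SPT page.
 */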
1109 #else
1110 static QDF_STATUS
1111 dp_rx_desc_pool_init_be_cc(struct dp_soc *soc,
1112 			   struct rx_desc_pool *rx_desc_pool,
1113 			   uint32_t pool_id)
1114 {
1115 	struct dp_hw_cookie_conversion_t *cc_ctx;
1116 	struct dp_soc_be *be_soc;
1117 	struct dp_spt_page_desc *page_desc;
1118 	uint32_t ppt_idx = 0;
1119 	uint32_t avail_entry_index = 0;
1120 	int i = 0;
1121 
1122 	if (!rx_desc_pool->pool_size) {
1123 		dp_err("desc_num 0 !!");
1124 		return QDF_STATUS_E_FAILURE;
1125 	}
1126 
1127 	be_soc = dp_get_be_soc_from_dp_soc(soc);
1128 	cc_ctx  = &be_soc->rx_cc_ctx[pool_id];
1129 
1130 	page_desc = &cc_ctx->page_desc_base[0];
1131 	for (i = 0; i <= rx_desc_pool->pool_size - 1; i++) {
1132 		if (i == rx_desc_pool->pool_size - 1)
1133 			rx_desc_pool->array[i].next = NULL;
1134 		else
1135 			rx_desc_pool->array[i].next =
1136 				&rx_desc_pool->array[i + 1];
1137 
1138 		if (avail_entry_index == 0) {
1139 			if (ppt_idx >= cc_ctx->total_page_num) {
1140 				dp_alert("insufficient secondary page tables");
1141 				qdf_assert_always(0);
1142 			}
1143 			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
1144 		}
1145 
1146 		/* Put each RX Desc VA into the SPT pages and
1147 		 * get the corresponding ID
1148 		 */
1149 		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
1150 					 avail_entry_index,
1151 					 &rx_desc_pool->array[i].rx_desc);
1152 		rx_desc_pool->array[i].rx_desc.cookie =
1153 			dp_cc_desc_id_generate(page_desc->ppt_index,
1154 					       avail_entry_index);
1155 		rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
1156 		rx_desc_pool->array[i].rx_desc.in_use = 0;
1157 		rx_desc_pool->array[i].rx_desc.chip_id =
1158 					dp_mlo_get_chip_id(soc);
1159 
1160 		avail_entry_index = (avail_entry_index + 1) &
1161 					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
1162 	}
1163 	return QDF_STATUS_SUCCESS;
1164 }
1165 #endif
1166 
1167 static void
1168 dp_rx_desc_pool_deinit_be_cc(struct dp_soc *soc,
1169 			     struct rx_desc_pool *rx_desc_pool,
1170 			     uint32_t pool_id)
1171 {
1172 	struct dp_spt_page_desc *page_desc;
1173 	struct dp_soc_be *be_soc;
1174 	int i = 0;
1175 	struct dp_hw_cookie_conversion_t *cc_ctx;
1176 
1177 	be_soc = dp_get_be_soc_from_dp_soc(soc);
1178 	cc_ctx  = &be_soc->rx_cc_ctx[pool_id];
1179 
1180 	for (i = 0; i < cc_ctx->total_page_num; i++) {
1181 		page_desc = &cc_ctx->page_desc_base[i];
1182 		qdf_mem_zero(page_desc->page_v_addr, qdf_page_size);
1183 	}
1184 }
1185 
1186 QDF_STATUS dp_rx_desc_pool_init_be(struct dp_soc *soc,
1187 				   struct rx_desc_pool *rx_desc_pool,
1188 				   uint32_t pool_id)
1189 {
1190 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1191 
1192 	/* Only the regular RX buffer desc pool uses HW cookie conversion */
1193 	if (rx_desc_pool->desc_type == QDF_DP_RX_DESC_BUF_TYPE) {
1194 		dp_info("rx_desc_buf pool init");
1195 		status = dp_rx_desc_pool_init_be_cc(soc,
1196 						    rx_desc_pool,
1197 						    pool_id);
1198 	} else {
1199 		dp_info("non_rx_desc_buf_pool init");
1200 		status = dp_rx_desc_pool_init_generic(soc, rx_desc_pool,
1201 						      pool_id);
1202 	}
1203 
1204 	return status;
1205 }
1206 
1207 void dp_rx_desc_pool_deinit_be(struct dp_soc *soc,
1208 			       struct rx_desc_pool *rx_desc_pool,
1209 			       uint32_t pool_id)
1210 {
1211 	if (rx_desc_pool->desc_type == QDF_DP_RX_DESC_BUF_TYPE)
1212 		dp_rx_desc_pool_deinit_be_cc(soc, rx_desc_pool, pool_id);
1213 }
1214 
1215 #ifdef DP_FEATURE_HW_COOKIE_CONVERSION
1216 #ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
1217 QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
1218 					       void *ring_desc,
1219 					       struct dp_rx_desc **r_rx_desc)
1220 {
1221 	if (hal_rx_wbm_get_cookie_convert_done(ring_desc)) {
1222 		/* HW cookie conversion done */
1223 		*r_rx_desc = (struct dp_rx_desc *)
1224 				hal_rx_wbm_get_desc_va(ring_desc);
1225 	} else {
1226 		/* SW does cookie conversion */
1227 		uint32_t cookie = HAL_RX_BUF_COOKIE_GET(ring_desc);
1228 
1229 		*r_rx_desc = (struct dp_rx_desc *)
1230 				dp_cc_desc_find(soc, cookie);
1231 	}
1232 
1233 	return QDF_STATUS_SUCCESS;
1234 }
1235 #else
1236 QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
1237 					       void *ring_desc,
1238 					       struct dp_rx_desc **r_rx_desc)
1239 {
1240 	 *r_rx_desc = (struct dp_rx_desc *)
1241 			hal_rx_wbm_get_desc_va(ring_desc);
1242 
1243 	return QDF_STATUS_SUCCESS;
1244 }
1245 #endif /* DP_HW_COOKIE_CONVERT_EXCEPTION */
1246 struct dp_rx_desc *dp_rx_desc_ppeds_cookie_2_va(struct dp_soc *soc,
1247 						unsigned long cookie)
1248 {
1249 	return (struct dp_rx_desc *)cookie;
1250 }
1251 
1252 #else
1253 struct dp_rx_desc *dp_rx_desc_ppeds_cookie_2_va(struct dp_soc *soc,
1254 						unsigned long cookie)
1255 {
1256 	if (!cookie)
1257 		return NULL;
1258 
1259 	return (struct dp_rx_desc *)dp_cc_desc_find(soc, cookie);
1260 }
1261 
1262 QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
1263 					       void *ring_desc,
1264 					       struct dp_rx_desc **r_rx_desc)
1265 {
1266 	/* SW does cookie conversion */
1267 	uint32_t cookie = HAL_RX_BUF_COOKIE_GET(ring_desc);
1268 
1269 	*r_rx_desc = (struct dp_rx_desc *)
1270 			dp_cc_desc_find(soc, cookie);
1271 
1272 	return QDF_STATUS_SUCCESS;
1273 }
1274 #endif /* DP_FEATURE_HW_COOKIE_CONVERSION */
1275 
1276 struct dp_rx_desc *dp_rx_desc_cookie_2_va_be(struct dp_soc *soc,
1277 					     uint32_t cookie)
1278 {
1279 	return (struct dp_rx_desc *)dp_cc_desc_find(soc, cookie);
1280 }
1281 
1282 #if defined(WLAN_FEATURE_11BE_MLO)
1283 #if defined(WLAN_MLO_MULTI_CHIP) && defined(WLAN_MCAST_MLO)
1284 #define DP_RANDOM_MAC_ID_BIT_MASK	0xC0
1285 #define DP_RANDOM_MAC_OFFSET	1
1286 #define DP_MAC_LOCAL_ADMBIT_MASK	0x2
1287 #define DP_MAC_LOCAL_ADMBIT_OFFSET	0
1288 static inline void dp_rx_dummy_src_mac(struct dp_vdev *vdev,
1289 				       qdf_nbuf_t nbuf)
1290 {
1291 	qdf_ether_header_t *eh =
1292 			(qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1293 
1294 	eh->ether_shost[DP_MAC_LOCAL_ADMBIT_OFFSET] =
1295 				eh->ether_shost[DP_MAC_LOCAL_ADMBIT_OFFSET] |
1296 				DP_MAC_LOCAL_ADMBIT_MASK;
1297 }
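/*
 * Example of the transformation above (address is hypothetical): a source
 * MAC of 00:aa:bb:cc:dd:ee becomes 02:aa:bb:cc:dd:ee, i.e. the locally
 * administered bit of the first octet is set so the dummy SA can be told
 * apart from the real station address.
 */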
1298 
1299 #ifdef QCA_SUPPORT_WDS_EXTENDED
1300 static inline bool dp_rx_mlo_igmp_wds_ext_handler(struct dp_txrx_peer *peer)
1301 {
1302 	return qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT, &peer->wds_ext.init);
1303 }
1304 #else
1305 static inline bool dp_rx_mlo_igmp_wds_ext_handler(struct dp_txrx_peer *peer)
1306 {
1307 	return false;
1308 }
1309 #endif
1310 
1311 #ifdef EXT_HYBRID_MLO_MODE
1312 static inline
1313 bool dp_rx_check_ext_hybrid_mode(struct dp_soc *soc, struct dp_vdev *vdev)
1314 {
1315 	return ((DP_MLD_MODE_HYBRID_NONBOND == soc->mld_mode_ap) &&
1316 		(wlan_op_mode_ap == vdev->opmode));
1317 }
1318 #else
1319 static inline
1320 bool dp_rx_check_ext_hybrid_mode(struct dp_soc *soc, struct dp_vdev *vdev)
1321 {
1322 	return false;
1323 }
1324 #endif
1325 
1326 bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
1327 			    struct dp_vdev *vdev,
1328 			    struct dp_txrx_peer *peer,
1329 			    qdf_nbuf_t nbuf,
1330 			    uint8_t link_id)
1331 {
1332 	qdf_nbuf_t nbuf_copy;
1333 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
1334 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
1335 	struct cdp_tid_rx_stats *tid_stats = &peer->vdev->pdev->stats.
1336 					tid_stats.tid_rx_wbm_stats[0][tid];
1337 
1338 	if (!(qdf_nbuf_is_ipv4_igmp_pkt(nbuf) ||
1339 	      qdf_nbuf_is_ipv6_igmp_pkt(nbuf)))
1340 		return false;
1341 
1342 	if (qdf_unlikely(vdev->multipass_en)) {
1343 		if (dp_rx_multipass_process(peer, nbuf, tid) == false) {
1344 			DP_PEER_PER_PKT_STATS_INC(peer,
1345 						  rx.multipass_rx_pkt_drop,
1346 						  1, link_id);
1347 			return false;
1348 		}
1349 	}
1350 
1351 	if (!peer->bss_peer) {
1352 		if (dp_rx_intrabss_mcbc_fwd(soc, peer, NULL, nbuf,
1353 					    tid_stats, link_id))
1354 			dp_rx_err("forwarding failed");
1355 	}
1356 
1357 	qdf_nbuf_set_next(nbuf, NULL);
1358 
1359 	/* REO sends IGMP to driver only if AP is operating in hybrid
1360 	 *  mld mode.
1361 	 */
1362 
1363 	if (qdf_unlikely(dp_rx_mlo_igmp_wds_ext_handler(peer))) {
1364 		/* send the IGMP to the netdev corresponding to the interface
1365 		 * it was received on
1366 		 */
1367 		goto send_pkt;
1368 	}
1369 
1370 	if (dp_rx_check_ext_hybrid_mode(soc, vdev)) {
1371 		/* send the IGMP to the netdev corresponding to the interface
1372 		 * it was received on
1373 		 */
1374 		goto send_pkt;
1375 	}
1376 
1377 	/*
1378 	 * In the case of ME5/ME6, Backhaul WDS for a mld peer, NAWDS,
1379 	 * legacy non-mlo AP vdev & non-AP vdev (which is very unlikely),
1380 	 * send the igmp pkt on the same link where it was received, as
1381 	 * these features will use peer-based tcl metadata.
1382 	 */
1383 	if (vdev->mcast_enhancement_en ||
1384 	    peer->is_mld_peer ||
1385 	    peer->nawds_enabled ||
1386 	    !vdev->mlo_vdev ||
1387 	    qdf_unlikely(wlan_op_mode_ap != vdev->opmode)) {
1388 		/* send the IGMP to the netdev corresponding to the interface
1389 		 * it was received on
1390 		 */
1391 		goto send_pkt;
1392 	}
1393 
1394 	/* Reaching here means a legacy non-wds sta is connected
1395 	 * to a hybrid mld ap, so send a clone of the IGMP packet
1396 	 * on the interface where it was received.
1397 	 */
1398 	nbuf_copy = qdf_nbuf_copy(nbuf);
1399 	if (qdf_likely(nbuf_copy))
1400 		dp_rx_deliver_to_stack(soc, vdev, peer, nbuf_copy, NULL);
1401 
1402 	dp_rx_dummy_src_mac(vdev, nbuf);
1403 	/* Set the ml peer valid bit in skb peer metadata, so that osif
1404 	 * can deliver the SA mangled IGMP packet to mld netdev.
1405 	 */
1406 	QDF_NBUF_CB_RX_PEER_ID(nbuf) |= CDP_RX_ML_PEER_VALID_MASK;
1407 	/* Deliver the original IGMP with dummy src on the mld netdev */
1408 send_pkt:
1409 	dp_rx_deliver_to_stack(be_vdev->vdev.pdev->soc,
1410 			       &be_vdev->vdev,
1411 			       peer,
1412 			       nbuf,
1413 			       NULL);
1414 	return true;
1415 }
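/*
 * Rough summary of the delivery decisions above (a reading aid, not new
 * behaviour): WDS-ext peers, ext-hybrid-mode AP vdevs and the
 * ME/MLD/NAWDS/legacy cases deliver the IGMP only on the link vdev it
 * arrived on; otherwise a copy is delivered on the link vdev and the
 * SA-mangled original, with the ML-peer-valid bit set in the peer
 * metadata, is delivered for the mld netdev.
 */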
1416 #else
1417 bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
1418 			    struct dp_vdev *vdev,
1419 			    struct dp_txrx_peer *peer,
1420 			    qdf_nbuf_t nbuf,
1421 			    uint8_t link_id)
1422 {
1423 	return false;
1424 }
1425 #endif
1426 #endif
1427 
1428 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1429 uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
1430 			  hal_ring_handle_t hal_ring_hdl,
1431 			  uint8_t reo_ring_num,
1432 			  uint32_t quota)
1433 {
1434 	struct dp_soc *soc = int_ctx->soc;
1435 	struct dp_srng *rx_ring = &soc->reo_dest_ring[reo_ring_num];
1436 	uint32_t work_done = 0;
1437 
1438 	if (dp_srng_get_near_full_level(soc, rx_ring) <
1439 			DP_SRNG_THRESH_NEAR_FULL)
1440 		return 0;
1441 
1442 	qdf_atomic_set(&rx_ring->near_full, 1);
1443 	work_done++;
1444 
1445 	return work_done;
1446 }
1447 #endif
1448 
1449 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1450 #ifdef WLAN_FEATURE_11BE_MLO
1451 /**
1452  * dp_rx_intrabss_fwd_mlo_allow() - check if MLO forwarding is allowed
1453  * @ta_peer: transmitter peer handle
1454  * @da_peer: destination peer handle
1455  *
1456  * Return: true - MLO forwarding case, false: not
1457  */
1458 static inline bool
1459 dp_rx_intrabss_fwd_mlo_allow(struct dp_txrx_peer *ta_peer,
1460 			     struct dp_txrx_peer *da_peer)
1461 {
1462 	/* TA peer and DA peer's vdev should be partner MLO vdevs */
1463 	if (dp_peer_find_mac_addr_cmp(&ta_peer->vdev->mld_mac_addr,
1464 				      &da_peer->vdev->mld_mac_addr))
1465 		return false;
1466 
1467 	return true;
1468 }
1469 #else
1470 static inline bool
1471 dp_rx_intrabss_fwd_mlo_allow(struct dp_txrx_peer *ta_peer,
1472 			     struct dp_txrx_peer *da_peer)
1473 {
1474 	return false;
1475 }
1476 #endif
1477 
1478 #ifdef INTRA_BSS_FWD_OFFLOAD
1479 /**
1480  * dp_rx_intrabss_ucast_check_be() - Check if intrabss is allowed
1481  *				     for unicast frame
1482  * @nbuf: RX packet buffer
1483  * @ta_peer: transmitter DP peer handle
1484  * @rx_tlv_hdr: Rx TLV header
1485  * @msdu_metadata: MSDU meta data info
1486  * @params: params to be filled in
1487  *
1488  * Return: true - intrabss allowed
1489  *	   false - not allow
1490  */
1491 static bool
1492 dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
1493 			      struct dp_txrx_peer *ta_peer,
1494 			      uint8_t *rx_tlv_hdr,
1495 			      struct hal_rx_msdu_metadata *msdu_metadata,
1496 			      struct dp_be_intrabss_params *params)
1497 {
1498 	uint8_t dest_chip_id, dest_chip_pmac_id;
1499 	struct dp_vdev_be *be_vdev =
1500 		dp_get_be_vdev_from_dp_vdev(ta_peer->vdev);
1501 	struct dp_soc_be *be_soc =
1502 		dp_get_be_soc_from_dp_soc(params->dest_soc);
1503 	uint16_t da_peer_id;
1504 	struct dp_peer *da_peer = NULL;
1505 
1506 	if (!qdf_nbuf_is_intra_bss(nbuf))
1507 		return false;
1508 
1509 	hal_rx_tlv_get_dest_chip_pmac_id(rx_tlv_hdr,
1510 					 &dest_chip_id,
1511 					 &dest_chip_pmac_id);
1512 
1513 	if (dp_assert_always_internal_stat(
1514 				(dest_chip_id <= (DP_MLO_MAX_DEST_CHIP_ID - 1)),
1515 				&be_soc->soc, rx.err.intra_bss_bad_chipid))
1516 		return false;
1517 
1518 	params->dest_soc =
1519 		dp_mlo_get_soc_ref_by_chip_id(be_soc->ml_ctxt,
1520 					      dest_chip_id);
1521 	if (!params->dest_soc)
1522 		return false;
1523 
1524 	da_peer_id = HAL_RX_PEER_ID_GET(msdu_metadata);
1525 
1526 	da_peer = dp_peer_get_tgt_peer_by_id(params->dest_soc, da_peer_id,
1527 					     DP_MOD_ID_RX);
1528 	if (da_peer) {
1529 		if (da_peer->bss_peer || (da_peer->txrx_peer == ta_peer)) {
1530 			dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
1531 			return false;
1532 		}
1533 		dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
1534 	}
1535 
1536 	if (!be_vdev->mlo_dev_ctxt) {
1537 		params->tx_vdev_id = ta_peer->vdev->vdev_id;
1538 		return true;
1539 	}
1540 
1541 	if (dest_chip_id == be_soc->mlo_chip_id) {
1542 		if (dest_chip_pmac_id == ta_peer->vdev->pdev->pdev_id)
1543 			params->tx_vdev_id = ta_peer->vdev->vdev_id;
1544 		else
1545 			params->tx_vdev_id =
1546 				be_vdev->mlo_dev_ctxt->vdev_list[dest_chip_id]
1547 							  [dest_chip_pmac_id];
1548 		return true;
1549 	}
1550 
1551 	params->tx_vdev_id =
1552 		be_vdev->mlo_dev_ctxt->vdev_list[dest_chip_id]
1553 						[dest_chip_pmac_id];
1554 
1555 	return true;
1556 }
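/*
 * Illustrative note on the lookup above (index values are hypothetical):
 * mlo_dev_ctxt->vdev_list is indexed as [chip_id][pmac_id], so for
 * dest_chip_id = 1 and dest_chip_pmac_id = 0 the forwarding target is the
 * partner vdev registered for pmac 0 on chip 1; only when the destination
 * maps back onto the TA peer's own pdev is tx_vdev_id taken directly from
 * ta_peer->vdev.
 */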
1557 #else
1558 #ifdef WLAN_MLO_MULTI_CHIP
1559 static bool
1560 dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
1561 			      struct dp_txrx_peer *ta_peer,
1562 			      uint8_t *rx_tlv_hdr,
1563 			      struct hal_rx_msdu_metadata *msdu_metadata,
1564 			      struct dp_be_intrabss_params *params)
1565 {
1566 	uint16_t da_peer_id;
1567 	struct dp_txrx_peer *da_peer;
1568 	bool ret = false;
1569 	uint8_t dest_chip_id;
1570 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1571 	struct dp_vdev_be *be_vdev =
1572 		dp_get_be_vdev_from_dp_vdev(ta_peer->vdev);
1573 	struct dp_soc_be *be_soc =
1574 		dp_get_be_soc_from_dp_soc(params->dest_soc);
1575 
1576 	if (!(qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf)))
1577 		return false;
1578 
1579 	dest_chip_id = HAL_RX_DEST_CHIP_ID_GET(msdu_metadata);
1580 	if (dp_assert_always_internal_stat(
1581 				(dest_chip_id <= (DP_MLO_MAX_DEST_CHIP_ID - 1)),
1582 				&be_soc->soc, rx.err.intra_bss_bad_chipid))
1583 		return false;
1584 
1585 	da_peer_id = HAL_RX_PEER_ID_GET(msdu_metadata);
1586 
1587 	/* use dest chip id when TA is MLD peer and DA is legacy */
1588 	if (be_soc->mlo_enabled &&
1589 	    ta_peer->mld_peer &&
1590 	    !(da_peer_id & HAL_RX_DA_IDX_ML_PEER_MASK)) {
1591 		/* validate chip_id, get a ref, and re-assign soc */
1592 		params->dest_soc =
1593 			dp_mlo_get_soc_ref_by_chip_id(be_soc->ml_ctxt,
1594 						      dest_chip_id);
1595 		if (!params->dest_soc)
1596 			return false;
1597 
1598 		da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc,
1599 						     da_peer_id,
1600 						     &txrx_ref_handle,
1601 						     DP_MOD_ID_RX);
1602 		if (!da_peer)
1603 			return false;
1604 
1605 	} else {
1606 		da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc,
1607 						     da_peer_id,
1608 						     &txrx_ref_handle,
1609 						     DP_MOD_ID_RX);
1610 		if (!da_peer)
1611 			return false;
1612 
1613 		params->dest_soc = da_peer->vdev->pdev->soc;
1614 		if (!params->dest_soc)
1615 			goto rel_da_peer;
1616 
1617 	}
1618 
1619 	params->tx_vdev_id = da_peer->vdev->vdev_id;
1620 
1621 	/* If the source or destination peer is in the isolation
1622 	 * list then don't forward; instead push to the bridge stack.
1623 	 */
1624 	if (dp_get_peer_isolation(ta_peer) ||
1625 	    dp_get_peer_isolation(da_peer)) {
1626 		ret = false;
1627 		goto rel_da_peer;
1628 	}
1629 
1630 	if (da_peer->bss_peer || (da_peer == ta_peer)) {
1631 		ret = false;
1632 		goto rel_da_peer;
1633 	}
1634 
1635 	/* Same vdev, support Intra-BSS */
1636 	if (da_peer->vdev == ta_peer->vdev) {
1637 		ret = true;
1638 		goto rel_da_peer;
1639 	}
1640 
1641 	if (!be_vdev->mlo_dev_ctxt) {
1642 		ret = false;
1643 		goto rel_da_peer;
1644 	}
1645 
1646 	/* MLO specific Intra-BSS check */
1647 	if (dp_rx_intrabss_fwd_mlo_allow(ta_peer, da_peer)) {
1648 		/* use dest chip id for legacy dest peer */
1649 		if (!(da_peer_id & HAL_RX_DA_IDX_ML_PEER_MASK)) {
1650 			if (!(be_vdev->mlo_dev_ctxt->vdev_list[dest_chip_id][0]
1651 			      == params->tx_vdev_id) &&
1652 			    !(be_vdev->mlo_dev_ctxt->vdev_list[dest_chip_id][1]
1653 			      == params->tx_vdev_id)) {
1654 				/*dp_soc_unref_delete(soc);*/
1655 				goto rel_da_peer;
1656 			}
1657 		}
1658 		ret = true;
1659 	}
1660 
1661 rel_da_peer:
1662 	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
1663 	return ret;
1664 }
1665 #else
1666 static bool
1667 dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
1668 			      struct dp_txrx_peer *ta_peer,
1669 			      uint8_t *rx_tlv_hdr,
1670 			      struct hal_rx_msdu_metadata *msdu_metadata,
1671 			      struct dp_be_intrabss_params *params)
1672 {
1673 	uint16_t da_peer_id;
1674 	struct dp_txrx_peer *da_peer;
1675 	bool ret = false;
1676 	dp_txrx_ref_handle txrx_ref_handle = NULL;
1677 
1678 	if (!qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf))
1679 		return false;
1680 
1681 	da_peer_id = dp_rx_peer_metadata_peer_id_get_be(
1682 						params->dest_soc,
1683 						msdu_metadata->da_idx);
1684 
1685 	da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc, da_peer_id,
1686 					     &txrx_ref_handle, DP_MOD_ID_RX);
1687 	if (!da_peer)
1688 		return false;
1689 
1690 	params->tx_vdev_id = da_peer->vdev->vdev_id;
1691 	/* If the source or destination peer is in the isolation
1692 	 * list, do not forward; instead push the pkt to the bridge stack.
1693 	 */
1694 	if (dp_get_peer_isolation(ta_peer) ||
1695 	    dp_get_peer_isolation(da_peer))
1696 		goto rel_da_peer;
1697 
1698 	if (da_peer->bss_peer || da_peer == ta_peer)
1699 		goto rel_da_peer;
1700 
1701 	/* Same vdev, support Intra-BSS */
1702 	if (da_peer->vdev == ta_peer->vdev) {
1703 		ret = true;
1704 		goto rel_da_peer;
1705 	}
1706 
1707 	/* MLO specific Intra-BSS check */
1708 	if (dp_rx_intrabss_fwd_mlo_allow(ta_peer, da_peer)) {
1709 		ret = true;
1710 		goto rel_da_peer;
1711 	}
1712 
1713 rel_da_peer:
1714 	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
1715 	return ret;
1716 }
1717 #endif /* WLAN_MLO_MULTI_CHIP */
1718 #endif /* INTRA_BSS_FWD_OFFLOAD */
1719 
1720 #if defined(WLAN_PKT_CAPTURE_RX_2_0) || defined(CONFIG_WORD_BASED_TLV)
1721 void dp_rx_word_mask_subscribe_be(struct dp_soc *soc,
1722 				  uint32_t *msg_word,
1723 				  void *rx_filter)
1724 {
1725 	struct htt_rx_ring_tlv_filter *tlv_filter =
1726 				(struct htt_rx_ring_tlv_filter *)rx_filter;
1727 
1728 	if (!msg_word || !tlv_filter)
1729 		return;
1730 
1731 	/* tlv_filter->enable is set to 1 for monitor rings */
1732 	if (tlv_filter->enable)
1733 		return;
1734 
1735 	/* if word mask is zero, FW will set the default values */
1736 	if (!(tlv_filter->rx_mpdu_start_wmask > 0 &&
1737 	      tlv_filter->rx_msdu_end_wmask > 0)) {
1738 		return;
1739 	}
1740 
1741 	HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACTION_ENABLE_SET(*msg_word, 1);
1742 
1743 	/* word 14 */
1744 	msg_word += 3;
1745 	*msg_word = 0;
1746 
1747 	HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_WORD_MASK_SET(
1748 				*msg_word,
1749 				tlv_filter->rx_mpdu_start_wmask);
1750 
1751 	/* word 15 */
1752 	msg_word++;
1753 	*msg_word = 0;
1754 	HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_WORD_MASK_SET(
1755 				*msg_word,
1756 				tlv_filter->rx_msdu_end_wmask);
1757 }
1758 #else
1759 void dp_rx_word_mask_subscribe_be(struct dp_soc *soc,
1760 				  uint32_t *msg_word,
1761 				  void *rx_filter)
1762 {
1763 }
1764 #endif
1765 
1766 #if defined(WLAN_MCAST_MLO) && defined(CONFIG_MLO_SINGLE_DEV)
1767 static inline
1768 bool dp_rx_intrabss_mlo_mcbc_fwd(struct dp_soc *soc, struct dp_vdev *vdev,
1769 				 qdf_nbuf_t nbuf_copy)
1770 {
1771 	struct dp_vdev *mcast_primary_vdev = NULL;
1772 	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
1773 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
1774 	struct cdp_tx_exception_metadata tx_exc_metadata = {0};
1775 
1776 	tx_exc_metadata.is_mlo_mcast = 1;
1777 	tx_exc_metadata.tx_encap_type = CDP_INVALID_TX_ENCAP_TYPE;
1778 	tx_exc_metadata.sec_type = CDP_INVALID_SEC_TYPE;
1779 	tx_exc_metadata.peer_id = CDP_INVALID_PEER;
1780 	tx_exc_metadata.tid = CDP_INVALID_TID;
1781 
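	/* Send the copy out on the MLO mcast primary vdev via the TX
	 * exception path; if TX does not consume the copy, it is returned
	 * and freed here.
	 */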
1782 	mcast_primary_vdev = dp_mlo_get_mcast_primary_vdev(be_soc,
1783 							   be_vdev,
1784 							   DP_MOD_ID_RX);
1785 
1786 	if (!mcast_primary_vdev)
1787 		return false;
1788 
1789 	nbuf_copy = dp_tx_send_exception((struct cdp_soc_t *)
1790 					 mcast_primary_vdev->pdev->soc,
1791 					 mcast_primary_vdev->vdev_id,
1792 					 nbuf_copy, &tx_exc_metadata);
1793 
1794 	if (nbuf_copy)
1795 		qdf_nbuf_free(nbuf_copy);
1796 
1797 	dp_vdev_unref_delete(mcast_primary_vdev->pdev->soc,
1798 			     mcast_primary_vdev, DP_MOD_ID_RX);
1799 	return true;
1800 }
1801 #else
1802 static inline
1803 bool dp_rx_intrabss_mlo_mcbc_fwd(struct dp_soc *soc, struct dp_vdev *vdev,
1804 				 qdf_nbuf_t nbuf_copy)
1805 {
1806 	return false;
1807 }
1808 #endif
1809 
1810 bool
1811 dp_rx_intrabss_mcast_handler_be(struct dp_soc *soc,
1812 				struct dp_txrx_peer *ta_txrx_peer,
1813 				qdf_nbuf_t nbuf_copy,
1814 				struct cdp_tid_rx_stats *tid_stats,
1815 				uint8_t link_id)
1816 {
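	/* For a NAWDS-enabled vdev, forward the multicast copy on the same
	 * vdev through the TX exception path and update the TA peer's
	 * intra-BSS stats accordingly.
	 */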
1817 	if (qdf_unlikely(ta_txrx_peer->vdev->nawds_enabled)) {
1818 		struct cdp_tx_exception_metadata tx_exc_metadata = {0};
1819 		uint16_t len = QDF_NBUF_CB_RX_PKT_LEN(nbuf_copy);
1820 
1821 		tx_exc_metadata.peer_id = ta_txrx_peer->peer_id;
1822 		tx_exc_metadata.is_intrabss_fwd = 1;
1823 		tx_exc_metadata.tid = HTT_TX_EXT_TID_INVALID;
1824 
1825 		if (dp_tx_send_exception((struct cdp_soc_t *)soc,
1826 					  ta_txrx_peer->vdev->vdev_id,
1827 					  nbuf_copy,
1828 					  &tx_exc_metadata)) {
1829 			DP_PEER_PER_PKT_STATS_INC_PKT(ta_txrx_peer,
1830 						      rx.intra_bss.fail, 1,
1831 						      len, link_id);
1832 			tid_stats->fail_cnt[INTRABSS_DROP]++;
1833 			qdf_nbuf_free(nbuf_copy);
1834 		} else {
1835 			DP_PEER_PER_PKT_STATS_INC_PKT(ta_txrx_peer,
1836 						      rx.intra_bss.pkts, 1,
1837 						      len, link_id);
1838 			tid_stats->intrabss_cnt++;
1839 		}
1840 		return true;
1841 	}
1842 
1843 	if (dp_rx_intrabss_mlo_mcbc_fwd(soc, ta_txrx_peer->vdev,
1844 					nbuf_copy))
1845 		return true;
1846 
1847 	return false;
1848 }
1849 
1850 bool dp_rx_intrabss_fwd_be(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
1851 			   uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
1852 			   uint8_t link_id)
1853 {
1854 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
1855 	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
1856 	struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
1857 					tid_stats.tid_rx_stats[ring_id][tid];
1858 	bool ret = false;
1859 	struct dp_be_intrabss_params params;
1860 	struct hal_rx_msdu_metadata msdu_metadata;
1861 
1862 	/* if it is a broadcast pkt (e.g. ARP) and it is not its own
1863 	 * source, then clone the pkt and send the cloned pkt for
1864 	 * intra-BSS forwarding while the original pkt goes up the
1865 	 * network stack.
1866 	 * Note: how do we handle multicast pkts? Do we forward all
1867 	 * multicast pkts as-is, or let a higher layer module like
1868 	 * igmpsnoop decide whether to forward with Mcast enhancement?
1869 	 */
1870 	if (qdf_nbuf_is_da_mcbc(nbuf) && !ta_peer->bss_peer) {
1871 		return dp_rx_intrabss_mcbc_fwd(soc, ta_peer, rx_tlv_hdr,
1872 					       nbuf, tid_stats, link_id);
1873 	}
1874 
1875 	if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
1876 					    nbuf))
1877 		return true;
1878 
1879 	hal_rx_msdu_packet_metadata_get_generic_be(rx_tlv_hdr, &msdu_metadata);
1880 	params.dest_soc = soc;
1881 	if (dp_rx_intrabss_ucast_check_be(nbuf, ta_peer, rx_tlv_hdr,
1882 					  &msdu_metadata, &params)) {
1883 		ret = dp_rx_intrabss_ucast_fwd(params.dest_soc, ta_peer,
1884 					       params.tx_vdev_id,
1885 					       rx_tlv_hdr, nbuf, tid_stats,
1886 					       link_id);
1887 	}
1888 
1889 	return ret;
1890 }
1891 #endif
1892 
1893 #ifndef BE_WBM_RELEASE_DESC_RX_SG_SUPPORT
1894 /**
1895  * dp_rx_chain_msdus_be() - Function to chain all msdus of an mpdu
1896  *			    to the pdev invalid peer list
1897  *
1898  * @soc: core DP main context
1899  * @nbuf: Buffer pointer
1900  * @rx_tlv_hdr: start of rx tlv header
1901  * @mac_id: mac id
1902  *
1903  *  Return: bool: true for the last msdu of an mpdu
1904  */
1905 static bool dp_rx_chain_msdus_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
1906 				 uint8_t *rx_tlv_hdr, uint8_t mac_id)
1907 {
1908 	bool mpdu_done = false;
1909 	qdf_nbuf_t curr_nbuf = NULL;
1910 	qdf_nbuf_t tmp_nbuf = NULL;
1911 
1912 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1913 
1914 	if (!dp_pdev) {
1915 		dp_rx_debug("%pK: pdev is null for mac_id = %d", soc, mac_id);
1916 		return mpdu_done;
1917 	}
1918 	/* if the invalid peer SG list has reached its max size, free the
1919 	 * buffers in the list and treat the current buffer as a new list start
1920 	 *
1921 	 * the current logic to detect the last buffer from attn_tlv is not
1922 	 * reliable in the OFDMA UL scenario, hence the max buffers check to
1923 	 * avoid the list piling up
1924 	 */
1925 	if (!dp_pdev->first_nbuf ||
1926 	    (dp_pdev->invalid_peer_head_msdu &&
1927 	    QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST
1928 	    (dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS)) {
1929 		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
1930 		dp_pdev->first_nbuf = true;
1931 
1932 		/* If the new nbuf received is the first msdu of the
1933 		 * amsdu and there are msdus in the invalid peer msdu
1934 		 * list, then let us free all the msdus of the invalid
1935 		 * peer msdu list.
1936 		 * This scenario can happen when we start receiving a
1937 		 * new a-msdu even before the previous a-msdu is completely
1938 		 * received.
1939 		 */
1940 		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
1941 		while (curr_nbuf) {
1942 			tmp_nbuf = curr_nbuf->next;
1943 			dp_rx_nbuf_free(curr_nbuf);
1944 			curr_nbuf = tmp_nbuf;
1945 		}
1946 
1947 		dp_pdev->invalid_peer_head_msdu = NULL;
1948 		dp_pdev->invalid_peer_tail_msdu = NULL;
1949 
1950 		dp_monitor_get_mpdu_status(dp_pdev, soc, rx_tlv_hdr);
1951 	}
1952 
1953 	if (qdf_nbuf_is_rx_chfrag_end(nbuf) &&
1954 	    hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
1955 		qdf_assert_always(dp_pdev->first_nbuf);
1956 		dp_pdev->first_nbuf = false;
1957 		mpdu_done = true;
1958 	}
1959 
1960 	/*
1961 	 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
1962 	 * should be NULL here; add this check for debugging purposes
1963 	 * in case of some corner case.
1964 	 */
1965 	DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
1966 					dp_pdev->invalid_peer_tail_msdu);
1967 	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
1968 			  dp_pdev->invalid_peer_tail_msdu,
1969 			  nbuf);
1970 
1971 	return mpdu_done;
1972 }
1973 #else
1974 static bool dp_rx_chain_msdus_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
1975 				 uint8_t *rx_tlv_hdr, uint8_t mac_id)
1976 {
1977 	return false;
1978 }
1979 #endif
1980 
1981 qdf_nbuf_t
1982 dp_rx_wbm_err_reap_desc_be(struct dp_intr *int_ctx, struct dp_soc *soc,
1983 			   hal_ring_handle_t hal_ring_hdl, uint32_t quota,
1984 			   uint32_t *rx_bufs_used)
1985 {
1986 	hal_ring_desc_t ring_desc;
1987 	hal_soc_handle_t hal_soc;
1988 	struct dp_rx_desc *rx_desc;
1989 	union dp_rx_desc_list_elem_t
1990 		*head[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { NULL } };
1991 	union dp_rx_desc_list_elem_t
1992 		*tail[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { NULL } };
1993 	uint32_t rx_bufs_reaped[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT] = { { 0 } };
1994 	uint8_t mac_id;
1995 	struct dp_srng *dp_rxdma_srng;
1996 	struct rx_desc_pool *rx_desc_pool;
1997 	qdf_nbuf_t nbuf_head = NULL;
1998 	qdf_nbuf_t nbuf_tail = NULL;
1999 	qdf_nbuf_t nbuf;
2000 	uint8_t msdu_continuation = 0;
2001 	bool process_sg_buf = false;
2002 	QDF_STATUS status;
2003 	struct dp_soc *replenish_soc;
2004 	uint8_t chip_id;
2005 	union hal_wbm_err_info_u wbm_err = { 0 };
2006 
2007 	qdf_assert(soc && hal_ring_hdl);
2008 	hal_soc = soc->hal_soc;
2009 	qdf_assert(hal_soc);
2010 
2011 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
2012 		/* TODO */
2013 		/*
2014 		 * Need API to convert from hal_ring pointer to
2015 		 * Ring Type / Ring Id combo
2016 		 */
2017 		dp_rx_err_err("%pK: HAL RING Access Failed -- %pK",
2018 			      soc, hal_ring_hdl);
2019 		goto done;
2020 	}
2021 
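	/* Reap entries from the WBM error release ring until the ring is
	 * empty or the processing quota is exhausted.
	 */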
2022 	while (qdf_likely(quota)) {
2023 		ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
2024 
2025 		if (qdf_unlikely(!ring_desc))
2026 			break;
2027 
2028 		/* Get SW Desc from HAL desc */
2029 		if (dp_wbm_get_rx_desc_from_hal_desc_be(soc,
2030 							ring_desc,
2031 							&rx_desc)) {
2032 			dp_rx_err_err("get rx sw desc from hal_desc failed");
2033 			continue;
2034 		}
2035 
2036 		if (dp_assert_always_internal_stat(rx_desc, soc,
2037 						   rx.err.rx_desc_null))
2038 			continue;
2039 
2040 		if (!dp_rx_desc_check_magic(rx_desc)) {
2041 			dp_rx_err_err("%pK: Invalid rx_desc %pK",
2042 				      soc, rx_desc);
2043 			continue;
2044 		}
2045 
2046 		/*
2047 		 * this is an unlikely scenario where the host is reaping
2048 		 * a descriptor which it already reaped just a while ago
2049 		 * but has not yet replenished back to HW.
2050 		 * In this case the host will dump the last 128 descriptors,
2051 		 * including the software descriptor rx_desc, and assert.
2052 		 */
2053 		if (qdf_unlikely(!rx_desc->in_use)) {
2054 			DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
2055 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
2056 						   ring_desc, rx_desc);
2057 			continue;
2058 		}
2059 
2060 		status = dp_rx_wbm_desc_nbuf_sanity_check(soc, hal_ring_hdl,
2061 							  ring_desc, rx_desc);
2062 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
2063 			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
2064 			dp_info_rl("Rx error Nbuf %pK sanity check failure!",
2065 				   rx_desc->nbuf);
2066 			rx_desc->in_err_state = 1;
2067 			continue;
2068 		}
2069 
2070 		nbuf = rx_desc->nbuf;
2071 
2072 		/*
2073 		 * Read wbm err info, MSDU info, MPDU info and peer metadata
2074 		 * from the desc and save all the info in the nbuf CB/TLV.
2075 		 * We will need this info when we do the actual nbuf processing.
2076 		 */
2077 		wbm_err.info = dp_rx_wbm_err_copy_desc_info_in_nbuf(
2078 							soc,
2079 							ring_desc,
2080 							nbuf,
2081 							rx_desc->pool_id);
2082 		/*
2083 		 * For WBM ring, expect only MSDU buffers
2084 		 */
2085 		if (dp_assert_always_internal_stat(
2086 				wbm_err.info_bit.buffer_or_desc_type ==
2087 						HAL_RX_WBM_BUF_TYPE_REL_BUF,
2088 				soc, rx.err.wbm_err_buf_rel_type))
2089 			continue;
2090 		/*
2091 		 * Errors are handled only if the source is RXDMA or REO
2092 		 */
2093 		qdf_assert((wbm_err.info_bit.wbm_err_src ==
2094 			    HAL_RX_WBM_ERR_SRC_RXDMA) ||
2095 			   (wbm_err.info_bit.wbm_err_src ==
2096 			    HAL_RX_WBM_ERR_SRC_REO));
2097 
2098 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
2099 		dp_ipa_rx_buf_smmu_mapping_lock(soc);
2100 		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
2101 		rx_desc->unmapped = 1;
2102 		dp_ipa_rx_buf_smmu_mapping_unlock(soc);
2103 
2104 		if (qdf_unlikely(
2105 			soc->wbm_release_desc_rx_sg_support &&
2106 			dp_rx_is_sg_formation_required(&wbm_err.info_bit))) {
2107 			/* SG is detected from continuation bit */
2108 			msdu_continuation =
2109 				dp_rx_wbm_err_msdu_continuation_get(soc,
2110 								    ring_desc,
2111 								    nbuf);
2112 			if (msdu_continuation &&
2113 			    !(soc->wbm_sg_param.wbm_is_first_msdu_in_sg)) {
2114 				/* Update length from first buffer in SG */
2115 				soc->wbm_sg_param.wbm_sg_desc_msdu_len =
2116 					hal_rx_msdu_start_msdu_len_get(
2117 						soc->hal_soc,
2118 						qdf_nbuf_data(nbuf));
2119 				soc->wbm_sg_param.wbm_is_first_msdu_in_sg =
2120 									true;
2121 			}
2122 
2123 			if (msdu_continuation) {
2124 				/* MSDU continued packets */
2125 				qdf_nbuf_set_rx_chfrag_cont(nbuf, 1);
2126 				QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
2127 					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
2128 			} else {
2129 				/* This is the terminal packet in SG */
2130 				qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
2131 				qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
2132 				QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
2133 					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
2134 				process_sg_buf = true;
2135 			}
2136 		} else {
2137 			qdf_nbuf_set_rx_chfrag_cont(nbuf, 0);
2138 		}
2139 
2140 		rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;
2141 
2142 		if (qdf_nbuf_is_rx_chfrag_cont(nbuf) || process_sg_buf) {
2143 			DP_RX_LIST_APPEND(soc->wbm_sg_param.wbm_sg_nbuf_head,
2144 					  soc->wbm_sg_param.wbm_sg_nbuf_tail,
2145 					  nbuf);
2146 			if (process_sg_buf) {
2147 				if (!dp_rx_buffer_pool_refill(
2148 					soc,
2149 					soc->wbm_sg_param.wbm_sg_nbuf_head,
2150 					rx_desc->pool_id))
2151 					DP_RX_MERGE_TWO_LIST(
2152 					  nbuf_head, nbuf_tail,
2153 					  soc->wbm_sg_param.wbm_sg_nbuf_head,
2154 					  soc->wbm_sg_param.wbm_sg_nbuf_tail);
2155 				dp_rx_wbm_sg_list_last_msdu_war(soc);
2156 				dp_rx_wbm_sg_list_reset(soc);
2157 				process_sg_buf = false;
2158 			}
2159 		} else if (!dp_rx_buffer_pool_refill(soc, nbuf,
2160 						     rx_desc->pool_id)) {
2161 			DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf);
2162 		}
2163 
2164 		dp_rx_add_to_free_desc_list
2165 			(&head[rx_desc->chip_id][rx_desc->pool_id],
2166 			 &tail[rx_desc->chip_id][rx_desc->pool_id], rx_desc);
2167 
2168 		/*
2169 		 * if the continuation bit is set then the MSDU is spread
2170 		 * across multiple buffers; do not decrement the quota
2171 		 * till we reap all buffers of that MSDU.
2172 		 */
2173 		if (qdf_likely(!msdu_continuation))
2174 			quota -= 1;
2175 	}
2176 done:
2177 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
2178 
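	/* Replenish the RX refill ring of each chip/pdev with as many
	 * buffers as were reaped above and report the total via
	 * rx_bufs_used.
	 */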
2179 	for (chip_id = 0; chip_id < WLAN_MAX_MLO_CHIPS; chip_id++) {
2180 		for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
2181 			/*
2182 			 * continue with next mac_id if no pkts were reaped
2183 			 * from that pool
2184 			 */
2185 			if (!rx_bufs_reaped[chip_id][mac_id])
2186 				continue;
2187 
2188 			replenish_soc = dp_rx_replenish_soc_get(soc, chip_id);
2189 
2190 			dp_rxdma_srng =
2191 				&replenish_soc->rx_refill_buf_ring[mac_id];
2192 
2193 			rx_desc_pool = &replenish_soc->rx_desc_buf[mac_id];
2194 
2195 			dp_rx_buffers_replenish_simple(replenish_soc, mac_id,
2196 						dp_rxdma_srng,
2197 						rx_desc_pool,
2198 						rx_bufs_reaped[chip_id][mac_id],
2199 						&head[chip_id][mac_id],
2200 						&tail[chip_id][mac_id]);
2201 			*rx_bufs_used += rx_bufs_reaped[chip_id][mac_id];
2202 		}
2203 	}
2204 	return nbuf_head;
2205 }
2206 
2207 #ifdef WLAN_FEATURE_11BE_MLO
2208 /**
2209  * check_extap_multicast_loopback() - Check if rx packet is a loopback packet.
2210  *
2211  * @vdev: vdev on which rx packet is received
2212  * @addr: src address of the received packet
2213  * Return: true if the rx packet is a loopback packet, false otherwise
2214  */
2215 static bool check_extap_multicast_loopback(struct dp_vdev *vdev, uint8_t *addr)
2216 {
2217 	/* if the src mac addr matches the vdev mac address, drop the pkt */
2218 	if (!(qdf_mem_cmp(addr, vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE)))
2219 		return true;
2220 
2221 	/* if the src mac addr matches the mld mac address, drop the pkt */
2222 	if (!(qdf_mem_cmp(addr, vdev->mld_mac_addr.raw, QDF_MAC_ADDR_SIZE)))
2223 		return true;
2224 
2225 	return false;
2226 }
2227 #else
2228 static bool check_extap_multicast_loopback(struct dp_vdev *vdev, uint8_t *addr)
2229 {
2230 	return false;
2231 }
2232 #endif
2233 
2234 QDF_STATUS
2235 dp_rx_null_q_desc_handle_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
2236 			    uint8_t *rx_tlv_hdr, uint8_t pool_id,
2237 			    struct dp_txrx_peer *txrx_peer,
2238 			    bool is_reo_exception,
2239 			    uint8_t link_id)
2240 {
2241 	uint32_t pkt_len;
2242 	uint16_t msdu_len;
2243 	struct dp_vdev *vdev;
2244 	uint8_t tid;
2245 	qdf_ether_header_t *eh;
2246 	struct hal_rx_msdu_metadata msdu_metadata;
2247 	uint16_t sa_idx = 0;
2248 	bool is_eapol = 0;
2249 	bool enh_flag;
2250 	uint16_t buf_size;
2251 
2252 	buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx);
2253 
2254 	qdf_nbuf_set_rx_chfrag_start(
2255 				nbuf,
2256 				hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
2257 							       rx_tlv_hdr));
2258 	qdf_nbuf_set_rx_chfrag_end(nbuf,
2259 				   hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
2260 								 rx_tlv_hdr));
2261 	qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
2262 								  rx_tlv_hdr));
2263 	qdf_nbuf_set_da_valid(nbuf,
2264 			      hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
2265 							      rx_tlv_hdr));
2266 	qdf_nbuf_set_sa_valid(nbuf,
2267 			      hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
2268 							      rx_tlv_hdr));
2269 
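	/* Extract the TID and MSDU metadata from the RX TLVs; total pkt
	 * length is the MSDU length plus the L3 header padding and the RX
	 * packet TLVs prepended to the buffer.
	 */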
2270 	tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr);
2271 	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
2272 	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
2273 	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;
2274 
2275 	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
2276 		if (dp_rx_check_pkt_len(soc, pkt_len))
2277 			goto drop_nbuf;
2278 
2279 		/* Set length in nbuf */
2280 		qdf_nbuf_set_pktlen(nbuf, qdf_min(pkt_len, (uint32_t)buf_size));
2281 	}
2282 
2283 	/*
2284 	 * Check if DMA completed -- msdu_done is the last bit
2285 	 * to be written
2286 	 */
2287 	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
2288 		dp_err_rl("MSDU DONE failure");
2289 		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
2290 				     QDF_TRACE_LEVEL_INFO);
2291 		qdf_assert(0);
2292 	}
2293 
2294 	if (!txrx_peer &&
2295 	    dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
2296 							  rx_tlv_hdr, nbuf))
2297 		return QDF_STATUS_E_FAILURE;
2298 
2299 	if (!txrx_peer) {
2300 		bool mpdu_done = false;
2301 		struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
2302 
2303 		if (!pdev) {
2304 			dp_err_rl("pdev is null for pool_id = %d", pool_id);
2305 			return QDF_STATUS_E_FAILURE;
2306 		}
2307 
2308 		dp_err_rl("txrx_peer is NULL");
2309 		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
2310 				 qdf_nbuf_len(nbuf));
2311 
2312 		/* QCN9000 has WBM release desc RX SG support enabled */
2313 		if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support)) {
2314 			mpdu_done = true;
2315 			nbuf->next = NULL;
2316 			/* Trigger invalid peer handler wrapper */
2317 			dp_rx_process_invalid_peer_wrapper(soc,
2318 							   nbuf,
2319 							   mpdu_done,
2320 							   pool_id);
2321 		} else {
2322 			mpdu_done = dp_rx_chain_msdus_be(soc, nbuf, rx_tlv_hdr,
2323 							 pool_id);
2324 
2325 			/* Trigger invalid peer handler wrapper */
2326 			dp_rx_process_invalid_peer_wrapper(
2327 					soc,
2328 					pdev->invalid_peer_head_msdu,
2329 					mpdu_done, pool_id);
2330 		}
2331 
2332 		if (mpdu_done) {
2333 			pdev->invalid_peer_head_msdu = NULL;
2334 			pdev->invalid_peer_tail_msdu = NULL;
2335 		}
2336 
2337 		return QDF_STATUS_E_FAILURE;
2338 	}
2339 
2340 	vdev = txrx_peer->vdev;
2341 	if (!vdev) {
2342 		dp_err_rl("Null vdev!");
2343 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
2344 		goto drop_nbuf;
2345 	}
2346 
2347 	/*
2348 	 * Advance the packet start pointer by the total size of
2349 	 * the pre-header TLVs
2350 	 */
2351 	if (qdf_nbuf_is_frag(nbuf))
2352 		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
2353 	else
2354 		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
2355 				   soc->rx_pkt_tlv_size));
2356 
2357 	DP_STATS_INC_PKT(vdev, rx_i.null_q_desc_pkt, 1, qdf_nbuf_len(nbuf));
2358 
2359 	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);
2360 
2361 	if (dp_rx_err_drop_3addr_mcast(vdev, rx_tlv_hdr)) {
2362 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.mcast_3addr_drop, 1,
2363 					  link_id);
2364 		goto drop_nbuf;
2365 	}
2366 
2367 	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
2368 		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);
2369 
2370 		if ((sa_idx < 0) ||
2371 		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
2372 			DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
2373 			goto drop_nbuf;
2374 		}
2375 	}
2376 
2377 	if ((!soc->mec_fw_offload) &&
2378 	    dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf)) {
2379 		/* this is a looped back MCBC pkt, drop it */
2380 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
2381 					      qdf_nbuf_len(nbuf), link_id);
2382 		goto drop_nbuf;
2383 	}
2384 
2385 	/*
2386 	 * In qwrap mode, if the received packet matches any of the vdev
2387 	 * mac addresses, drop it. Do not receive multicast packets
2388 	 * originating from any proxysta.
2389 	 */
2390 	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
2391 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
2392 					      qdf_nbuf_len(nbuf), link_id);
2393 		goto drop_nbuf;
2394 	}
2395 
2396 	if (qdf_unlikely(txrx_peer->nawds_enabled &&
2397 			 hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
2398 							rx_tlv_hdr))) {
2399 		dp_err_rl("free buffer for multicast packet");
2400 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.nawds_mcast_drop, 1,
2401 					  link_id);
2402 		goto drop_nbuf;
2403 	}
2404 
2405 	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
2406 		dp_err_rl("mcast Policy Check Drop pkt");
2407 		DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.policy_check_drop, 1,
2408 					  link_id);
2409 		goto drop_nbuf;
2410 	}
2411 	/* WDS Source Port Learning */
2412 	if (!soc->ast_offload_support &&
2413 	    qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
2414 		       vdev->wds_enabled))
2415 		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, txrx_peer, nbuf,
2416 					msdu_metadata);
2417 
2418 	if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) {
2419 		struct dp_peer *peer;
2420 		struct dp_rx_tid *rx_tid;
2421 
2422 		peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id,
2423 					     DP_MOD_ID_RX_ERR);
2424 		if (peer) {
2425 			rx_tid = &peer->rx_tid[tid];
2426 			qdf_spin_lock_bh(&rx_tid->tid_lock);
2427 			if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
2428 			/* For a mesh peer, if the mesh peer is not deleted
2429 			 * on one of the mesh APs, its new addition on
2430 			 * another mesh AP does not do BA negotiation,
2431 			 * leading to a mismatch in BA windows.
2432 			 * To avoid this, send the max BA window during init.
2433 			 */
2434 				if (qdf_unlikely(vdev->mesh_vdev) ||
2435 				    qdf_unlikely(txrx_peer->nawds_enabled))
2436 					dp_rx_tid_setup_wifi3(
2437 						peer, BIT(tid),
2438 						hal_get_rx_max_ba_window(soc->hal_soc, tid),
2439 						IEEE80211_SEQ_MAX);
2440 				else
2441 					dp_rx_tid_setup_wifi3(peer, BIT(tid), 1,
2442 							      IEEE80211_SEQ_MAX);
2443 			}
2444 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
2445 			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
2446 			dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
2447 		}
2448 	}
2449 
2450 	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2451 
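	/* For a peer that is not yet authorized, allow only EAPOL/WAPI
	 * frames destined to this vdev and drop everything else.
	 */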
2452 	if (!txrx_peer->authorize) {
2453 		is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf);
2454 
2455 		if (is_eapol || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
2456 			if (!dp_rx_err_match_dhost(eh, vdev))
2457 				goto drop_nbuf;
2458 		} else {
2459 			goto drop_nbuf;
2460 		}
2461 	}
2462 
2463 	/*
2464 	 * Drop packets in this path if cce_match is found. Packets will come
2465 	 * in the following paths depending on whether tidQ is setup.
2466 	 * 1. If tidQ is setup: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE and
2467 	 * cce_match = 1
2468 	 *    Packets with WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE are already
2469 	 *    dropped.
2470 	 * 2. If tidQ is not setup: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ERROR and
2471 	 * cce_match = 1
2472 	 *    These packets need to be dropped and should not get delivered
2473 	 *    to stack.
2474 	 */
2475 	if (qdf_unlikely(dp_rx_err_cce_drop(soc, vdev, nbuf, rx_tlv_hdr)))
2476 		goto drop_nbuf;
2477 
2478 	/*
2479 	 * In extap mode, if the received packet matches the mld mac address,
2480 	 * drop it. For non-IP packets, conversion might not be possible, so
2481 	 * the MEC entry will not be updated, resulting in loopback.
2482 	 */
2483 	if (qdf_unlikely(check_extap_multicast_loopback(vdev,
2484 							eh->ether_shost))) {
2485 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
2486 					      qdf_nbuf_len(nbuf), link_id);
2487 		goto drop_nbuf;
2488 	}
2489 
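	/* Raw decap: deliver the frame on the raw path as-is; otherwise
	 * deliver it to the OS stack with stats, protocol tagging and flow
	 * tagging applied.
	 */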
2490 	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
2491 		qdf_nbuf_set_raw_frame(nbuf, 1);
2492 		qdf_nbuf_set_next(nbuf, NULL);
2493 		dp_rx_deliver_raw(vdev, nbuf, txrx_peer, link_id);
2494 	} else {
2495 		enh_flag = vdev->pdev->enhanced_stats_en;
2496 		qdf_nbuf_set_next(nbuf, NULL);
2497 		DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
2498 					  enh_flag);
2499 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
2500 					      rx.rx_success, 1,
2501 					      qdf_nbuf_len(nbuf),
2502 					      link_id);
2503 		/*
2504 		 * Update the protocol tag in SKB based on
2505 		 * CCE metadata
2506 		 */
2507 		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
2508 					  EXCEPTION_DEST_RING_ID,
2509 					  true, true);
2510 
2511 		/* Update the flow tag in SKB based on FSE metadata */
2512 		dp_rx_update_flow_tag(soc, vdev, nbuf,
2513 				      rx_tlv_hdr, true);
2514 
2515 		if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
2516 				 soc->hal_soc, rx_tlv_hdr) &&
2517 				 (vdev->rx_decap_type ==
2518 				  htt_cmn_pkt_type_ethernet))) {
2519 			DP_PEER_MC_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
2520 					    enh_flag, link_id);
2521 
2522 			if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost))
2523 				DP_PEER_BC_INCC_PKT(txrx_peer, 1,
2524 						    qdf_nbuf_len(nbuf),
2525 						    enh_flag,
2526 						    link_id);
2527 		} else {
2528 			DP_PEER_UC_INCC_PKT(txrx_peer, 1,
2529 					    qdf_nbuf_len(nbuf),
2530 					    enh_flag,
2531 					    link_id);
2532 		}
2533 
2534 		qdf_nbuf_set_exc_frame(nbuf, 1);
2535 
2536 		if (qdf_unlikely(vdev->multipass_en)) {
2537 			if (dp_rx_multipass_process(txrx_peer, nbuf,
2538 						    tid) == false) {
2539 				DP_PEER_PER_PKT_STATS_INC
2540 					(txrx_peer,
2541 					 rx.multipass_rx_pkt_drop,
2542 					 1, link_id);
2543 				goto drop_nbuf;
2544 			}
2545 		}
2546 
2547 		dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf, NULL,
2548 					    is_eapol);
2549 	}
2550 	return QDF_STATUS_SUCCESS;
2551 
2552 drop_nbuf:
2553 	dp_rx_nbuf_free(nbuf);
2554 	return QDF_STATUS_E_FAILURE;
2555 }
2556