xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.c (revision 60d9ef02ece2840f39794c943b63ee53ecc1a0d2)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "hal_hw_headers.h"
21 #include "dp_types.h"
22 #include "dp_rx.h"
23 #include "dp_tx.h"
24 #include "dp_peer.h"
25 #include "hal_rx.h"
26 #include "hal_api.h"
27 #include "qdf_nbuf.h"
28 #ifdef MESH_MODE_SUPPORT
29 #include "if_meta_hdr.h"
30 #endif
31 #include "dp_internal.h"
32 #include "dp_ipa.h"
33 #include "dp_hist.h"
34 #include "dp_rx_buffer_pool.h"
35 #ifdef WIFI_MONITOR_SUPPORT
36 #include "dp_htt.h"
37 #include <dp_mon.h>
38 #endif
39 #ifdef FEATURE_WDS
40 #include "dp_txrx_wds.h"
41 #endif
42 #ifdef DP_RATETABLE_SUPPORT
43 #include "dp_ratetable.h"
44 #endif
45 #include "enet.h"
46 
47 #ifndef WLAN_SOFTUMAC_SUPPORT /* WLAN_SOFTUMAC_SUPPORT */
48 
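/**
 * dp_rx_dump_info_and_assert() - dump the ring descriptor and rx_desc state
 * @soc: core txrx main context
 * @hal_ring_hdl: HAL handle of the ring that produced the descriptor
 * @ring_desc: ring descriptor referencing the suspect rx_desc
 * @rx_desc: software Rx descriptor to dump
 *
 * Two variants follow: with DUP_RX_DESC_WAR the state is only dumped and
 * the duplicate descriptor is tolerated as a workaround; without it the
 * dump is followed by qdf_assert_always() so corruption halts the system.
 *
 * Return: None
 */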
49 #ifdef DUP_RX_DESC_WAR
50 void dp_rx_dump_info_and_assert(struct dp_soc *soc,
51 				hal_ring_handle_t hal_ring,
52 				hal_ring_desc_t ring_desc,
53 				struct dp_rx_desc *rx_desc)
54 {
55 	void *hal_soc = soc->hal_soc;
56 
57 	hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc);
58 	dp_rx_desc_dump(rx_desc);
59 }
60 #else
61 void dp_rx_dump_info_and_assert(struct dp_soc *soc,
62 				hal_ring_handle_t hal_ring_hdl,
63 				hal_ring_desc_t ring_desc,
64 				struct dp_rx_desc *rx_desc)
65 {
66 	hal_soc_handle_t hal_soc = soc->hal_soc;
67 
68 	dp_rx_desc_dump(rx_desc);
69 	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
70 	hal_srng_dump_ring(hal_soc, hal_ring_hdl);
71 	qdf_assert_always(0);
72 }
73 #endif
74 
75 #ifndef QCA_HOST_MODE_WIFI_DISABLED
76 #ifdef RX_DESC_SANITY_WAR
77 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
78 			     hal_ring_handle_t hal_ring_hdl,
79 			     hal_ring_desc_t ring_desc,
80 			     struct dp_rx_desc *rx_desc)
81 {
82 	uint8_t return_buffer_manager;
83 
84 	if (qdf_unlikely(!rx_desc)) {
85 		/*
86 		 * This is an unlikely case where the cookie obtained
87 		 * from the ring_desc is invalid and hence we are not
88 		 * able to find the corresponding rx_desc
89 		 */
90 		goto fail;
91 	}
92 
93 	return_buffer_manager = hal_rx_ret_buf_manager_get(hal_soc, ring_desc);
94 	if (qdf_unlikely(!(return_buffer_manager ==
95 				HAL_RX_BUF_RBM_SW1_BM(soc->wbm_sw0_bm_id) ||
96 			 return_buffer_manager ==
97 				HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id)))) {
98 		goto fail;
99 	}
100 
101 	return QDF_STATUS_SUCCESS;
102 
103 fail:
104 	DP_STATS_INC(soc, rx.err.invalid_cookie, 1);
105 	dp_err_rl("Sanity failed for ring Desc:");
106 	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl,
107 				ring_desc);
108 	return QDF_STATUS_E_NULL_VALUE;
109 
110 }
111 #endif
112 
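/**
 * dp_rx_srng_get_num_pending() - get the number of pending ring entries
 * @hal_soc: opaque HAL soc handle
 * @hal_ring_hdl: destination ring whose occupancy is queried
 * @num_entries: total number of entries in the ring
 * @near_full: out parameter, set when the ring is at least half occupied
 *
 * Return: number of valid (pending) entries in the destination ring
 */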
113 uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
114 				    hal_ring_handle_t hal_ring_hdl,
115 				    uint32_t num_entries,
116 				    bool *near_full)
117 {
118 	uint32_t num_pending = 0;
119 
120 	num_pending = hal_srng_dst_num_valid_locked(hal_soc,
121 						    hal_ring_hdl,
122 						    true);
123 
124 	if (num_entries && (num_pending >= num_entries >> 1))
125 		*near_full = true;
126 	else
127 		*near_full = false;
128 
129 	return num_pending;
130 }
131 
132 #ifdef RX_DESC_DEBUG_CHECK
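/**
 * dp_rx_desc_nbuf_sanity_check() - check the rx_desc buffer address
 * @soc: DP soc handle
 * @ring_desc: REO destination ring descriptor received from HW
 * @rx_desc: software Rx descriptor looked up from the cookie
 *
 * Cross-checks the buffer physical address carried in the hardware ring
 * descriptor against the one cached in the software descriptor, to catch
 * possible buffer paddr corruption.
 *
 * Return: QDF_STATUS_SUCCESS if the addresses match, else
 *	   QDF_STATUS_E_FAILURE
 */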
133 QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
134 					hal_ring_desc_t ring_desc,
135 					struct dp_rx_desc *rx_desc)
136 {
137 	struct hal_buf_info hbi;
138 
139 	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
140 	/* Sanity check for possible buffer paddr corruption */
141 	if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr))
142 		return QDF_STATUS_SUCCESS;
143 
144 	return QDF_STATUS_E_FAILURE;
145 }
146 
147 /**
148  * dp_rx_desc_nbuf_len_sanity_check() - Sanity check to catch Rx buffer
149  *					 out-of-bounds access from HW
150  *
151  * @soc: DP soc
152  * @pkt_len: Packet length received from HW
153  *
154  * Return: None
155  */
156 static inline void
157 dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc,
158 				 uint32_t pkt_len)
159 {
160 	struct rx_desc_pool *rx_desc_pool;
161 
162 	rx_desc_pool = &soc->rx_desc_buf[0];
163 	qdf_assert_always(pkt_len <= rx_desc_pool->buf_size);
164 }
165 #else
166 static inline void
167 dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc, uint32_t pkt_len) { }
168 #endif
169 
170 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
171 void
172 dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
173 			hal_ring_desc_t ring_desc)
174 {
175 	struct dp_buf_info_record *record;
176 	struct hal_buf_info hbi;
177 	uint32_t idx;
178 
179 	if (qdf_unlikely(!soc->rx_ring_history[ring_num]))
180 		return;
181 
182 	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
183 
184 	/* buffer_addr_info is the first element of ring_desc */
185 	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)ring_desc,
186 				  &hbi);
187 
188 	idx = dp_history_get_next_index(&soc->rx_ring_history[ring_num]->index,
189 					DP_RX_HIST_MAX);
190 
191 	/* No NULL check needed for record since it's an array */
192 	record = &soc->rx_ring_history[ring_num]->entry[idx];
193 
194 	record->timestamp = qdf_get_log_timestamp();
195 	record->hbi.paddr = hbi.paddr;
196 	record->hbi.sw_cookie = hbi.sw_cookie;
197 	record->hbi.rbm = hbi.rbm;
198 }
199 #endif
200 
201 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
202 void dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev,
203 					      uint8_t *rx_tlv,
204 					      qdf_nbuf_t nbuf)
205 {
206 	struct dp_soc *soc;
207 
208 	if (!pdev->is_first_wakeup_packet)
209 		return;
210 
211 	soc = pdev->soc;
212 	if (hal_get_first_wow_wakeup_packet(soc->hal_soc, rx_tlv)) {
213 		qdf_nbuf_mark_wakeup_frame(nbuf);
214 		dp_info("First packet after WOW Wakeup rcvd");
215 	}
216 }
217 #endif
218 
219 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
220 #endif /* WLAN_SOFTUMAC_SUPPORT */
221 
222 /**
223  * dp_pdev_frag_alloc_and_map() - Allocate frag for desc buffer and map
224  *
225  * @dp_soc: struct dp_soc *
226  * @nbuf_frag_info_t: nbuf frag info
227  * @dp_pdev: struct dp_pdev *
228  * @rx_desc_pool: Rx desc pool
229  *
230  * Return: QDF_STATUS
231  */
232 #ifdef DP_RX_MON_MEM_FRAG
233 static inline QDF_STATUS
234 dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
235 			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
236 			   struct dp_pdev *dp_pdev,
237 			   struct rx_desc_pool *rx_desc_pool)
238 {
239 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
240 
241 	(nbuf_frag_info_t->virt_addr).vaddr =
242 			qdf_frag_alloc(&rx_desc_pool->pf_cache, rx_desc_pool->buf_size);
243 
244 	if (!((nbuf_frag_info_t->virt_addr).vaddr)) {
245 		dp_err("Frag alloc failed");
246 		DP_STATS_INC(dp_pdev, replenish.frag_alloc_fail, 1);
247 		return QDF_STATUS_E_NOMEM;
248 	}
249 
250 	ret = qdf_mem_map_page(dp_soc->osdev,
251 			       (nbuf_frag_info_t->virt_addr).vaddr,
252 			       QDF_DMA_FROM_DEVICE,
253 			       rx_desc_pool->buf_size,
254 			       &nbuf_frag_info_t->paddr);
255 
256 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
257 		qdf_frag_free((nbuf_frag_info_t->virt_addr).vaddr);
258 		dp_err("Frag map failed");
259 		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
260 		return QDF_STATUS_E_FAULT;
261 	}
262 
263 	return QDF_STATUS_SUCCESS;
264 }
265 #else
266 static inline QDF_STATUS
267 dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
268 			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
269 			   struct dp_pdev *dp_pdev,
270 			   struct rx_desc_pool *rx_desc_pool)
271 {
272 	return QDF_STATUS_SUCCESS;
273 }
274 #endif /* DP_RX_MON_MEM_FRAG */
275 
276 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
277 /**
278  * dp_rx_refill_ring_record_entry() - Record an entry into refill_ring history
279  * @soc: Datapath soc structure
280  * @ring_num: Refill ring number
281  * @hal_ring_hdl: HAL handle of the refill ring
282  * @num_req: number of buffers requested for refill
283  * @num_refill: number of buffers refilled
284  *
285  * Return: None
286  */
287 static inline void
288 dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
289 			       hal_ring_handle_t hal_ring_hdl,
290 			       uint32_t num_req, uint32_t num_refill)
291 {
292 	struct dp_refill_info_record *record;
293 	uint32_t idx;
294 	uint32_t tp;
295 	uint32_t hp;
296 
297 	if (qdf_unlikely(ring_num >= MAX_PDEV_CNT ||
298 			 !soc->rx_refill_ring_history[ring_num]))
299 		return;
300 
301 	idx = dp_history_get_next_index(&soc->rx_refill_ring_history[ring_num]->index,
302 					DP_RX_REFILL_HIST_MAX);
303 
304 	/* No NULL check needed for record since it's an array */
305 	record = &soc->rx_refill_ring_history[ring_num]->entry[idx];
306 
307 	hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &tp, &hp);
308 	record->timestamp = qdf_get_log_timestamp();
309 	record->num_req = num_req;
310 	record->num_refill = num_refill;
311 	record->hp = hp;
312 	record->tp = tp;
313 }
314 #else
315 static inline void
316 dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
317 			       hal_ring_handle_t hal_ring_hdl,
318 			       uint32_t num_req, uint32_t num_refill)
319 {
320 }
321 #endif
322 
323 /**
324  * dp_pdev_nbuf_alloc_and_map_replenish() - Allocate nbuf for desc buffer and
325  *                                          map
326  * @dp_soc: struct dp_soc *
327  * @mac_id: Mac id
328  * @num_entries_avail: num_entries_avail
329  * @nbuf_frag_info_t: nbuf frag info
330  * @dp_pdev: struct dp_pdev *
331  * @rx_desc_pool: Rx desc pool
332  *
333  * Return: QDF_STATUS
334  */
335 static inline QDF_STATUS
336 dp_pdev_nbuf_alloc_and_map_replenish(struct dp_soc *dp_soc,
337 				     uint32_t mac_id,
338 				     uint32_t num_entries_avail,
339 				     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
340 				     struct dp_pdev *dp_pdev,
341 				     struct rx_desc_pool *rx_desc_pool)
342 {
343 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
344 
345 	(nbuf_frag_info_t->virt_addr).nbuf =
346 		dp_rx_buffer_pool_nbuf_alloc(dp_soc,
347 					     mac_id,
348 					     rx_desc_pool,
349 					     num_entries_avail);
350 	if (!((nbuf_frag_info_t->virt_addr).nbuf)) {
351 		dp_err("nbuf alloc failed");
352 		DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
353 		return QDF_STATUS_E_NOMEM;
354 	}
355 
356 	ret = dp_rx_buffer_pool_nbuf_map(dp_soc, rx_desc_pool,
357 					 nbuf_frag_info_t);
358 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
359 		dp_rx_buffer_pool_nbuf_free(dp_soc,
360 			(nbuf_frag_info_t->virt_addr).nbuf, mac_id);
361 		dp_err("nbuf map failed");
362 		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
363 		return QDF_STATUS_E_FAULT;
364 	}
365 
366 	nbuf_frag_info_t->paddr =
367 		qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);
368 	dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, (qdf_nbuf_t)(
369 					  (nbuf_frag_info_t->virt_addr).nbuf),
370 					  rx_desc_pool->buf_size,
371 					  true, __func__, __LINE__);
372 
373 	ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
374 			     &nbuf_frag_info_t->paddr,
375 			     rx_desc_pool);
376 	if (ret == QDF_STATUS_E_FAILURE) {
377 		DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
378 		return QDF_STATUS_E_ADDRNOTAVAIL;
379 	}
380 
381 	return QDF_STATUS_SUCCESS;
382 }
383 
384 #if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
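/*
 * Low-threshold replenish for the no-map nbuf path: unless force_replenish
 * is set, buffers are posted only once the refill ring is at least 3/4
 * empty, and each buffer gets a cache sync (dp_rx_nbuf_sync_no_dsb) rather
 * than a full per-buffer DMA map.
 */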
385 QDF_STATUS
386 __dp_rx_buffers_no_map_lt_replenish(struct dp_soc *soc, uint32_t mac_id,
387 				    struct dp_srng *dp_rxdma_srng,
388 				    struct rx_desc_pool *rx_desc_pool,
389 				    bool force_replenish)
390 {
391 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
392 	uint32_t count;
393 	void *rxdma_ring_entry;
394 	union dp_rx_desc_list_elem_t *next = NULL;
395 	void *rxdma_srng;
396 	qdf_nbuf_t nbuf;
397 	qdf_dma_addr_t paddr;
398 	uint16_t num_entries_avail = 0;
399 	uint16_t num_alloc_desc = 0;
400 	union dp_rx_desc_list_elem_t *desc_list = NULL;
401 	union dp_rx_desc_list_elem_t *tail = NULL;
402 	int sync_hw_ptr = 0;
403 
404 	rxdma_srng = dp_rxdma_srng->hal_srng;
405 
406 	if (qdf_unlikely(!dp_pdev)) {
407 		dp_rx_err("%pK: pdev is null for mac_id = %d", soc, mac_id);
408 		return QDF_STATUS_E_FAILURE;
409 	}
410 
411 	if (qdf_unlikely(!rxdma_srng)) {
412 		dp_rx_debug("%pK: rxdma srng not initialized", soc);
413 		return QDF_STATUS_E_FAILURE;
414 	}
415 
416 	hal_srng_access_start(soc->hal_soc, rxdma_srng);
417 
418 	num_entries_avail = hal_srng_src_num_avail(soc->hal_soc,
419 						   rxdma_srng,
420 						   sync_hw_ptr);
421 
422 	dp_rx_debug("%pK: no of available entries in rxdma ring: %d",
423 		    soc, num_entries_avail);
424 
425 	if (qdf_unlikely(!force_replenish && (num_entries_avail <
426 			 ((dp_rxdma_srng->num_entries * 3) / 4)))) {
427 		hal_srng_access_end(soc->hal_soc, rxdma_srng);
428 		return QDF_STATUS_E_FAILURE;
429 	}
430 
431 	DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1);
432 	num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id,
433 						  rx_desc_pool,
434 						  num_entries_avail,
435 						  &desc_list,
436 						  &tail);
437 
438 	if (!num_alloc_desc) {
439 		dp_rx_err("%pK: no free rx_descs in freelist", soc);
440 		DP_STATS_INC(dp_pdev, err.desc_lt_alloc_fail,
441 			     num_entries_avail);
442 		hal_srng_access_end(soc->hal_soc, rxdma_srng);
443 		return QDF_STATUS_E_NOMEM;
444 	}
445 
446 	for (count = 0; count < num_alloc_desc; count++) {
447 		next = desc_list->next;
448 		qdf_prefetch(next);
449 		nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
450 		if (qdf_unlikely(!nbuf)) {
451 			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
452 			break;
453 		}
454 
455 		paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
456 					       rx_desc_pool->buf_size);
457 
458 		rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc,
459 							 rxdma_srng);
460 		qdf_assert_always(rxdma_ring_entry);
461 
462 		desc_list->rx_desc.nbuf = nbuf;
463 		dp_rx_set_reuse_nbuf(&desc_list->rx_desc, nbuf);
464 		desc_list->rx_desc.rx_buf_start = nbuf->data;
465 		desc_list->rx_desc.paddr_buf_start = paddr;
466 		desc_list->rx_desc.unmapped = 0;
467 
468 		/* rx_desc.in_use should be zero at this time */
469 		qdf_assert_always(desc_list->rx_desc.in_use == 0);
470 
471 		desc_list->rx_desc.in_use = 1;
472 		desc_list->rx_desc.in_err_state = 0;
473 
474 		hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
475 					     paddr,
476 					     desc_list->rx_desc.cookie,
477 					     rx_desc_pool->owner);
478 
479 		desc_list = next;
480 	}
481 	qdf_dsb();
482 	hal_srng_access_end(soc->hal_soc, rxdma_srng);
483 
484 	/* No need to count the number of bytes received during replenish.
485 	 * Therefore set replenish.pkts.bytes as 0.
486 	 */
487 	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
488 	DP_STATS_INC(dp_pdev, buf_freelist, (num_alloc_desc - count));
489 	/*
490 	 * add any available free desc back to the free list
491 	 */
492 	if (desc_list)
493 		dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail,
494 						 mac_id, rx_desc_pool);
495 
496 	return QDF_STATUS_SUCCESS;
497 }
498 
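/*
 * Replenish for the no-map nbuf path: all requested nbufs are allocated
 * and cache-synced up front, then posted to the refill ring in a single
 * SRNG access window; nbufs left over when the ring runs out of entries
 * are unmapped back to the buffer pool and freed at the end.
 */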
499 QDF_STATUS
500 __dp_rx_buffers_no_map_replenish(struct dp_soc *soc, uint32_t mac_id,
501 				 struct dp_srng *dp_rxdma_srng,
502 				 struct rx_desc_pool *rx_desc_pool,
503 				 uint32_t num_req_buffers,
504 				 union dp_rx_desc_list_elem_t **desc_list,
505 				 union dp_rx_desc_list_elem_t **tail)
506 {
507 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
508 	uint32_t count;
509 	void *rxdma_ring_entry;
510 	union dp_rx_desc_list_elem_t *next;
511 	void *rxdma_srng;
512 	qdf_nbuf_t nbuf;
513 	qdf_nbuf_t nbuf_next;
514 	qdf_nbuf_t nbuf_head = NULL;
515 	qdf_nbuf_t nbuf_tail = NULL;
516 	qdf_dma_addr_t paddr;
517 
518 	rxdma_srng = dp_rxdma_srng->hal_srng;
519 
520 	if (qdf_unlikely(!dp_pdev)) {
521 		dp_rx_err("%pK: pdev is null for mac_id = %d",
522 			  soc, mac_id);
523 		return QDF_STATUS_E_FAILURE;
524 	}
525 
526 	if (qdf_unlikely(!rxdma_srng)) {
527 		dp_rx_debug("%pK: rxdma srng not initialized", soc);
528 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
529 		return QDF_STATUS_E_FAILURE;
530 	}
531 
532 	/* Allocate required number of nbufs */
533 	for (count = 0; count < num_req_buffers; count++) {
534 		nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
535 		if (qdf_unlikely(!nbuf)) {
536 			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
537 			/* Update num_req_buffers to nbufs allocated count */
538 			num_req_buffers = count;
539 			break;
540 		}
541 
542 		paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
543 					       rx_desc_pool->buf_size);
544 
545 		QDF_NBUF_CB_PADDR(nbuf) = paddr;
546 		DP_RX_LIST_APPEND(nbuf_head,
547 				  nbuf_tail,
548 				  nbuf);
549 	}
550 	qdf_dsb();
551 
552 	nbuf = nbuf_head;
553 	hal_srng_access_start(soc->hal_soc, rxdma_srng);
554 
555 	for (count = 0; count < num_req_buffers; count++) {
556 		next = (*desc_list)->next;
557 		nbuf_next = nbuf->next;
558 		qdf_prefetch(next);
559 
560 		rxdma_ring_entry = (struct dp_buffer_addr_info *)
561 			hal_srng_src_get_next(soc->hal_soc, rxdma_srng);
562 
563 		if (!rxdma_ring_entry)
564 			break;
565 
566 		(*desc_list)->rx_desc.nbuf = nbuf;
567 		dp_rx_set_reuse_nbuf(&(*desc_list)->rx_desc, nbuf);
568 		(*desc_list)->rx_desc.rx_buf_start = nbuf->data;
569 		(*desc_list)->rx_desc.paddr_buf_start = QDF_NBUF_CB_PADDR(nbuf);
570 		(*desc_list)->rx_desc.unmapped = 0;
571 
572 		/* rx_desc.in_use should be zero at this time */
573 		qdf_assert_always((*desc_list)->rx_desc.in_use == 0);
574 
575 		(*desc_list)->rx_desc.in_use = 1;
576 		(*desc_list)->rx_desc.in_err_state = 0;
577 
578 		hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
579 					     QDF_NBUF_CB_PADDR(nbuf),
580 					     (*desc_list)->rx_desc.cookie,
581 					     rx_desc_pool->owner);
582 
583 		*desc_list = next;
584 		nbuf = nbuf_next;
585 	}
586 	hal_srng_access_end(soc->hal_soc, rxdma_srng);
587 
588 	/* No need to count the number of bytes received during replenish.
589 	 * Therefore set replenish.pkts.bytes as 0.
590 	 */
591 	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
592 	DP_STATS_INC(dp_pdev, buf_freelist, (num_req_buffers - count));
593 	/*
594 	 * add any available free desc back to the free list
595 	 */
596 	if (*desc_list)
597 		dp_rx_add_desc_list_to_free_list(soc, desc_list, tail,
598 						 mac_id, rx_desc_pool);
599 	while (nbuf) {
600 		nbuf_next = nbuf->next;
601 		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
602 		qdf_nbuf_free(nbuf);
603 		nbuf = nbuf_next;
604 	}
605 
606 	return QDF_STATUS_SUCCESS;
607 }
608 
609 #ifdef WLAN_SUPPORT_PPEDS
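/*
 * PPE-DS completion-to-refill path: descriptors arrive with a recyclable
 * nbuf (rx_desc.reuse_nbuf) still attached, so buffers are re-posted to
 * the refill ring without a fresh allocation. Descriptors that do not fit
 * in the ring have their reuse nbuf released and go back to the free list.
 */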
610 QDF_STATUS
611 __dp_rx_comp2refill_replenish(struct dp_soc *soc, uint32_t mac_id,
612 			      struct dp_srng *dp_rxdma_srng,
613 			      struct rx_desc_pool *rx_desc_pool,
614 			      uint32_t num_req_buffers,
615 			      union dp_rx_desc_list_elem_t **desc_list,
616 			      union dp_rx_desc_list_elem_t **tail)
617 {
618 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
619 	uint32_t count;
620 	void *rxdma_ring_entry;
621 	union dp_rx_desc_list_elem_t *next;
622 	union dp_rx_desc_list_elem_t *cur;
623 	void *rxdma_srng;
624 	qdf_nbuf_t nbuf;
625 
626 	rxdma_srng = dp_rxdma_srng->hal_srng;
627 
628 	if (qdf_unlikely(!dp_pdev)) {
629 		dp_rx_err("%pK: pdev is null for mac_id = %d",
630 			  soc, mac_id);
631 		return QDF_STATUS_E_FAILURE;
632 	}
633 
634 	if (qdf_unlikely(!rxdma_srng)) {
635 		dp_rx_debug("%pK: rxdma srng not initialized", soc);
636 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
637 		return QDF_STATUS_E_FAILURE;
638 	}
639 
640 	hal_srng_access_start(soc->hal_soc, rxdma_srng);
641 
642 	for (count = 0; count < num_req_buffers; count++) {
643 		next = (*desc_list)->next;
644 		qdf_prefetch(next);
645 
646 		rxdma_ring_entry = (struct dp_buffer_addr_info *)
647 			hal_srng_src_get_next(soc->hal_soc, rxdma_srng);
648 
649 		if (!rxdma_ring_entry)
650 			break;
651 
652 		(*desc_list)->rx_desc.in_use = 1;
653 		(*desc_list)->rx_desc.in_err_state = 0;
654 		(*desc_list)->rx_desc.nbuf = (*desc_list)->rx_desc.reuse_nbuf;
655 
656 		hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
657 				     (*desc_list)->rx_desc.paddr_buf_start,
658 				     (*desc_list)->rx_desc.cookie,
659 				     rx_desc_pool->owner);
660 
661 		*desc_list = next;
662 	}
663 	hal_srng_access_end(soc->hal_soc, rxdma_srng);
664 
665 	/* No need to count the number of bytes received during replenish.
666 	 * Therefore set replenish.pkts.bytes as 0.
667 	 */
668 	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
669 	DP_STATS_INC(dp_pdev, buf_freelist, (num_req_buffers - count));
670 
671 	/*
672 	 * add any available free desc back to the free list
673 	 */
674 	cur = *desc_list;
675 	for ( ; count < num_req_buffers; count++) {
676 		next = cur->next;
677 		qdf_prefetch(next);
678 
679 		nbuf = cur->rx_desc.reuse_nbuf;
680 
681 		cur->rx_desc.nbuf = NULL;
682 		cur->rx_desc.in_use = 0;
683 		cur->rx_desc.has_reuse_nbuf = false;
684 		cur->rx_desc.reuse_nbuf = NULL;
685 		if (!nbuf->recycled_for_ds)
686 			dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
687 
688 		nbuf->recycled_for_ds = 0;
689 		nbuf->fast_recycled = 0;
690 		qdf_nbuf_free(nbuf);
691 		cur = next;
692 	}
693 
694 	if (*desc_list)
695 		dp_rx_add_desc_list_to_free_list(soc, desc_list, tail,
696 						 mac_id, rx_desc_pool);
697 
698 	return QDF_STATUS_SUCCESS;
699 }
700 #endif
701 
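/*
 * Attach-time variant of the no-map replenish: rx descriptors are
 * reserved from the free list first, then one nbuf is allocated, synced
 * and posted per descriptor, stopping early if either nbuf allocation or
 * ring entries run out.
 */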
702 QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *soc,
703 					      uint32_t mac_id,
704 					      struct dp_srng *dp_rxdma_srng,
705 					      struct rx_desc_pool *rx_desc_pool,
706 					      uint32_t num_req_buffers)
707 {
708 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
709 	uint32_t count;
710 	uint32_t nr_descs = 0;
711 	void *rxdma_ring_entry;
712 	union dp_rx_desc_list_elem_t *next;
713 	void *rxdma_srng;
714 	qdf_nbuf_t nbuf;
715 	qdf_dma_addr_t paddr;
716 	union dp_rx_desc_list_elem_t *desc_list = NULL;
717 	union dp_rx_desc_list_elem_t *tail = NULL;
718 
719 	rxdma_srng = dp_rxdma_srng->hal_srng;
720 
721 	if (qdf_unlikely(!dp_pdev)) {
722 		dp_rx_err("%pK: pdev is null for mac_id = %d",
723 			  soc, mac_id);
724 		return QDF_STATUS_E_FAILURE;
725 	}
726 
727 	if (qdf_unlikely(!rxdma_srng)) {
728 		dp_rx_debug("%pK: rxdma srng not initialized", soc);
729 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
730 		return QDF_STATUS_E_FAILURE;
731 	}
732 
733 	dp_rx_debug("%pK: requested %d buffers for replenish",
734 		    soc, num_req_buffers);
735 
736 	nr_descs = dp_rx_get_free_desc_list(soc, mac_id, rx_desc_pool,
737 					    num_req_buffers, &desc_list, &tail);
738 	if (!nr_descs) {
739 		dp_err("no free rx_descs in freelist");
740 		DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
741 		return QDF_STATUS_E_NOMEM;
742 	}
743 
744 	dp_debug("got %u RX descs for driver attach", nr_descs);
745 
746 	hal_srng_access_start(soc->hal_soc, rxdma_srng);
747 
748 	for (count = 0; count < nr_descs; count++) {
749 		next = desc_list->next;
750 		qdf_prefetch(next);
751 		nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
752 		if (qdf_unlikely(!nbuf)) {
753 			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
754 			break;
755 		}
756 
757 		paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
758 					       rx_desc_pool->buf_size);
759 		rxdma_ring_entry = (struct dp_buffer_addr_info *)
760 			hal_srng_src_get_next(soc->hal_soc, rxdma_srng);
761 		if (!rxdma_ring_entry) {
762 			qdf_nbuf_free(nbuf);
763 			break;
764 		}
765 
766 		desc_list->rx_desc.nbuf = nbuf;
767 		dp_rx_set_reuse_nbuf(&desc_list->rx_desc, nbuf);
768 		desc_list->rx_desc.rx_buf_start = nbuf->data;
769 		desc_list->rx_desc.paddr_buf_start = paddr;
770 		desc_list->rx_desc.unmapped = 0;
771 
772 		/* rx_desc.in_use should be zero at this time */
773 		qdf_assert_always(desc_list->rx_desc.in_use == 0);
774 
775 		desc_list->rx_desc.in_use = 1;
776 		desc_list->rx_desc.in_err_state = 0;
777 
778 		hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
779 					     paddr,
780 					     desc_list->rx_desc.cookie,
781 					     rx_desc_pool->owner);
782 
783 		desc_list = next;
784 	}
785 	qdf_dsb();
786 	hal_srng_access_end(soc->hal_soc, rxdma_srng);
787 
788 	/* No need to count the number of bytes received during replenish.
789 	 * Therefore set replenish.pkts.bytes as 0.
790 	 */
791 	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
792 
793 	return QDF_STATUS_SUCCESS;
794 }
795 #endif
796 
797 #ifdef DP_UMAC_HW_RESET_SUPPORT
798 #if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
799 static inline
800 qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf,
801 					uint32_t buf_size)
802 {
803 	return dp_rx_nbuf_sync_no_dsb(dp_soc, nbuf, buf_size);
804 }
805 #else
806 static inline
807 qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf,
808 					uint32_t buf_size)
809 {
810 	return qdf_nbuf_get_frag_paddr(nbuf, 0);
811 }
812 #endif
813 
814 /**
815  * dp_rx_desc_replenish() - Replenish the rx descriptors one at a time
816  * @soc: core txrx main context
817  * @dp_rxdma_srng: rxdma ring
818  * @rx_desc_pool: rx descriptor pool
819  * @rx_desc: rx descriptor
820  *
821  * Return: void
822  */
823 static inline
824 void dp_rx_desc_replenish(struct dp_soc *soc, struct dp_srng *dp_rxdma_srng,
825 			  struct rx_desc_pool *rx_desc_pool,
826 			  struct dp_rx_desc *rx_desc)
827 {
828 	void *rxdma_srng;
829 	void *rxdma_ring_entry;
830 	qdf_dma_addr_t paddr;
831 
832 	rxdma_srng = dp_rxdma_srng->hal_srng;
833 
834 	/* No one else should be accessing the srng at this point */
835 	hal_srng_access_start_unlocked(soc->hal_soc, rxdma_srng);
836 
837 	rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc, rxdma_srng);
838 
839 	qdf_assert_always(rxdma_ring_entry);
840 	rx_desc->in_err_state = 0;
841 
842 	paddr = dp_rx_rep_retrieve_paddr(soc, rx_desc->nbuf,
843 					 rx_desc_pool->buf_size);
844 	hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, paddr,
845 				     rx_desc->cookie, rx_desc_pool->owner);
846 
847 	hal_srng_access_end_unlocked(soc->hal_soc, rxdma_srng);
848 }
849 
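/*
 * Invoked during UMAC HW reset: each in-use descriptor of every pool is
 * re-armed into its refill ring one at a time (up to ring capacity); the
 * descriptors that do not fit have their nbufs chained onto *nbuf_list
 * for later freeing and are returned to the free list. If fewer than
 * about 1/3 of the ring entries were in use, the shortfall is topped up
 * with a regular replenish.
 */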
850 void dp_rx_desc_reuse(struct dp_soc *soc, qdf_nbuf_t *nbuf_list)
851 {
852 	int mac_id, i, j;
853 	union dp_rx_desc_list_elem_t *head = NULL;
854 	union dp_rx_desc_list_elem_t *tail = NULL;
855 
856 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
857 		struct dp_srng *dp_rxdma_srng =
858 					&soc->rx_refill_buf_ring[mac_id];
859 		struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
860 		uint32_t rx_sw_desc_num = rx_desc_pool->pool_size;
861 		/* Only fill up 1/3 of the ring size */
862 		uint32_t num_req_descs;
863 
864 		if (!dp_rxdma_srng || !dp_rxdma_srng->hal_srng ||
865 		    !rx_desc_pool->array)
866 			continue;
867 
868 		num_req_descs = dp_rxdma_srng->num_entries / 3;
869 
870 		for (i = 0, j = 0; i < rx_sw_desc_num; i++) {
871 			struct dp_rx_desc *rx_desc =
872 				(struct dp_rx_desc *)&rx_desc_pool->array[i];
873 
874 			if (rx_desc->in_use) {
875 				if (j < (dp_rxdma_srng->num_entries - 1)) {
876 					dp_rx_desc_replenish(soc, dp_rxdma_srng,
877 							     rx_desc_pool,
878 							     rx_desc);
879 				} else {
880 					dp_rx_nbuf_unmap(soc, rx_desc, 0);
881 					rx_desc->unmapped = 0;
882 
883 					rx_desc->nbuf->next = *nbuf_list;
884 					*nbuf_list = rx_desc->nbuf;
885 
886 					dp_rx_add_to_free_desc_list(&head,
887 								    &tail,
888 								    rx_desc);
889 				}
890 				j++;
891 			}
892 		}
893 
894 		if (head)
895 			dp_rx_add_desc_list_to_free_list(soc, &head, &tail,
896 							 mac_id, rx_desc_pool);
897 
898 		/* If the number of descs in use was low, we need to
899 		 * replenish the ring with additional buffers
900 		 */
901 		head = NULL;
902 		tail = NULL;
903 
904 		if (j < (num_req_descs - 1))
905 			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
906 						rx_desc_pool,
907 						((num_req_descs - 1) - j),
908 						&head, &tail, true);
909 	}
910 }
911 #endif
912 
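/*
 * Core replenish routine. With no caller-supplied *desc_list and req_only
 * false, it opportunistically tops up the whole ring once the ring is 3/4
 * empty (or force_replenish is set). When a *desc_list is passed and the
 * number of buffers still posted has dropped below
 * CRITICAL_BUFFER_THRESHOLD, extra descriptors are pulled from the free
 * list to refill more aggressively. Whatever cannot be posted is returned
 * to the free list under the free_descs label.
 */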
913 QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
914 				struct dp_srng *dp_rxdma_srng,
915 				struct rx_desc_pool *rx_desc_pool,
916 				uint32_t num_req_buffers,
917 				union dp_rx_desc_list_elem_t **desc_list,
918 				union dp_rx_desc_list_elem_t **tail,
919 				bool req_only, bool force_replenish,
920 				const char *func_name)
921 {
922 	uint32_t num_alloc_desc;
923 	uint16_t num_desc_to_free = 0;
924 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
925 	uint32_t num_entries_avail;
926 	uint32_t count;
927 	uint32_t extra_buffers;
928 	int sync_hw_ptr = 1;
929 	struct dp_rx_nbuf_frag_info nbuf_frag_info = {0};
930 	void *rxdma_ring_entry;
931 	union dp_rx_desc_list_elem_t *next;
932 	QDF_STATUS ret;
933 	void *rxdma_srng;
934 	union dp_rx_desc_list_elem_t *desc_list_append = NULL;
935 	union dp_rx_desc_list_elem_t *tail_append = NULL;
936 	union dp_rx_desc_list_elem_t *temp_list = NULL;
937 
938 	rxdma_srng = dp_rxdma_srng->hal_srng;
939 
940 	if (qdf_unlikely(!dp_pdev)) {
941 		dp_rx_err("%pK: pdev is null for mac_id = %d",
942 			  dp_soc, mac_id);
943 		return QDF_STATUS_E_FAILURE;
944 	}
945 
946 	if (qdf_unlikely(!rxdma_srng)) {
947 		dp_rx_debug("%pK: rxdma srng not initialized", dp_soc);
948 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
949 		return QDF_STATUS_E_FAILURE;
950 	}
951 
952 	dp_verbose_debug("%pK: requested %d buffers for replenish",
953 			 dp_soc, num_req_buffers);
954 
955 	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
956 
957 	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
958 						   rxdma_srng,
959 						   sync_hw_ptr);
960 
961 	dp_verbose_debug("%pK: no of available entries in rxdma ring: %d",
962 			 dp_soc, num_entries_avail);
963 
964 	if (!req_only && !(*desc_list) &&
965 	    (force_replenish || (num_entries_avail >
966 	     ((dp_rxdma_srng->num_entries * 3) / 4)))) {
967 		num_req_buffers = num_entries_avail;
968 		DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1);
969 	} else if (num_entries_avail < num_req_buffers) {
970 		num_desc_to_free = num_req_buffers - num_entries_avail;
971 		num_req_buffers = num_entries_avail;
972 	} else if ((*desc_list) &&
973 		   dp_rxdma_srng->num_entries - num_entries_avail <
974 		   CRITICAL_BUFFER_THRESHOLD) {
975 		/* set extra buffers to CRITICAL_BUFFER_THRESHOLD only if
976 		 * total buff requested after adding extra buffers is less
977 		 * than or equal to num entries available, else set it to max
978 		 * possible additional buffers available at that moment
979 		 */
980 		extra_buffers =
981 			((num_req_buffers + CRITICAL_BUFFER_THRESHOLD) > num_entries_avail) ?
982 			(num_entries_avail - num_req_buffers) :
983 			CRITICAL_BUFFER_THRESHOLD;
984 		/* Append some free descriptors to tail */
985 		num_alloc_desc =
986 			dp_rx_get_free_desc_list(dp_soc, mac_id,
987 						 rx_desc_pool,
988 						 extra_buffers,
989 						 &desc_list_append,
990 						 &tail_append);
991 
992 		if (num_alloc_desc) {
993 			temp_list = *desc_list;
994 			*desc_list = desc_list_append;
995 			tail_append->next = temp_list;
996 			num_req_buffers += num_alloc_desc;
997 
998 			DP_STATS_DEC(dp_pdev,
999 				     replenish.free_list,
1000 				     num_alloc_desc);
1001 		} else
1002 			dp_err_rl("%pK:  no free rx_descs in freelist", dp_soc);
1003 	}
1004 
1005 	if (qdf_unlikely(!num_req_buffers)) {
1006 		num_desc_to_free = num_req_buffers;
1007 		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
1008 		goto free_descs;
1009 	}
1010 
1011 	/*
1012 	 * if desc_list is NULL, allocate the descs from freelist
1013 	 */
1014 	if (!(*desc_list)) {
1015 		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
1016 							  rx_desc_pool,
1017 							  num_req_buffers,
1018 							  desc_list,
1019 							  tail);
1020 
1021 		if (!num_alloc_desc) {
1022 			dp_rx_err("%pK: no free rx_descs in freelist", dp_soc);
1023 			DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
1024 					num_req_buffers);
1025 			hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
1026 			return QDF_STATUS_E_NOMEM;
1027 		}
1028 
1029 		dp_verbose_debug("%pK: %d rx desc allocated", dp_soc,
1030 				 num_alloc_desc);
1031 		num_req_buffers = num_alloc_desc;
1032 	}
1033 
1034 
1035 	count = 0;
1036 
1037 	while (count < num_req_buffers) {
1038 		/* Flag is set during pdev rx_desc_pool initialization */
1039 		if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
1040 			ret = dp_pdev_frag_alloc_and_map(dp_soc,
1041 							 &nbuf_frag_info,
1042 							 dp_pdev,
1043 							 rx_desc_pool);
1044 		else
1045 			ret = dp_pdev_nbuf_alloc_and_map_replenish(dp_soc,
1046 								   mac_id,
1047 					num_entries_avail, &nbuf_frag_info,
1048 					dp_pdev, rx_desc_pool);
1049 
1050 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
1051 			if (qdf_unlikely(ret  == QDF_STATUS_E_FAULT))
1052 				continue;
1053 			break;
1054 		}
1055 
1056 		count++;
1057 
1058 		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
1059 							 rxdma_srng);
1060 		qdf_assert_always(rxdma_ring_entry);
1061 
1062 		next = (*desc_list)->next;
1063 
1064 		/* Flag is set during pdev rx_desc_pool initialization */
1065 		if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
1066 			dp_rx_desc_frag_prep(&((*desc_list)->rx_desc),
1067 					     &nbuf_frag_info);
1068 		else
1069 			dp_rx_desc_prep(&((*desc_list)->rx_desc),
1070 					&nbuf_frag_info);
1071 
1072 		/* rx_desc.in_use should be zero at this time */
1073 		qdf_assert_always((*desc_list)->rx_desc.in_use == 0);
1074 
1075 		(*desc_list)->rx_desc.in_use = 1;
1076 		(*desc_list)->rx_desc.in_err_state = 0;
1077 		dp_rx_desc_update_dbg_info(&(*desc_list)->rx_desc,
1078 					   func_name, RX_DESC_REPLENISHED);
1079 		dp_verbose_debug("rx_netbuf=%pK, paddr=0x%llx, cookie=%d",
1080 				 nbuf_frag_info.virt_addr.nbuf,
1081 				 (unsigned long long)(nbuf_frag_info.paddr),
1082 				 (*desc_list)->rx_desc.cookie);
1083 
1084 		hal_rxdma_buff_addr_info_set(dp_soc->hal_soc, rxdma_ring_entry,
1085 					     nbuf_frag_info.paddr,
1086 						(*desc_list)->rx_desc.cookie,
1087 						rx_desc_pool->owner);
1088 
1089 		*desc_list = next;
1090 
1091 	}
1092 
1093 	dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id, rxdma_srng,
1094 				       num_req_buffers, count);
1095 
1096 	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
1097 
1098 	dp_rx_schedule_refill_thread(dp_soc);
1099 
1100 	dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u",
1101 			 count, num_desc_to_free);
1102 
1103 	/* No need to count the number of bytes received during replenish.
1104 	 * Therefore set replenish.pkts.bytes as 0.
1105 	 */
1106 	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
1107 	DP_STATS_INC(dp_pdev, replenish.free_list, num_req_buffers - count);
1108 
1109 free_descs:
1110 	DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
1111 	/*
1112 	 * add any available free desc back to the free list
1113 	 */
1114 	if (*desc_list)
1115 		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
1116 			mac_id, rx_desc_pool);
1117 
1118 	return QDF_STATUS_SUCCESS;
1119 }
1120 
1121 qdf_export_symbol(__dp_rx_buffers_replenish);
1122 
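/*
 * Delivers a chain of raw (802.11) frames to the OS shim: per-packet raw
 * stats are updated, the list is run through the raw-mode decap simulation
 * callback (osif_rsim_rx_decap) and then handed up via osif_rx.
 */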
1123 void
1124 dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
1125 		  struct dp_txrx_peer *txrx_peer, uint8_t link_id)
1126 {
1127 	qdf_nbuf_t deliver_list_head = NULL;
1128 	qdf_nbuf_t deliver_list_tail = NULL;
1129 	qdf_nbuf_t nbuf;
1130 
1131 	nbuf = nbuf_list;
1132 	while (nbuf) {
1133 		qdf_nbuf_t next = qdf_nbuf_next(nbuf);
1134 
1135 		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
1136 
1137 		DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
1138 		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.raw, 1,
1139 					      qdf_nbuf_len(nbuf), link_id);
1140 
1141 		nbuf = next;
1142 	}
1143 
1144 	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
1145 				 &deliver_list_tail);
1146 
1147 	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
1148 }
1149 
1150 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1151 #ifndef FEATURE_WDS
1152 void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
1153 		    struct dp_txrx_peer *ta_peer, qdf_nbuf_t nbuf)
1154 {
1155 }
1156 #endif
1157 
1158 #ifdef QCA_SUPPORT_TX_MIN_RATES_FOR_SPECIAL_FRAMES
1159 /**
1160  * dp_classify_critical_pkts() - API for marking critical packets
1161  * @soc: dp_soc context
1162  * @vdev: vdev on which packet is to be sent
1163  * @nbuf: nbuf that has to be classified
1164  *
1165  * The function parses the packet, identifies whether it's a critical frame and
1166  * marks QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL bit in qdf_nbuf_cb for the nbuf.
1167  * Code for marking which frames are CRITICAL is accessed via callback.
1168  * EAPOL, ARP, DHCP, DHCPv6, ICMPv6 NS/NA are the typical critical frames.
1169  *
1170  * Return: None
1171  */
1172 static
1173 void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev,
1174 			       qdf_nbuf_t nbuf)
1175 {
1176 	if (vdev->tx_classify_critical_pkt_cb)
1177 		vdev->tx_classify_critical_pkt_cb(vdev->osif_vdev, nbuf);
1178 }
1179 #else
1180 static inline
1181 void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev,
1182 			       qdf_nbuf_t nbuf)
1183 {
1184 }
1185 #endif
1186 
1187 #ifdef QCA_OL_TX_MULTIQ_SUPPORT
1188 static inline
1189 void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id)
1190 {
1191 	qdf_nbuf_set_queue_mapping(nbuf, ring_id);
1192 }
1193 #else
1194 static inline
1195 void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id)
1196 {
1197 }
1198 #endif
1199 
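/*
 * Intra-BSS forwarding for multicast/broadcast frames: the nbuf is copied
 * so one instance can be transmitted back into the BSS while the original
 * is still delivered to the stack. It therefore returns false (frame not
 * consumed) on the forwarding path and true only when the EAPOL drop
 * check consumes the frame.
 */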
1200 bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
1201 			     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
1202 			     struct cdp_tid_rx_stats *tid_stats,
1203 			     uint8_t link_id)
1204 {
1205 	uint16_t len;
1206 	qdf_nbuf_t nbuf_copy;
1207 
1208 	if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
1209 					    nbuf))
1210 		return true;
1211 
1212 	if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf, link_id))
1213 		return false;
1214 
1215 	/* If the source peer is in the isolation list,
1216 	 * don't forward; instead, push to the bridge stack
1217 	 */
1218 	if (dp_get_peer_isolation(ta_peer))
1219 		return false;
1220 
1221 	nbuf_copy = qdf_nbuf_copy(nbuf);
1222 	if (!nbuf_copy)
1223 		return false;
1224 
1225 	len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
1226 
1227 	qdf_mem_set(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
1228 	dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf_copy);
1229 
1230 	if (soc->arch_ops.dp_rx_intrabss_mcast_handler(soc, ta_peer,
1231 						       nbuf_copy,
1232 						       tid_stats,
1233 						       link_id))
1234 		return false;
1235 
1236 	/* Don't send packets if tx is paused */
1237 	if (!soc->is_tx_pause &&
1238 	    !dp_tx_send((struct cdp_soc_t *)soc,
1239 			ta_peer->vdev->vdev_id, nbuf_copy)) {
1240 		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
1241 					      len, link_id);
1242 		tid_stats->intrabss_cnt++;
1243 	} else {
1244 		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
1245 					      len, link_id);
1246 		tid_stats->fail_cnt[INTRABSS_DROP]++;
1247 		dp_rx_nbuf_free(nbuf_copy);
1248 	}
1249 	return false;
1250 }
1251 
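/*
 * Intra-BSS forwarding for unicast frames: unlike the mcbc path, the
 * original nbuf itself is transmitted (after linearizing/unsharing
 * fragmented frames), so a true return means the frame was consumed by
 * TX and must not also be delivered to the stack.
 */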
1252 bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
1253 			      uint8_t tx_vdev_id,
1254 			      uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
1255 			      struct cdp_tid_rx_stats *tid_stats,
1256 			      uint8_t link_id)
1257 {
1258 	uint16_t len;
1259 
1260 	len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
1261 
1262 	/* linearize the nbuf just before we send to
1263 	 * dp_tx_send()
1264 	 */
1265 	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
1266 		if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
1267 			return false;
1268 
1269 		nbuf = qdf_nbuf_unshare(nbuf);
1270 		if (!nbuf) {
1271 			DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer,
1272 						      rx.intra_bss.fail,
1273 						      1, len, link_id);
1274 			/* skb_unshare failed, so the pkt is
1275 			 * not forwarded; update the drop stats
1276 			 * and return so that the caller moves
1277 			 * on to the next nbuf.
1278 			 */
1279 			tid_stats->fail_cnt[INTRABSS_DROP]++;
1280 			return false;
1281 		}
1282 	}
1283 
1284 	qdf_mem_set(nbuf->cb, 0x0, sizeof(nbuf->cb));
1285 	dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf);
1286 
1287 	/* Don't send packets if tx is paused */
1288 	if (!soc->is_tx_pause && !dp_tx_send((struct cdp_soc_t *)soc,
1289 					     tx_vdev_id, nbuf)) {
1290 		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
1291 					      len, link_id);
1292 	} else {
1293 		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
1294 					      len, link_id);
1295 		tid_stats->fail_cnt[INTRABSS_DROP]++;
1296 		return false;
1297 	}
1298 
1299 	return true;
1300 }
1301 
1302 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1303 
1304 #ifdef MESH_MODE_SUPPORT
1305 
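/*
 * Builds a mesh_recv_hdr_s from the RX TLVs (flags, decrypt key index,
 * SNR/RSSI, channel, band and rate-phy info) and attaches it to the nbuf
 * as CB_FTYPE_MESH_RX_INFO; the upper layer owns and frees the allocation.
 */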
1306 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1307 			   uint8_t *rx_tlv_hdr,
1308 			   struct dp_txrx_peer *txrx_peer)
1309 {
1310 	struct mesh_recv_hdr_s *rx_info = NULL;
1311 	uint32_t pkt_type;
1312 	uint32_t nss;
1313 	uint32_t rate_mcs;
1314 	uint32_t bw;
1315 	uint8_t primary_chan_num;
1316 	uint32_t center_chan_freq;
1317 	struct dp_soc *soc = vdev->pdev->soc;
1318 	struct dp_peer *peer;
1319 	struct dp_peer *primary_link_peer;
1320 	struct dp_soc *link_peer_soc;
1321 	cdp_peer_stats_param_t buf = {0};
1322 
1323 	/* fill recv mesh stats */
1324 	rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));
1325 
1326 	/* upper layers are responsible for freeing this memory */
1327 
1328 	if (!rx_info) {
1329 		dp_rx_err("%pK: Memory allocation failed for mesh rx stats",
1330 			  vdev->pdev->soc);
1331 		DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
1332 		return;
1333 	}
1334 
1335 	rx_info->rs_flags = MESH_RXHDR_VER1;
1336 	if (qdf_nbuf_is_rx_chfrag_start(nbuf))
1337 		rx_info->rs_flags |= MESH_RX_FIRST_MSDU;
1338 
1339 	if (qdf_nbuf_is_rx_chfrag_end(nbuf))
1340 		rx_info->rs_flags |= MESH_RX_LAST_MSDU;
1341 
1342 	peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, DP_MOD_ID_MESH);
1343 	if (peer) {
1344 		if (hal_rx_tlv_get_is_decrypted(soc->hal_soc, rx_tlv_hdr)) {
1345 			rx_info->rs_flags |= MESH_RX_DECRYPTED;
1346 			rx_info->rs_keyix = hal_rx_msdu_get_keyid(soc->hal_soc,
1347 								  rx_tlv_hdr);
1348 			if (vdev->osif_get_key)
1349 				vdev->osif_get_key(vdev->osif_vdev,
1350 						   &rx_info->rs_decryptkey[0],
1351 						   &peer->mac_addr.raw[0],
1352 						   rx_info->rs_keyix);
1353 		}
1354 
1355 		dp_peer_unref_delete(peer, DP_MOD_ID_MESH);
1356 	}
1357 
1358 	primary_link_peer = dp_get_primary_link_peer_by_id(soc,
1359 							   txrx_peer->peer_id,
1360 							   DP_MOD_ID_MESH);
1361 
1362 	if (qdf_likely(primary_link_peer)) {
1363 		link_peer_soc = primary_link_peer->vdev->pdev->soc;
1364 		dp_monitor_peer_get_stats_param(link_peer_soc,
1365 						primary_link_peer,
1366 						cdp_peer_rx_snr, &buf);
1367 		rx_info->rs_snr = buf.rx_snr;
1368 		dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_MESH);
1369 	}
1370 
1371 	rx_info->rs_rssi = rx_info->rs_snr + DP_DEFAULT_NOISEFLOOR;
1372 
1373 	soc = vdev->pdev->soc;
1374 	primary_chan_num = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr);
1375 	center_chan_freq = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr) >> 16;
1376 
1377 	if (soc->cdp_soc.ol_ops && soc->cdp_soc.ol_ops->freq_to_band) {
1378 		rx_info->rs_band = soc->cdp_soc.ol_ops->freq_to_band(
1379 							soc->ctrl_psoc,
1380 							vdev->pdev->pdev_id,
1381 							center_chan_freq);
1382 	}
1383 	rx_info->rs_channel = primary_chan_num;
1384 	pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr);
1385 	rate_mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr);
1386 	bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr);
1387 	nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
1388 
1389 	/*
1390 	 * The MCS index does not start with 0 when NSS>1 in HT mode.
1391 	 * MCS params for optional 20/40MHz, NSS=1~3, EQM(NSS>1):
1392 	 * ------------------------------------------------------
1393 	 *         NSS     |   1   |   2    |    3    |    4
1394 	 * ------------------------------------------------------
1395 	 * MCS index: HT20 | 0 ~ 7 | 8 ~ 15 | 16 ~ 23 | 24 ~ 31
1396 	 * ------------------------------------------------------
1397 	 * MCS index: HT40 | 0 ~ 7 | 8 ~ 15 | 16 ~ 23 | 24 ~ 31
1398 	 * ------------------------------------------------------
1399 	 * Currently, the MAX_NSS=2. If NSS>2, MCS index = 8 * (NSS-1)
1400 	 * Currently, MAX_NSS=2. For NSS>1, MCS index = 8 * (NSS-1)
1401 	if ((pkt_type == DOT11_N) && (nss == 2))
1402 		rate_mcs += 8;
1403 
1404 	rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
1405 				(bw << 24);
1406 
1407 	qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);
1408 
1409 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
1410 		FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x, snr %x"),
1411 						rx_info->rs_flags,
1412 						rx_info->rs_rssi,
1413 						rx_info->rs_channel,
1414 						rx_info->rs_ratephy1,
1415 						rx_info->rs_keyix,
1416 						rx_info->rs_snr);
1417 
1418 }
1419 
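/*
 * Applies the vdev mesh_rx_filter based on the frame's To-DS/From-DS bits
 * and its RA/TA addresses. Note the inverted convention: returning
 * QDF_STATUS_SUCCESS means the frame matched a filter and should be
 * dropped, while QDF_STATUS_E_FAILURE lets it through.
 */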
1420 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1421 					uint8_t *rx_tlv_hdr)
1422 {
1423 	union dp_align_mac_addr mac_addr;
1424 	struct dp_soc *soc = vdev->pdev->soc;
1425 
1426 	if (qdf_unlikely(vdev->mesh_rx_filter)) {
1427 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
1428 			if (hal_rx_mpdu_get_fr_ds(soc->hal_soc,
1429 						  rx_tlv_hdr))
1430 				return  QDF_STATUS_SUCCESS;
1431 
1432 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
1433 			if (hal_rx_mpdu_get_to_ds(soc->hal_soc,
1434 						  rx_tlv_hdr))
1435 				return  QDF_STATUS_SUCCESS;
1436 
1437 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
1438 			if (!hal_rx_mpdu_get_fr_ds(soc->hal_soc,
1439 						   rx_tlv_hdr) &&
1440 			    !hal_rx_mpdu_get_to_ds(soc->hal_soc,
1441 						   rx_tlv_hdr))
1442 				return  QDF_STATUS_SUCCESS;
1443 
1444 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
1445 			if (hal_rx_mpdu_get_addr1(soc->hal_soc,
1446 						  rx_tlv_hdr,
1447 					&mac_addr.raw[0]))
1448 				return QDF_STATUS_E_FAILURE;
1449 
1450 			if (!qdf_mem_cmp(&mac_addr.raw[0],
1451 					&vdev->mac_addr.raw[0],
1452 					QDF_MAC_ADDR_SIZE))
1453 				return  QDF_STATUS_SUCCESS;
1454 		}
1455 
1456 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
1457 			if (hal_rx_mpdu_get_addr2(soc->hal_soc,
1458 						  rx_tlv_hdr,
1459 						  &mac_addr.raw[0]))
1460 				return QDF_STATUS_E_FAILURE;
1461 
1462 			if (!qdf_mem_cmp(&mac_addr.raw[0],
1463 					&vdev->mac_addr.raw[0],
1464 					QDF_MAC_ADDR_SIZE))
1465 				return  QDF_STATUS_SUCCESS;
1466 		}
1467 	}
1468 
1469 	return QDF_STATUS_E_FAILURE;
1470 }
1471 
1472 #else
1473 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1474 				uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer)
1475 {
1476 }
1477 
1478 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1479 					uint8_t *rx_tlv_hdr)
1480 {
1481 	return QDF_STATUS_E_FAILURE;
1482 }
1483 
1484 #endif
1485 
1486 #ifdef RX_PEER_INVALID_ENH
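/*
 * Handles RAW-decap data frames received for an unknown peer: the owning
 * vdev is located by matching addr1 against the vdev MAC addresses (brute
 * force across all pdevs when the common DMAC rx buffer ring is in use),
 * the rx_invalid_peer ops callback is invoked so the control path can
 * react, and the mpdu chain is always freed before returning.
 */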
1487 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
1488 				   uint8_t mac_id)
1489 {
1490 	struct dp_invalid_peer_msg msg;
1491 	struct dp_vdev *vdev = NULL;
1492 	struct dp_pdev *pdev = NULL;
1493 	struct ieee80211_frame *wh;
1494 	qdf_nbuf_t curr_nbuf, next_nbuf;
1495 	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
1496 	uint8_t *rx_pkt_hdr = NULL;
1497 	int i = 0;
1498 	uint32_t nbuf_len;
1499 
1500 	if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) {
1501 		dp_rx_debug("%pK: Drop decapped frames", soc);
1502 		goto free;
1503 	}
1504 
1505 	/* In a RAW packet, the 802.11 header is part of the data */
1506 	rx_pkt_hdr = rx_tlv_hdr + soc->rx_pkt_tlv_size;
1507 	wh = (struct ieee80211_frame *)rx_pkt_hdr;
1508 
1509 	if (!DP_FRAME_IS_DATA(wh)) {
1510 		dp_rx_debug("%pK: NAWDS valid only for data frames", soc);
1511 		goto free;
1512 	}
1513 
1514 	nbuf_len = qdf_nbuf_len(mpdu);
1515 	if (nbuf_len < sizeof(struct ieee80211_frame)) {
1516 		dp_rx_err("%pK: Invalid nbuf length: %u", soc, nbuf_len);
1517 		goto free;
1518 	}
1519 
1520 	/* In the DMAC case the rx_desc_pools are common across PDEVs,
1521 	 * so the PDEV cannot be derived from the pool_id.
1522 	 *
1523 	 * link_id needs to be derived from the TLV tag word, which is
1524 	 * disabled by default. For now add a WAR that finds the vdev by
1525 	 * brute force; this needs to be fixed once word-based
1526 	 * subscription support is added by enabling the TLV tag word.
1527 	 */
1528 	if (soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
1529 		for (i = 0; i < MAX_PDEV_CNT; i++) {
1530 			pdev = soc->pdev_list[i];
1531 
1532 			if (!pdev || qdf_unlikely(pdev->is_pdev_down))
1533 				continue;
1534 
1535 			TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
1536 				if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
1537 						QDF_MAC_ADDR_SIZE) == 0) {
1538 					goto out;
1539 				}
1540 			}
1541 		}
1542 	} else {
1543 		pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1544 
1545 		if (!pdev || qdf_unlikely(pdev->is_pdev_down)) {
1546 			dp_rx_err("%pK: PDEV %s",
1547 				  soc, !pdev ? "not found" : "down");
1548 			goto free;
1549 		}
1550 
1551 		if (dp_monitor_filter_neighbour_peer(pdev, rx_pkt_hdr) ==
1552 		    QDF_STATUS_SUCCESS)
1553 			return 0;
1554 
1555 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
1556 			if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
1557 					QDF_MAC_ADDR_SIZE) == 0) {
1558 				goto out;
1559 			}
1560 		}
1561 	}
1562 
1563 	if (!vdev) {
1564 		dp_rx_err("%pK: VDEV not found", soc);
1565 		goto free;
1566 	}
1567 out:
1568 	msg.wh = wh;
1569 	qdf_nbuf_pull_head(mpdu, soc->rx_pkt_tlv_size);
1570 	msg.nbuf = mpdu;
1571 	msg.vdev_id = vdev->vdev_id;
1572 
1573 	/*
1574 	 * NOTE: Only valid for HKv1.
1575 	 * If smart monitor mode is enabled on the RE, we receive invalid
1576 	 * peer frames with the RA set to the STA mac of the RE and a TA
1577 	 * matching neither any NAC list nor the BSSID. Such frames need
1578 	 * to be dropped in order to avoid false HM_WDS additions.
1579 	 */
1580 	if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer) {
1581 		if (dp_monitor_drop_inv_peer_pkts(vdev) == QDF_STATUS_SUCCESS) {
1582 			dp_rx_warn("%pK: Drop inv peer pkts with STA RA:%pm",
1583 				   soc, wh->i_addr1);
1584 			goto free;
1585 		}
1586 		pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(
1587 				(struct cdp_ctrl_objmgr_psoc *)soc->ctrl_psoc,
1588 				pdev->pdev_id, &msg);
1589 	}
1590 
1591 free:
1592 	/* Drop and free packet */
1593 	curr_nbuf = mpdu;
1594 	while (curr_nbuf) {
1595 		next_nbuf = qdf_nbuf_next(curr_nbuf);
1596 		dp_rx_nbuf_free(curr_nbuf);
1597 		curr_nbuf = next_nbuf;
1598 	}
1599 
1600 	return 0;
1601 }
1602 
1603 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
1604 					qdf_nbuf_t mpdu, bool mpdu_done,
1605 					uint8_t mac_id)
1606 {
1607 	/* Only trigger the processing when the mpdu is complete */
1608 	if (mpdu_done)
1609 		dp_rx_process_invalid_peer(soc, mpdu, mac_id);
1610 }
1611 #else
1612 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
1613 				   uint8_t mac_id)
1614 {
1615 	qdf_nbuf_t curr_nbuf, next_nbuf;
1616 	struct dp_pdev *pdev;
1617 	struct dp_vdev *vdev = NULL;
1618 	struct ieee80211_frame *wh;
1619 	struct dp_peer *peer = NULL;
1620 	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
1621 	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);
1622 	uint32_t nbuf_len;
1623 
1624 	wh = (struct ieee80211_frame *)rx_pkt_hdr;
1625 
1626 	if (!DP_FRAME_IS_DATA(wh)) {
1627 		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
1628 				   "only for data frames");
1629 		goto free;
1630 	}
1631 
1632 	nbuf_len = qdf_nbuf_len(mpdu);
1633 	if (nbuf_len < sizeof(struct ieee80211_frame)) {
1634 		dp_rx_info_rl("%pK: Invalid nbuf length: %u", soc, nbuf_len);
1635 		goto free;
1636 	}
1637 
1638 	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1639 	if (!pdev) {
1640 		dp_rx_info_rl("%pK: PDEV not found", soc);
1641 		goto free;
1642 	}
1643 
1644 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
1645 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
1646 		if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
1647 				QDF_MAC_ADDR_SIZE) == 0) {
1648 			qdf_spin_unlock_bh(&pdev->vdev_list_lock);
1649 			goto out;
1650 		}
1651 	}
1652 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
1653 
1654 	if (!vdev) {
1655 		dp_rx_info_rl("%pK: VDEV not found", soc);
1656 		goto free;
1657 	}
1658 
1659 out:
1660 	if (vdev->opmode == wlan_op_mode_ap) {
1661 		peer = dp_peer_find_hash_find(soc, wh->i_addr2, 0,
1662 					      vdev->vdev_id,
1663 					      DP_MOD_ID_RX_ERR);
1664 		/* If SA is a valid peer in vdev,
1665 		 * don't send disconnect
1666 		 */
1667 		if (peer) {
1668 			dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
1669 			DP_STATS_INC(soc, rx.err.decrypt_err_drop, 1);
1670 			dp_err_rl("invalid peer frame with correct SA/RA is freed");
1671 			goto free;
1672 		}
1673 	}
1674 
1675 	if (soc->cdp_soc.ol_ops->rx_invalid_peer)
1676 		soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh);
1677 free:
1678 
1679 	/* Drop and free packet */
1680 	curr_nbuf = mpdu;
1681 	while (curr_nbuf) {
1682 		next_nbuf = qdf_nbuf_next(curr_nbuf);
1683 		dp_rx_nbuf_free(curr_nbuf);
1684 		curr_nbuf = next_nbuf;
1685 	}
1686 
1687 	/* Reset the head and tail pointers */
1688 	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1689 	if (pdev) {
1690 		pdev->invalid_peer_head_msdu = NULL;
1691 		pdev->invalid_peer_tail_msdu = NULL;
1692 	}
1693 
1694 	return 0;
1695 }
1696 
1697 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
1698 					qdf_nbuf_t mpdu, bool mpdu_done,
1699 					uint8_t mac_id)
1700 {
1701 	/* Process the nbuf */
1702 	dp_rx_process_invalid_peer(soc, mpdu, mac_id);
1703 }
1704 #endif
1705 
1706 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1707 
1708 #ifdef RECEIVE_OFFLOAD
1709 /**
1710  * dp_rx_print_offload_info() - Print offload info from RX TLV
1711  * @soc: dp soc handle
1712  * @msdu: MSDU for which the offload info is to be printed
1713  * @ofl_info: offload info saved in hal_offload_info structure
1714  *
1715  * Return: None
1716  */
1717 static void dp_rx_print_offload_info(struct dp_soc *soc,
1718 				     qdf_nbuf_t msdu,
1719 				     struct hal_offload_info *ofl_info)
1720 {
1721 	dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------");
1722 	dp_verbose_debug("lro_eligible 0x%x",
1723 			 QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu));
1724 	dp_verbose_debug("pure_ack 0x%x", QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu));
1725 	dp_verbose_debug("chksum 0x%x", QDF_NBUF_CB_RX_TCP_CHKSUM(msdu));
1726 	dp_verbose_debug("TCP seq num 0x%x", ofl_info->tcp_seq_num);
1727 	dp_verbose_debug("TCP ack num 0x%x", ofl_info->tcp_ack_num);
1728 	dp_verbose_debug("TCP window 0x%x", QDF_NBUF_CB_RX_TCP_WIN(msdu));
1729 	dp_verbose_debug("TCP protocol 0x%x", QDF_NBUF_CB_RX_TCP_PROTO(msdu));
1730 	dp_verbose_debug("TCP offset 0x%x", QDF_NBUF_CB_RX_TCP_OFFSET(msdu));
1731 	dp_verbose_debug("toeplitz 0x%x", QDF_NBUF_CB_RX_FLOW_ID(msdu));
1732 	dp_verbose_debug("---------------------------------------------------------");
1733 }
1734 
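/*
 * Copies the HW-parsed TCP/LRO hints from the RX TLVs into the nbuf
 * control block for the GRO layer; this is a no-op when GRO is disabled
 * in the wlan cfg context.
 */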
1735 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
1736 			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
1737 {
1738 	struct hal_offload_info offload_info;
1739 
1740 	if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx))
1741 		return;
1742 
1743 	if (hal_rx_tlv_get_offload_info(soc->hal_soc, rx_tlv, &offload_info))
1744 		return;
1745 
1746 	*rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1;
1747 
1748 	QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = offload_info.lro_eligible;
1749 	QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) = offload_info.tcp_pure_ack;
1750 	QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
1751 			hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
1752 						  rx_tlv);
1753 	QDF_NBUF_CB_RX_TCP_WIN(msdu) = offload_info.tcp_win;
1754 	QDF_NBUF_CB_RX_TCP_PROTO(msdu) = offload_info.tcp_proto;
1755 	QDF_NBUF_CB_RX_IPV6_PROTO(msdu) = offload_info.ipv6_proto;
1756 	QDF_NBUF_CB_RX_TCP_OFFSET(msdu) = offload_info.tcp_offset;
1757 	QDF_NBUF_CB_RX_FLOW_ID(msdu) = offload_info.flow_id;
1758 
1759 	dp_rx_print_offload_info(soc, msdu, &offload_info);
1760 }
1761 #endif /* RECEIVE_OFFLOAD */
1762 
1763 /**
1764  * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
1765  *
1766  * @soc: DP soc handle
1767  * @nbuf: pointer to msdu.
1768  * @mpdu_len: mpdu length
1769  * @l3_pad_len: L3 padding length by HW
1770  *
1771  * Return: returns true if nbuf is last msdu of mpdu else returns false.
1772  */
1773 static inline bool dp_rx_adjust_nbuf_len(struct dp_soc *soc,
1774 					 qdf_nbuf_t nbuf,
1775 					 uint16_t *mpdu_len,
1776 					 uint32_t l3_pad_len)
1777 {
1778 	bool last_nbuf;
1779 	uint32_t pkt_hdr_size;
1780 	uint16_t buf_size;
1781 
1782 	buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx);
1783 
1784 	pkt_hdr_size = soc->rx_pkt_tlv_size + l3_pad_len;
1785 
1786 	if ((*mpdu_len + pkt_hdr_size) > buf_size) {
1787 		qdf_nbuf_set_pktlen(nbuf, buf_size);
1788 		last_nbuf = false;
1789 		*mpdu_len -= (buf_size - pkt_hdr_size);
1790 	} else {
1791 		qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + pkt_hdr_size));
1792 		last_nbuf = true;
1793 		*mpdu_len = 0;
1794 	}
1795 
1796 	return last_nbuf;
1797 }
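/*
 * Worked example (illustrative numbers only): with buf_size = 2048,
 * rx_pkt_tlv_size = 128 and l3_pad_len = 2, a 3000-byte *mpdu_len is
 * consumed as follows. First buffer: 3000 + 130 > 2048, so pktlen is
 * set to 2048 and *mpdu_len becomes 3000 - (2048 - 130) = 1082. Next
 * buffer (l3_pad_len = 0): 1082 + 128 <= 2048, so pktlen is set to
 * 1210, *mpdu_len drops to 0 and last_nbuf is returned as true.
 */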
1798 
1799 /**
1800  * dp_get_l3_hdr_pad_len() - get L3 header padding length.
1801  *
1802  * @soc: DP soc handle
1803  * @nbuf: pointer to msdu.
1804  *
1805  * Return: returns padding length in bytes.
1806  */
1807 static inline uint32_t dp_get_l3_hdr_pad_len(struct dp_soc *soc,
1808 					     qdf_nbuf_t nbuf)
1809 {
1810 	uint32_t l3_hdr_pad = 0;
1811 	uint8_t *rx_tlv_hdr;
1812 	struct hal_rx_msdu_metadata msdu_metadata;
1813 
1814 	while (nbuf) {
1815 		if (!qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
1816 			/* the last buffer of a scattered msdu has continuation bit 0 */
1817 			rx_tlv_hdr = qdf_nbuf_data(nbuf);
1818 			hal_rx_msdu_metadata_get(soc->hal_soc,
1819 						 rx_tlv_hdr,
1820 						 &msdu_metadata);
1821 			l3_hdr_pad = msdu_metadata.l3_hdr_pad;
1822 			break;
1823 		}
1824 		nbuf = nbuf->next;
1825 	}
1826 
1827 	return l3_hdr_pad;
1828 }
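/*
 * Note: the L3 pad is read from the trailing buffer of the chain (the
 * one with the continuation bit cleared), presumably because, as noted
 * in dp_rx_sg_create(), the RX PKT TLV contents may be unreliable for
 * scattered msdus.
 */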
1829 
1830 qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf)
1831 {
1832 	qdf_nbuf_t parent, frag_list, next = NULL;
1833 	uint16_t frag_list_len = 0;
1834 	uint16_t mpdu_len;
1835 	bool last_nbuf;
1836 	uint32_t l3_hdr_pad_offset = 0;
1837 
1838 	/*
1839 	 * Use the msdu len from the REO entry descriptor instead, since
1840 	 * there are cases where the RX PKT TLV is corrupted while the
1841 	 * REO descriptor msdu_len is correct for non-raw RX scatter msdus.
1842 	 */
1843 	mpdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
1844 
1845 	/*
1846 	 * This is the case where the complete msdu fits in a single nbuf:
1847 	 * HW sets both the start and end bits, and we only need to reset
1848 	 * these bits for the RAW mode simulator to decap the pkt.
1849 	 */
1850 	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
1851 					qdf_nbuf_is_rx_chfrag_end(nbuf)) {
1852 		qdf_nbuf_set_pktlen(nbuf, mpdu_len + soc->rx_pkt_tlv_size);
1853 		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
1854 		return nbuf;
1855 	}
1856 
1857 	l3_hdr_pad_offset = dp_get_l3_hdr_pad_len(soc, nbuf);
1858 	/*
1859 	 * This is a case where we have multiple msdus (A-MSDU) spread across
1860 	 * multiple nbufs; here we create a fraglist out of these nbufs.
1861 	 *
1862 	 * The moment we encounter an nbuf with the continuation bit set, we
1863 	 * know for sure we have an MSDU which is spread across multiple
1864 	 * nbufs. We loop through and reap nbufs till we reach the last nbuf.
1865 	 */
1866 	parent = nbuf;
1867 	frag_list = nbuf->next;
1868 	nbuf = nbuf->next;
1869 
1870 	/*
1871 	 * Set the start bit in the first nbuf we encounter with the
1872 	 * continuation bit set. It has the proper mpdu length, as it is the
1873 	 * first msdu of the mpdu; it becomes the parent nbuf and the
1874 	 * subsequent nbufs form the frag_list of the parent nbuf.
1875 	 */
1876 	qdf_nbuf_set_rx_chfrag_start(parent, 1);
1877 	/*
1878 	 * L3 header padding is only needed for the 1st buffer
1879 	 * in a scattered msdu
1880 	 */
1881 	last_nbuf = dp_rx_adjust_nbuf_len(soc, parent, &mpdu_len,
1882 					  l3_hdr_pad_offset);
1883 
1884 	/*
1885 	 * The MSDU cont bit is set but the reported MPDU length can fit
1886 	 * into a single buffer.
1887 	 *
1888 	 * Increment error stats and avoid SG list creation
1889 	 */
1890 	if (last_nbuf) {
1891 		DP_STATS_INC(soc, rx.err.msdu_continuation_err, 1);
1892 		qdf_nbuf_pull_head(parent,
1893 				   soc->rx_pkt_tlv_size + l3_hdr_pad_offset);
1894 		return parent;
1895 	}
1896 
1897 	/*
1898 	 * This is where we set the length of the fragments which are
1899 	 * associated to the parent nbuf. We iterate through the frag_list
1900 	 * till we hit the last_nbuf of the list.
1901 	 */
1902 	do {
1903 		last_nbuf = dp_rx_adjust_nbuf_len(soc, nbuf, &mpdu_len, 0);
1904 		qdf_nbuf_pull_head(nbuf,
1905 				   soc->rx_pkt_tlv_size);
1906 		frag_list_len += qdf_nbuf_len(nbuf);
1907 
1908 		if (last_nbuf) {
1909 			next = nbuf->next;
1910 			nbuf->next = NULL;
1911 			break;
1912 		} else if (qdf_nbuf_is_rx_chfrag_end(nbuf)) {
1913 			dp_err("Invalid packet length");
1914 			qdf_assert_always(0);
1915 		}
1916 		nbuf = nbuf->next;
1917 	} while (!last_nbuf);
1918 
1919 	qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
1920 	qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
1921 	parent->next = next;
1922 
1923 	qdf_nbuf_pull_head(parent,
1924 			   soc->rx_pkt_tlv_size + l3_hdr_pad_offset);
1925 	return parent;
1926 }
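/*
 * Resulting layout (a sketch): the first buffer becomes the parent nbuf
 * with its rx TLVs and L3 pad pulled off, the remaining buffers hang
 * off it as an ext fraglist of total length frag_list_len, and
 * parent->next points at the first nbuf of the following MPDU, if any.
 */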
1927 
1928 #ifdef DP_RX_SG_FRAME_SUPPORT
1929 bool dp_rx_is_sg_supported(void)
1930 {
1931 	return true;
1932 }
1933 #else
1934 bool dp_rx_is_sg_supported(void)
1935 {
1936 	return false;
1937 }
1938 #endif
1939 
1940 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1941 
1942 #ifdef QCA_PEER_EXT_STATS
1943 void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
1944 			     qdf_nbuf_t nbuf)
1945 {
1946 	struct cdp_delay_rx_stats  *rx_delay = &stats->rx_delay;
1947 	uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
1948 
1949 	dp_hist_update_stats(&rx_delay->to_stack_delay, to_stack);
1950 }
1951 #endif /* QCA_PEER_EXT_STATS */
1952 
1953 void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1954 {
1955 	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
1956 	int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get());
1957 	uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
1958 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
1959 	uint32_t interframe_delay =
1960 		(uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp);
1961 	struct cdp_tid_rx_stats *rstats =
1962 		&vdev->pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
1963 
1964 	dp_update_delay_stats(NULL, rstats, to_stack, tid,
1965 			      CDP_DELAY_STATS_REAP_STACK, ring_id, false);
1966 	/*
1967 	 * Update interframe delay stats calculated at deliver_data_ol point.
1968 	 * vdev->prev_rx_deliver_tstamp will be 0 for the 1st frame, so the
1969 	 * interframe delay will not be calculated correctly for the 1st
1970 	 * frame. On the other hand, this avoids an extra per-packet check
1971 	 * of vdev->prev_rx_deliver_tstamp.
1972 	 */
1973 	dp_update_delay_stats(NULL, rstats, interframe_delay, tid,
1974 			      CDP_DELAY_STATS_RX_INTERFRAME, ring_id, false);
1975 	vdev->prev_rx_deliver_tstamp = current_ts;
1976 }
1977 
1978 /**
1979  * dp_rx_drop_nbuf_list() - drop an nbuf list
1980  * @pdev: dp pdev reference
1981  * @buf_list: buffer list to be dropped
1982  *
1983  * Return: int (number of bufs dropped)
1984  */
1985 static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev,
1986 				       qdf_nbuf_t buf_list)
1987 {
1988 	struct cdp_tid_rx_stats *stats = NULL;
1989 	uint8_t tid = 0, ring_id = 0;
1990 	int num_dropped = 0;
1991 	qdf_nbuf_t buf, next_buf;
1992 
1993 	buf = buf_list;
1994 	while (buf) {
1995 		ring_id = QDF_NBUF_CB_RX_CTX_ID(buf);
1996 		next_buf = qdf_nbuf_queue_next(buf);
1997 		tid = qdf_nbuf_get_tid_val(buf);
1998 		if (qdf_likely(pdev)) {
1999 			stats = &pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
2000 			stats->fail_cnt[INVALID_PEER_VDEV]++;
2001 			stats->delivered_to_stack--;
2002 		}
2003 		dp_rx_nbuf_free(buf);
2004 		buf = next_buf;
2005 		num_dropped++;
2006 	}
2007 
2008 	return num_dropped;
2009 }
2010 
2011 #ifdef QCA_SUPPORT_WDS_EXTENDED
2012 /**
2013  * dp_rx_deliver_to_stack_ext() - Deliver to netdev per sta
2014  * @soc: core txrx main context
2015  * @vdev: vdev
2016  * @txrx_peer: txrx peer
2017  * @nbuf_head: skb list head
2018  *
2019  * Return: true if packet is delivered to netdev per STA.
2020  */
2021 bool
2022 dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
2023 			   struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head)
2024 {
2025 	/*
2026 	 * When extended WDS is disabled, frames are sent to AP netdevice.
2027 	 */
2028 	if (qdf_likely(!vdev->wds_ext_enabled))
2029 		return false;
2030 
2031 	/*
2032 	 * There can be 2 cases:
2033 	 * 1. Send frame to parent netdev if it's not for netdev per STA
2034 	 * 2. If frame is meant for netdev per STA:
2035 	 *    a. Send frame to appropriate netdev using registered fp.
2036 	 *    b. If fp is NULL, drop the frames.
2037 	 */
2038 	if (!txrx_peer->wds_ext.init)
2039 		return false;
2040 
2041 	if (txrx_peer->osif_rx)
2042 		txrx_peer->osif_rx(txrx_peer->wds_ext.osif_peer, nbuf_head);
2043 	else
2044 		dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);
2045 
2046 	return true;
2047 }
2048 
2049 #else
2050 static inline bool
2051 dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
2052 			   struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head)
2053 {
2054 	return false;
2055 }
2056 #endif
2057 
2058 #ifdef PEER_CACHE_RX_PKTS
2059 #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
2060 /**
2061  * dp_set_nbuf_band() - Set band in nbuf cb
2062  * @peer: dp_peer
2063  * @nbuf: nbuf
2064  *
2065  * Return: None
2066  */
2067 static inline void
2068 dp_set_nbuf_band(struct dp_peer *peer, qdf_nbuf_t nbuf)
2069 {
2070 	uint8_t link_id = 0;
2071 
2072 	link_id = dp_rx_get_stats_arr_idx_from_link_id(nbuf, peer->txrx_peer);
2073 	dp_rx_set_nbuf_band(nbuf, peer->txrx_peer, link_id);
2074 }
2075 #else
2076 static inline void
2077 dp_set_nbuf_band(struct dp_peer *peer, qdf_nbuf_t nbuf)
2078 {
2079 }
2080 #endif
2081 
2082 void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
2083 {
2084 	struct dp_peer_cached_bufq *bufqi;
2085 	struct dp_rx_cached_buf *cache_buf = NULL;
2086 	ol_txrx_rx_fp data_rx = NULL;
2087 	int num_buff_elem;
2088 	QDF_STATUS status;
2089 
2090 	/*
2091 	 * Flush dp cached frames only for mld peers and legacy peers, as
2092 	 * link peers don't store cached frames
2093 	 */
2094 	if (IS_MLO_DP_LINK_PEER(peer))
2095 		return;
2096 
2097 	if (!peer->txrx_peer) {
2098 		dp_err("txrx_peer NULL!! peer mac_addr("QDF_MAC_ADDR_FMT")",
2099 			QDF_MAC_ADDR_REF(peer->mac_addr.raw));
2100 		return;
2101 	}
2102 
2103 	if (qdf_atomic_inc_return(&peer->txrx_peer->flush_in_progress) > 1) {
2104 		qdf_atomic_dec(&peer->txrx_peer->flush_in_progress);
2105 		return;
2106 	}
2107 
2108 	qdf_spin_lock_bh(&peer->peer_info_lock);
2109 	if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx)
2110 		data_rx = peer->vdev->osif_rx;
2111 	else
2112 		drop = true;
2113 	qdf_spin_unlock_bh(&peer->peer_info_lock);
2114 
2115 	bufqi = &peer->txrx_peer->bufq_info;
2116 
2117 	qdf_spin_lock_bh(&bufqi->bufq_lock);
2118 	qdf_list_remove_front(&bufqi->cached_bufq,
2119 			      (qdf_list_node_t **)&cache_buf);
2120 	while (cache_buf) {
2121 		num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(
2122 								cache_buf->buf);
2123 		bufqi->entries -= num_buff_elem;
2124 		qdf_spin_unlock_bh(&bufqi->bufq_lock);
2125 		if (drop) {
2126 			bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
2127 							      cache_buf->buf);
2128 		} else {
2129 			dp_set_nbuf_band(peer, cache_buf->buf);
2130 			/* Flush the cached frames to OSIF DEV */
2131 			status = data_rx(peer->vdev->osif_vdev, cache_buf->buf);
2132 			if (status != QDF_STATUS_SUCCESS)
2133 				bufqi->dropped = dp_rx_drop_nbuf_list(
2134 							peer->vdev->pdev,
2135 							cache_buf->buf);
2136 		}
2137 		qdf_mem_free(cache_buf);
2138 		cache_buf = NULL;
2139 		qdf_spin_lock_bh(&bufqi->bufq_lock);
2140 		qdf_list_remove_front(&bufqi->cached_bufq,
2141 				      (qdf_list_node_t **)&cache_buf);
2142 	}
2143 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
2144 	qdf_atomic_dec(&peer->txrx_peer->flush_in_progress);
2145 }
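/*
 * Note: bufq_lock is dropped around each delivery above, presumably so
 * that the OSIF rx callback is never invoked with the spinlock held;
 * the queue head is therefore re-read under the lock on each iteration.
 */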
2146 
2147 /**
2148  * dp_rx_enqueue_rx() - cache rx frames
2149  * @peer: peer
2150  * @txrx_peer: DP txrx_peer
2151  * @rx_buf_list: cache buffer list
2152  *
2153  * Return: None
2154  */
2155 static QDF_STATUS
2156 dp_rx_enqueue_rx(struct dp_peer *peer,
2157 		 struct dp_txrx_peer *txrx_peer,
2158 		 qdf_nbuf_t rx_buf_list)
2159 {
2160 	struct dp_rx_cached_buf *cache_buf;
2161 	struct dp_peer_cached_bufq *bufqi = &txrx_peer->bufq_info;
2162 	int num_buff_elem;
2163 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
2164 	struct dp_soc *soc = txrx_peer->vdev->pdev->soc;
2165 	struct dp_peer *ta_peer = NULL;
2166 
2167 	/*
2168 	 * If the peer id is invalid, which likely means peer map has not
2169 	 * completed, the caller must provide the dp_peer pointer; else it's
2170 	 * ok to use txrx_peer->peer_id to get the dp_peer.
2171 	 */
2172 	if (peer) {
2173 		if (QDF_STATUS_SUCCESS ==
2174 		    dp_peer_get_ref(soc, peer, DP_MOD_ID_RX))
2175 			ta_peer = peer;
2176 	} else {
2177 		ta_peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id,
2178 						DP_MOD_ID_RX);
2179 	}
2180 
2181 	if (!ta_peer) {
2182 		bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
2183 						      rx_buf_list);
2184 		return QDF_STATUS_E_INVAL;
2185 	}
2186 
2187 	dp_debug_rl("bufq->curr %d bufq->drops %d", bufqi->entries,
2188 		    bufqi->dropped);
2189 	if (!ta_peer->valid) {
2190 		bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
2191 						      rx_buf_list);
2192 		ret = QDF_STATUS_E_INVAL;
2193 		goto fail;
2194 	}
2195 
2196 	qdf_spin_lock_bh(&bufqi->bufq_lock);
2197 	if (bufqi->entries >= bufqi->thresh) {
2198 		bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
2199 						      rx_buf_list);
2200 		qdf_spin_unlock_bh(&bufqi->bufq_lock);
2201 		ret = QDF_STATUS_E_RESOURCES;
2202 		goto fail;
2203 	}
2204 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
2205 
2206 	num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list);
2207 
2208 	cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf));
2209 	if (!cache_buf) {
2210 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2211 			  "Failed to allocate buf to cache rx frames");
2212 		bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
2213 						      rx_buf_list);
2214 		ret = QDF_STATUS_E_NOMEM;
2215 		goto fail;
2216 	}
2217 
2218 	cache_buf->buf = rx_buf_list;
2219 
2220 	qdf_spin_lock_bh(&bufqi->bufq_lock);
2221 	qdf_list_insert_back(&bufqi->cached_bufq,
2222 			     &cache_buf->node);
2223 	bufqi->entries += num_buff_elem;
2224 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
2225 
2226 fail:
2227 	dp_peer_unref_delete(ta_peer, DP_MOD_ID_RX);
2228 	return ret;
2229 }
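/*
 * Note: the thresh check and the list insert above take bufq_lock
 * separately, so bufqi->entries appears able to transiently overshoot
 * thresh when two contexts enqueue concurrently; the bound is treated
 * as approximate rather than exact.
 */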
2230 
2231 static inline
2232 bool dp_rx_is_peer_cache_bufq_supported(void)
2233 {
2234 	return true;
2235 }
2236 #else
2237 static inline
2238 bool dp_rx_is_peer_cache_bufq_supported(void)
2239 {
2240 	return false;
2241 }
2242 
2243 static inline QDF_STATUS
2244 dp_rx_enqueue_rx(struct dp_peer *peer,
2245 		 struct dp_txrx_peer *txrx_peer,
2246 		 qdf_nbuf_t rx_buf_list)
2247 {
2248 	return QDF_STATUS_SUCCESS;
2249 }
2250 #endif
2251 
2252 #ifndef DELIVERY_TO_STACK_STATUS_CHECK
2253 /**
2254  * dp_rx_check_delivery_to_stack() - Deliver pkts to network
2255  * using the appropriate call back functions.
2256  * @soc: soc
2257  * @vdev: vdev
2258  * @txrx_peer: peer
2259  * @nbuf_head: skb list head
2260  *
2261  * Return: None
2262  */
2263 static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
2264 					  struct dp_vdev *vdev,
2265 					  struct dp_txrx_peer *txrx_peer,
2266 					  qdf_nbuf_t nbuf_head)
2267 {
2268 	if (qdf_unlikely(dp_rx_deliver_to_stack_ext(soc, vdev,
2269 						    txrx_peer, nbuf_head)))
2270 		return;
2271 
2272 	/* Function pointer initialized only when FISA is enabled */
2273 	if (vdev->osif_fisa_rx)
2274 		/* on failure send it via regular path */
2275 		vdev->osif_fisa_rx(soc, vdev, nbuf_head);
2276 	else
2277 		vdev->osif_rx(vdev->osif_vdev, nbuf_head);
2278 }
2279 
2280 #else
2281 /**
2282  * dp_rx_check_delivery_to_stack() - Deliver pkts to network
2283  * using the appropriate call back functions.
2284  * @soc: soc
2285  * @vdev: vdev
2286  * @txrx_peer: txrx peer
2287  * @nbuf_head: skb list head
2288  *
2289  * Check the return status of the call back function and drop
2290  * the packets if the return status indicates a failure.
2291  *
2292  * Return: None
2293  */
2294 static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
2295 					  struct dp_vdev *vdev,
2296 					  struct dp_txrx_peer *txrx_peer,
2297 					  qdf_nbuf_t nbuf_head)
2298 {
2299 	int num_nbuf = 0;
2300 	QDF_STATUS ret_val = QDF_STATUS_E_FAILURE;
2301 
2302 	/* Function pointer initialized only when FISA is enabled */
2303 	if (vdev->osif_fisa_rx)
2304 		/* on failure send it via regular path */
2305 		ret_val = vdev->osif_fisa_rx(soc, vdev, nbuf_head);
2306 	else if (vdev->osif_rx)
2307 		ret_val = vdev->osif_rx(vdev->osif_vdev, nbuf_head);
2308 
2309 	if (!QDF_IS_STATUS_SUCCESS(ret_val)) {
2310 		num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);
2311 		DP_STATS_INC(soc, rx.err.rejected, num_nbuf);
2312 		if (txrx_peer)
2313 			DP_PEER_STATS_FLAT_DEC(txrx_peer, to_stack.num,
2314 					       num_nbuf);
2315 	}
2316 }
2317 #endif /* ifdef DELIVERY_TO_STACK_STATUS_CHECK */
2318 
2319 /**
2320  * dp_rx_validate_rx_callbacks() - validate rx callbacks
2321  * @soc: DP soc
2322  * @vdev: DP vdev handle
2323  * @txrx_peer: pointer to the txrx peer object
2324  * @nbuf_head: skb list head
2325  *
2326  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
2327  *			QDF_STATUS_E_FAILURE
2328  */
2329 static inline QDF_STATUS
2330 dp_rx_validate_rx_callbacks(struct dp_soc *soc,
2331 			    struct dp_vdev *vdev,
2332 			    struct dp_txrx_peer *txrx_peer,
2333 			    qdf_nbuf_t nbuf_head)
2334 {
2335 	int num_nbuf;
2336 
2337 	if (qdf_unlikely(!vdev || vdev->delete.pending)) {
2338 		num_nbuf = dp_rx_drop_nbuf_list(NULL, nbuf_head);
2339 		/*
2340 		 * This is a special case where vdev is invalid,
2341 		 * so we cannot know the pdev to which this packet
2342 		 * belonged. Hence we update the soc rx error stats.
2343 		 */
2344 		DP_STATS_INC(soc, rx.err.invalid_vdev, num_nbuf);
2345 		return QDF_STATUS_E_FAILURE;
2346 	}
2347 
2348 	/*
2349 	 * It is highly unlikely to have a vdev without a registered rx
2350 	 * callback function; if so, let us free the nbuf_list.
2351 	 */
2352 	if (qdf_unlikely(!vdev->osif_rx)) {
2353 		if (txrx_peer && dp_rx_is_peer_cache_bufq_supported()) {
2354 			dp_rx_enqueue_rx(NULL, txrx_peer, nbuf_head);
2355 		} else {
2356 			num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev,
2357 							nbuf_head);
2358 			DP_PEER_TO_STACK_DECC(txrx_peer, num_nbuf,
2359 					      vdev->pdev->enhanced_stats_en);
2360 		}
2361 		return QDF_STATUS_E_FAILURE;
2362 	}
2363 
2364 	return QDF_STATUS_SUCCESS;
2365 }
2366 
2367 #if defined(WLAN_FEATURE_11BE_MLO) && defined(RAW_PKT_MLD_ADDR_CONVERSION)
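/**
 * dp_rx_raw_pkt_mld_addr_conv() - Convert raw pkt RA/TA to MLD addresses
 * @soc: core txrx main context
 * @vdev: vdev on which the frames were received
 * @txrx_peer: txrx peer
 * @nbuf_head: skb list head
 *
 * For raw 802.11 frames received from an MLD peer, overwrite addr1 with
 * the vdev MLD mac address and addr2 with the MLD peer's mac address,
 * so that per-link addresses are not exposed to the stack. No-op for
 * native wifi decap or non-MLD peers.
 *
 * Return: None
 */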
2368 static void dp_rx_raw_pkt_mld_addr_conv(struct dp_soc *soc,
2369 					struct dp_vdev *vdev,
2370 					struct dp_txrx_peer *txrx_peer,
2371 					qdf_nbuf_t nbuf_head)
2372 {
2373 	qdf_nbuf_t nbuf, next;
2374 	struct dp_peer *peer = NULL;
2375 	struct ieee80211_frame *wh = NULL;
2376 
2377 	if (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)
2378 		return;
2379 
2380 	peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id,
2381 				     DP_MOD_ID_RX);
2382 
2383 	if (!peer)
2384 		return;
2385 
2386 	if (!IS_MLO_DP_MLD_PEER(peer)) {
2387 		dp_peer_unref_delete(peer, DP_MOD_ID_RX);
2388 		return;
2389 	}
2390 
2391 	nbuf = nbuf_head;
2392 	while (nbuf) {
2393 		next = nbuf->next;
2394 		wh = (struct ieee80211_frame *)qdf_nbuf_data(nbuf);
2395 		qdf_mem_copy(wh->i_addr1, vdev->mld_mac_addr.raw,
2396 			     QDF_MAC_ADDR_SIZE);
2397 		qdf_mem_copy(wh->i_addr2, peer->mac_addr.raw,
2398 			     QDF_MAC_ADDR_SIZE);
2399 		nbuf = next;
2400 	}
2401 
2402 	dp_peer_unref_delete(peer, DP_MOD_ID_RX);
2403 }
2404 #else
2405 static inline
2406 void dp_rx_raw_pkt_mld_addr_conv(struct dp_soc *soc,
2407 				 struct dp_vdev *vdev,
2408 				 struct dp_txrx_peer *txrx_peer,
2409 				 qdf_nbuf_t nbuf_head)
2410 { }
2411 #endif
2412 
2413 QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc,
2414 				  struct dp_vdev *vdev,
2415 				  struct dp_txrx_peer *txrx_peer,
2416 				  qdf_nbuf_t nbuf_head,
2417 				  qdf_nbuf_t nbuf_tail)
2418 {
2419 	if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) !=
2420 					QDF_STATUS_SUCCESS)
2421 		return QDF_STATUS_E_FAILURE;
2422 
2423 	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
2424 			(vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
2425 		dp_rx_raw_pkt_mld_addr_conv(soc, vdev, txrx_peer, nbuf_head);
2426 		vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
2427 					 &nbuf_tail);
2428 	}
2429 
2430 	dp_rx_check_delivery_to_stack(soc, vdev, txrx_peer, nbuf_head);
2431 
2432 	return QDF_STATUS_SUCCESS;
2433 }
2434 
2435 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
2436 QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc,
2437 					struct dp_vdev *vdev,
2438 					struct dp_txrx_peer *txrx_peer,
2439 					qdf_nbuf_t nbuf_head,
2440 					qdf_nbuf_t nbuf_tail)
2441 {
2442 	if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) !=
2443 					QDF_STATUS_SUCCESS)
2444 		return QDF_STATUS_E_FAILURE;
2445 
2446 	vdev->osif_rx_eapol(vdev->osif_vdev, nbuf_head);
2447 
2448 	return QDF_STATUS_SUCCESS;
2449 }
2450 #endif
2451 
2452 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2453 #ifdef VDEV_PEER_PROTOCOL_COUNT
2454 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer) \
2455 { \
2456 	qdf_nbuf_t nbuf_local; \
2457 	struct dp_txrx_peer *txrx_peer_local; \
2458 	struct dp_vdev *vdev_local = vdev_hdl; \
2459 	do { \
2460 		if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \
2461 			break; \
2462 		nbuf_local = nbuf; \
2463 		txrx_peer_local = txrx_peer; \
2464 		if (qdf_unlikely(qdf_nbuf_is_frag((nbuf_local)))) \
2465 			break; \
2466 		else if (qdf_unlikely(qdf_nbuf_is_raw_frame((nbuf_local)))) \
2467 			break; \
2468 		dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \
2469 						       (nbuf_local), \
2470 						       (txrx_peer_local), 0, 1); \
2471 	} while (0); \
2472 }
2473 #else
2474 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer)
2475 #endif
2476 
2477 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
2478 /**
2479  * dp_rx_rates_stats_update() - update rate stats
2480  * from rx msdu.
2481  * @soc: datapath soc handle
2482  * @nbuf: received msdu buffer
2483  * @rx_tlv_hdr: rx tlv header
2484  * @txrx_peer: datapath txrx_peer handle
2485  * @sgi: Short Guard Interval
2486  * @mcs: Modulation and Coding Set
2487  * @nss: Number of Spatial Streams
2488  * @bw: BandWidth
2489  * @pkt_type: Corresponds to preamble
2490  * @link_id: Link Id on which packet is received
2491  *
2492  * To record rates precisely, the following factors are considered:
2493  * specific frames (ARP, DHCP, SSDP, etc.) are excluded, and rx
2494  * throughput is affected as little as possible.
2495  *
2496  * Return: void
2497  */
2498 static void
2499 dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
2500 			 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
2501 			 uint32_t sgi, uint32_t mcs,
2502 			 uint32_t nss, uint32_t bw, uint32_t pkt_type,
2503 			 uint8_t link_id)
2504 {
2505 	uint32_t rix;
2506 	uint16_t ratecode;
2507 	uint32_t avg_rx_rate;
2508 	uint32_t ratekbps;
2509 	enum cdp_punctured_modes punc_mode = NO_PUNCTURE;
2510 
2511 	if (soc->high_throughput ||
2512 	    dp_rx_data_is_specific(soc->hal_soc, rx_tlv_hdr, nbuf)) {
2513 		return;
2514 	}
2515 
2516 	DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.rx_rate, mcs, link_id);
2517 
2518 	/* In 11b mode, the nss from the tlv is 0, which is invalid; it should be 1 */
2519 	if (qdf_unlikely(pkt_type == DOT11_B))
2520 		nss = 1;
2521 
2522 	/* here pkt_type corresponds to preamble */
2523 	ratekbps = dp_getrateindex(sgi,
2524 				   mcs,
2525 				   nss - 1,
2526 				   pkt_type,
2527 				   bw,
2528 				   punc_mode,
2529 				   &rix,
2530 				   &ratecode);
2531 	DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.last_rx_rate, ratekbps, link_id);
2532 	avg_rx_rate =
2533 		dp_ath_rate_lpf(
2534 			txrx_peer->stats[link_id].extd_stats.rx.avg_rx_rate,
2535 			ratekbps);
2536 	DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.avg_rx_rate, avg_rx_rate, link_id);
2537 	DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.nss_info, nss, link_id);
2538 	DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.mcs_info, mcs, link_id);
2539 	DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.bw_info, bw, link_id);
2540 	DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.gi_info, sgi, link_id);
2541 	DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.preamble_info, pkt_type, link_id);
2542 }
2543 #else
2544 static inline void
2545 dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
2546 			 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
2547 			 uint32_t sgi, uint32_t mcs,
2548 			 uint32_t nss, uint32_t bw, uint32_t pkt_type,
2549 			 uint8_t link_id)
2550 {
2551 }
2552 #endif /* FEATURE_RX_LINKSPEED_ROAM_TRIGGER */
2553 
2554 #ifndef QCA_ENHANCED_STATS_SUPPORT
2555 /**
2556  * dp_rx_msdu_extd_stats_update(): Update Rx extended path stats for peer
2557  *
2558  * @soc: datapath soc handle
2559  * @nbuf: received msdu buffer
2560  * @rx_tlv_hdr: rx tlv header
2561  * @txrx_peer: datapath txrx_peer handle
2562  * @link_id: link id on which the packet is received
2563  *
2564  * Return: void
2565  */
2566 static inline
2567 void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
2568 				  uint8_t *rx_tlv_hdr,
2569 				  struct dp_txrx_peer *txrx_peer,
2570 				  uint8_t link_id)
2571 {
2572 	bool is_ampdu;
2573 	uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
2574 	uint8_t dst_mcs_idx;
2575 
2576 	/*
2577 	 * TODO - For KIWI this field is present in ring_desc
2578 	 * Try to use ring desc instead of tlv.
2579 	 */
2580 	is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(soc->hal_soc, rx_tlv_hdr);
2581 	DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.ampdu_cnt, 1, is_ampdu, link_id);
2582 	DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.non_ampdu_cnt, 1, !(is_ampdu),
2583 				link_id);
2584 
2585 	sgi = hal_rx_tlv_sgi_get(soc->hal_soc, rx_tlv_hdr);
2586 	mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr);
2587 	tid = qdf_nbuf_get_tid_val(nbuf);
2588 	bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr);
2589 	reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc,
2590 							      rx_tlv_hdr);
2591 	nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
2592 	pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr);
2593 	/* do HW to SW pkt type conversion */
2594 	pkt_type = (pkt_type >= HAL_DOT11_MAX ? DOT11_MAX :
2595 		    hal_2_dp_pkt_type_map[pkt_type]);
2596 
2597 	/*
2598 	 * The MCS index does not start with 0 when NSS>1 in HT mode.
2599 	 * MCS params for optional 20/40MHz, NSS=1~4, EQM(NSS>1):
2600 	 * ------------------------------------------------------
2601 	 *         NSS     |   1   |   2    |    3    |    4
2602 	 * ------------------------------------------------------
2603 	 * MCS index: HT20 | 0 ~ 7 | 8 ~ 15 | 16 ~ 23 | 24 ~ 31
2604 	 * ------------------------------------------------------
2605 	 * MCS index: HT40 | 0 ~ 7 | 8 ~ 15 | 16 ~ 23 | 24 ~ 31
2606 	 * ------------------------------------------------------
2607 	 * Currently MAX_NSS = 2: for NSS > 1 the base MCS index is 8 * (NSS - 1).
2608 	 */
2609 	if ((pkt_type == DOT11_N) && (nss == 2))
2610 		mcs += 8;
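	/* e.g. an HT NSS=2 rate reported by HW as MCS 0..7 is recorded
	 * here as MCS 8..15, matching the HT20/HT40 table above
	 */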
2611 
2612 	DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[mcs], 1,
2613 		      ((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)),
2614 		      link_id);
2615 	DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1,
2616 		      ((mcs >= MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)),
2617 		      link_id);
2618 	DP_PEER_EXTD_STATS_INC(txrx_peer, rx.bw[bw], 1, link_id);
2619 	/*
2620 	 * Only if nss > 0 and pkt_type is 11N/AC/AX,
2621 	 * increment index [nss - 1] in the nss array counter.
2622 	 */
2623 	if (nss > 0 && CDP_IS_PKT_TYPE_SUPPORT_NSS(pkt_type))
2624 		DP_PEER_EXTD_STATS_INC(txrx_peer, rx.nss[nss - 1], 1, link_id);
2625 
2626 	DP_PEER_EXTD_STATS_INC(txrx_peer, rx.sgi_count[sgi], 1, link_id);
2627 	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.mic_err, 1,
2628 				   hal_rx_tlv_mic_err_get(soc->hal_soc,
2629 				   rx_tlv_hdr), link_id);
2630 	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.decrypt_err, 1,
2631 				   hal_rx_tlv_decrypt_err_get(soc->hal_soc,
2632 				   rx_tlv_hdr), link_id);
2633 
2634 	DP_PEER_EXTD_STATS_INC(txrx_peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1,
2635 			       link_id);
2636 	DP_PEER_EXTD_STATS_INC(txrx_peer, rx.reception_type[reception_type], 1,
2637 			       link_id);
2638 
2639 	dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs);
2640 	if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx)
2641 		DP_PEER_EXTD_STATS_INC(txrx_peer,
2642 				       rx.pkt_type[pkt_type].mcs_count[dst_mcs_idx],
2643 				       1, link_id);
2644 
2645 	dp_rx_rates_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
2646 				 sgi, mcs, nss, bw, pkt_type, link_id);
2647 }
2648 #else
2649 static inline
2650 void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
2651 				  uint8_t *rx_tlv_hdr,
2652 				  struct dp_txrx_peer *txrx_peer,
2653 				  uint8_t link_id)
2654 {
2655 }
2656 #endif
2657 
2658 #if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO)
2659 static inline void
2660 dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer,
2661 			       qdf_nbuf_t nbuf, uint8_t link_id)
2662 {
2663 	uint8_t lmac_id = qdf_nbuf_get_lmac_id(nbuf);
2664 
2665 	if (qdf_unlikely(lmac_id >= CDP_MAX_LMACS)) {
2666 		dp_err_rl("Invalid lmac_id: %u vdev_id: %u",
2667 			  lmac_id, QDF_NBUF_CB_RX_VDEV_ID(nbuf));
2668 
2669 		if (qdf_likely(txrx_peer))
2670 			dp_err_rl("peer_id: %u", txrx_peer->peer_id);
2671 
2672 		return;
2673 	}
2674 
2675 	/* only count stats per lmac for MLO connection */
2676 	DP_PEER_PER_PKT_STATS_INCC_PKT(txrx_peer, rx.rx_lmac[lmac_id], 1,
2677 				       QDF_NBUF_CB_RX_PKT_LEN(nbuf),
2678 				       txrx_peer->is_mld_peer, link_id);
2679 }
2680 #else
2681 static inline void
2682 dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer,
2683 			       qdf_nbuf_t nbuf, uint8_t link_id)
2684 {
2685 }
2686 #endif
2687 
2688 void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
2689 			     uint8_t *rx_tlv_hdr,
2690 			     struct dp_txrx_peer *txrx_peer,
2691 			     uint8_t ring_id,
2692 			     struct cdp_tid_rx_stats *tid_stats,
2693 			     uint8_t link_id)
2694 {
2695 	bool is_not_amsdu;
2696 	struct dp_vdev *vdev = txrx_peer->vdev;
2697 	uint8_t enh_flag;
2698 	qdf_ether_header_t *eh;
2699 	uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
2700 
2701 	dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, txrx_peer);
2702 	is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
2703 			qdf_nbuf_is_rx_chfrag_end(nbuf);
2704 	DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.rcvd_reo[ring_id], 1,
2705 				      msdu_len, link_id);
2706 	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.non_amsdu_cnt, 1,
2707 				   is_not_amsdu, link_id);
2708 	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.amsdu_cnt, 1,
2709 				   !is_not_amsdu, link_id);
2710 	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.rx_retries, 1,
2711 				   qdf_nbuf_is_rx_retry_flag(nbuf), link_id);
2712 	dp_peer_update_rx_pkt_per_lmac(txrx_peer, nbuf, link_id);
2713 	tid_stats->msdu_cnt++;
2714 	enh_flag = vdev->pdev->enhanced_stats_en;
2715 	if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) &&
2716 			 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
2717 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
2718 		DP_PEER_MC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag, link_id);
2719 		tid_stats->mcast_msdu_cnt++;
2720 		if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
2721 			DP_PEER_BC_INCC_PKT(txrx_peer, 1, msdu_len,
2722 					    enh_flag, link_id);
2723 			tid_stats->bcast_msdu_cnt++;
2724 		}
2725 	} else {
2726 		DP_PEER_UC_INCC_PKT(txrx_peer, 1, msdu_len,
2727 				    enh_flag, link_id);
2728 	}
2729 
2730 	txrx_peer->stats[link_id].per_pkt_stats.rx.last_rx_ts =
2731 							qdf_system_ticks();
2732 
2733 	dp_rx_msdu_extd_stats_update(soc, nbuf, rx_tlv_hdr,
2734 				     txrx_peer, link_id);
2735 }
2736 
2737 #ifndef WDS_VENDOR_EXTENSION
2738 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
2739 			   struct dp_vdev *vdev,
2740 			   struct dp_txrx_peer *txrx_peer)
2741 {
2742 	return 1;
2743 }
2744 #endif
2745 
2746 #ifdef DP_RX_PKT_NO_PEER_DELIVER
2747 #ifdef DP_RX_UDP_OVER_PEER_ROAM
2748 /**
2749  * dp_rx_is_udp_allowed_over_roam_peer() - check if udp data received
2750  *					   during roaming is allowed
2751  * @vdev: dp_vdev pointer
2752  * @rx_tlv_hdr: rx tlv header
2753  * @nbuf: pkt skb pointer
2754  *
2755  * This function checks whether rx udp data was received from an
2756  * authorised roamed peer before the peer map indication is received
2757  * from FW after roaming. This is needed for VoIP scenarios, in which
2758  * the packet loss expected during roaming must be minimal.
2759  *
2760  * Return: bool
2761  */
2762 static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev,
2763 						uint8_t *rx_tlv_hdr,
2764 						qdf_nbuf_t nbuf)
2765 {
2766 	char *hdr_desc;
2767 	struct ieee80211_frame *wh = NULL;
2768 
2769 	hdr_desc = hal_rx_desc_get_80211_hdr(vdev->pdev->soc->hal_soc,
2770 					     rx_tlv_hdr);
2771 	wh = (struct ieee80211_frame *)hdr_desc;
2772 
2773 	if (vdev->roaming_peer_status ==
2774 	    WLAN_ROAM_PEER_AUTH_STATUS_AUTHENTICATED &&
2775 	    !qdf_mem_cmp(vdev->roaming_peer_mac.raw, wh->i_addr2,
2776 	    QDF_MAC_ADDR_SIZE) && (qdf_nbuf_is_ipv4_udp_pkt(nbuf) ||
2777 	    qdf_nbuf_is_ipv6_udp_pkt(nbuf)))
2778 		return true;
2779 
2780 	return false;
2781 }
2782 #else
2783 static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev,
2784 						uint8_t *rx_tlv_hdr,
2785 						qdf_nbuf_t nbuf)
2786 {
2787 	return false;
2788 }
2789 #endif
2790 
2791 #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
2792 /**
2793  * dp_rx_nbuf_band_set() - set nbuf band.
2794  * @soc: dp soc handle
2795  * @nbuf: nbuf handle
2796  *
2797  * Return: None
2798  */
2799 static inline void
2800 dp_rx_nbuf_band_set(struct dp_soc *soc, qdf_nbuf_t nbuf)
2801 {
2802 	struct qdf_mac_addr *mac_addr;
2803 	struct dp_peer *peer;
2804 	struct dp_txrx_peer *txrx_peer;
2805 
2806 	uint8_t link_id;
2807 
2808 	mac_addr = (struct qdf_mac_addr *)(qdf_nbuf_data(nbuf) +
2809 					   QDF_NBUF_SRC_MAC_OFFSET);
2810 
2811 	peer = dp_mld_peer_find_hash_find(soc, mac_addr->bytes, 0,
2812 					  DP_VDEV_ALL, DP_MOD_ID_RX);
2813 	if (qdf_likely(peer)) {
2814 		txrx_peer = dp_get_txrx_peer(peer);
2815 		if (qdf_likely(txrx_peer)) {
2816 			link_id = QDF_NBUF_CB_RX_LOGICAL_LINK_ID(nbuf);
2817 			qdf_nbuf_rx_set_band(nbuf, txrx_peer->ll_band[link_id]);
2818 		}
2819 		dp_peer_unref_delete(peer, DP_MOD_ID_RX);
2820 	}
2821 }
2822 #else
2823 static inline void
2824 dp_rx_nbuf_band_set(struct dp_soc *soc, qdf_nbuf_t nbuf)
2825 {
2826 }
2827 #endif
2828 
2829 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
2830 {
2831 	uint16_t peer_id;
2832 	uint8_t vdev_id;
2833 	struct dp_vdev *vdev = NULL;
2834 	uint32_t l2_hdr_offset = 0;
2835 	uint16_t msdu_len = 0;
2836 	uint32_t pkt_len = 0;
2837 	uint8_t *rx_tlv_hdr;
2838 	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
2839 			      FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP |
2840 			      FRAME_MASK_DNS_QUERY | FRAME_MASK_DNS_RESP;
2841 
2842 	bool is_special_frame = false;
2843 	struct dp_peer *peer = NULL;
2844 
2845 	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
2846 	if (peer_id > soc->max_peer_id)
2847 		goto deliver_fail;
2848 
2849 	vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
2850 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_RX);
2851 	if (!vdev || vdev->delete.pending)
2852 		goto deliver_fail;
2853 
2854 	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf)))
2855 		goto deliver_fail;
2856 
2857 	rx_tlv_hdr = qdf_nbuf_data(nbuf);
2858 	l2_hdr_offset =
2859 		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
2860 
2861 	msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
2862 	pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size;
2863 	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
2864 
2865 	qdf_nbuf_set_pktlen(nbuf, pkt_len);
2866 	qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size + l2_hdr_offset);
2867 
2868 	is_special_frame = dp_rx_is_special_frame(nbuf, frame_mask);
2869 	if (qdf_likely(vdev->osif_rx)) {
2870 		if (is_special_frame ||
2871 		    dp_rx_is_udp_allowed_over_roam_peer(vdev, rx_tlv_hdr,
2872 							nbuf)) {
2873 			dp_rx_nbuf_band_set(soc, nbuf);
2874 			qdf_nbuf_set_exc_frame(nbuf, 1);
2875 			if (QDF_STATUS_SUCCESS !=
2876 			    vdev->osif_rx(vdev->osif_vdev, nbuf))
2877 				goto deliver_fail;
2878 
2879 			DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1);
2880 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
2881 			return;
2882 		}
2883 	} else if (is_special_frame) {
2884 		/*
2885 		 * For an MLO connection the txrx_peer for the link peer does
2886 		 * not exist, so try to store these RX packets in the MLD
2887 		 * peer's txrx_peer bufq until vdev->osif_rx is registered
2888 		 * from CP, and then flush them to the stack.
2889 		 */
2890 		peer = dp_peer_get_tgt_peer_by_id(soc, peer_id,
2891 						  DP_MOD_ID_RX);
2892 		if (!peer)
2893 			goto deliver_fail;
2894 
2895 		/* only check for MLO connection */
2896 		if (IS_MLO_DP_MLD_PEER(peer) && peer->txrx_peer &&
2897 		    dp_rx_is_peer_cache_bufq_supported()) {
2898 			qdf_nbuf_set_exc_frame(nbuf, 1);
2899 
2900 			if (QDF_STATUS_SUCCESS ==
2901 			    dp_rx_enqueue_rx(peer, peer->txrx_peer, nbuf)) {
2902 				DP_STATS_INC(soc,
2903 					     rx.err.pkt_delivered_no_peer,
2904 					     1);
2905 			} else {
2906 				DP_STATS_INC(soc,
2907 					     rx.err.rx_invalid_peer.num,
2908 					     1);
2909 			}
2910 
2911 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
2912 			dp_peer_unref_delete(peer, DP_MOD_ID_RX);
2913 			return;
2914 		}
2915 
2916 		dp_peer_unref_delete(peer, DP_MOD_ID_RX);
2917 	}
2918 
2919 deliver_fail:
2920 	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
2921 			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
2922 	dp_rx_nbuf_free(nbuf);
2923 	if (vdev)
2924 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
2925 }
2926 #else
2927 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
2928 {
2929 	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
2930 			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
2931 	dp_rx_nbuf_free(nbuf);
2932 }
2933 #endif
2934 
2935 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2936 
2937 #ifdef WLAN_SUPPORT_RX_FISA
2938 QDF_STATUS dp_fisa_config(ol_txrx_soc_handle cdp_soc, uint8_t pdev_id,
2939 			  enum cdp_fisa_config_id config_id,
2940 			  union cdp_fisa_config *cfg)
2941 {
2942 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
2943 	struct dp_pdev *pdev;
2944 	QDF_STATUS status;
2945 
2946 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
2947 	if (!pdev) {
2948 		dp_err("pdev is NULL for pdev_id %u", pdev_id);
2949 		return QDF_STATUS_E_INVAL;
2950 	}
2951 
2952 	switch (config_id) {
2953 	case CDP_FISA_HTT_RX_FISA_CFG:
2954 		status = dp_htt_rx_fisa_config(pdev, cfg->fisa_config);
2955 		break;
2956 	case CDP_FISA_HTT_RX_FSE_OP_CFG:
2957 		status = dp_htt_rx_flow_fse_operation(pdev, cfg->fse_op_cmd);
2958 		break;
2959 	case CDP_FISA_HTT_RX_FSE_SETUP_CFG:
2960 		status = dp_htt_rx_flow_fst_setup(pdev, cfg->fse_setup_info);
2961 		break;
2962 	default:
2963 		status = QDF_STATUS_E_INVAL;
2964 	}
2965 
2966 	return status;
2967 }
2968 
2969 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding)
2970 {
2971 	QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
2972 	qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size);
2973 }
2974 #else
2975 void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding)
2976 {
2977 	qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size);
2978 }
2979 #endif
2980 
2981 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2982 
2983 #ifdef DP_RX_DROP_RAW_FRM
2984 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)
2985 {
2986 	if (qdf_nbuf_is_raw_frame(nbuf)) {
2987 		dp_rx_nbuf_free(nbuf);
2988 		return true;
2989 	}
2990 
2991 	return false;
2992 }
2993 #endif
2994 
2995 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
2996 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf)
2997 {
2998 	DP_STATS_INC_PKT(soc, rx.ingress, 1,
2999 			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
3000 }
3001 #endif
3002 
3003 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
3004 void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc,  struct dp_pdev *pdev,
3005 				  uint16_t peer_id, uint32_t is_offload,
3006 				  qdf_nbuf_t netbuf)
3007 {
3008 	if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx))
3009 		dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA, soc, netbuf,
3010 				     peer_id, is_offload, pdev->pdev_id);
3011 }
3012 
3013 void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
3014 					  uint32_t is_offload)
3015 {
3016 	if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx))
3017 		dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA_NO_PEER,
3018 				     soc, nbuf, HTT_INVALID_VDEV,
3019 				     is_offload, 0);
3020 }
3021 #endif
3022 
3023 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
3024 
3025 QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev)
3026 {
3027 	QDF_STATUS ret;
3028 
3029 	if (vdev->osif_rx_flush) {
3030 		ret = vdev->osif_rx_flush(vdev->osif_vdev, vdev->vdev_id);
3031 		if (!QDF_IS_STATUS_SUCCESS(ret)) {
3032 			dp_err("Failed to flush rx pkts for vdev %d",
3033 			       vdev->vdev_id);
3034 			return ret;
3035 		}
3036 	}
3037 
3038 	return QDF_STATUS_SUCCESS;
3039 }
3040 
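/**
 * dp_pdev_nbuf_alloc_and_map() - allocate an rx nbuf and DMA-map it
 * @dp_soc: DP soc handle
 * @nbuf_frag_info_t: nbuf frag info filled with the new buffer and paddr
 * @dp_pdev: DP pdev owning the replenish stats
 * @rx_desc_pool: rx descriptor pool the buffer is sized and aligned for
 * @dp_buf_page_frag_alloc_enable: use page-frag allocation when true
 *
 * Return: QDF_STATUS_SUCCESS on success; an error status on allocation,
 *	   map or paddr-check failure, with replenish stats updated.
 */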
3041 static QDF_STATUS
3042 dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc,
3043 			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
3044 			   struct dp_pdev *dp_pdev,
3045 			   struct rx_desc_pool *rx_desc_pool,
3046 			   bool dp_buf_page_frag_alloc_enable)
3047 {
3048 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
3049 
3050 	if (dp_buf_page_frag_alloc_enable) {
3051 		(nbuf_frag_info_t->virt_addr).nbuf =
3052 			qdf_nbuf_frag_alloc(dp_soc->osdev,
3053 					    rx_desc_pool->buf_size,
3054 					    RX_BUFFER_RESERVATION,
3055 					    rx_desc_pool->buf_alignment, FALSE);
3056 	} else	{
3057 		(nbuf_frag_info_t->virt_addr).nbuf =
3058 			qdf_nbuf_alloc(dp_soc->osdev, rx_desc_pool->buf_size,
3059 				       RX_BUFFER_RESERVATION,
3060 				       rx_desc_pool->buf_alignment, FALSE);
3061 	}
3062 	if (!((nbuf_frag_info_t->virt_addr).nbuf)) {
3063 		dp_err("nbuf alloc failed");
3064 		DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
3065 		return ret;
3066 	}
3067 
3068 	ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev,
3069 					 (nbuf_frag_info_t->virt_addr).nbuf,
3070 					 QDF_DMA_FROM_DEVICE,
3071 					 rx_desc_pool->buf_size);
3072 
3073 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
3074 		qdf_nbuf_free((nbuf_frag_info_t->virt_addr).nbuf);
3075 		dp_err("nbuf map failed");
3076 		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
3077 		return ret;
3078 	}
3079 
3080 	nbuf_frag_info_t->paddr =
3081 		qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);
3082 
3083 	ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
3084 			     &nbuf_frag_info_t->paddr,
3085 			     rx_desc_pool);
3086 	if (ret == QDF_STATUS_E_FAILURE) {
3087 		dp_err("nbuf check x86 failed");
3088 		DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
3089 		return ret;
3090 	}
3091 
3092 	return QDF_STATUS_SUCCESS;
3093 }
3094 
3095 QDF_STATUS
3096 dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
3097 			  struct dp_srng *dp_rxdma_srng,
3098 			  struct rx_desc_pool *rx_desc_pool,
3099 			  uint32_t num_req_buffers)
3100 {
3101 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
3102 	hal_ring_handle_t rxdma_srng = dp_rxdma_srng->hal_srng;
3103 	union dp_rx_desc_list_elem_t *next;
3104 	void *rxdma_ring_entry;
3105 	qdf_dma_addr_t paddr;
3106 	struct dp_rx_nbuf_frag_info *nf_info;
3107 	uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0;
3108 	uint32_t buffer_index, nbuf_ptrs_per_page;
3109 	qdf_nbuf_t nbuf;
3110 	QDF_STATUS ret;
3111 	int page_idx, total_pages;
3112 	union dp_rx_desc_list_elem_t *desc_list = NULL;
3113 	union dp_rx_desc_list_elem_t *tail = NULL;
3114 	int sync_hw_ptr = 1;
3115 	uint32_t num_entries_avail;
3116 	bool dp_buf_page_frag_alloc_enable;
3117 
3118 	if (qdf_unlikely(!dp_pdev)) {
3119 		dp_rx_err("%pK: pdev is null for mac_id = %d",
3120 			  dp_soc, mac_id);
3121 		return QDF_STATUS_E_FAILURE;
3122 	}
3123 
3124 	dp_buf_page_frag_alloc_enable =
3125 	       wlan_cfg_is_dp_buf_page_frag_alloc_enable(dp_soc->wlan_cfg_ctx);
3126 
3127 	if (qdf_unlikely(!rxdma_srng)) {
3128 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
3129 		return QDF_STATUS_E_FAILURE;
3130 	}
3131 
3132 	dp_debug("requested %u RX buffers for driver attach", num_req_buffers);
3133 
3134 	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
3135 	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
3136 						   rxdma_srng,
3137 						   sync_hw_ptr);
3138 	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
3139 
3140 	if (!num_entries_avail) {
3141 		dp_err("Num of available entries is zero, nothing to do");
3142 		return QDF_STATUS_E_NOMEM;
3143 	}
3144 
3145 	if (num_entries_avail < num_req_buffers)
3146 		num_req_buffers = num_entries_avail;
3147 
3148 	nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool,
3149 					    num_req_buffers, &desc_list, &tail);
3150 	if (!nr_descs) {
3151 		dp_err("no free rx_descs in freelist");
3152 		DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
3153 		return QDF_STATUS_E_NOMEM;
3154 	}
3155 
3156 	dp_debug("got %u RX descs for driver attach", nr_descs);
3157 
3158 	/*
3159 	 * Allocate the nbuf pointer array one page at a time: take
3160 	 * as many pointers as fit in one page of memory and iterate
3161 	 * through the total descriptors that need to be allocated,
3162 	 * page by page. The same one-page block of pointers is
3163 	 * reused across iterations to index into the nbufs, rather
3164 	 * than allocating the whole array at once.
3165 	 */
3166 	total_pages = (nr_descs * sizeof(*nf_info)) / DP_BLOCKMEM_SIZE;
3167 
3168 	/*
3169 	 * Add an extra page to store the remainder if any
3170 	 */
3171 	if ((nr_descs * sizeof(*nf_info)) % DP_BLOCKMEM_SIZE)
3172 		total_pages++;
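	/*
	 * Illustration (sizes are hypothetical): with 4096 descriptors,
	 * a 16-byte dp_rx_nbuf_frag_info and a 4096-byte DP_BLOCKMEM_SIZE,
	 * 256 pointers fit per block, so 16 passes over the same nf_info
	 * block are needed with no remainder page.
	 */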
3173 	nf_info = qdf_mem_malloc(DP_BLOCKMEM_SIZE);
3174 	if (!nf_info) {
3175 		dp_err("failed to allocate nbuf array");
3176 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
3177 		QDF_BUG(0);
3178 		return QDF_STATUS_E_NOMEM;
3179 	}
3180 	nbuf_ptrs_per_page = DP_BLOCKMEM_SIZE / sizeof(*nf_info);
3181 
3182 	for (page_idx = 0; page_idx < total_pages; page_idx++) {
3183 		qdf_mem_zero(nf_info, DP_BLOCKMEM_SIZE);
3184 
3185 		for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) {
3186 			 * The last page of buffer pointers may not be fully
3187 			 * used, depending on the number of descriptors. The
3188 			 * check below ensures we allocate only the required
3189 			 * number of descriptors.
3190 			 * required number of descriptors.
3191 			 */
3192 			if (nr_nbuf_total >= nr_descs)
3193 				break;
3194 			/* Flag is set while pdev rx_desc_pool initialization */
3195 			if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
3196 				ret = dp_pdev_frag_alloc_and_map(dp_soc,
3197 						&nf_info[nr_nbuf], dp_pdev,
3198 						rx_desc_pool);
3199 			else
3200 				ret = dp_pdev_nbuf_alloc_and_map(dp_soc,
3201 						&nf_info[nr_nbuf], dp_pdev,
3202 						rx_desc_pool,
3203 						dp_buf_page_frag_alloc_enable);
3204 			if (QDF_IS_STATUS_ERROR(ret))
3205 				break;
3206 
3207 			nr_nbuf_total++;
3208 		}
3209 
3210 		hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
3211 
3212 		for (buffer_index = 0; buffer_index < nr_nbuf; buffer_index++) {
3213 			rxdma_ring_entry =
3214 				hal_srng_src_get_next(dp_soc->hal_soc,
3215 						      rxdma_srng);
3216 			qdf_assert_always(rxdma_ring_entry);
3217 
3218 			next = desc_list->next;
3219 			paddr = nf_info[buffer_index].paddr;
3220 			nbuf = nf_info[buffer_index].virt_addr.nbuf;
3221 
3222 			/* Flag is set while pdev rx_desc_pool initialization */
3223 			if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
3224 				dp_rx_desc_frag_prep(&desc_list->rx_desc,
3225 						     &nf_info[buffer_index]);
3226 			else
3227 				dp_rx_desc_prep(&desc_list->rx_desc,
3228 						&nf_info[buffer_index]);
3229 			desc_list->rx_desc.in_use = 1;
3230 			dp_rx_desc_alloc_dbg_info(&desc_list->rx_desc);
3231 			dp_rx_desc_update_dbg_info(&desc_list->rx_desc,
3232 						   __func__,
3233 						   RX_DESC_REPLENISHED);
3234 
3235 			hal_rxdma_buff_addr_info_set(dp_soc->hal_soc, rxdma_ring_entry, paddr,
3236 						     desc_list->rx_desc.cookie,
3237 						     rx_desc_pool->owner);
3238 
3239 			dp_ipa_handle_rx_buf_smmu_mapping(
3240 						dp_soc, nbuf,
3241 						rx_desc_pool->buf_size, true,
3242 						__func__, __LINE__);
3243 
3244 			dp_audio_smmu_map(dp_soc->osdev,
3245 					  qdf_mem_paddr_from_dmaaddr(dp_soc->osdev,
3246 								     QDF_NBUF_CB_PADDR(nbuf)),
3247 					  QDF_NBUF_CB_PADDR(nbuf),
3248 					  rx_desc_pool->buf_size);
3249 
3250 			desc_list = next;
3251 		}
3252 
3253 		dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id,
3254 					       rxdma_srng, nr_nbuf, nr_nbuf);
3255 		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
3256 	}
3257 
3258 	dp_info("filled %u RX buffers for driver attach", nr_nbuf_total);
3259 	qdf_mem_free(nf_info);
3260 
3261 	if (!nr_nbuf_total) {
3262 		dp_err("No nbuf's allocated");
3263 		QDF_BUG(0);
3264 		return QDF_STATUS_E_RESOURCES;
3265 	}
3266 
3267 	/* No need to count the number of bytes received during replenish.
3268 	 * Therefore set replenish.pkts.bytes to 0.
3269 	 */
3270 	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf, 0);
3271 
3272 	return QDF_STATUS_SUCCESS;
3273 }
3274 
3275 qdf_export_symbol(dp_pdev_rx_buffers_attach);
3276 
3277 #ifdef DP_RX_MON_MEM_FRAG
3278 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
3279 				bool is_mon_dest_desc)
3280 {
3281 	rx_desc_pool->rx_mon_dest_frag_enable = is_mon_dest_desc;
3282 	if (is_mon_dest_desc)
3283 		dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is enabled");
3284 	else
3285 		qdf_frag_cache_drain(&rx_desc_pool->pf_cache);
3286 }
3287 #else
3288 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
3289 				bool is_mon_dest_desc)
3290 {
3291 	rx_desc_pool->rx_mon_dest_frag_enable = false;
3292 	if (is_mon_dest_desc)
3293 		dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is disabled");
3294 }
3295 #endif
3296 
3297 qdf_export_symbol(dp_rx_enable_mon_dest_frag);
3298 
3299 QDF_STATUS
3300 dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev)
3301 {
3302 	struct dp_soc *soc = pdev->soc;
3303 	uint32_t rxdma_entries;
3304 	uint32_t rx_sw_desc_num;
3305 	struct dp_srng *dp_rxdma_srng;
3306 	struct rx_desc_pool *rx_desc_pool;
3307 	uint32_t status = QDF_STATUS_SUCCESS;
3308 	int mac_for_pdev;
3309 
3310 	mac_for_pdev = pdev->lmac_id;
3311 	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
3312 		dp_rx_info("%pK: nss-wifi<4> skip Rx refil %d",
3313 			   soc, mac_for_pdev);
3314 		return status;
3315 	}
3316 
3317 	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
3318 	rxdma_entries = dp_rxdma_srng->num_entries;
3319 
3320 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
3321 	rx_sw_desc_num = wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx);
3322 
3323 	rx_desc_pool->desc_type = QDF_DP_RX_DESC_BUF_TYPE;
3324 	status = dp_rx_desc_pool_alloc(soc,
3325 				       rx_sw_desc_num,
3326 				       rx_desc_pool);
3327 	if (status != QDF_STATUS_SUCCESS)
3328 		return status;
3329 
3330 	return status;
3331 }
3332 
3333 void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev)
3334 {
3335 	int mac_for_pdev = pdev->lmac_id;
3336 	struct dp_soc *soc = pdev->soc;
3337 	struct rx_desc_pool *rx_desc_pool;
3338 
3339 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
3340 
3341 	dp_rx_desc_pool_free(soc, rx_desc_pool);
3342 }
3343 
3344 QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev)
3345 {
3346 	int mac_for_pdev = pdev->lmac_id;
3347 	struct dp_soc *soc = pdev->soc;
3348 	uint32_t rxdma_entries;
3349 	uint32_t rx_sw_desc_num;
3350 	struct dp_srng *dp_rxdma_srng;
3351 	struct rx_desc_pool *rx_desc_pool;
3352 	uint32_t target_type = hal_get_target_type(soc->hal_soc);
3353 	uint16_t buf_size;
3354 
3355 	buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx);
3356 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
3357 
3358 	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
3359 		/*
3360 		 * If NSS is enabled, rx_desc_pool is already filled.
3361 		 * Hence, just disable desc_pool frag flag.
3362 		 */
3363 		dp_rx_enable_mon_dest_frag(rx_desc_pool, false);
3364 
3365 		dp_rx_info("%pK: nss-wifi<4> skip Rx refil %d",
3366 			   soc, mac_for_pdev);
3367 		return QDF_STATUS_SUCCESS;
3368 	}
3369 
3370 	if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_E_NOMEM)
3371 		return QDF_STATUS_E_NOMEM;
3372 
3373 	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
3374 	rxdma_entries = dp_rxdma_srng->num_entries;
3375 
3376 	soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;
3377 
3378 	rx_sw_desc_num =
3379 	wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx);
3380 
3381 	rx_desc_pool->owner = dp_rx_get_rx_bm_id(soc);
3382 	rx_desc_pool->buf_size = buf_size;
3383 	rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;
3384 	/* Disable monitor dest processing via frag */
3385 	if (target_type == TARGET_TYPE_QCN9160) {
3386 		rx_desc_pool->buf_size = RX_MONITOR_BUFFER_SIZE;
3387 		rx_desc_pool->buf_alignment = RX_MONITOR_BUFFER_ALIGNMENT;
3388 		dp_rx_enable_mon_dest_frag(rx_desc_pool, true);
3389 	} else {
3390 		dp_rx_enable_mon_dest_frag(rx_desc_pool, false);
3391 	}
3392 
3393 	dp_rx_desc_pool_init(soc, mac_for_pdev,
3394 			     rx_sw_desc_num, rx_desc_pool);
3395 	return QDF_STATUS_SUCCESS;
3396 }
3397 
3398 void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev)
3399 {
3400 	int mac_for_pdev = pdev->lmac_id;
3401 	struct dp_soc *soc = pdev->soc;
3402 	struct rx_desc_pool *rx_desc_pool;
3403 
3404 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
3405 
3406 	dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_for_pdev);
3407 }
3408 
3409 QDF_STATUS
3410 dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev)
3411 {
3412 	int mac_for_pdev = pdev->lmac_id;
3413 	struct dp_soc *soc = pdev->soc;
3414 	struct dp_srng *dp_rxdma_srng;
3415 	struct rx_desc_pool *rx_desc_pool;
3416 	uint32_t rxdma_entries;
3417 	uint32_t target_type = hal_get_target_type(soc->hal_soc);
3418 
3419 	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
3420 	rxdma_entries = dp_rxdma_srng->num_entries;
3421 
3422 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
3423 
3424 	/* Initialize RX buffer pool which will be
3425 	 * used during low memory conditions
3426 	 */
3427 	dp_rx_buffer_pool_init(soc, mac_for_pdev);
3428 
3429 	if (target_type == TARGET_TYPE_QCN9160)
3430 		return dp_pdev_rx_buffers_attach(soc, mac_for_pdev,
3431 						 dp_rxdma_srng,
3432 						 rx_desc_pool,
3433 						 rxdma_entries - 1);
3434 	else
3435 		return dp_pdev_rx_buffers_attach_simple(soc, mac_for_pdev,
3436 							dp_rxdma_srng,
3437 							rx_desc_pool,
3438 							rxdma_entries - 1);
3439 }
3440 
3441 void
3442 dp_rx_pdev_buffers_free(struct dp_pdev *pdev)
3443 {
3444 	int mac_for_pdev = pdev->lmac_id;
3445 	struct dp_soc *soc = pdev->soc;
3446 	struct rx_desc_pool *rx_desc_pool;
3447 	uint32_t target_type = hal_get_target_type(soc->hal_soc);
3448 
3449 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
3450 
3451 	if (target_type == TARGET_TYPE_QCN9160)
3452 		dp_rx_desc_frag_free(soc, rx_desc_pool);
3453 	else
3454 		dp_rx_desc_nbuf_free(soc, rx_desc_pool, false);
3455 
3456 	dp_rx_buffer_pool_deinit(soc, mac_for_pdev);
3457 }
3458 
3459 #ifdef DP_RX_SPECIAL_FRAME_NEED
3460 bool dp_rx_deliver_special_frame(struct dp_soc *soc,
3461 				 struct dp_txrx_peer *txrx_peer,
3462 				 qdf_nbuf_t nbuf, uint32_t frame_mask,
3463 				 uint8_t *rx_tlv_hdr)
3464 {
3465 	uint32_t l2_hdr_offset = 0;
3466 	uint16_t msdu_len = 0;
3467 	uint32_t skip_len;
3468 
3469 	l2_hdr_offset =
3470 		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
3471 
3472 	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
3473 		skip_len = l2_hdr_offset;
3474 	} else {
3475 		msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
3476 		skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size;
3477 		qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);
3478 	}
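	/*
	 * For example (illustrative sizes): with l2_hdr_offset = 2 and
	 * rx_pkt_tlv_size = 128, a non-fragmented frame has 130 bytes
	 * pulled off below, while a fragment skips only the 2 pad bytes,
	 * presumably because its rx TLVs were already stripped during
	 * reassembly.
	 */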
3479 
3480 	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
3481 	dp_rx_set_hdr_pad(nbuf, l2_hdr_offset);
3482 	qdf_nbuf_pull_head(nbuf, skip_len);
3483 
3484 	if (txrx_peer->vdev) {
3485 		dp_rx_send_pktlog(soc, txrx_peer->vdev->pdev, nbuf,
3486 				  QDF_TX_RX_STATUS_OK);
3487 	}
3488 
3489 	if (dp_rx_is_special_frame(nbuf, frame_mask)) {
3490 		dp_info("special frame, mpdu sn 0x%x",
3491 			hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr));
3492 		qdf_nbuf_set_exc_frame(nbuf, 1);
3493 		dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer,
3494 				       nbuf, NULL);
3495 		return true;
3496 	}
3497 
3498 	return false;
3499 }
3500 #endif
3501 
3502 #ifdef QCA_MULTIPASS_SUPPORT
3503 bool dp_rx_multipass_process(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf,
3504 			     uint8_t tid)
3505 {
3506 	struct vlan_ethhdr *vethhdrp;
3507 
3508 	if (qdf_unlikely(!txrx_peer->vlan_id))
3509 		return true;
3510 
3511 	vethhdrp = (struct vlan_ethhdr *)qdf_nbuf_data(nbuf);
3512 	/*
3513 	 * h_vlan_proto and h_vlan_TCI should be 0x8100 and zero
3514 	 * respectively, as the frame is expected to be padded with 0.
3515 	 * Return false if the frame doesn't carry this tag, so that the
3516 	 * caller will drop the frame.
3517 	 */
3518 	if (qdf_unlikely(vethhdrp->h_vlan_proto != htons(QDF_ETH_TYPE_8021Q)) ||
3519 	    qdf_unlikely(vethhdrp->h_vlan_TCI != 0))
3520 		return false;
3521 
3522 	vethhdrp->h_vlan_TCI = htons(((tid & 0x7) << VLAN_PRIO_SHIFT) |
3523 		(txrx_peer->vlan_id & VLAN_VID_MASK));
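	/* Worked example: with the standard VLAN_PRIO_SHIFT of 13,
	 * tid = 5 and vlan_id = 100 yield a TCI of (5 << 13) | 100,
	 * i.e. 0xA064: priority 5, DEI 0, VLAN 100
	 */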
3524 
3525 	if (vethhdrp->h_vlan_encapsulated_proto == htons(ETHERTYPE_PAE))
3526 		dp_tx_remove_vlan_tag(txrx_peer->vdev, nbuf);
3527 
3528 	return true;
3529 }
3530 #endif /* QCA_MULTIPASS_SUPPORT */
3531