xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.c (revision 97f44cd39e4ff816eaa1710279d28cf6b9e65ad9)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "hal_hw_headers.h"
20 #include "dp_types.h"
21 #include "dp_rx.h"
22 #include "dp_peer.h"
23 #include "hal_rx.h"
24 #include "hal_api.h"
25 #include "qdf_nbuf.h"
26 #ifdef MESH_MODE_SUPPORT
27 #include "if_meta_hdr.h"
28 #endif
29 #include "dp_internal.h"
30 #include "dp_rx_mon.h"
31 #include "dp_ipa.h"
32 #ifdef FEATURE_WDS
33 #include "dp_txrx_wds.h"
34 #endif
35 #include "dp_hist.h"
36 #include "dp_rx_buffer_pool.h"
37 
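/*
 * DP_RX_TID_SAVE - save the rx TID in the nbuf priority field when
 * ATH_RX_PRI_SAVE is enabled; compiles to a no-op otherwise.
 */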
38 #ifdef ATH_RX_PRI_SAVE
39 #define DP_RX_TID_SAVE(_nbuf, _tid) \
40 	(qdf_nbuf_set_priority(_nbuf, _tid))
41 #else
42 #define DP_RX_TID_SAVE(_nbuf, _tid)
43 #endif
44 
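/**
 * dp_rx_check_ndi_mdns_fwding() - check whether an mDNS packet received on
 *				   an NDI vdev may be intra-BSS forwarded
 * @ta_peer: transmitter (source) peer
 * @nbuf: received frame
 *
 * When DP_RX_DISABLE_NDI_MDNS_FORWARDING is defined, IPv6 mDNS packets
 * received on an NDI vdev are not forwarded and the mdns_no_fwd stat is
 * incremented; otherwise forwarding is always allowed.
 *
 * Return: false to skip forwarding, true otherwise
 */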
45 #ifdef DP_RX_DISABLE_NDI_MDNS_FORWARDING
46 static inline
47 bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
48 {
49 	if (ta_peer->vdev->opmode == wlan_op_mode_ndi &&
50 	    qdf_nbuf_is_ipv6_mdns_pkt(nbuf)) {
51 		DP_STATS_INC(ta_peer, rx.intra_bss.mdns_no_fwd, 1);
52 		return false;
53 	}
54 	return true;
55 }
56 #else
57 static inline
58 bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
59 {
60 	return true;
61 }
62 #endif
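/**
 * dp_rx_check_ap_bridge() - check if AP bridging (intra-BSS forwarding) is
 *			     enabled on the vdev
 * @vdev: DP vdev handle
 *
 * Return: true if ap_bridge_enabled is set for this vdev
 */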
63 static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
64 {
65 	return vdev->ap_bridge_enabled;
66 }
67 
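/**
 * dp_rx_dump_info_and_assert() - dump the ring descriptor and rx descriptor
 *				  that hit an error condition
 * @soc: DP SOC handle
 * @hal_ring_hdl: hal ring handle from which the descriptor was reaped
 * @ring_desc: ring descriptor
 * @rx_desc: rx descriptor
 *
 * With the DUP_RX_DESC_WAR workaround enabled only the descriptors are
 * dumped; otherwise the whole ring is also dumped and the driver asserts.
 *
 * Return: void
 */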
68 #ifdef DUP_RX_DESC_WAR
69 void dp_rx_dump_info_and_assert(struct dp_soc *soc,
70 				hal_ring_handle_t hal_ring,
71 				hal_ring_desc_t ring_desc,
72 				struct dp_rx_desc *rx_desc)
73 {
74 	void *hal_soc = soc->hal_soc;
75 
76 	hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc);
77 	dp_rx_desc_dump(rx_desc);
78 }
79 #else
80 void dp_rx_dump_info_and_assert(struct dp_soc *soc,
81 				hal_ring_handle_t hal_ring_hdl,
82 				hal_ring_desc_t ring_desc,
83 				struct dp_rx_desc *rx_desc)
84 {
85 	hal_soc_handle_t hal_soc = soc->hal_soc;
86 
87 	dp_rx_desc_dump(rx_desc);
88 	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
89 	hal_srng_dump_ring(hal_soc, hal_ring_hdl);
90 	qdf_assert_always(0);
91 }
92 #endif
93 
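/**
 * dp_rx_desc_sanity() - sanity check an rx descriptor reaped from the ring
 *			 (effective only when RX_DESC_SANITY_WAR is defined)
 * @soc: DP SOC handle
 * @hal_soc: hal soc handle
 * @hal_ring_hdl: hal ring handle
 * @ring_desc: ring descriptor
 * @rx_desc: rx descriptor looked up from the cookie
 *
 * Verifies that the cookie resolved to a valid rx_desc and that the return
 * buffer manager in the ring descriptor is one of the SW-owned RBMs.
 *
 * Return: QDF_STATUS_SUCCESS if sane, QDF_STATUS_E_NULL_VALUE otherwise
 */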
94 #ifdef RX_DESC_SANITY_WAR
95 static inline
96 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
97 			     hal_ring_handle_t hal_ring_hdl,
98 			     hal_ring_desc_t ring_desc,
99 			     struct dp_rx_desc *rx_desc)
100 {
101 	uint8_t return_buffer_manager;
102 
103 	if (qdf_unlikely(!rx_desc)) {
104 		/*
105 		 * This is an unlikely case where the cookie obtained
106 		 * from the ring_desc is invalid and hence we are not
107 		 * able to find the corresponding rx_desc
108 		 */
109 		goto fail;
110 	}
111 
112 	return_buffer_manager = hal_rx_ret_buf_manager_get(ring_desc);
113 	if (qdf_unlikely(!(return_buffer_manager == HAL_RX_BUF_RBM_SW1_BM ||
114 			 return_buffer_manager == HAL_RX_BUF_RBM_SW3_BM))) {
115 		goto fail;
116 	}
117 
118 	return QDF_STATUS_SUCCESS;
119 
120 fail:
121 	DP_STATS_INC(soc, rx.err.invalid_cookie, 1);
122 	dp_err("Ring Desc:");
123 	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl,
124 				ring_desc);
125 	return QDF_STATUS_E_NULL_VALUE;
126 
127 }
128 #else
129 static inline
130 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
131 			     hal_ring_handle_t hal_ring_hdl,
132 			     hal_ring_desc_t ring_desc,
133 			     struct dp_rx_desc *rx_desc)
134 {
135 	return QDF_STATUS_SUCCESS;
136 }
137 #endif
138 
139 /**
140  * dp_pdev_frag_alloc_and_map() - Allocate frag for desc buffer and map
141  *
142  * @dp_soc: struct dp_soc *
143  * @nbuf_frag_info_t: nbuf frag info
144  * @dp_pdev: struct dp_pdev *
145  * @rx_desc_pool: Rx desc pool
146  *
147  * Return: QDF_STATUS
148  */
149 #ifdef DP_RX_MON_MEM_FRAG
150 static inline QDF_STATUS
151 dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
152 			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
153 			   struct dp_pdev *dp_pdev,
154 			   struct rx_desc_pool *rx_desc_pool)
155 {
156 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
157 
158 	(nbuf_frag_info_t->virt_addr).vaddr =
159 			qdf_frag_alloc(rx_desc_pool->buf_size);
160 
161 	if (!((nbuf_frag_info_t->virt_addr).vaddr)) {
162 		dp_err("Frag alloc failed");
163 		DP_STATS_INC(dp_pdev, replenish.frag_alloc_fail, 1);
164 		return QDF_STATUS_E_NOMEM;
165 	}
166 
167 	ret = qdf_mem_map_page(dp_soc->osdev,
168 			       (nbuf_frag_info_t->virt_addr).vaddr,
169 			       QDF_DMA_FROM_DEVICE,
170 			       rx_desc_pool->buf_size,
171 			       &nbuf_frag_info_t->paddr);
172 
173 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
174 		qdf_frag_free((nbuf_frag_info_t->virt_addr).vaddr);
175 		dp_err("Frag map failed");
176 		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
177 		return QDF_STATUS_E_FAULT;
178 	}
179 
180 	return QDF_STATUS_SUCCESS;
181 }
182 #else
183 static inline QDF_STATUS
184 dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
185 			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
186 			   struct dp_pdev *dp_pdev,
187 			   struct rx_desc_pool *rx_desc_pool)
188 {
189 	return QDF_STATUS_SUCCESS;
190 }
191 #endif /* DP_RX_MON_MEM_FRAG */
192 
193 /**
194  * dp_pdev_nbuf_alloc_and_map_replenish() - Allocate nbuf for desc buffer and map
195  *
196  * @dp_soc: struct dp_soc *
197  * @mac_id: Mac id
198  * @num_entries_avail: number of entries available in the rxdma ring
199  * @nbuf_frag_info_t: nbuf frag info
200  * @dp_pdev: struct dp_pdev *
201  * @rx_desc_pool: Rx desc pool
202  *
203  * Return: QDF_STATUS
204  */
205 static inline QDF_STATUS
206 dp_pdev_nbuf_alloc_and_map_replenish(struct dp_soc *dp_soc,
207 				     uint32_t mac_id,
208 				     uint32_t num_entries_avail,
209 				     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
210 				     struct dp_pdev *dp_pdev,
211 				     struct rx_desc_pool *rx_desc_pool)
212 {
213 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
214 
215 	(nbuf_frag_info_t->virt_addr).nbuf =
216 		dp_rx_buffer_pool_nbuf_alloc(dp_soc,
217 					     mac_id,
218 					     rx_desc_pool,
219 					     num_entries_avail);
220 	if (!((nbuf_frag_info_t->virt_addr).nbuf)) {
221 		dp_err("nbuf alloc failed");
222 		DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
223 		return QDF_STATUS_E_NOMEM;
224 	}
225 
226 	ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev,
227 					 (nbuf_frag_info_t->virt_addr).nbuf,
228 					 QDF_DMA_FROM_DEVICE,
229 					 rx_desc_pool->buf_size);
230 
231 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
232 		dp_rx_buffer_pool_nbuf_free(dp_soc,
233 			(nbuf_frag_info_t->virt_addr).nbuf, mac_id);
234 		dp_err("nbuf map failed");
235 		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
236 		return QDF_STATUS_E_FAULT;
237 	}
238 
239 	nbuf_frag_info_t->paddr =
240 		qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);
241 
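	/* Create the SMMU mapping for this rx buffer when it is shared with IPA */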
242 	dp_ipa_handle_rx_buf_smmu_mapping(dp_soc,
243 			(qdf_nbuf_t)((nbuf_frag_info_t->virt_addr).nbuf),
244 					  rx_desc_pool->buf_size,
245 					  true);
246 
247 	ret = check_x86_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
248 			      &nbuf_frag_info_t->paddr,
249 			      rx_desc_pool);
250 	if (ret == QDF_STATUS_E_FAILURE) {
251 		qdf_nbuf_unmap_nbytes_single(dp_soc->osdev,
252 					     (nbuf_frag_info_t->virt_addr).nbuf,
253 					     QDF_DMA_FROM_DEVICE,
254 					     rx_desc_pool->buf_size);
255 		DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
256 		return QDF_STATUS_E_ADDRNOTAVAIL;
257 	}
258 
259 	return QDF_STATUS_SUCCESS;
260 }
261 
262 /*
263  * __dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
264  *			       called during dp rx initialization
265  *			       and at the end of dp_rx_process.
266  *
267  * @soc: core txrx main context
268  * @mac_id: mac_id which is one of 3 mac_ids
269  * @dp_rxdma_srng: dp rxdma circular ring
270  * @rx_desc_pool: Pointer to free Rx descriptor pool
271  * @num_req_buffers: number of buffers to be replenished
272  * @desc_list: list of descs if called from dp_rx_process
273  *	       or NULL during dp rx initialization or out of buffer
274  *	       interrupt.
275  * @tail: tail of descs list
276  * @func_name: name of the caller function
277  * Return: success or failure
278  */
279 QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
280 				struct dp_srng *dp_rxdma_srng,
281 				struct rx_desc_pool *rx_desc_pool,
282 				uint32_t num_req_buffers,
283 				union dp_rx_desc_list_elem_t **desc_list,
284 				union dp_rx_desc_list_elem_t **tail,
285 				const char *func_name)
286 {
287 	uint32_t num_alloc_desc;
288 	uint16_t num_desc_to_free = 0;
289 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
290 	uint32_t num_entries_avail;
291 	uint32_t count;
292 	int sync_hw_ptr = 1;
293 	struct dp_rx_nbuf_frag_info nbuf_frag_info = {0};
294 	void *rxdma_ring_entry;
295 	union dp_rx_desc_list_elem_t *next;
296 	QDF_STATUS ret;
297 	void *rxdma_srng;
298 
299 	rxdma_srng = dp_rxdma_srng->hal_srng;
300 
301 	if (!rxdma_srng) {
302 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
303 				  "rxdma srng not initialized");
304 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
305 		return QDF_STATUS_E_FAILURE;
306 	}
307 
308 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
309 		"requested %d buffers for replenish", num_req_buffers);
310 
311 	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
312 	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
313 						   rxdma_srng,
314 						   sync_hw_ptr);
315 
316 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
317 		"no of available entries in rxdma ring: %d",
318 		num_entries_avail);
319 
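	/*
	 * If called without a desc_list (dp rx init or out-of-buffer
	 * replenish) and the ring is more than 3/4 empty, top up the entire
	 * available space; otherwise cap the request to what the ring can
	 * hold and note how many descs must go back to the free list.
	 */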
320 	if (!(*desc_list) && (num_entries_avail >
321 		((dp_rxdma_srng->num_entries * 3) / 4))) {
322 		num_req_buffers = num_entries_avail;
323 	} else if (num_entries_avail < num_req_buffers) {
324 		num_desc_to_free = num_req_buffers - num_entries_avail;
325 		num_req_buffers = num_entries_avail;
326 	}
327 
328 	if (qdf_unlikely(!num_req_buffers)) {
329 		num_desc_to_free = num_req_buffers;
330 		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
331 		goto free_descs;
332 	}
333 
334 	/*
335 	 * if desc_list is NULL, allocate the descs from freelist
336 	 */
337 	if (!(*desc_list)) {
338 		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
339 							  rx_desc_pool,
340 							  num_req_buffers,
341 							  desc_list,
342 							  tail);
343 
344 		if (!num_alloc_desc) {
345 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
346 				"no free rx_descs in freelist");
347 			DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
348 					num_req_buffers);
349 			hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
350 			return QDF_STATUS_E_NOMEM;
351 		}
352 
353 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
354 			"%d rx desc allocated", num_alloc_desc);
355 		num_req_buffers = num_alloc_desc;
356 	}
357 
358 
359 	count = 0;
360 
361 	while (count < num_req_buffers) {
362 		/* Flag is set during pdev rx_desc_pool initialization */
363 		if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
364 			ret = dp_pdev_frag_alloc_and_map(dp_soc,
365 							 &nbuf_frag_info,
366 							 dp_pdev,
367 							 rx_desc_pool);
368 		else
369 			ret = dp_pdev_nbuf_alloc_and_map_replenish(dp_soc,
370 								   mac_id,
371 					num_entries_avail, &nbuf_frag_info,
372 					dp_pdev, rx_desc_pool);
373 
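		/*
		 * A map failure (E_FAULT) is retried with a freshly
		 * allocated buffer; any other failure stops the replenish.
		 */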
374 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
375 			if (qdf_unlikely(ret  == QDF_STATUS_E_FAULT))
376 				continue;
377 			break;
378 		}
379 
380 		count++;
381 
382 		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
383 							 rxdma_srng);
384 		qdf_assert_always(rxdma_ring_entry);
385 
386 		next = (*desc_list)->next;
387 
388 		/* Flag is set during pdev rx_desc_pool initialization */
389 		if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
390 			dp_rx_desc_frag_prep(&((*desc_list)->rx_desc),
391 					     &nbuf_frag_info);
392 		else
393 			dp_rx_desc_prep(&((*desc_list)->rx_desc),
394 					&nbuf_frag_info);
395 
396 		/* rx_desc.in_use should be zero at this time*/
397 		qdf_assert_always((*desc_list)->rx_desc.in_use == 0);
398 
399 		(*desc_list)->rx_desc.in_use = 1;
400 		(*desc_list)->rx_desc.in_err_state = 0;
401 		dp_rx_desc_update_dbg_info(&(*desc_list)->rx_desc,
402 					   func_name, RX_DESC_REPLENISHED);
403 		dp_verbose_debug("rx_netbuf=%pK, paddr=0x%llx, cookie=%d",
404 				 nbuf_frag_info.virt_addr.nbuf,
405 				 (unsigned long long)(nbuf_frag_info.paddr),
406 				 (*desc_list)->rx_desc.cookie);
407 
408 		hal_rxdma_buff_addr_info_set(rxdma_ring_entry,
409 					     nbuf_frag_info.paddr,
410 						(*desc_list)->rx_desc.cookie,
411 						rx_desc_pool->owner);
412 
413 		*desc_list = next;
414 
415 	}
416 
417 	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
418 
419 	dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u",
420 			 count, num_desc_to_free);
421 
422 	/* No need to count the number of bytes received during replenish.
423 	 * Therefore set replenish.pkts.bytes as 0.
424 	 */
425 	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
426 
427 free_descs:
428 	DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
429 	/*
430 	 * add any available free desc back to the free list
431 	 */
432 	if (*desc_list)
433 		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
434 			mac_id, rx_desc_pool);
435 
436 	return QDF_STATUS_SUCCESS;
437 }
438 
439 /*
440  * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
441  *				pkts to RAW mode simulation to
442  *				decapsulate the pkt.
443  *
444  * @vdev: vdev on which RAW mode is enabled
445  * @nbuf_list: list of RAW pkts to process
446  * @peer: peer object from which the pkt is rx
447  *
448  * Return: void
449  */
450 void
451 dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
452 					struct dp_peer *peer)
453 {
454 	qdf_nbuf_t deliver_list_head = NULL;
455 	qdf_nbuf_t deliver_list_tail = NULL;
456 	qdf_nbuf_t nbuf;
457 
458 	nbuf = nbuf_list;
459 	while (nbuf) {
460 		qdf_nbuf_t next = qdf_nbuf_next(nbuf);
461 
462 		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
463 
464 		DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
465 		DP_STATS_INC_PKT(peer, rx.raw, 1, qdf_nbuf_len(nbuf));
466 		/*
467 		 * reset the chfrag_start and chfrag_end bits in nbuf cb
468 		 * as this is a non-amsdu pkt and RAW mode simulation expects
469 		 * these bits to be 0 for a non-amsdu pkt.
470 		 */
471 		if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
472 			 qdf_nbuf_is_rx_chfrag_end(nbuf)) {
473 			qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
474 			qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
475 		}
476 
477 		nbuf = next;
478 	}
479 
480 	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
481 				 &deliver_list_tail, peer->mac_addr.raw);
482 
483 	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
484 }
485 
486 #ifndef FEATURE_WDS
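/* no-op stub: DA-based WDS learning is compiled in only with FEATURE_WDS */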
487 static void
488 dp_rx_da_learn(struct dp_soc *soc,
489 	       uint8_t *rx_tlv_hdr,
490 	       struct dp_peer *ta_peer,
491 	       qdf_nbuf_t nbuf)
492 {
493 }
494 #endif
495 /*
496  * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic
497  *
498  * @soc: core txrx main context
499  * @ta_peer	: source peer entry
500  * @rx_tlv_hdr	: start address of rx tlvs
501  * @nbuf	: nbuf that has to be intrabss forwarded
 * @msdu_metadata: msdu metadata parsed from the rx tlv headers
502  *
503  * Return: bool: true if it is forwarded else false
504  */
505 static bool
506 dp_rx_intrabss_fwd(struct dp_soc *soc,
507 			struct dp_peer *ta_peer,
508 			uint8_t *rx_tlv_hdr,
509 			qdf_nbuf_t nbuf,
510 			struct hal_rx_msdu_metadata msdu_metadata)
511 {
512 	uint16_t len;
513 	uint8_t is_frag;
514 	uint16_t da_peer_id = HTT_INVALID_PEER;
515 	struct dp_peer *da_peer = NULL;
516 	bool is_da_bss_peer = false;
517 	struct dp_ast_entry *ast_entry;
518 	qdf_nbuf_t nbuf_copy;
519 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
520 	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
521 	struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
522 					tid_stats.tid_rx_stats[ring_id][tid];
523 
524 	/* check if the destination peer is available in peer table
525 	 * and also check if the source peer and destination peer
526 	 * belong to the same vap and destination peer is not bss peer.
527 	 */
528 
529 	if ((qdf_nbuf_is_da_valid(nbuf) && !qdf_nbuf_is_da_mcbc(nbuf))) {
530 
531 		ast_entry = soc->ast_table[msdu_metadata.da_idx];
532 		if (!ast_entry)
533 			return false;
534 
535 		if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
536 			ast_entry->is_active = TRUE;
537 			return false;
538 		}
539 
540 		da_peer_id = ast_entry->peer_id;
541 
542 		if (da_peer_id == HTT_INVALID_PEER)
543 			return false;
544 		/* The TA peer cannot be the same as the peer (DA) on which the
545 		 * AST entry is present; this indicates a change in topology
546 		 * and that the AST entries are yet to be updated.
547 		 */
548 		if (da_peer_id == ta_peer->peer_id)
549 			return false;
550 
551 		if (ast_entry->vdev_id != ta_peer->vdev->vdev_id)
552 			return false;
553 
554 		da_peer = dp_peer_get_ref_by_id(soc, da_peer_id,
555 						DP_MOD_ID_RX);
556 		if (!da_peer)
557 			return false;
558 		is_da_bss_peer = da_peer->bss_peer;
559 		dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
560 
561 		if (!is_da_bss_peer) {
562 			len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
563 			is_frag = qdf_nbuf_is_frag(nbuf);
564 			memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
565 
566 			/* If the source or destination peer is in the isolation
567 			 * list, don't forward; instead push to the bridge stack.
568 			 */
569 			if (dp_get_peer_isolation(ta_peer) ||
570 			    dp_get_peer_isolation(da_peer))
571 				return false;
572 
573 			/* linearize the nbuf just before we send to
574 			 * dp_tx_send()
575 			 */
576 			if (qdf_unlikely(is_frag)) {
577 				if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
578 					return false;
579 
580 				nbuf = qdf_nbuf_unshare(nbuf);
581 				if (!nbuf) {
582 					DP_STATS_INC_PKT(ta_peer,
583 							 rx.intra_bss.fail,
584 							 1,
585 							 len);
586 					/* return true even though the pkt is
587 					 * not forwarded. Basically skb_unshare
588 					 * failed and we want to continue with
589 					 * next nbuf.
590 					 */
591 					tid_stats->fail_cnt[INTRABSS_DROP]++;
592 					return true;
593 				}
594 			}
595 
596 			if (!dp_tx_send((struct cdp_soc_t *)soc,
597 					ta_peer->vdev->vdev_id, nbuf)) {
598 				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
599 						 len);
600 				return true;
601 			} else {
602 				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
603 						len);
604 				tid_stats->fail_cnt[INTRABSS_DROP]++;
605 				return false;
606 			}
607 		}
608 	}
609 	/* If it is a broadcast pkt (e.g. ARP) and it is not from its own
610 	 * source, then clone the pkt and send the cloned pkt for
611 	 * intra-BSS forwarding and the original pkt up the network stack.
612 	 * Note: how do we handle multicast pkts? Do we forward
613 	 * all multicast pkts as is, or let a higher layer module
614 	 * like igmpsnoop decide whether to forward or not with
615 	 * Mcast enhancement?
616 	 */
617 	else if (qdf_unlikely((qdf_nbuf_is_da_mcbc(nbuf) &&
618 			       !ta_peer->bss_peer))) {
619 		if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf))
620 			goto end;
621 
622 		/* If the source peer is in the isolation list
623 		 * then don't forward; instead push to the bridge stack
624 		 */
625 		if (dp_get_peer_isolation(ta_peer))
626 			goto end;
627 
628 		nbuf_copy = qdf_nbuf_copy(nbuf);
629 		if (!nbuf_copy)
630 			goto end;
631 
632 		len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
633 		memset(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
634 
635 		/* Set cb->ftype to intrabss FWD */
636 		qdf_nbuf_set_tx_ftype(nbuf_copy, CB_FTYPE_INTRABSS_FWD);
637 		if (dp_tx_send((struct cdp_soc_t *)soc,
638 			       ta_peer->vdev->vdev_id, nbuf_copy)) {
639 			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, len);
640 			tid_stats->fail_cnt[INTRABSS_DROP]++;
641 			qdf_nbuf_free(nbuf_copy);
642 		} else {
643 			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, len);
644 			tid_stats->intrabss_cnt++;
645 		}
646 	}
647 
648 end:
649 	/* return false as we have to still send the original pkt
650 	 * up the stack
651 	 */
652 	return false;
653 }
654 
655 #ifdef MESH_MODE_SUPPORT
656 
657 /**
658  * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
659  *
660  * @vdev: DP Virtual device handle
661  * @nbuf: Buffer pointer
662  * @rx_tlv_hdr: start of rx tlv header
663  * @peer: pointer to peer
664  *
665  * This function allocates memory for mesh receive stats and fills the
666  * required stats. It stores the memory address in the skb cb.
667  *
668  * Return: void
669  */
670 
671 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
672 				uint8_t *rx_tlv_hdr, struct dp_peer *peer)
673 {
674 	struct mesh_recv_hdr_s *rx_info = NULL;
675 	uint32_t pkt_type;
676 	uint32_t nss;
677 	uint32_t rate_mcs;
678 	uint32_t bw;
679 	uint8_t primary_chan_num;
680 	uint32_t center_chan_freq;
681 	struct dp_soc *soc;
682 
683 	/* fill recv mesh stats */
684 	rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));
685 
686 	/* upper layers are responsible for freeing this memory */
687 
688 	if (!rx_info) {
689 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
690 			"Memory allocation failed for mesh rx stats");
691 		DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
692 		return;
693 	}
694 
695 	rx_info->rs_flags = MESH_RXHDR_VER1;
696 	if (qdf_nbuf_is_rx_chfrag_start(nbuf))
697 		rx_info->rs_flags |= MESH_RX_FIRST_MSDU;
698 
699 	if (qdf_nbuf_is_rx_chfrag_end(nbuf))
700 		rx_info->rs_flags |= MESH_RX_LAST_MSDU;
701 
702 	if (hal_rx_attn_msdu_get_is_decrypted(rx_tlv_hdr)) {
703 		rx_info->rs_flags |= MESH_RX_DECRYPTED;
704 		rx_info->rs_keyix = hal_rx_msdu_get_keyid(rx_tlv_hdr);
705 		if (vdev->osif_get_key)
706 			vdev->osif_get_key(vdev->osif_vdev,
707 					&rx_info->rs_decryptkey[0],
708 					&peer->mac_addr.raw[0],
709 					rx_info->rs_keyix);
710 	}
711 
712 	rx_info->rs_rssi = peer->stats.rx.rssi;
713 
714 	soc = vdev->pdev->soc;
715 	primary_chan_num = hal_rx_msdu_start_get_freq(rx_tlv_hdr);
716 	center_chan_freq = hal_rx_msdu_start_get_freq(rx_tlv_hdr) >> 16;
717 
718 	if (soc->cdp_soc.ol_ops && soc->cdp_soc.ol_ops->freq_to_band) {
719 		rx_info->rs_band = soc->cdp_soc.ol_ops->freq_to_band(
720 							soc->ctrl_psoc,
721 							vdev->pdev->pdev_id,
722 							center_chan_freq);
723 	}
724 	rx_info->rs_channel = primary_chan_num;
725 	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
726 	rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
727 	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
728 	nss = hal_rx_msdu_start_nss_get(vdev->pdev->soc->hal_soc, rx_tlv_hdr);
729 	rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
730 				(bw << 24);
731 
732 	qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);
733 
734 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
735 		FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x"),
736 						rx_info->rs_flags,
737 						rx_info->rs_rssi,
738 						rx_info->rs_channel,
739 						rx_info->rs_ratephy1,
740 						rx_info->rs_keyix);
741 
742 }
743 
744 /**
745  * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
746  *
747  * @vdev: DP Virtual device handle
748  * @nbuf: Buffer pointer
749  * @rx_tlv_hdr: start of rx tlv header
750  *
751  * This checks whether the received packet matches any filter-out
752  * category and drops the packet if it does.
753  *
754  * Return: status (0 indicates drop, 1 indicates no drop)
755  */
756 
757 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
758 					uint8_t *rx_tlv_hdr)
759 {
760 	union dp_align_mac_addr mac_addr;
761 	struct dp_soc *soc = vdev->pdev->soc;
762 
763 	if (qdf_unlikely(vdev->mesh_rx_filter)) {
764 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
765 			if (hal_rx_mpdu_get_fr_ds(soc->hal_soc,
766 						  rx_tlv_hdr))
767 				return  QDF_STATUS_SUCCESS;
768 
769 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
770 			if (hal_rx_mpdu_get_to_ds(soc->hal_soc,
771 						  rx_tlv_hdr))
772 				return  QDF_STATUS_SUCCESS;
773 
774 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
775 			if (!hal_rx_mpdu_get_fr_ds(soc->hal_soc,
776 						   rx_tlv_hdr) &&
777 			    !hal_rx_mpdu_get_to_ds(soc->hal_soc,
778 						   rx_tlv_hdr))
779 				return  QDF_STATUS_SUCCESS;
780 
781 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
782 			if (hal_rx_mpdu_get_addr1(soc->hal_soc,
783 						  rx_tlv_hdr,
784 					&mac_addr.raw[0]))
785 				return QDF_STATUS_E_FAILURE;
786 
787 			if (!qdf_mem_cmp(&mac_addr.raw[0],
788 					&vdev->mac_addr.raw[0],
789 					QDF_MAC_ADDR_SIZE))
790 				return  QDF_STATUS_SUCCESS;
791 		}
792 
793 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
794 			if (hal_rx_mpdu_get_addr2(soc->hal_soc,
795 						  rx_tlv_hdr,
796 						  &mac_addr.raw[0]))
797 				return QDF_STATUS_E_FAILURE;
798 
799 			if (!qdf_mem_cmp(&mac_addr.raw[0],
800 					&vdev->mac_addr.raw[0],
801 					QDF_MAC_ADDR_SIZE))
802 				return  QDF_STATUS_SUCCESS;
803 		}
804 	}
805 
806 	return QDF_STATUS_E_FAILURE;
807 }
808 
809 #else
810 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
811 				uint8_t *rx_tlv_hdr, struct dp_peer *peer)
812 {
813 }
814 
815 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
816 					uint8_t *rx_tlv_hdr)
817 {
818 	return QDF_STATUS_E_FAILURE;
819 }
820 
821 #endif
822 
823 #ifdef FEATURE_NAC_RSSI
824 /**
825  * dp_rx_nac_filter(): Function to perform filtering of non-associated
826  * clients
827  * @pdev: DP pdev handle
828  * @rx_pkt_hdr: Rx packet Header
829  *
830  * return: dp_vdev*
831  */
832 static
833 struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
834 		uint8_t *rx_pkt_hdr)
835 {
836 	struct ieee80211_frame *wh;
837 	struct dp_neighbour_peer *peer = NULL;
838 
839 	wh = (struct ieee80211_frame *)rx_pkt_hdr;
840 
841 	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
842 		return NULL;
843 
844 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
845 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
846 				neighbour_peer_list_elem) {
847 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
848 				wh->i_addr2, QDF_MAC_ADDR_SIZE) == 0) {
849 			QDF_TRACE(
850 				QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
851 				FL("NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x"),
852 				peer->neighbour_peers_macaddr.raw[0],
853 				peer->neighbour_peers_macaddr.raw[1],
854 				peer->neighbour_peers_macaddr.raw[2],
855 				peer->neighbour_peers_macaddr.raw[3],
856 				peer->neighbour_peers_macaddr.raw[4],
857 				peer->neighbour_peers_macaddr.raw[5]);
858 
859 				qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
860 
861 			return pdev->monitor_vdev;
862 		}
863 	}
864 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
865 
866 	return NULL;
867 }
868 
869 /**
870  * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
871  * @soc: DP SOC handle
872  * @mpdu: mpdu for which peer is invalid
873  * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
874  * pool_id have the same mapping)
875  *
876  * return: integer type
877  */
878 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
879 				   uint8_t mac_id)
880 {
881 	struct dp_invalid_peer_msg msg;
882 	struct dp_vdev *vdev = NULL;
883 	struct dp_pdev *pdev = NULL;
884 	struct ieee80211_frame *wh;
885 	qdf_nbuf_t curr_nbuf, next_nbuf;
886 	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
887 	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
890 
891 	if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) {
892 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
893 			  "Drop decapped frames");
894 		goto free;
895 	}
896 
897 	wh = (struct ieee80211_frame *)rx_pkt_hdr;
898 
899 	if (!DP_FRAME_IS_DATA(wh)) {
900 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
901 			  "NAWDS valid only for data frames");
902 		goto free;
903 	}
904 
905 	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
906 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
907 			"Invalid nbuf length");
908 		goto free;
909 	}
910 
911 	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
912 
913 	if (!pdev || qdf_unlikely(pdev->is_pdev_down)) {
914 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
915 			  "PDEV %s", !pdev ? "not found" : "down");
916 		goto free;
917 	}
918 
919 	if (pdev->filter_neighbour_peers) {
920 		/* Next Hop scenario not yet handled */
921 		vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
922 		if (vdev) {
923 			dp_rx_mon_deliver(soc, pdev->pdev_id,
924 					  pdev->invalid_peer_head_msdu,
925 					  pdev->invalid_peer_tail_msdu);
926 
927 			pdev->invalid_peer_head_msdu = NULL;
928 			pdev->invalid_peer_tail_msdu = NULL;
929 
930 			return 0;
931 		}
932 	}
933 
934 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
935 
936 		if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
937 				QDF_MAC_ADDR_SIZE) == 0) {
938 			goto out;
939 		}
940 	}
941 
942 	if (!vdev) {
943 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
944 			"VDEV not found");
945 		goto free;
946 	}
947 
948 out:
949 	msg.wh = wh;
950 	qdf_nbuf_pull_head(mpdu, RX_PKT_TLVS_LEN);
951 	msg.nbuf = mpdu;
952 	msg.vdev_id = vdev->vdev_id;
953 
954 	/*
955 	 * NOTE: Only valid for HKv1.
956 	 * If smart monitor mode is enabled on RE, we are getting invalid
957 	 * peer frames with RA as the STA mac of the RE and the TA not
958 	 * matching any NAC list or the BSSID. Such frames need to be
959 	 * dropped in order to avoid HM_WDS false addition.
960 	 */
961 	if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer) {
962 		if (!soc->hw_nac_monitor_support &&
963 		    pdev->filter_neighbour_peers &&
964 		    vdev->opmode == wlan_op_mode_sta) {
965 			QDF_TRACE(QDF_MODULE_ID_DP,
966 				  QDF_TRACE_LEVEL_WARN,
967 				  "Drop inv peer pkts with STA RA:%pm",
968 				   wh->i_addr1);
969 			goto free;
970 		}
971 		pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(
972 				(struct cdp_ctrl_objmgr_psoc *)soc->ctrl_psoc,
973 				pdev->pdev_id, &msg);
974 	}
975 
976 free:
977 	/* Drop and free packet */
978 	curr_nbuf = mpdu;
979 	while (curr_nbuf) {
980 		next_nbuf = qdf_nbuf_next(curr_nbuf);
981 		qdf_nbuf_free(curr_nbuf);
982 		curr_nbuf = next_nbuf;
983 	}
984 
985 	return 0;
986 }
987 
988 /**
989  * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
990  * @soc: DP SOC handle
991  * @mpdu: mpdu for which peer is invalid
992  * @mpdu_done: if an mpdu is completed
993  * @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and
994  * pool_id has same mapping)
995  *
996  * Return: void
997  */
998 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
999 					qdf_nbuf_t mpdu, bool mpdu_done,
1000 					uint8_t mac_id)
1001 {
1002 	/* Only trigger the process when mpdu is completed */
1003 	if (mpdu_done)
1004 		dp_rx_process_invalid_peer(soc, mpdu, mac_id);
1005 }
1006 #else
1007 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
1008 				   uint8_t mac_id)
1009 {
1010 	qdf_nbuf_t curr_nbuf, next_nbuf;
1011 	struct dp_pdev *pdev;
1012 	struct dp_vdev *vdev = NULL;
1013 	struct ieee80211_frame *wh;
1014 	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
1015 	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
1016 
1017 	wh = (struct ieee80211_frame *)rx_pkt_hdr;
1018 
1019 	if (!DP_FRAME_IS_DATA(wh)) {
1020 		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
1021 				   "only for data frames");
1022 		goto free;
1023 	}
1024 
1025 	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
1026 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1027 			  "Invalid nbuf length");
1028 		goto free;
1029 	}
1030 
1031 	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1032 	if (!pdev) {
1033 		QDF_TRACE(QDF_MODULE_ID_DP,
1034 			  QDF_TRACE_LEVEL_ERROR,
1035 			  "PDEV not found");
1036 		goto free;
1037 	}
1038 
1039 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
1040 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
1041 		if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
1042 				QDF_MAC_ADDR_SIZE) == 0) {
1043 			qdf_spin_unlock_bh(&pdev->vdev_list_lock);
1044 			goto out;
1045 		}
1046 	}
1047 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
1048 
1049 	if (!vdev) {
1050 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1051 			  "VDEV not found");
1052 		goto free;
1053 	}
1054 
1055 out:
1056 	if (soc->cdp_soc.ol_ops->rx_invalid_peer)
1057 		soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh);
1058 free:
1059 	/* reset the head and tail pointers */
1060 	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1061 	if (pdev) {
1062 		pdev->invalid_peer_head_msdu = NULL;
1063 		pdev->invalid_peer_tail_msdu = NULL;
1064 	}
1065 
1066 	/* Drop and free packet */
1067 	curr_nbuf = mpdu;
1068 	while (curr_nbuf) {
1069 		next_nbuf = qdf_nbuf_next(curr_nbuf);
1070 		qdf_nbuf_free(curr_nbuf);
1071 		curr_nbuf = next_nbuf;
1072 	}
1073 
1081 	return 0;
1082 }
1083 
1084 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
1085 					qdf_nbuf_t mpdu, bool mpdu_done,
1086 					uint8_t mac_id)
1087 {
1088 	/* Process the nbuf */
1089 	dp_rx_process_invalid_peer(soc, mpdu, mac_id);
1090 }
1091 #endif
1092 
1093 #ifdef RECEIVE_OFFLOAD
1094 /**
1095  * dp_rx_print_offload_info() - Print offload info from RX TLV
1096  * @soc: dp soc handle
1097  * @rx_tlv: RX TLV for which offload information is to be printed
1098  *
1099  * Return: None
1100  */
1101 static void dp_rx_print_offload_info(struct dp_soc *soc, uint8_t *rx_tlv)
1102 {
1103 	dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------");
1104 	dp_verbose_debug("lro_eligible 0x%x", HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv));
1105 	dp_verbose_debug("pure_ack 0x%x", HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv));
1106 	dp_verbose_debug("chksum 0x%x", hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
1107 								  rx_tlv));
1108 	dp_verbose_debug("TCP seq num 0x%x", HAL_RX_TLV_GET_TCP_SEQ(rx_tlv));
1109 	dp_verbose_debug("TCP ack num 0x%x", HAL_RX_TLV_GET_TCP_ACK(rx_tlv));
1110 	dp_verbose_debug("TCP window 0x%x", HAL_RX_TLV_GET_TCP_WIN(rx_tlv));
1111 	dp_verbose_debug("TCP protocol 0x%x", HAL_RX_TLV_GET_TCP_PROTO(rx_tlv));
1112 	dp_verbose_debug("TCP offset 0x%x", HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv));
1113 	dp_verbose_debug("toeplitz 0x%x", HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv));
1114 	dp_verbose_debug("---------------------------------------------------------");
1115 }
1116 
1117 /**
1118  * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb
1119  * @soc: DP SOC handle
1120  * @rx_tlv: RX TLV received for the msdu
1121  * @msdu: msdu for which GRO info needs to be filled
1122  * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets
1123  *
1124  * Return: None
1125  */
1126 static
1127 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
1128 			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
1129 {
1130 	if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx))
1131 		return;
1132 
1133 	/* Filling up RX offload info only for TCP packets */
1134 	if (!HAL_RX_TLV_GET_TCP_PROTO(rx_tlv))
1135 		return;
1136 
1137 	*rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1;
1138 
1139 	QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) =
1140 		 HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv);
1141 	QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) =
1142 			HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv);
1143 	QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
1144 			hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
1145 						  rx_tlv);
1146 	QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) =
1147 			 HAL_RX_TLV_GET_TCP_SEQ(rx_tlv);
1148 	QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) =
1149 			 HAL_RX_TLV_GET_TCP_ACK(rx_tlv);
1150 	QDF_NBUF_CB_RX_TCP_WIN(msdu) =
1151 			 HAL_RX_TLV_GET_TCP_WIN(rx_tlv);
1152 	QDF_NBUF_CB_RX_TCP_PROTO(msdu) =
1153 			 HAL_RX_TLV_GET_TCP_PROTO(rx_tlv);
1154 	QDF_NBUF_CB_RX_IPV6_PROTO(msdu) =
1155 			 HAL_RX_TLV_GET_IPV6(rx_tlv);
1156 	QDF_NBUF_CB_RX_TCP_OFFSET(msdu) =
1157 			 HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv);
1158 	QDF_NBUF_CB_RX_FLOW_ID(msdu) =
1159 			 HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv);
1160 
1161 	dp_rx_print_offload_info(soc, rx_tlv);
1162 }
1163 #else
1164 static void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
1165 				qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
1166 {
1167 }
1168 #endif /* RECEIVE_OFFLOAD */
1169 
1170 /**
1171  * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
1172  *
1173  * @nbuf: pointer to msdu.
1174  * @mpdu_len: mpdu length
1175  *
1176  * Return: returns true if nbuf is the last msdu of the mpdu, else returns false.
1177  */
1178 static inline bool dp_rx_adjust_nbuf_len(qdf_nbuf_t nbuf, uint16_t *mpdu_len)
1179 {
1180 	bool last_nbuf;
1181 
1182 	if (*mpdu_len > (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN)) {
1183 		qdf_nbuf_set_pktlen(nbuf, RX_DATA_BUFFER_SIZE);
1184 		last_nbuf = false;
1185 	} else {
1186 		qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + RX_PKT_TLVS_LEN));
1187 		last_nbuf = true;
1188 	}
1189 
1190 	*mpdu_len -= (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN);
1191 
1192 	return last_nbuf;
1193 }
1194 
1195 /**
1196  * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
1197  *		     multiple nbufs.
1198  * @soc: DP SOC handle
1199  * @nbuf: pointer to the first msdu of an amsdu.
1200  *
1201  * This function implements the creation of RX frag_list for cases
1202  * where an MSDU is spread across multiple nbufs.
1203  *
1204  * Return: returns the head nbuf which contains complete frag_list.
1205  */
1206 qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf)
1207 {
1208 	qdf_nbuf_t parent, frag_list, next = NULL;
1209 	uint16_t frag_list_len = 0;
1210 	uint16_t mpdu_len;
1211 	bool last_nbuf;
1212 
1213 	/*
1214 	 * Use the msdu len obtained from the REO entry descriptor instead,
1215 	 * since there are cases where the RX PKT TLV is corrupted while the
1216 	 * msdu_len from the REO descriptor is correct for non-raw RX scatter msdus.
1217 	 */
1218 	mpdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
1219 	/*
1220 	 * this is a case where the complete msdu fits in one single nbuf.
1221 	 * in this case HW sets both start and end bit and we only need to
1222 	 * reset these bits for RAW mode simulator to decap the pkt
1223 	 */
1224 	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
1225 					qdf_nbuf_is_rx_chfrag_end(nbuf)) {
1226 		qdf_nbuf_set_pktlen(nbuf, mpdu_len + RX_PKT_TLVS_LEN);
1227 		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
1228 		return nbuf;
1229 	}
1230 
1231 	/*
1232 	 * This is a case where we have multiple msdus (A-MSDU) spread across
1233 	 * multiple nbufs. here we create a fraglist out of these nbufs.
1234 	 *
1235 	 * the moment we encounter a nbuf with continuation bit set we
1236 	 * know for sure we have an MSDU which is spread across multiple
1237 	 * nbufs. We loop through and reap nbufs till we reach last nbuf.
1238 	 */
1239 	parent = nbuf;
1240 	frag_list = nbuf->next;
1241 	nbuf = nbuf->next;
1242 
1243 	/*
1244 	 * set the start bit in the first nbuf we encounter with continuation
1245 	 * bit set. This has the proper mpdu length set as it is the first
1246 	 * msdu of the mpdu. this becomes the parent nbuf and the subsequent
1247 	 * nbufs will form the frag_list of the parent nbuf.
1248 	 */
1249 	qdf_nbuf_set_rx_chfrag_start(parent, 1);
1250 	last_nbuf = dp_rx_adjust_nbuf_len(parent, &mpdu_len);
1251 
1252 	/*
1253 	 * HW issue:  MSDU cont bit is set but reported MPDU length can fit
1254 	 * into a single buffer
1255 	 *
1256 	 * Increment error stats and avoid SG list creation
1257 	 */
1258 	if (last_nbuf) {
1259 		DP_STATS_INC(soc, rx.err.msdu_continuation_err, 1);
1260 		qdf_nbuf_pull_head(parent, RX_PKT_TLVS_LEN);
1261 		return parent;
1262 	}
1263 
1264 	/*
1265 	 * this is where we set the length of the fragments which are
1266 	 * associated to the parent nbuf. We iterate through the frag_list
1267 	 * till we hit the last_nbuf of the list.
1268 	 */
1269 	do {
1270 		last_nbuf = dp_rx_adjust_nbuf_len(nbuf, &mpdu_len);
1271 		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
1272 		frag_list_len += qdf_nbuf_len(nbuf);
1273 
1274 		if (last_nbuf) {
1275 			next = nbuf->next;
1276 			nbuf->next = NULL;
1277 			break;
1278 		}
1279 
1280 		nbuf = nbuf->next;
1281 	} while (!last_nbuf);
1282 
1283 	qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
1284 	qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
1285 	parent->next = next;
1286 
1287 	qdf_nbuf_pull_head(parent, RX_PKT_TLVS_LEN);
1288 	return parent;
1289 }
1290 
1291 #ifdef QCA_PEER_EXT_STATS
1292 /*
1293  * dp_rx_compute_tid_delay - Compute per-TID delay stats
1294  * @stats: delay TID stats of the peer
1295  * @nbuf: rx nbuf
1296  *
1297  * Return: Void
1298  */
1299 void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
1300 			     qdf_nbuf_t nbuf)
1301 {
1302 	struct cdp_delay_rx_stats  *rx_delay = &stats->rx_delay;
1303 	uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
1304 
1305 	dp_hist_update_stats(&rx_delay->to_stack_delay, to_stack);
1306 }
1307 #endif /* QCA_PEER_EXT_STATS */
1308 
1309 /**
1310  * dp_rx_compute_delay() - Compute reap-to-stack and inter-frame
1311  *				delay stats for an rx buffer
1312  *
1313  * @vdev: DP vdev handle
1314  * @nbuf: rx buffer
1316  * Return: none
1317  */
1318 void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1319 {
1320 	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
1321 	int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get());
1322 	uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
1323 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
1324 	uint32_t interframe_delay =
1325 		(uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp);
1326 
1327 	dp_update_delay_stats(vdev->pdev, to_stack, tid,
1328 			      CDP_DELAY_STATS_REAP_STACK, ring_id);
1329 	/*
1330 	 * Update interframe delay stats calculated at deliver_data_ol point.
1331 	 * Value of vdev->prev_rx_deliver_tstamp will be 0 for 1st frame, so
1332 	 * interframe delay will not be calculated correctly for the 1st frame.
1333 	 * On the other hand, this helps avoid an extra per-packet check
1334 	 * of vdev->prev_rx_deliver_tstamp.
1335 	 */
1336 	dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
1337 			      CDP_DELAY_STATS_RX_INTERFRAME, ring_id);
1338 	vdev->prev_rx_deliver_tstamp = current_ts;
1339 }
1340 
1341 /**
1342  * dp_rx_drop_nbuf_list() - drop an nbuf list
1343  * @pdev: dp pdev reference
1344  * @buf_list: buffer list to be dropped
1345  *
1346  * Return: int (number of bufs dropped)
1347  */
1348 static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev,
1349 				       qdf_nbuf_t buf_list)
1350 {
1351 	struct cdp_tid_rx_stats *stats = NULL;
1352 	uint8_t tid = 0, ring_id = 0;
1353 	int num_dropped = 0;
1354 	qdf_nbuf_t buf, next_buf;
1355 
1356 	buf = buf_list;
1357 	while (buf) {
1358 		ring_id = QDF_NBUF_CB_RX_CTX_ID(buf);
1359 		next_buf = qdf_nbuf_queue_next(buf);
1360 		tid = qdf_nbuf_get_tid_val(buf);
1361 		if (qdf_likely(pdev)) {
1362 			stats = &pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
1363 			stats->fail_cnt[INVALID_PEER_VDEV]++;
1364 			stats->delivered_to_stack--;
1365 		}
1366 		qdf_nbuf_free(buf);
1367 		buf = next_buf;
1368 		num_dropped++;
1369 	}
1370 
1371 	return num_dropped;
1372 }
1373 
1374 #ifdef QCA_SUPPORT_WDS_EXTENDED
1375 /**
1376  * dp_rx_is_list_ready() - Make different lists for 4-address and 3-address frames
1377  * @nbuf_head: skb list head
1378  * @vdev: vdev
1379  * @peer: peer
1380  * @peer_id: peer id of new received frame
1381  * @vdev_id: vdev_id of new received frame
1382  *
1383  * Return: true if peer_ids are different.
1384  */
1385 static inline bool
1386 dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
1387 		    struct dp_vdev *vdev,
1388 		    struct dp_peer *peer,
1389 		    uint16_t peer_id,
1390 		    uint8_t vdev_id)
1391 {
1392 	if (nbuf_head && peer && (peer->peer_id != peer_id))
1393 		return true;
1394 
1395 	return false;
1396 }
1397 
1398 /**
1399  * dp_rx_deliver_to_stack_ext() - Deliver to netdev per sta
1400  * @soc: core txrx main context
1401  * @vdev: vdev
1402  * @peer: peer
1403  * @nbuf_head: skb list head
1404  *
1405  * Return: true if packet is delivered to netdev per STA.
1406  */
1407 static inline bool
1408 dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
1409 			   struct dp_peer *peer, qdf_nbuf_t nbuf_head)
1410 {
1411 	/*
1412 	 * When extended WDS is disabled, frames are sent to AP netdevice.
1413 	 */
1414 	if (qdf_likely(!vdev->wds_ext_enabled))
1415 		return false;
1416 
1417 	/*
1418 	 * There can be 2 cases:
1419 	 * 1. Send frame to parent netdev if its not for netdev per STA
1420 	 * 1. Send frame to parent netdev if it's not for a netdev per STA
1421 	 *    a. Send frame to appropriate netdev using registered fp.
1422 	 *    b. If fp is NULL, drop the frames.
1423 	 */
1424 	if (!peer->wds_ext.init)
1425 		return false;
1426 
1427 	if (peer->osif_rx)
1428 		peer->osif_rx(peer->wds_ext.osif_peer, nbuf_head);
1429 	else
1430 		dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);
1431 
1432 	return true;
1433 }
1434 
1435 #else
1436 static inline bool
1437 dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
1438 		    struct dp_vdev *vdev,
1439 		    struct dp_peer *peer,
1440 		    uint16_t peer_id,
1441 		    uint8_t vdev_id)
1442 {
1443 	if (nbuf_head && vdev && (vdev->vdev_id != vdev_id))
1444 		return true;
1445 
1446 	return false;
1447 }
1448 
1449 static inline bool
1450 dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
1451 			   struct dp_peer *peer, qdf_nbuf_t nbuf_head)
1452 {
1453 	return false;
1454 }
1455 #endif
1456 
1457 #ifdef PEER_CACHE_RX_PKTS
1458 /**
1459  * dp_rx_flush_rx_cached() - flush cached rx frames
1460  * @peer: peer
1461  * @drop: flag to drop frames or forward to net stack
1462  *
1463  * Return: None
1464  */
1465 void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
1466 {
1467 	struct dp_peer_cached_bufq *bufqi;
1468 	struct dp_rx_cached_buf *cache_buf = NULL;
1469 	ol_txrx_rx_fp data_rx = NULL;
1470 	int num_buff_elem;
1471 	QDF_STATUS status;
1472 
1473 	if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
1474 		qdf_atomic_dec(&peer->flush_in_progress);
1475 		return;
1476 	}
1477 
1478 	qdf_spin_lock_bh(&peer->peer_info_lock);
1479 	if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx)
1480 		data_rx = peer->vdev->osif_rx;
1481 	else
1482 		drop = true;
1483 	qdf_spin_unlock_bh(&peer->peer_info_lock);
1484 
1485 	bufqi = &peer->bufq_info;
1486 
1487 	qdf_spin_lock_bh(&bufqi->bufq_lock);
1488 	qdf_list_remove_front(&bufqi->cached_bufq,
1489 			      (qdf_list_node_t **)&cache_buf);
1490 	while (cache_buf) {
1491 		num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(
1492 								cache_buf->buf);
1493 		bufqi->entries -= num_buff_elem;
1494 		qdf_spin_unlock_bh(&bufqi->bufq_lock);
1495 		if (drop) {
1496 			bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1497 							      cache_buf->buf);
1498 		} else {
1499 			/* Flush the cached frames to OSIF DEV */
1500 			status = data_rx(peer->vdev->osif_vdev, cache_buf->buf);
1501 			if (status != QDF_STATUS_SUCCESS)
1502 				bufqi->dropped = dp_rx_drop_nbuf_list(
1503 							peer->vdev->pdev,
1504 							cache_buf->buf);
1505 		}
1506 		qdf_mem_free(cache_buf);
1507 		cache_buf = NULL;
1508 		qdf_spin_lock_bh(&bufqi->bufq_lock);
1509 		qdf_list_remove_front(&bufqi->cached_bufq,
1510 				      (qdf_list_node_t **)&cache_buf);
1511 	}
1512 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
1513 	qdf_atomic_dec(&peer->flush_in_progress);
1514 }
1515 
1516 /**
1517  * dp_rx_enqueue_rx() - cache rx frames
1518  * @peer: peer
1519  * @rx_buf_list: cache buffer list
1520  *
1521  * Return: None
1522  */
1523 static QDF_STATUS
1524 dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
1525 {
1526 	struct dp_rx_cached_buf *cache_buf;
1527 	struct dp_peer_cached_bufq *bufqi = &peer->bufq_info;
1528 	int num_buff_elem;
1529 
1530 	dp_debug_rl("bufq->curr %d bufq->drops %d", bufqi->entries,
1531 		    bufqi->dropped);
1532 	if (!peer->valid) {
1533 		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1534 						      rx_buf_list);
1535 		return QDF_STATUS_E_INVAL;
1536 	}
1537 
1538 	qdf_spin_lock_bh(&bufqi->bufq_lock);
1539 	if (bufqi->entries >= bufqi->thresh) {
1540 		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1541 						      rx_buf_list);
1542 		qdf_spin_unlock_bh(&bufqi->bufq_lock);
1543 		return QDF_STATUS_E_RESOURCES;
1544 	}
1545 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
1546 
1547 	num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list);
1548 
1549 	cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf));
1550 	if (!cache_buf) {
1551 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1552 			  "Failed to allocate buf to cache rx frames");
1553 		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1554 						      rx_buf_list);
1555 		return QDF_STATUS_E_NOMEM;
1556 	}
1557 
1558 	cache_buf->buf = rx_buf_list;
1559 
1560 	qdf_spin_lock_bh(&bufqi->bufq_lock);
1561 	qdf_list_insert_back(&bufqi->cached_bufq,
1562 			     &cache_buf->node);
1563 	bufqi->entries += num_buff_elem;
1564 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
1565 
1566 	return QDF_STATUS_SUCCESS;
1567 }
1568 
1569 static inline
1570 bool dp_rx_is_peer_cache_bufq_supported(void)
1571 {
1572 	return true;
1573 }
1574 #else
1575 static inline
1576 bool dp_rx_is_peer_cache_bufq_supported(void)
1577 {
1578 	return false;
1579 }
1580 
1581 static inline QDF_STATUS
1582 dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
1583 {
1584 	return QDF_STATUS_SUCCESS;
1585 }
1586 #endif
1587 
1588 #ifndef DELIVERY_TO_STACK_STATUS_CHECK
1589 /**
1590  * dp_rx_check_delivery_to_stack() - Deliver pkts to network
1591  * using the appropriate call back functions.
1592  * @soc: soc
1593  * @vdev: vdev
1594  * @peer: peer
1595  * @nbuf_head: skb list head
1596  * @nbuf_tail: skb list tail
1597  *
1598  * Return: None
1599  */
1600 static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
1601 					  struct dp_vdev *vdev,
1602 					  struct dp_peer *peer,
1603 					  qdf_nbuf_t nbuf_head)
1604 {
1605 	if (qdf_unlikely(dp_rx_deliver_to_stack_ext(soc, vdev,
1606 						    peer, nbuf_head)))
1607 		return;
1608 
1609 	/* Function pointer initialized only when FISA is enabled */
1610 	if (vdev->osif_fisa_rx)
1611 		/* on failure send it via regular path */
1612 		vdev->osif_fisa_rx(soc, vdev, nbuf_head);
1613 	else
1614 		vdev->osif_rx(vdev->osif_vdev, nbuf_head);
1615 }
1616 
1617 #else
1618 /**
1619  * dp_rx_check_delivery_to_stack() - Deliver pkts to network
1620  * using the appropriate call back functions.
1621  * @soc: soc
1622  * @vdev: vdev
1623  * @peer: peer
1624  * @nbuf_head: skb list head
1625  * @nbuf_tail: skb list tail
1626  *
1627  * Check the return status of the call back function and drop
1628  * the packets if the return status indicates a failure.
1629  *
1630  * Return: None
1631  */
1632 static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
1633 					  struct dp_vdev *vdev,
1634 					  struct dp_peer *peer,
1635 					  qdf_nbuf_t nbuf_head)
1636 {
1637 	int num_nbuf = 0;
1638 	QDF_STATUS ret_val = QDF_STATUS_E_FAILURE;
1639 
1640 	/* Function pointer initialized only when FISA is enabled */
1641 	if (vdev->osif_fisa_rx)
1642 		/* on failure send it via regular path */
1643 		ret_val = vdev->osif_fisa_rx(soc, vdev, nbuf_head);
1644 	else if (vdev->osif_rx)
1645 		ret_val = vdev->osif_rx(vdev->osif_vdev, nbuf_head);
1646 
1647 	if (!QDF_IS_STATUS_SUCCESS(ret_val)) {
1648 		num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);
1649 		DP_STATS_INC(soc, rx.err.rejected, num_nbuf);
1650 		if (peer)
1651 			DP_STATS_DEC(peer, rx.to_stack.num, num_nbuf);
1652 	}
1653 }
1654 #endif /* ifdef DELIVERY_TO_STACK_STATUS_CHECK */
1655 
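/**
 * dp_rx_deliver_to_stack() - deliver a list of rx nbufs to the network stack
 * @soc: core txrx main context
 * @vdev: vdev on which the packets were received
 * @peer: peer from which the packets were received
 * @nbuf_head: head of the nbuf list
 * @nbuf_tail: tail of the nbuf list
 *
 * Drops the list if the vdev is invalid or being deleted, caches or drops it
 * if no OS rx callback is registered, runs the RAW/native-wifi decap
 * simulation where needed, and finally hands the list to the registered
 * callback via dp_rx_check_delivery_to_stack().
 *
 * Return: void
 */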
1656 void dp_rx_deliver_to_stack(struct dp_soc *soc,
1657 			    struct dp_vdev *vdev,
1658 			    struct dp_peer *peer,
1659 			    qdf_nbuf_t nbuf_head,
1660 			    qdf_nbuf_t nbuf_tail)
1661 {
1662 	int num_nbuf = 0;
1663 
1664 	if (qdf_unlikely(!vdev || vdev->delete.pending)) {
1665 		num_nbuf = dp_rx_drop_nbuf_list(NULL, nbuf_head);
1666 		/*
1667 		 * This is a special case where vdev is invalid,
1668 		 * so we cannot know the pdev to which this packet
1669 		 * belonged. Hence we update the soc rx error stats.
1670 		 */
1671 		DP_STATS_INC(soc, rx.err.invalid_vdev, num_nbuf);
1672 		return;
1673 	}
1674 
1675 	/*
1676 	 * highly unlikely to have a vdev without a registered rx
1677 	 * callback function. if so let us free the nbuf_list.
1678 	 */
1679 	if (qdf_unlikely(!vdev->osif_rx)) {
1680 		if (peer && dp_rx_is_peer_cache_bufq_supported()) {
1681 			dp_rx_enqueue_rx(peer, nbuf_head);
1682 		} else {
1683 			num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev,
1684 							nbuf_head);
1685 			DP_STATS_DEC(peer, rx.to_stack.num, num_nbuf);
1686 		}
1687 		return;
1688 	}
1689 
1690 	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
1691 			(vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
1692 		vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
1693 				&nbuf_tail, peer->mac_addr.raw);
1694 	}
1695 
1696 	dp_rx_check_delivery_to_stack(soc, vdev, peer, nbuf_head);
1697 }
1698 
1699 /**
1700  * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
 * @pdev: DP pdev handle
1701  * @nbuf: pointer to the first msdu of an amsdu.
1702  * @rx_tlv_hdr: pointer to the start of RX TLV headers.
1703  *
1704  * The ip_summed field of the skb is set based on whether HW validated the
1705  * IP/TCP/UDP checksum.
1706  *
1707  * Return: void
1708  */
1709 static inline void dp_rx_cksum_offload(struct dp_pdev *pdev,
1710 				       qdf_nbuf_t nbuf,
1711 				       uint8_t *rx_tlv_hdr)
1712 {
1713 	qdf_nbuf_rx_cksum_t cksum = {0};
1714 	bool ip_csum_err = hal_rx_attn_ip_cksum_fail_get(rx_tlv_hdr);
1715 	bool tcp_udp_csum_er = hal_rx_attn_tcp_udp_cksum_fail_get(rx_tlv_hdr);
1716 
1717 	if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) {
1718 		cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
1719 		qdf_nbuf_set_rx_cksum(nbuf, &cksum);
1720 	} else {
1721 		DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err);
1722 		DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er);
1723 	}
1724 }
1725 
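/*
 * dp_rx_msdu_stats_update_prot_cnts() - update per-peer protocol counters
 * for a received (non-frag, non-raw) msdu when protocol count tracking is
 * enabled on the vdev; a no-op unless VDEV_PEER_PROTOCOL_COUNT is defined.
 */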
1726 #ifdef VDEV_PEER_PROTOCOL_COUNT
1727 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, peer) \
1728 { \
1729 	qdf_nbuf_t nbuf_local; \
1730 	struct dp_peer *peer_local; \
1731 	struct dp_vdev *vdev_local = vdev_hdl; \
1732 	do { \
1733 		if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \
1734 			break; \
1735 		nbuf_local = nbuf; \
1736 		peer_local = peer; \
1737 		if (qdf_unlikely(qdf_nbuf_is_frag((nbuf_local)))) \
1738 			break; \
1739 		else if (qdf_unlikely(qdf_nbuf_is_raw_frame((nbuf_local)))) \
1740 			break; \
1741 		dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \
1742 						       (nbuf_local), \
1743 						       (peer_local), 0, 1); \
1744 	} while (0); \
1745 }
1746 #else
1747 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, peer)
1748 #endif
1749 
1750 /**
1751  * dp_rx_msdu_stats_update() - update per msdu stats.
1752  * @soc: core txrx main context
1753  * @nbuf: pointer to the first msdu of an amsdu.
1754  * @rx_tlv_hdr: pointer to the start of RX TLV headers.
1755  * @peer: pointer to the peer object.
1756  * @ring_id: reo dest ring number on which pkt is reaped.
1757  * @tid_stats: per tid rx stats.
1758  *
1759  * update all the per msdu stats for that nbuf.
1760  * Return: void
1761  */
1762 static void dp_rx_msdu_stats_update(struct dp_soc *soc,
1763 				    qdf_nbuf_t nbuf,
1764 				    uint8_t *rx_tlv_hdr,
1765 				    struct dp_peer *peer,
1766 				    uint8_t ring_id,
1767 				    struct cdp_tid_rx_stats *tid_stats)
1768 {
1769 	bool is_ampdu, is_not_amsdu;
1770 	uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
1771 	struct dp_vdev *vdev = peer->vdev;
1772 	qdf_ether_header_t *eh;
1773 	uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
1774 
1775 	dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, peer);
1776 	is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
1777 			qdf_nbuf_is_rx_chfrag_end(nbuf);
1778 
1779 	DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1, msdu_len);
1780 	DP_STATS_INCC(peer, rx.non_amsdu_cnt, 1, is_not_amsdu);
1781 	DP_STATS_INCC(peer, rx.amsdu_cnt, 1, !is_not_amsdu);
1782 	DP_STATS_INCC(peer, rx.rx_retries, 1, qdf_nbuf_is_rx_retry_flag(nbuf));
1783 
1784 	tid_stats->msdu_cnt++;
1785 	if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) &&
1786 			 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
1787 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1788 		DP_STATS_INC_PKT(peer, rx.multicast, 1, msdu_len);
1789 		tid_stats->mcast_msdu_cnt++;
1790 		if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
1791 			DP_STATS_INC_PKT(peer, rx.bcast, 1, msdu_len);
1792 			tid_stats->bcast_msdu_cnt++;
1793 		}
1794 	}
1795 
1796 	/*
1797 	 * currently we can return from here as we have similar stats
1798 	 * updated at per ppdu level instead of msdu level
1799 	 */
1800 	if (!soc->process_rx_status)
1801 		return;
1802 
1803 	is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(rx_tlv_hdr);
1804 	DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu);
1805 	DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu));
1806 
1807 	sgi = hal_rx_msdu_start_sgi_get(rx_tlv_hdr);
1808 	mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
1809 	tid = qdf_nbuf_get_tid_val(nbuf);
1810 	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
1811 	reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc,
1812 							      rx_tlv_hdr);
1813 	nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
1814 	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
1815 
1816 	DP_STATS_INCC(peer, rx.rx_mpdu_cnt[mcs], 1,
1817 		      ((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
1818 	DP_STATS_INCC(peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1,
1819 		      ((mcs >= MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
1820 	DP_STATS_INC(peer, rx.bw[bw], 1);
1821 	/*
1822 	 * Only if nss > 0 and pkt_type is 11N/AC/AX,
1823 	 * increment the counter at index [nss - 1].
1824 	 */
1825 	if (nss > 0 && (pkt_type == DOT11_N ||
1826 			pkt_type == DOT11_AC ||
1827 			pkt_type == DOT11_AX))
1828 		DP_STATS_INC(peer, rx.nss[nss - 1], 1);
1829 
1830 	DP_STATS_INC(peer, rx.sgi_count[sgi], 1);
1831 	DP_STATS_INCC(peer, rx.err.mic_err, 1,
1832 		      hal_rx_mpdu_end_mic_err_get(rx_tlv_hdr));
1833 	DP_STATS_INCC(peer, rx.err.decrypt_err, 1,
1834 		      hal_rx_mpdu_end_decrypt_err_get(rx_tlv_hdr));
1835 
1836 	DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
1837 	DP_STATS_INC(peer, rx.reception_type[reception_type], 1);
1838 
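	/*
	 * Per pkt_type MCS histogram: MCS values above the maximum
	 * supported for the given PHY type are accumulated in the last
	 * bucket (MAX_MCS - 1).
	 */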
1839 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1840 		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
1841 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1842 		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A)));
1843 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1844 		      ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
1845 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1846 		      ((mcs <= MAX_MCS_11B) && (pkt_type == DOT11_B)));
1847 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1848 		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
1849 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1850 		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_N)));
1851 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1852 		      ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
1853 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1854 		      ((mcs <= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
1855 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1856 		      ((mcs >= MAX_MCS) && (pkt_type == DOT11_AX)));
1857 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1858 		      ((mcs < MAX_MCS) && (pkt_type == DOT11_AX)));
1859 
1860 	if ((soc->process_rx_status) &&
1861 	    hal_rx_attn_first_mpdu_get(rx_tlv_hdr)) {
1862 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
1863 		if (!vdev->pdev)
1864 			return;
1865 
1866 		dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
1867 				     &peer->stats, peer->peer_id,
1868 				     UPDATE_PEER_STATS,
1869 				     vdev->pdev->pdev_id);
1870 #endif
1871 
1872 	}
1873 }
1874 
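/**
 * is_sa_da_idx_valid() - check whether the sa_idx/da_idx reported by HW
 *			  are within the configured maximum AST index
 * @soc: core txrx main context
 * @rx_tlv_hdr: pointer to the start of RX TLV headers
 * @nbuf: pointer to the msdu being checked
 * @msdu_info: msdu metadata parsed from the RX TLVs
 *
 * Return: false if a valid sa_idx, or a valid da_idx of a unicast frame,
 *	   exceeds the maximum AST index; true otherwise
 */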
1875 static inline bool is_sa_da_idx_valid(struct dp_soc *soc,
1876 				      uint8_t *rx_tlv_hdr,
1877 				      qdf_nbuf_t nbuf,
1878 				      struct hal_rx_msdu_metadata msdu_info)
1879 {
1880 	if ((qdf_nbuf_is_sa_valid(nbuf) &&
1881 	    (msdu_info.sa_idx > wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) ||
1882 	    (!qdf_nbuf_is_da_mcbc(nbuf) &&
1883 	     qdf_nbuf_is_da_valid(nbuf) &&
1884 	     (msdu_info.da_idx > wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))))
1885 		return false;
1886 
1887 	return true;
1888 }
1889 
1890 #ifndef WDS_VENDOR_EXTENSION
1891 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
1892 			   struct dp_vdev *vdev,
1893 			   struct dp_peer *peer)
1894 {
1895 	return 1;
1896 }
1897 #endif
1898 
1899 #ifdef RX_DESC_DEBUG_CHECK
1900 /**
1901  * dp_rx_desc_nbuf_sanity_check - Add sanity check to catch REO rx_desc paddr
1902  *				  corruption
1903  *
1904  * @ring_desc: REO ring descriptor
1905  * @rx_desc: Rx descriptor
1906  *
1907  * Return: QDF_STATUS_SUCCESS if the paddr check passes, else QDF_STATUS_E_FAILURE
1908  */
1909 static inline
1910 QDF_STATUS dp_rx_desc_nbuf_sanity_check(hal_ring_desc_t ring_desc,
1911 					struct dp_rx_desc *rx_desc)
1912 {
1913 	struct hal_buf_info hbi;
1914 
1915 	hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
1916 	/* Sanity check for possible buffer paddr corruption */
1917 	if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr))
1918 		return QDF_STATUS_SUCCESS;
1919 
1920 	return QDF_STATUS_E_FAILURE;
1921 }
1922 #else
1923 static inline
1924 QDF_STATUS dp_rx_desc_nbuf_sanity_check(hal_ring_desc_t ring_desc,
1925 					struct dp_rx_desc *rx_desc)
1926 {
1927 	return QDF_STATUS_SUCCESS;
1928 }
1929 #endif
1930 
1931 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
1932 static inline
1933 bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
1934 {
1935 	bool limit_hit = false;
1936 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
1937 
1938 	limit_hit = (num_reaped >= cfg->rx_reap_loop_pkt_limit);
1940 
1941 	if (limit_hit)
1942 		DP_STATS_INC(soc, rx.reap_loop_pkt_limit_hit, 1)
1943 
1944 	return limit_hit;
1945 }
1946 
1947 static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
1948 {
1949 	return soc->wlan_cfg_ctx->rx_enable_eol_data_check;
1950 }
1951 
1952 #else
1953 static inline
1954 bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
1955 {
1956 	return false;
1957 }
1958 
1959 static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
1960 {
1961 	return false;
1962 }
1963 
1964 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
1965 
1966 #ifdef DP_RX_PKT_NO_PEER_DELIVER
1967 /**
1968  * dp_rx_deliver_to_stack_no_peer() - try deliver rx data even if
1969  *				      no corresponding peer is found
1970  * @soc: core txrx main context
1971  * @nbuf: pkt skb pointer
1972  *
1973  * This function tries to deliver certain special RX frames to the stack
1974  * even when no matching peer is found. For instance, in the LFR case,
1975  * some EAPOL data may reach the host before peer_map is done.
1976  *
1977  * Return: None
1978  */
1979 static
1980 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
1981 {
1982 	uint16_t peer_id;
1983 	uint8_t vdev_id;
1984 	struct dp_vdev *vdev = NULL;
1985 	uint32_t l2_hdr_offset = 0;
1986 	uint16_t msdu_len = 0;
1987 	uint32_t pkt_len = 0;
1988 	uint8_t *rx_tlv_hdr;
1989 	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
1990 				FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;
1991 
1992 	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
1993 	if (peer_id > soc->max_peers)
1994 		goto deliver_fail;
1995 
1996 	vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
1997 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_RX);
1998 	if (!vdev || vdev->delete.pending || !vdev->osif_rx)
1999 		goto deliver_fail;
2000 
2001 	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf)))
2002 		goto deliver_fail;
2003 
2004 	rx_tlv_hdr = qdf_nbuf_data(nbuf);
2005 	l2_hdr_offset =
2006 		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
2007 
2008 	msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
2009 	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;
2010 	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
2011 
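	/*
	 * Set the full packet length (msdu + L3 header padding + RX TLVs),
	 * then strip the RX TLV header and the L3 padding so the frame data
	 * starts at the decapsulated header before delivery.
	 */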
2012 	qdf_nbuf_set_pktlen(nbuf, pkt_len);
2013 	qdf_nbuf_pull_head(nbuf,
2014 			   RX_PKT_TLVS_LEN +
2015 			   l2_hdr_offset);
2016 
2017 	if (dp_rx_is_special_frame(nbuf, frame_mask)) {
2018 		qdf_nbuf_set_exc_frame(nbuf, 1);
2019 		if (QDF_STATUS_SUCCESS !=
2020 		    vdev->osif_rx(vdev->osif_vdev, nbuf))
2021 			goto deliver_fail;
2022 		DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1);
2023 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
2024 		return;
2025 	}
2026 
2027 deliver_fail:
2028 	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
2029 			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
2030 	qdf_nbuf_free(nbuf);
2031 	if (vdev)
2032 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
2033 }
2034 #else
2035 static inline
2036 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
2037 {
2038 	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
2039 			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
2040 	qdf_nbuf_free(nbuf);
2041 }
2042 #endif
2043 
2044 /**
2045  * dp_rx_srng_get_num_pending() - get number of pending entries
2046  * @hal_soc: hal soc opaque pointer
2047  * @hal_ring_hdl: opaque pointer to the HAL Rx Ring
2048  * @num_entries: number of entries in the hal_ring.
2049  * @near_full: pointer to a boolean. This is set if ring is near full.
2050  *
2051  * The function returns the number of entries in a destination ring which are
2052  * yet to be reaped. The function also checks if the ring is near full.
2053  * If more than half of the ring needs to be reaped, the ring is considered
2054  * approaching full.
2055  * The function uses hal_srng_dst_num_valid_locked to get the number of valid
2056  * entries. It should not be called within an SRNG lock. HW pointer value is
2057  * synced into cached_hp.
2058  *
2059  * Return: Number of pending entries if any
2060  */
2061 static
2062 uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
2063 				    hal_ring_handle_t hal_ring_hdl,
2064 				    uint32_t num_entries,
2065 				    bool *near_full)
2066 {
2067 	uint32_t num_pending = 0;
2068 
2069 	num_pending = hal_srng_dst_num_valid_locked(hal_soc,
2070 						    hal_ring_hdl,
2071 						    true);
2072 
2073 	if (num_entries && (num_pending >= num_entries >> 1))
2074 		*near_full = true;
2075 	else
2076 		*near_full = false;
2077 
2078 	return num_pending;
2079 }
2080 
2081 #ifdef WLAN_SUPPORT_RX_FISA
2082 void dp_rx_skip_tlvs(qdf_nbuf_t nbuf, uint32_t l3_padding)
2083 {
2084 	QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
2085 	qdf_nbuf_pull_head(nbuf, l3_padding + RX_PKT_TLVS_LEN);
2086 }
2087 
2088 /**
2089  * dp_rx_set_hdr_pad() - set l3 padding in nbuf cb
2090  * @nbuf: pkt skb pointer
2091  * @l3_padding: l3 padding
2092  *
2093  * Return: None
2094  */
2095 static inline
2096 void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
2097 {
2098 	QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
2099 }
2100 #else
2101 void dp_rx_skip_tlvs(qdf_nbuf_t nbuf, uint32_t l3_padding)
2102 {
2103 	qdf_nbuf_pull_head(nbuf, l3_padding + RX_PKT_TLVS_LEN);
2104 }
2105 
2106 static inline
2107 void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
2108 {
2109 }
2110 #endif
2111 
2112 #ifdef DP_RX_DROP_RAW_FRM
2113 /**
2114  * dp_rx_is_raw_frame_dropped() - if raw frame nbuf, free and drop
2115  * @nbuf: pkt skb pointer
2116  *
2117  * Return: true - raw frame, dropped
2118  *	   false - not raw frame, do nothing
2119  */
2120 static inline
2121 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)
2122 {
2123 	if (qdf_nbuf_is_raw_frame(nbuf)) {
2124 		qdf_nbuf_free(nbuf);
2125 		return true;
2126 	}
2127 
2128 	return false;
2129 }
2130 #else
2131 static inline
2132 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)
2133 {
2134 	return false;
2135 }
2136 #endif
2137 
2138 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
2139 /**
2140  * dp_rx_ring_record_entry() - Record an entry into the rx ring history.
2141  * @soc: Datapath soc structure
2142  * @ring_num: REO ring number
2143  * @ring_desc: REO ring descriptor
2144  *
2145  * Returns: None
2146  */
2147 static inline void
2148 dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
2149 			hal_ring_desc_t ring_desc)
2150 {
2151 	struct dp_buf_info_record *record;
2152 	uint8_t rbm;
2153 	struct hal_buf_info hbi;
2154 	uint32_t idx;
2155 
2156 	if (qdf_unlikely(!soc->rx_ring_history[ring_num]))
2157 		return;
2158 
2159 	hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
2160 	rbm = hal_rx_ret_buf_manager_get(ring_desc);
2161 
2162 	idx = dp_history_get_next_index(&soc->rx_ring_history[ring_num]->index,
2163 					DP_RX_HIST_MAX);
2164 
2165 	/* No NULL check needed for record since it's an array */
2166 	record = &soc->rx_ring_history[ring_num]->entry[idx];
2167 
2168 	record->timestamp = qdf_get_log_timestamp();
2169 	record->hbi.paddr = hbi.paddr;
2170 	record->hbi.sw_cookie = hbi.sw_cookie;
2171 	record->hbi.rbm = rbm;
2172 }
2173 #else
2174 static inline void
2175 dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
2176 			hal_ring_desc_t ring_desc)
2177 {
2178 }
2179 #endif
2180 
2181 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
2182 /**
2183  * dp_rx_update_stats() - Update soc level rx packet count
2184  * @soc: DP soc handle
2185  * @nbuf: nbuf received
2186  *
2187  * Returns: none
2188  */
2189 static inline void dp_rx_update_stats(struct dp_soc *soc,
2190 				      qdf_nbuf_t nbuf)
2191 {
2192 	DP_STATS_INC_PKT(soc, rx.ingress, 1,
2193 			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
2194 }
2195 #else
2196 static inline void dp_rx_update_stats(struct dp_soc *soc,
2197 				      qdf_nbuf_t nbuf)
2198 {
2199 }
2200 #endif
2201 /**
2202  * dp_rx_process() - Brain of the Rx processing functionality
2203  *		     Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
2204  * @int_ctx: per interrupt context
2205  * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
2206  * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring.
2207  * @quota: No. of units (packets) that can be serviced in one shot.
2208  *
2209  * This function implements the core of Rx functionality. This is
2210  * expected to handle only non-error frames.
2211  *
2212  * Return: uint32_t: No. of elements processed
2213  */
2214 uint32_t dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
2215 			    uint8_t reo_ring_num, uint32_t quota)
2216 {
2217 	hal_ring_desc_t ring_desc;
2218 	hal_soc_handle_t hal_soc;
2219 	struct dp_rx_desc *rx_desc = NULL;
2220 	qdf_nbuf_t nbuf, next;
2221 	bool near_full;
2222 	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT];
2223 	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT];
2224 	uint32_t num_pending;
2225 	uint32_t rx_bufs_used = 0, rx_buf_cookie;
2226 	uint16_t msdu_len = 0;
2227 	uint16_t peer_id;
2228 	uint8_t vdev_id;
2229 	struct dp_peer *peer;
2230 	struct dp_vdev *vdev;
2231 	uint32_t pkt_len = 0;
2232 	struct hal_rx_mpdu_desc_info mpdu_desc_info;
2233 	struct hal_rx_msdu_desc_info msdu_desc_info;
2234 	enum hal_reo_error_status error;
2235 	uint32_t peer_mdata;
2236 	uint8_t *rx_tlv_hdr;
2237 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT];
2238 	uint8_t mac_id = 0;
2239 	struct dp_pdev *rx_pdev;
2240 	struct dp_srng *dp_rxdma_srng;
2241 	struct rx_desc_pool *rx_desc_pool;
2242 	struct dp_soc *soc = int_ctx->soc;
2243 	uint8_t ring_id = 0;
2244 	uint8_t core_id = 0;
2245 	struct cdp_tid_rx_stats *tid_stats;
2246 	qdf_nbuf_t nbuf_head;
2247 	qdf_nbuf_t nbuf_tail;
2248 	qdf_nbuf_t deliver_list_head;
2249 	qdf_nbuf_t deliver_list_tail;
2250 	uint32_t num_rx_bufs_reaped = 0;
2251 	uint32_t intr_id;
2252 	struct hif_opaque_softc *scn;
2253 	int32_t tid = 0;
2254 	bool is_prev_msdu_last = true;
2255 	uint32_t num_entries_avail = 0;
2256 	uint32_t rx_ol_pkt_cnt = 0;
2257 	uint32_t num_entries = 0;
2258 	struct hal_rx_msdu_metadata msdu_metadata;
2259 	QDF_STATUS status;
2260 	qdf_nbuf_t ebuf_head;
2261 	qdf_nbuf_t ebuf_tail;
2262 
2263 	DP_HIST_INIT();
2264 
2265 	qdf_assert_always(soc && hal_ring_hdl);
2266 	hal_soc = soc->hal_soc;
2267 	qdf_assert_always(hal_soc);
2268 
2269 	scn = soc->hif_handle;
2270 	hif_pm_runtime_mark_dp_rx_busy(scn);
2271 	intr_id = int_ctx->dp_intr_id;
2272 	num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);
2273 
2274 more_data:
2275 	/* reset local variables here to be re-used in the function */
2276 	nbuf_head = NULL;
2277 	nbuf_tail = NULL;
2278 	deliver_list_head = NULL;
2279 	deliver_list_tail = NULL;
2280 	peer = NULL;
2281 	vdev = NULL;
2282 	num_rx_bufs_reaped = 0;
2283 	ebuf_head = NULL;
2284 	ebuf_tail = NULL;
2285 
2286 	qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
2287 	qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
2288 	qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
2289 	qdf_mem_zero(head, sizeof(head));
2290 	qdf_mem_zero(tail, sizeof(tail));
2291 
2292 	if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
2293 
2294 		/*
2295 		 * Need API to convert from hal_ring pointer to
2296 		 * Ring Type / Ring Id combo
2297 		 */
2298 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
2299 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2300 			FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
2301 		goto done;
2302 	}
2303 
2304 	/*
2305 	 * start reaping the buffers from reo ring and queue
2306 	 * them in per vdev queue.
2307 	 * Process the received pkts in a different per vdev loop.
2308 	 */
2309 	while (qdf_likely(quota &&
2310 			  (ring_desc = hal_srng_dst_peek(hal_soc,
2311 							 hal_ring_hdl)))) {
2312 
2313 		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
2314 		ring_id = hal_srng_ring_id_get(hal_ring_hdl);
2315 
2316 		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
2317 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2318 			FL("HAL RING 0x%pK:error %d"), hal_ring_hdl, error);
2319 			DP_STATS_INC(soc, rx.err.hal_reo_error[ring_id], 1);
2320 			/* Don't know how to deal with this -- assert */
2321 			qdf_assert(0);
2322 		}
2323 
2324 		dp_rx_ring_record_entry(soc, reo_ring_num, ring_desc);
2325 		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
2326 		status = dp_rx_cookie_check_and_invalidate(ring_desc);
2327 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
2328 			DP_STATS_INC(soc, rx.err.stale_cookie, 1);
2329 			break;
2330 		}
2331 
2332 		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
2333 		status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl,
2334 					   ring_desc, rx_desc);
2335 		if (QDF_IS_STATUS_ERROR(status)) {
2336 			if (qdf_unlikely(rx_desc && rx_desc->nbuf)) {
2337 				qdf_assert_always(rx_desc->unmapped);
2338 				dp_ipa_handle_rx_buf_smmu_mapping(
2339 							soc,
2340 							rx_desc->nbuf,
2341 							RX_DATA_BUFFER_SIZE,
2342 							false);
2343 				qdf_nbuf_unmap_nbytes_single(
2344 							soc->osdev,
2345 							rx_desc->nbuf,
2346 							QDF_DMA_FROM_DEVICE,
2347 							RX_DATA_BUFFER_SIZE);
2348 				rx_desc->unmapped = 1;
2349 				dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
2350 							    rx_desc->pool_id);
2351 				dp_rx_add_to_free_desc_list(
2352 							&head[rx_desc->pool_id],
2353 							&tail[rx_desc->pool_id],
2354 							rx_desc);
2355 			}
2356 			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
2357 			continue;
2358 		}
2359 
2360 		/*
2361 		 * This is an unlikely scenario where the host reaps a
2362 		 * descriptor that it already reaped a while ago but has
2363 		 * not yet replenished back to HW.
2364 		 * In this case host will dump the last 128 descriptors
2365 		 * including the software descriptor rx_desc and assert.
2366 		 */
2367 
2368 		if (qdf_unlikely(!rx_desc->in_use)) {
2369 			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
2370 			dp_info_rl("Reaping rx_desc not in use!");
2371 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
2372 						   ring_desc, rx_desc);
2373 			/* ignore duplicate RX desc and continue to process */
2374 			/* Pop out the descriptor */
2375 			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
2376 			continue;
2377 		}
2378 
2379 		status = dp_rx_desc_nbuf_sanity_check(ring_desc, rx_desc);
2380 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
2381 			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
2382 			dp_info_rl("Nbuf sanity check failure!");
2383 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
2384 						   ring_desc, rx_desc);
2385 			rx_desc->in_err_state = 1;
2386 			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
2387 			continue;
2388 		}
2389 
2390 		if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
2391 			dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
2392 			DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
2393 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
2394 						   ring_desc, rx_desc);
2395 		}
2396 
2397 		/* Get MPDU DESC info */
2398 		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);
2399 
2400 		/* Get MSDU DESC info */
2401 		hal_rx_msdu_desc_info_get(ring_desc, &msdu_desc_info);
2402 
2403 		if (qdf_unlikely(msdu_desc_info.msdu_flags &
2404 				 HAL_MSDU_F_MSDU_CONTINUATION)) {
2405 			/* previous msdu has end bit set, so current one is
2406 			 * the new MPDU
2407 			 */
2408 			if (is_prev_msdu_last) {
2409 				/* Get number of entries available in HW ring */
2410 				num_entries_avail =
2411 				hal_srng_dst_num_valid(hal_soc,
2412 						       hal_ring_hdl, 1);
2413 
2414 				/* For new MPDU check if we can read complete
2415 				 * MPDU by comparing the number of buffers
2416 				 * available and number of buffers needed to
2417 				 * reap this MPDU
2418 				 */
2419 				if (((msdu_desc_info.msdu_len /
2420 				     (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN) +
2421 				     1)) > num_entries_avail) {
2422 					DP_STATS_INC(
2423 						soc,
2424 						rx.msdu_scatter_wait_break,
2425 						1);
2426 					break;
2427 				}
2428 				is_prev_msdu_last = false;
2429 			}
2430 
2431 		}
2432 
2433 		core_id = smp_processor_id();
2434 		DP_STATS_INC(soc, rx.ring_packets[core_id][ring_id], 1);
2435 
2436 		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
2437 			qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1);
2438 
2439 		if (qdf_unlikely(mpdu_desc_info.mpdu_flags &
2440 				 HAL_MPDU_F_RAW_AMPDU))
2441 			qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);
2442 
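		/*
		 * Once the last-MSDU-in-MPDU flag is seen, mark the previous
		 * msdu as last so that the next reaped buffer is treated as
		 * the start of a new MPDU.
		 */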
2443 		if (!is_prev_msdu_last &&
2444 		    msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
2445 			is_prev_msdu_last = true;
2446 
2447 		/* Pop out the descriptor*/
2448 		hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
2449 
2450 		rx_bufs_reaped[rx_desc->pool_id]++;
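		/*
		 * Cache the peer_id and vdev_id decoded from the REO peer
		 * metadata in the nbuf control block; they are used later
		 * for peer/vdev lookup in the per-nbuf processing loop.
		 */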
2451 		peer_mdata = mpdu_desc_info.peer_meta_data;
2452 		QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
2453 			DP_PEER_METADATA_PEER_ID_GET(peer_mdata);
2454 		QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) =
2455 			DP_PEER_METADATA_VDEV_ID_GET(peer_mdata);
2456 
2457 		/*
2458 		 * Save the first/last/continuation msdu flags in nbuf->cb,
2459 		 * and also save mcbc, is_da_valid, is_sa_valid and the
2460 		 * msdu length there. This ensures the info required for
2461 		 * per-pkt processing is always in the same cache line,
2462 		 * which helps improve throughput for smaller pkt
2463 		 * sizes.
2464 		 */
2465 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
2466 			qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);
2467 
2468 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
2469 			qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);
2470 
2471 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
2472 			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);
2473 
2474 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
2475 			qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1);
2476 
2477 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
2478 			qdf_nbuf_set_da_valid(rx_desc->nbuf, 1);
2479 
2480 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
2481 			qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1);
2482 
2483 		qdf_nbuf_set_tid_val(rx_desc->nbuf,
2484 				     HAL_RX_REO_QUEUE_NUMBER_GET(ring_desc));
2485 		qdf_nbuf_set_rx_reo_dest_ind(
2486 				rx_desc->nbuf,
2487 				HAL_RX_REO_MSDU_REO_DST_IND_GET(ring_desc));
2488 
2489 		QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len;
2490 
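		/*
		 * Record the REO ring (rx context) on which this buffer was
		 * reaped; used later to index the per-ring TID stats.
		 */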
2491 		QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;
2492 
2493 		/*
2494 		 * The unmap is done after the scattered-msdu wait/break
2495 		 * logic above to avoid a double skb unmap.
2496 		 */
2497 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
2498 		dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf,
2499 						  rx_desc_pool->buf_size,
2500 						  false);
2501 		qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
2502 					     QDF_DMA_FROM_DEVICE,
2503 					     rx_desc_pool->buf_size);
2504 		rx_desc->unmapped = 1;
2505 		DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head,
2506 				   ebuf_tail, rx_desc);
2507 		/*
2508 		 * If the continuation bit is set, the MSDU is spread across
2509 		 * multiple buffers; do not decrement quota until all the
2510 		 * buffers of that MSDU are reaped.
2511 		 */
2512 		if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)))
2513 			quota -= 1;
2514 
2515 		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
2516 						&tail[rx_desc->pool_id],
2517 						rx_desc);
2518 
2519 		num_rx_bufs_reaped++;
2520 		/*
2521 		 * In the scatter case, allow the break only after the
2522 		 * complete msdu has been received.
2523 		 */
2524 		if (is_prev_msdu_last &&
2525 		    dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped))
2526 			break;
2527 	}
2528 done:
2529 	dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);
2530 
2531 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
2532 		/*
2533 		 * continue with next mac_id if no pkts were reaped
2534 		 * from that pool
2535 		 */
2536 		if (!rx_bufs_reaped[mac_id])
2537 			continue;
2538 
2539 		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
2540 
2541 		rx_desc_pool = &soc->rx_desc_buf[mac_id];
2542 
2543 		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
2544 					rx_desc_pool, rx_bufs_reaped[mac_id],
2545 					&head[mac_id], &tail[mac_id]);
2546 	}
2547 
2548 	dp_verbose_debug("replenished %u\n", rx_bufs_reaped[0]);
2549 	/* Peer can be NULL in case of LFR */
2550 	if (qdf_likely(peer))
2551 		vdev = NULL;
2552 
2553 	/*
2554 	 * BIG loop where each nbuf is dequeued from global queue,
2555 	 * processed and queued back on a per vdev basis. These nbufs
2556 	 * are sent to stack as and when we run out of nbufs
2557 	 * or a new nbuf dequeued from global queue has a different
2558 	 * vdev when compared to previous nbuf.
2559 	 */
2560 	nbuf = nbuf_head;
2561 	while (nbuf) {
2562 		next = nbuf->next;
2563 		if (qdf_unlikely(dp_rx_is_raw_frame_dropped(nbuf))) {
2564 			nbuf = next;
2565 			DP_STATS_INC(soc, rx.err.raw_frm_drop, 1);
2566 			continue;
2567 		}
2568 
2569 		rx_tlv_hdr = qdf_nbuf_data(nbuf);
2570 		vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
2571 		peer_id =  QDF_NBUF_CB_RX_PEER_ID(nbuf);
2572 
2573 		if (dp_rx_is_list_ready(deliver_list_head, vdev, peer,
2574 					peer_id, vdev_id)) {
2575 			dp_rx_deliver_to_stack(soc, vdev, peer,
2576 					       deliver_list_head,
2577 					       deliver_list_tail);
2578 			deliver_list_head = NULL;
2579 			deliver_list_tail = NULL;
2580 		}
2581 
2582 		/* Get TID from struct cb->tid_val, save to tid */
2583 		if (qdf_nbuf_is_rx_chfrag_start(nbuf))
2584 			tid = qdf_nbuf_get_tid_val(nbuf);
2585 
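		/*
		 * Hold a peer reference while processing this nbuf; when the
		 * peer_id changes between consecutive nbufs, drop the old
		 * reference and acquire one for the new peer.
		 */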
2586 		if (qdf_unlikely(!peer)) {
2587 			peer = dp_peer_get_ref_by_id(soc, peer_id,
2588 						     DP_MOD_ID_RX);
2589 		} else if (peer && peer->peer_id != peer_id) {
2590 			dp_peer_unref_delete(peer, DP_MOD_ID_RX);
2591 			peer = dp_peer_get_ref_by_id(soc, peer_id,
2592 						     DP_MOD_ID_RX);
2593 		}
2594 
2595 		if (peer) {
2596 			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
2597 			qdf_dp_trace_set_track(nbuf, QDF_RX);
2598 			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
2599 			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
2600 				QDF_NBUF_RX_PKT_DATA_TRACK;
2601 		}
2602 
2603 		rx_bufs_used++;
2604 
2605 		if (qdf_likely(peer)) {
2606 			vdev = peer->vdev;
2607 		} else {
2608 			nbuf->next = NULL;
2609 			dp_rx_deliver_to_stack_no_peer(soc, nbuf);
2610 			nbuf = next;
2611 			continue;
2612 		}
2613 
2614 		if (qdf_unlikely(!vdev)) {
2615 			qdf_nbuf_free(nbuf);
2616 			nbuf = next;
2617 			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
2618 			continue;
2619 		}
2620 
2621 		/* when hlos tid override is enabled, save tid in
2622 		 * skb->priority
2623 		 */
2624 		if (qdf_unlikely(vdev->skip_sw_tid_classification &
2625 					DP_TXRX_HLOS_TID_OVERRIDE_ENABLED))
2626 			qdf_nbuf_set_priority(nbuf, tid);
2627 
2628 		rx_pdev = vdev->pdev;
2629 		DP_RX_TID_SAVE(nbuf, tid);
2630 		if (qdf_unlikely(rx_pdev->delay_stats_flag) ||
2631 		    qdf_unlikely(wlan_cfg_is_peer_ext_stats_enabled(
2632 				 soc->wlan_cfg_ctx)))
2633 			qdf_nbuf_set_timestamp(nbuf);
2634 
2635 		ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
2636 		tid_stats =
2637 			&rx_pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
2638 
2639 		/*
2640 		 * Check if DMA completed -- msdu_done is the last bit
2641 		 * to be written
2642 		 */
2643 		if (qdf_unlikely(!qdf_nbuf_is_rx_chfrag_cont(nbuf) &&
2644 				 !hal_rx_attn_msdu_done_get(rx_tlv_hdr))) {
2645 			dp_err("MSDU DONE failure");
2646 			DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
2647 			hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
2648 					     QDF_TRACE_LEVEL_INFO);
2649 			tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
2650 			qdf_nbuf_free(nbuf);
2651 			qdf_assert(0);
2652 			nbuf = next;
2653 			continue;
2654 		}
2655 
2656 		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
2657 		/*
2658 		 * First IF condition:
2659 		 * 802.11 Fragmented pkts are reinjected to REO
2660 		 * HW block as SG pkts and for these pkts we only
2661 		 * need to pull the RX TLVS header length.
2662 		 * Second IF condition:
2663 		 * The below condition happens when an MSDU is spread
2664 		 * across multiple buffers. This can happen in two cases
2665 		 * 1. The nbuf size is smaller than the received msdu.
2666 		 *    ex: the nbuf size is set to 2048 during
2667 		 *        nbuf_alloc, but the received msdu is 2304
2668 		 *        bytes in size, so this msdu is spread
2669 		 *        across 2 nbufs.
2670 		 *
2671 		 * 2. AMSDUs when RAW mode is enabled.
2672 		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
2673 		 *        across 1st nbuf and 2nd nbuf and last MSDU is
2674 		 *        spread across 2nd nbuf and 3rd nbuf.
2675 		 *
2676 		 * for these scenarios let us create a skb frag_list and
2677 		 * append these buffers till the last MSDU of the AMSDU
2678 		 * Third condition:
2679 		 * This is the most likely case: we receive 802.3 pkts
2680 		 * decapsulated by HW; here we need to set the pkt length.
2681 		 */
2682 		hal_rx_msdu_metadata_get(hal_soc, rx_tlv_hdr, &msdu_metadata);
2683 		if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
2684 			bool is_mcbc, is_sa_vld, is_da_vld;
2685 
2686 			is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
2687 								 rx_tlv_hdr);
2688 			is_sa_vld =
2689 				hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
2690 								rx_tlv_hdr);
2691 			is_da_vld =
2692 				hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
2693 								rx_tlv_hdr);
2694 
2695 			qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
2696 			qdf_nbuf_set_da_valid(nbuf, is_da_vld);
2697 			qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);
2698 
2699 			qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
2700 		} else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
2701 			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
2702 			nbuf = dp_rx_sg_create(soc, nbuf);
2703 			next = nbuf->next;
2704 
2705 			if (qdf_nbuf_is_raw_frame(nbuf)) {
2706 				DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
2707 				DP_STATS_INC_PKT(peer, rx.raw, 1, msdu_len);
2708 			} else {
2709 				qdf_nbuf_free(nbuf);
2710 				DP_STATS_INC(soc, rx.err.scatter_msdu, 1);
2711 				dp_info_rl("scatter msdu len %d, dropped",
2712 					   msdu_len);
2713 				nbuf = next;
2714 				continue;
2715 			}
2716 		} else {
2717 
2718 			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
2719 			pkt_len = msdu_len +
2720 				  msdu_metadata.l3_hdr_pad +
2721 				  RX_PKT_TLVS_LEN;
2722 
2723 			qdf_nbuf_set_pktlen(nbuf, pkt_len);
2724 			dp_rx_skip_tlvs(nbuf, msdu_metadata.l3_hdr_pad);
2725 		}
2726 
2727 		/*
2728 		 * Process the frame for multipass phase processing.
2729 		 */
2730 		if (qdf_unlikely(vdev->multipass_en)) {
2731 			if (dp_rx_multipass_process(peer, nbuf, tid) == false) {
2732 				DP_STATS_INC(peer, rx.multipass_rx_pkt_drop, 1);
2733 				qdf_nbuf_free(nbuf);
2734 				nbuf = next;
2735 				continue;
2736 			}
2737 		}
2738 
2739 		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
2740 			QDF_TRACE(QDF_MODULE_ID_DP,
2741 					QDF_TRACE_LEVEL_ERROR,
2742 					FL("Policy Check Drop pkt"));
2743 			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
2744 			/* Drop & free packet */
2745 			qdf_nbuf_free(nbuf);
2746 			/* Statistics */
2747 			nbuf = next;
2748 			continue;
2749 		}
2750 
2751 		if (qdf_unlikely(peer && (peer->nawds_enabled) &&
2752 				 (qdf_nbuf_is_da_mcbc(nbuf)) &&
2753 				 (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc,
2754 								rx_tlv_hdr) ==
2755 				  false))) {
2756 			tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
2757 			DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
2758 			qdf_nbuf_free(nbuf);
2759 			nbuf = next;
2760 			continue;
2761 		}
2762 
2763 		if (soc->process_rx_status)
2764 			dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);
2765 
2766 		/* Update the protocol tag in SKB based on CCE metadata */
2767 		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
2768 					  reo_ring_num, false, true);
2769 
2770 		/* Update the flow tag in SKB based on FSE metadata */
2771 		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
2772 
2773 		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer,
2774 					ring_id, tid_stats);
2775 
2776 		if (qdf_unlikely(vdev->mesh_vdev)) {
2777 			if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
2778 					== QDF_STATUS_SUCCESS) {
2779 				QDF_TRACE(QDF_MODULE_ID_DP,
2780 						QDF_TRACE_LEVEL_INFO_MED,
2781 						FL("mesh pkt filtered"));
2782 				tid_stats->fail_cnt[MESH_FILTER_DROP]++;
2783 				DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
2784 					     1);
2785 
2786 				qdf_nbuf_free(nbuf);
2787 				nbuf = next;
2788 				continue;
2789 			}
2790 			dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
2791 		}
2792 
2793 		if (qdf_likely(vdev->rx_decap_type ==
2794 			       htt_cmn_pkt_type_ethernet) &&
2795 		    qdf_likely(!vdev->mesh_vdev)) {
2796 			/* WDS Destination Address Learning */
2797 			dp_rx_da_learn(soc, rx_tlv_hdr, peer, nbuf);
2798 
2799 			/* Due to HW issue, sometimes we see that the sa_idx
2800 			 * and da_idx are invalid with sa_valid and da_valid
2801 			 * bits set
2802 			 *
2803 			 * in this case we also see that value of
2804 			 * sa_sw_peer_id is set as 0
2805 			 *
2806 			 * Drop the packet if sa_idx or da_idx is OOB, or
2807 			 * sa_sw_peer_id is 0.
2808 			 */
2809 			if (!is_sa_da_idx_valid(soc, rx_tlv_hdr, nbuf,
2810 						msdu_metadata)) {
2811 				qdf_nbuf_free(nbuf);
2812 				nbuf = next;
2813 				DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
2814 				continue;
2815 			}
2816 			/* WDS Source Port Learning */
2817 			if (qdf_likely(vdev->wds_enabled))
2818 				dp_rx_wds_srcport_learn(soc,
2819 							rx_tlv_hdr,
2820 							peer,
2821 							nbuf,
2822 							msdu_metadata);
2823 
2824 			/* Intrabss-fwd */
2825 			if (dp_rx_check_ap_bridge(vdev))
2826 				if (dp_rx_intrabss_fwd(soc,
2827 							peer,
2828 							rx_tlv_hdr,
2829 							nbuf,
2830 							msdu_metadata)) {
2831 					nbuf = next;
2832 					tid_stats->intrabss_cnt++;
2833 					continue; /* Get next desc */
2834 				}
2835 		}
2836 
2837 		dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt);
2838 
2839 		dp_rx_update_stats(soc, nbuf);
2840 		DP_RX_LIST_APPEND(deliver_list_head,
2841 				  deliver_list_tail,
2842 				  nbuf);
2843 		DP_STATS_INC_PKT(peer, rx.to_stack, 1,
2844 				 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
2845 		if (qdf_unlikely(peer->in_twt))
2846 			DP_STATS_INC_PKT(peer, rx.to_stack_twt, 1,
2847 					 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
2848 
2849 		tid_stats->delivered_to_stack++;
2850 		nbuf = next;
2851 	}
2852 
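	/*
	 * Flush whatever is still pending on the local deliver list. If no
	 * peer reference is held, fall back to per-nbuf no-peer delivery.
	 */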
2853 	if (qdf_likely(deliver_list_head)) {
2854 		if (qdf_likely(peer))
2855 			dp_rx_deliver_to_stack(soc, vdev, peer,
2856 					       deliver_list_head,
2857 					       deliver_list_tail);
2858 		else {
2859 			nbuf = deliver_list_head;
2860 			while (nbuf) {
2861 				next = nbuf->next;
2862 				nbuf->next = NULL;
2863 				dp_rx_deliver_to_stack_no_peer(soc, nbuf);
2864 				nbuf = next;
2865 			}
2866 		}
2867 	}
2868 
2869 	if (qdf_likely(peer))
2870 		dp_peer_unref_delete(peer, DP_MOD_ID_RX);
2871 
2872 	if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
2873 		if (quota) {
2874 			num_pending =
2875 				dp_rx_srng_get_num_pending(hal_soc,
2876 							   hal_ring_hdl,
2877 							   num_entries,
2878 							   &near_full);
2879 			if (num_pending) {
2880 				DP_STATS_INC(soc, rx.hp_oos2, 1);
2881 
2882 				if (!hif_exec_should_yield(scn, intr_id))
2883 					goto more_data;
2884 
2885 				if (qdf_unlikely(near_full)) {
2886 					DP_STATS_INC(soc, rx.near_full, 1);
2887 					goto more_data;
2888 				}
2889 			}
2890 		}
2891 
2892 		if (vdev && vdev->osif_fisa_flush)
2893 			vdev->osif_fisa_flush(soc, reo_ring_num);
2894 
2895 		if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
2896 			vdev->osif_gro_flush(vdev->osif_vdev,
2897 					     reo_ring_num);
2898 		}
2899 	}
2900 
2901 	/* Update histogram statistics by looping through pdevs */
2902 	DP_RX_HIST_STATS_PER_PDEV();
2903 
2904 	return rx_bufs_used; /* Assume no scale factor for now */
2905 }
2906 
2907 QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev)
2908 {
2909 	QDF_STATUS ret;
2910 
2911 	if (vdev->osif_rx_flush) {
2912 		ret = vdev->osif_rx_flush(vdev->osif_vdev, vdev->vdev_id);
2913 		if (!QDF_IS_STATUS_SUCCESS(ret)) {
2914 			dp_err("Failed to flush rx pkts for vdev %d\n",
2915 			       vdev->vdev_id);
2916 			return ret;
2917 		}
2918 	}
2919 
2920 	return QDF_STATUS_SUCCESS;
2921 }
2922 
2923 static QDF_STATUS
2924 dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc,
2925 			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
2926 			   struct dp_pdev *dp_pdev,
2927 			   struct rx_desc_pool *rx_desc_pool)
2928 {
2929 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
2930 
2931 	(nbuf_frag_info_t->virt_addr).nbuf =
2932 		qdf_nbuf_alloc(dp_soc->osdev, rx_desc_pool->buf_size,
2933 			       RX_BUFFER_RESERVATION,
2934 			       rx_desc_pool->buf_alignment, FALSE);
2935 	if (!((nbuf_frag_info_t->virt_addr).nbuf)) {
2936 		dp_err("nbuf alloc failed");
2937 		DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
2938 		return ret;
2939 	}
2940 
2941 	ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev,
2942 					 (nbuf_frag_info_t->virt_addr).nbuf,
2943 					 QDF_DMA_FROM_DEVICE,
2944 					 rx_desc_pool->buf_size);
2945 
2946 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
2947 		qdf_nbuf_free((nbuf_frag_info_t->virt_addr).nbuf);
2948 		dp_err("nbuf map failed");
2949 		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
2950 		return ret;
2951 	}
2952 
2953 	nbuf_frag_info_t->paddr =
2954 		qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);
2955 
2956 	ret = check_x86_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
2957 			      &nbuf_frag_info_t->paddr,
2958 			      rx_desc_pool);
2959 	if (ret == QDF_STATUS_E_FAILURE) {
2960 		qdf_nbuf_unmap_nbytes_single(dp_soc->osdev,
2961 					     (nbuf_frag_info_t->virt_addr).nbuf,
2962 					     QDF_DMA_FROM_DEVICE,
2963 					     rx_desc_pool->buf_size);
2964 		qdf_nbuf_free((nbuf_frag_info_t->virt_addr).nbuf);
2965 		dp_err("nbuf check x86 failed");
2966 		DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
2967 		return ret;
2968 	}
2969 
2970 	return QDF_STATUS_SUCCESS;
2971 }
2972 
2973 QDF_STATUS
2974 dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
2975 			  struct dp_srng *dp_rxdma_srng,
2976 			  struct rx_desc_pool *rx_desc_pool,
2977 			  uint32_t num_req_buffers)
2978 {
2979 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
2980 	hal_ring_handle_t rxdma_srng = dp_rxdma_srng->hal_srng;
2981 	union dp_rx_desc_list_elem_t *next;
2982 	void *rxdma_ring_entry;
2983 	qdf_dma_addr_t paddr;
2984 	struct dp_rx_nbuf_frag_info *nf_info;
2985 	uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0;
2986 	uint32_t buffer_index, nbuf_ptrs_per_page;
2987 	qdf_nbuf_t nbuf;
2988 	QDF_STATUS ret;
2989 	int page_idx, total_pages;
2990 	union dp_rx_desc_list_elem_t *desc_list = NULL;
2991 	union dp_rx_desc_list_elem_t *tail = NULL;
2992 	int sync_hw_ptr = 1;
2993 	uint32_t num_entries_avail;
2994 
2995 	if (qdf_unlikely(!rxdma_srng)) {
2996 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
2997 		return QDF_STATUS_E_FAILURE;
2998 	}
2999 
3000 	dp_debug("requested %u RX buffers for driver attach", num_req_buffers);
3001 
3002 	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
3003 	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
3004 						   rxdma_srng,
3005 						   sync_hw_ptr);
3006 	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
3007 
3008 	if (!num_entries_avail) {
3009 		dp_err("Num of available entries is zero, nothing to do");
3010 		return QDF_STATUS_E_NOMEM;
3011 	}
3012 
3013 	if (num_entries_avail < num_req_buffers)
3014 		num_req_buffers = num_entries_avail;
3015 
3016 	nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool,
3017 					    num_req_buffers, &desc_list, &tail);
3018 	if (!nr_descs) {
3019 		dp_err("no free rx_descs in freelist");
3020 		DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
3021 		return QDF_STATUS_E_NOMEM;
3022 	}
3023 
3024 	dp_debug("got %u RX descs for driver attach", nr_descs);
3025 
3026 	/*
3027 	 * Try to allocate pointers to the nbuf one page at a time.
3028 	 * Take pointers that can fit in one page of memory and
3029 	 * iterate through the total descriptors that need to be
3030 	 * allocated in order of pages. Reuse the pointers that
3031 	 * have been allocated to fit in one page across each
3032 	 * iteration to index into the nbuf.
3033 	 */
3034 	total_pages = (nr_descs * sizeof(*nf_info)) / PAGE_SIZE;
3035 
3036 	/*
3037 	 * Add an extra page to store the remainder if any
3038 	 */
3039 	if ((nr_descs * sizeof(*nf_info)) % PAGE_SIZE)
3040 		total_pages++;
3041 	nf_info = qdf_mem_malloc(PAGE_SIZE);
3042 	if (!nf_info) {
3043 		dp_err("failed to allocate nbuf array");
3044 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
3045 		QDF_BUG(0);
3046 		return QDF_STATUS_E_NOMEM;
3047 	}
3048 	nbuf_ptrs_per_page = PAGE_SIZE / sizeof(*nf_info);
3049 
3050 	for (page_idx = 0; page_idx < total_pages; page_idx++) {
3051 		qdf_mem_zero(nf_info, PAGE_SIZE);
3052 
3053 		for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) {
3054 			/*
3055 			 * The last page of buffer pointers may not be needed
3056 			 * in full, depending on the number of descriptors.
3057 			 * The check below ensures we allocate only the
3058 			 * required number of descriptors.
3059 			 */
3060 			if (nr_nbuf_total >= nr_descs)
3061 				break;
3062 			/* Flag is set while pdev rx_desc_pool initialization */
3063 			if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
3064 				ret = dp_pdev_frag_alloc_and_map(dp_soc,
3065 						&nf_info[nr_nbuf], dp_pdev,
3066 						rx_desc_pool);
3067 			else
3068 				ret = dp_pdev_nbuf_alloc_and_map(dp_soc,
3069 						&nf_info[nr_nbuf], dp_pdev,
3070 						rx_desc_pool);
3071 			if (QDF_IS_STATUS_ERROR(ret))
3072 				break;
3073 
3074 			nr_nbuf_total++;
3075 		}
3076 
3077 		hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
3078 
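		/*
		 * Program one RxDMA ring entry per successfully allocated
		 * buffer: prep the SW rx_desc, mark it in use, write the
		 * buffer address and cookie into the ring entry and create
		 * the SMMU mapping for the buffer.
		 */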
3079 		for (buffer_index = 0; buffer_index < nr_nbuf; buffer_index++) {
3080 			rxdma_ring_entry =
3081 				hal_srng_src_get_next(dp_soc->hal_soc,
3082 						      rxdma_srng);
3083 			qdf_assert_always(rxdma_ring_entry);
3084 
3085 			next = desc_list->next;
3086 			paddr = nf_info[buffer_index].paddr;
3087 			nbuf = nf_info[buffer_index].virt_addr.nbuf;
3088 
3089 			/* Flag is set while pdev rx_desc_pool initialization */
3090 			if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
3091 				dp_rx_desc_frag_prep(&desc_list->rx_desc,
3092 						     &nf_info[buffer_index]);
3093 			else
3094 				dp_rx_desc_prep(&desc_list->rx_desc,
3095 						&nf_info[buffer_index]);
3096 			desc_list->rx_desc.in_use = 1;
3097 			dp_rx_desc_alloc_dbg_info(&desc_list->rx_desc);
3098 			dp_rx_desc_update_dbg_info(&desc_list->rx_desc,
3099 						   __func__,
3100 						   RX_DESC_REPLENISHED);
3101 
3102 			hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
3103 						     desc_list->rx_desc.cookie,
3104 						     rx_desc_pool->owner);
3105 			dp_ipa_handle_rx_buf_smmu_mapping(
3106 						dp_soc, nbuf,
3107 						rx_desc_pool->buf_size,
3108 						true);
3109 
3110 			desc_list = next;
3111 		}
3112 
3113 		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
3114 	}
3115 
3116 	dp_info("filled %u RX buffers for driver attach", nr_nbuf_total);
3117 	qdf_mem_free(nf_info);
3118 
3119 	if (!nr_nbuf_total) {
3120 		dp_err("No nbufs allocated");
3121 		QDF_BUG(0);
3122 		return QDF_STATUS_E_RESOURCES;
3123 	}
3124 
3125 	/* No need to count the number of bytes received during replenish.
3126 	 * Therefore set replenish.pkts.bytes as 0.
3127 	 */
3128 	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf, 0);
3129 
3130 	return QDF_STATUS_SUCCESS;
3131 }
3132 
3133 /**
3134  * dp_rx_enable_mon_dest_frag() - Enable frag processing for
3135  *              monitor destination ring via frag.
3136  *
3137  * Enable this flag only for monitor destination buffer processing
3138  * if DP_RX_MON_MEM_FRAG feature is enabled.
3139  * If the flag is set, frag-based functions are used for the alloc,
3140  * map, desc-prep and free ops on the desc buffer; otherwise the
3141  * normal nbuf-based functions are used.
3142  *
3143  * @rx_desc_pool: Rx desc pool
3144  * @is_mon_dest_desc: Is it for monitor dest buffer
3145  *
3146  * Return: None
3147  */
3148 #ifdef DP_RX_MON_MEM_FRAG
3149 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
3150 				bool is_mon_dest_desc)
3151 {
3152 	rx_desc_pool->rx_mon_dest_frag_enable = is_mon_dest_desc;
3153 	if (is_mon_dest_desc)
3154 		dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is enabled");
3155 }
3156 #else
3157 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
3158 				bool is_mon_dest_desc)
3159 {
3160 	rx_desc_pool->rx_mon_dest_frag_enable = false;
3161 	if (is_mon_dest_desc)
3162 		dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is disabled");
3163 }
3164 #endif
3165 
3166 /*
3167  * dp_rx_pdev_desc_pool_alloc() -  allocate memory for software rx descriptor
3168  *				   pool
3169  *
3170  * @pdev: core txrx pdev context
3171  *
3172  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
3173  *			QDF_STATUS_E_NOMEM
3174  */
3175 QDF_STATUS
3176 dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev)
3177 {
3178 	struct dp_soc *soc = pdev->soc;
3179 	uint32_t rxdma_entries;
3180 	uint32_t rx_sw_desc_num;
3181 	struct dp_srng *dp_rxdma_srng;
3182 	struct rx_desc_pool *rx_desc_pool;
3183 	uint32_t status = QDF_STATUS_SUCCESS;
3184 	int mac_for_pdev;
3185 
3186 	mac_for_pdev = pdev->lmac_id;
3187 	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
3188 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3189 			  "nss-wifi<4> skip Rx refill %d", mac_for_pdev);
3190 		return status;
3191 	}
3192 
3193 	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
3194 	rxdma_entries = dp_rxdma_srng->num_entries;
3195 
3196 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
3197 	rx_sw_desc_num = wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx);
3198 
3199 	rx_desc_pool->desc_type = DP_RX_DESC_BUF_TYPE;
3200 	status = dp_rx_desc_pool_alloc(soc,
3201 				       rx_sw_desc_num,
3202 				       rx_desc_pool);
3203 	if (status != QDF_STATUS_SUCCESS)
3204 		return status;
3205 
3206 	return status;
3207 }
3208 
3209 /*
3210  * dp_rx_pdev_desc_pool_free() - free software rx descriptor pool
3211  *
3212  * @pdev: core txrx pdev context
3213  */
3214 void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev)
3215 {
3216 	int mac_for_pdev = pdev->lmac_id;
3217 	struct dp_soc *soc = pdev->soc;
3218 	struct rx_desc_pool *rx_desc_pool;
3219 
3220 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
3221 
3222 	dp_rx_desc_pool_free(soc, rx_desc_pool);
3223 }
3224 
3225 /*
3226  * dp_rx_pdev_desc_pool_init() - initialize software rx descriptors
3227  *
3228  * @pdev: core txrx pdev context
3229  *
3230  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
3231  *			QDF_STATUS_E_NOMEM
3232  */
3233 QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev)
3234 {
3235 	int mac_for_pdev = pdev->lmac_id;
3236 	struct dp_soc *soc = pdev->soc;
3237 	uint32_t rxdma_entries;
3238 	uint32_t rx_sw_desc_num;
3239 	struct dp_srng *dp_rxdma_srng;
3240 	struct rx_desc_pool *rx_desc_pool;
3241 
3242 	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
3243 		/**
3244 		 * If NSS is enabled, rx_desc_pool is already filled.
3245 		 * Hence, just disable desc_pool frag flag.
3246 		 */
3247 		rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
3248 		dp_rx_enable_mon_dest_frag(rx_desc_pool, false);
3249 
3250 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3251 			  "nss-wifi<4> skip Rx refill %d", mac_for_pdev);
3252 		return QDF_STATUS_SUCCESS;
3253 	}
3254 
3255 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
3256 	if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_E_NOMEM)
3257 		return QDF_STATUS_E_NOMEM;
3258 
3259 	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
3260 	rxdma_entries = dp_rxdma_srng->num_entries;
3261 
3262 	soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;
3263 
3264 	rx_sw_desc_num =
3265 	wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx);
3266 
3267 	rx_desc_pool->owner = DP_WBM2SW_RBM;
3268 	rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE;
3269 	rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;
3270 	/* Disable monitor dest processing via frag */
3271 	dp_rx_enable_mon_dest_frag(rx_desc_pool, false);
3272 
3273 	dp_rx_desc_pool_init(soc, mac_for_pdev,
3274 			     rx_sw_desc_num, rx_desc_pool);
3275 	return QDF_STATUS_SUCCESS;
3276 }
3277 
3278 /*
3279  * dp_rx_pdev_desc_pool_deinit() - de-initialize software rx descriptor pools
3280  * @pdev: core txrx pdev context
3281  *
3282  * This function resets the freelist of rx descriptors and destroys locks
3283  * associated with this list of descriptors.
3284  */
3285 void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev)
3286 {
3287 	int mac_for_pdev = pdev->lmac_id;
3288 	struct dp_soc *soc = pdev->soc;
3289 	struct rx_desc_pool *rx_desc_pool;
3290 
3291 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
3292 
3293 	dp_rx_desc_pool_deinit(soc, rx_desc_pool);
3294 }
3295 
3296 /*
3297  * dp_rx_pdev_buffers_alloc() - Allocate nbufs (skbs) and replenish RxDMA ring
3298  *
3299  * @pdev: core txrx pdev context
3300  *
3301  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
3302  *			QDF_STATUS_E_NOMEM
3303  */
3304 QDF_STATUS
3305 dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev)
3306 {
3307 	int mac_for_pdev = pdev->lmac_id;
3308 	struct dp_soc *soc = pdev->soc;
3309 	struct dp_srng *dp_rxdma_srng;
3310 	struct rx_desc_pool *rx_desc_pool;
3311 	uint32_t rxdma_entries;
3312 
3313 	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
3314 	rxdma_entries = dp_rxdma_srng->num_entries;
3315 
3316 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
3317 
3318 	/* Initialize RX buffer pool which will be
3319 	 * used during low memory conditions
3320 	 */
3321 	dp_rx_buffer_pool_init(soc, mac_for_pdev);
3322 
3323 	return dp_pdev_rx_buffers_attach(soc, mac_for_pdev, dp_rxdma_srng,
3324 					 rx_desc_pool, rxdma_entries - 1);
3325 }
3326 
3327 /*
3328  * dp_rx_pdev_buffers_free - Free nbufs (skbs)
3329  *
3330  * @pdev: core txrx pdev context
3331  */
3332 void
3333 dp_rx_pdev_buffers_free(struct dp_pdev *pdev)
3334 {
3335 	int mac_for_pdev = pdev->lmac_id;
3336 	struct dp_soc *soc = pdev->soc;
3337 	struct rx_desc_pool *rx_desc_pool;
3338 
3339 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
3340 
3341 	dp_rx_desc_nbuf_free(soc, rx_desc_pool);
3342 	dp_rx_buffer_pool_deinit(soc, mac_for_pdev);
3343 }
3344 
3345 #ifdef DP_RX_SPECIAL_FRAME_NEED
3346 bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer,
3347 				 qdf_nbuf_t nbuf, uint32_t frame_mask,
3348 				 uint8_t *rx_tlv_hdr)
3349 {
3350 	uint32_t l2_hdr_offset = 0;
3351 	uint16_t msdu_len = 0;
3352 	uint32_t skip_len;
3353 
3354 	l2_hdr_offset =
3355 		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
3356 
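	/*
	 * For fragmented nbufs only the L3 header padding is skipped;
	 * for regular nbufs the RX TLV header is stripped as well and
	 * the packet length is set first.
	 */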
3357 	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
3358 		skip_len = l2_hdr_offset;
3359 	} else {
3360 		msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
3361 		skip_len = l2_hdr_offset + RX_PKT_TLVS_LEN;
3362 		qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);
3363 	}
3364 
3365 	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
3366 	dp_rx_set_hdr_pad(nbuf, l2_hdr_offset);
3367 	qdf_nbuf_pull_head(nbuf, skip_len);
3368 
3369 	if (dp_rx_is_special_frame(nbuf, frame_mask)) {
3370 		qdf_nbuf_set_exc_frame(nbuf, 1);
3371 		dp_rx_deliver_to_stack(soc, peer->vdev, peer,
3372 				       nbuf, NULL);
3373 		return true;
3374 	}
3375 
3376 	return false;
3377 }
3378 #endif
3379