xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.c (revision 6d768494e5ce14eb1603a695c86739d12ecc6ec2)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "hal_hw_headers.h"
20 #include "dp_types.h"
21 #include "dp_rx.h"
22 #include "dp_peer.h"
23 #include "hal_rx.h"
24 #include "hal_api.h"
25 #include "qdf_nbuf.h"
26 #ifdef MESH_MODE_SUPPORT
27 #include "if_meta_hdr.h"
28 #endif
29 #include "dp_internal.h"
30 #include "dp_rx_mon.h"
31 #include "dp_ipa.h"
32 #ifdef FEATURE_WDS
33 #include "dp_txrx_wds.h"
34 #endif
35 
36 #ifdef ATH_RX_PRI_SAVE
37 #define DP_RX_TID_SAVE(_nbuf, _tid) \
38 	(qdf_nbuf_set_priority(_nbuf, _tid))
39 #else
40 #define DP_RX_TID_SAVE(_nbuf, _tid)
41 #endif
42 
43 #ifdef DP_RX_DISABLE_NDI_MDNS_FORWARDING
44 static inline
45 bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
46 {
47 	if (ta_peer->vdev->opmode == wlan_op_mode_ndi &&
48 	    qdf_nbuf_is_ipv6_mdns_pkt(nbuf)) {
49 		DP_STATS_INC(ta_peer, rx.intra_bss.mdns_no_fwd, 1);
50 		return false;
51 	}
52 	return true;
53 }
54 #else
55 static inline
56 bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
57 {
58 	return true;
59 }
60 #endif
61 static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
62 {
63 	return vdev->ap_bridge_enabled;
64 }
65 
66 #ifdef DUP_RX_DESC_WAR
67 void dp_rx_dump_info_and_assert(struct dp_soc *soc,
68 				hal_ring_handle_t hal_ring,
69 				hal_ring_desc_t ring_desc,
70 				struct dp_rx_desc *rx_desc)
71 {
72 	void *hal_soc = soc->hal_soc;
73 
74 	hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc);
75 	dp_rx_desc_dump(rx_desc);
76 }
77 #else
78 void dp_rx_dump_info_and_assert(struct dp_soc *soc,
79 				hal_ring_handle_t hal_ring_hdl,
80 				hal_ring_desc_t ring_desc,
81 				struct dp_rx_desc *rx_desc)
82 {
83 	hal_soc_handle_t hal_soc = soc->hal_soc;
84 
85 	dp_rx_desc_dump(rx_desc);
86 	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
87 	hal_srng_dump_ring(hal_soc, hal_ring_hdl);
88 	qdf_assert_always(0);
89 }
90 #endif
91 
92 #ifdef RX_DESC_SANITY_WAR
93 static inline
94 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
95 			     hal_ring_handle_t hal_ring_hdl,
96 			     hal_ring_desc_t ring_desc,
97 			     struct dp_rx_desc *rx_desc)
98 {
99 	uint8_t return_buffer_manager;
100 
101 	if (qdf_unlikely(!rx_desc)) {
102 		/*
103 		 * This is an unlikely case where the cookie obtained
104 		 * from the ring_desc is invalid and hence we are not
105 		 * able to find the corresponding rx_desc
106 		 */
107 		goto fail;
108 	}
109 
110 	return_buffer_manager = hal_rx_ret_buf_manager_get(ring_desc);
111 	if (qdf_unlikely(!(return_buffer_manager == HAL_RX_BUF_RBM_SW1_BM ||
112 			 return_buffer_manager == HAL_RX_BUF_RBM_SW3_BM))) {
113 		goto fail;
114 	}
115 
116 	return QDF_STATUS_SUCCESS;
117 
118 fail:
119 	DP_STATS_INC(soc, rx.err.invalid_cookie, 1);
120 	dp_err("Ring Desc:");
121 	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl,
122 				ring_desc);
123 	return QDF_STATUS_E_NULL_VALUE;
124 
125 }
126 #else
127 static inline
128 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
129 			     hal_ring_handle_t hal_ring_hdl,
130 			     hal_ring_desc_t ring_desc,
131 			     struct dp_rx_desc *rx_desc)
132 {
133 	return QDF_STATUS_SUCCESS;
134 }
135 #endif
136 
137 /*
138  * __dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
139  *			       called during dp rx initialization
140  *			       and at the end of dp_rx_process.
141  *
142  * @dp_soc: core txrx main context
143  * @mac_id: mac_id which is one of 3 mac_ids
144  * @dp_rxdma_srng: dp rxdma circular ring
145  * @rx_desc_pool: Pointer to free Rx descriptor pool
146  * @num_req_buffers: number of buffers to be replenished
147  * @desc_list: list of descs if called from dp_rx_process
148  *	       or NULL during dp rx initialization or out of buffer
149  *	       interrupt.
150  * @tail: tail of descs list
151  * @func_name: name of the caller function
152  * Return: QDF_STATUS_SUCCESS on success, error status on failure
153  */
154 QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
155 				struct dp_srng *dp_rxdma_srng,
156 				struct rx_desc_pool *rx_desc_pool,
157 				uint32_t num_req_buffers,
158 				union dp_rx_desc_list_elem_t **desc_list,
159 				union dp_rx_desc_list_elem_t **tail,
160 				const char *func_name)
161 {
162 	uint32_t num_alloc_desc;
163 	uint16_t num_desc_to_free = 0;
164 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
165 	uint32_t num_entries_avail;
166 	uint32_t count;
167 	int sync_hw_ptr = 1;
168 	qdf_dma_addr_t paddr;
169 	qdf_nbuf_t rx_netbuf;
170 	void *rxdma_ring_entry;
171 	union dp_rx_desc_list_elem_t *next;
172 	QDF_STATUS ret;
173 	uint16_t buf_size = rx_desc_pool->buf_size;
174 	uint8_t buf_alignment = rx_desc_pool->buf_alignment;
175 
176 	void *rxdma_srng;
177 
178 	rxdma_srng = dp_rxdma_srng->hal_srng;
179 
180 	if (!rxdma_srng) {
181 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
182 				  "rxdma srng not initialized");
183 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
184 		return QDF_STATUS_E_FAILURE;
185 	}
186 
187 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
188 		"requested %d buffers for replenish", num_req_buffers);
189 
190 	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
191 	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
192 						   rxdma_srng,
193 						   sync_hw_ptr);
194 
195 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
196 		"no of available entries in rxdma ring: %d",
197 		num_entries_avail);
198 
199 	if (!(*desc_list) && (num_entries_avail >
200 		((dp_rxdma_srng->num_entries * 3) / 4))) {
201 		num_req_buffers = num_entries_avail;
202 	} else if (num_entries_avail < num_req_buffers) {
203 		num_desc_to_free = num_req_buffers - num_entries_avail;
204 		num_req_buffers = num_entries_avail;
205 	}
206 
207 	if (qdf_unlikely(!num_req_buffers)) {
208 		num_desc_to_free = num_req_buffers;
209 		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
210 		goto free_descs;
211 	}
212 
213 	/*
214 	 * if desc_list is NULL, allocate the descs from freelist
215 	 */
216 	if (!(*desc_list)) {
217 		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
218 							  rx_desc_pool,
219 							  num_req_buffers,
220 							  desc_list,
221 							  tail);
222 
223 		if (!num_alloc_desc) {
224 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
225 				"no free rx_descs in freelist");
226 			DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
227 					num_req_buffers);
228 			hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
229 			return QDF_STATUS_E_NOMEM;
230 		}
231 
232 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
233 			"%d rx desc allocated", num_alloc_desc);
234 		num_req_buffers = num_alloc_desc;
235 	}
236 
237 
238 	count = 0;
239 
240 	while (count < num_req_buffers) {
241 		rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
242 					buf_size,
243 					RX_BUFFER_RESERVATION,
244 					buf_alignment,
245 					FALSE);
246 
247 		if (qdf_unlikely(!rx_netbuf)) {
248 			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
249 			break;
250 		}
251 
252 		ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev, rx_netbuf,
253 						 QDF_DMA_FROM_DEVICE, buf_size);
254 
255 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
256 			qdf_nbuf_free(rx_netbuf);
257 			DP_STATS_INC(dp_pdev, replenish.map_err, 1);
258 			continue;
259 		}
260 
261 		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);
262 
263 		dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, rx_netbuf, true);
264 		/*
265 		 * Check if the physical address of nbuf->data is
266 		 * less than 0x50000000. If so, free the nbuf and try
267 		 * allocating a new one; we retry up to 100 times.
268 		 * This is a temporary WAR till we fix it properly.
269 		 */
270 		ret = check_x86_paddr(dp_soc, &rx_netbuf, &paddr, rx_desc_pool);
271 		if (ret == QDF_STATUS_E_FAILURE) {
272 			DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
273 			break;
274 		}
275 
276 		count++;
277 
278 		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
279 							 rxdma_srng);
280 		qdf_assert_always(rxdma_ring_entry);
281 
282 		next = (*desc_list)->next;
283 
284 		dp_rx_desc_prep(&((*desc_list)->rx_desc), rx_netbuf);
285 
286 		/* rx_desc.in_use should be zero at this time*/
287 		qdf_assert_always((*desc_list)->rx_desc.in_use == 0);
288 
289 		(*desc_list)->rx_desc.in_use = 1;
290 		dp_rx_desc_update_dbg_info(&(*desc_list)->rx_desc,
291 					   func_name, RX_DESC_REPLENISHED);
292 		dp_verbose_debug("rx_netbuf=%pK, buf=%pK, paddr=0x%llx, cookie=%d",
293 				 rx_netbuf, qdf_nbuf_data(rx_netbuf),
294 				 (unsigned long long)paddr,
295 				 (*desc_list)->rx_desc.cookie);
296 
297 		hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
298 						(*desc_list)->rx_desc.cookie,
299 						rx_desc_pool->owner);
300 
301 		*desc_list = next;
302 
303 	}
304 
305 	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
306 
307 	dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u",
308 			 count, num_desc_to_free);
309 
310 	/* No need to count the number of bytes received during replenish.
311 	 * Therefore set replenish.pkts.bytes as 0.
312 	 */
313 	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
314 
315 free_descs:
316 	DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
317 	/*
318 	 * add any available free desc back to the free list
319 	 */
320 	if (*desc_list)
321 		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
322 			mac_id, rx_desc_pool);
323 
324 	return QDF_STATUS_SUCCESS;
325 }
326 
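/*
 * Illustrative usage (a sketch, not a call site in this file): the REO
 * processing path typically tops the ring back up with whatever descriptors
 * it has just reaped, passing its own name for the descriptor debug info.
 * The variable names below are placeholders.
 *
 *	if (rx_bufs_reaped)
 *		__dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
 *					  rx_desc_pool, rx_bufs_reaped,
 *					  &desc_list, &tail, __func__);
 *
 * During rx initialization the same routine is invoked with *desc_list set
 * to NULL, in which case the descriptors are pulled from the pool freelist.
 */
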
327 /*
328  * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
329  *				pkts to RAW mode simulation to
330  *				decapsulate the pkt.
331  *
332  * @vdev: vdev on which RAW mode is enabled
333  * @nbuf_list: list of RAW pkts to process
334  * @peer: peer object from which the pkt is rx
335  *
336  * Return: void
337  */
338 void
339 dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
340 					struct dp_peer *peer)
341 {
342 	qdf_nbuf_t deliver_list_head = NULL;
343 	qdf_nbuf_t deliver_list_tail = NULL;
344 	qdf_nbuf_t nbuf;
345 
346 	nbuf = nbuf_list;
347 	while (nbuf) {
348 		qdf_nbuf_t next = qdf_nbuf_next(nbuf);
349 
350 		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
351 
352 		DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
353 		DP_STATS_INC_PKT(peer, rx.raw, 1, qdf_nbuf_len(nbuf));
354 		/*
355 		 * reset the chfrag_start and chfrag_end bits in nbuf cb
356 		 * as this is a non-amsdu pkt and RAW mode simulation expects
357 		 * these bits to be 0 for a non-amsdu pkt.
358 		 */
359 		if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
360 			 qdf_nbuf_is_rx_chfrag_end(nbuf)) {
361 			qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
362 			qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
363 		}
364 
365 		nbuf = next;
366 	}
367 
368 	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
369 				 &deliver_list_tail, peer->mac_addr.raw);
370 
371 	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
372 }
373 
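/*
 * Caller-side sketch (an assumption about the surrounding rx path, not code
 * in this excerpt): when vdev->rx_decap_type is htt_cmn_pkt_type_raw, the rx
 * processing loop batches the reaped nbufs per peer and hands the whole chain
 * over here instead of the regular dp_rx_deliver_to_stack() path, e.g.
 *
 *	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw))
 *		dp_rx_deliver_raw(vdev, deliver_list_head, peer);
 *
 * The list head name is a placeholder for whatever the caller uses.
 */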
374 
375 #ifdef DP_LFR
376 /*
377  * In case of LFR, data of a new peer might be sent up
378  * even before peer is added.
379  */
380 static inline struct dp_vdev *
381 dp_get_vdev_from_peer(struct dp_soc *soc,
382 			uint16_t peer_id,
383 			struct dp_peer *peer,
384 			struct hal_rx_mpdu_desc_info mpdu_desc_info)
385 {
386 	struct dp_vdev *vdev;
387 	uint8_t vdev_id;
388 
389 	if (unlikely(!peer)) {
390 		if (peer_id != HTT_INVALID_PEER) {
391 			vdev_id = DP_PEER_METADATA_VDEV_ID_GET(
392 					mpdu_desc_info.peer_meta_data);
393 			QDF_TRACE(QDF_MODULE_ID_DP,
394 				QDF_TRACE_LEVEL_DEBUG,
395 				FL("PeerID %d not found use vdevID %d"),
396 				peer_id, vdev_id);
397 			vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
398 								  vdev_id);
399 		} else {
400 			QDF_TRACE(QDF_MODULE_ID_DP,
401 				QDF_TRACE_LEVEL_DEBUG,
402 				FL("Invalid PeerID %d"),
403 				peer_id);
404 			return NULL;
405 		}
406 	} else {
407 		vdev = peer->vdev;
408 	}
409 	return vdev;
410 }
411 #else
412 static inline struct dp_vdev *
413 dp_get_vdev_from_peer(struct dp_soc *soc,
414 			uint16_t peer_id,
415 			struct dp_peer *peer,
416 			struct hal_rx_mpdu_desc_info mpdu_desc_info)
417 {
418 	if (unlikely(!peer)) {
419 		QDF_TRACE(QDF_MODULE_ID_DP,
420 			QDF_TRACE_LEVEL_DEBUG,
421 			FL("Peer not found for peerID %d"),
422 			peer_id);
423 		return NULL;
424 	} else {
425 		return peer->vdev;
426 	}
427 }
428 #endif
429 
430 #ifndef FEATURE_WDS
431 static void
432 dp_rx_da_learn(struct dp_soc *soc,
433 	       uint8_t *rx_tlv_hdr,
434 	       struct dp_peer *ta_peer,
435 	       qdf_nbuf_t nbuf)
436 {
437 }
438 #endif
439 /*
440  * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic
441  *
442  * @soc: core txrx main context
443  * @ta_peer	: source peer entry
444  * @rx_tlv_hdr	: start address of rx tlvs
445  * @nbuf	: nbuf that has to be intrabss forwarded
 * @msdu_metadata : msdu metadata parsed from the rx tlvs
446  *
447  * Return: bool: true if it is forwarded else false
448  */
449 static bool
450 dp_rx_intrabss_fwd(struct dp_soc *soc,
451 			struct dp_peer *ta_peer,
452 			uint8_t *rx_tlv_hdr,
453 			qdf_nbuf_t nbuf,
454 			struct hal_rx_msdu_metadata msdu_metadata)
455 {
456 	uint16_t len;
457 	uint8_t is_frag;
458 	struct dp_peer *da_peer;
459 	struct dp_ast_entry *ast_entry;
460 	qdf_nbuf_t nbuf_copy;
461 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
462 	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
463 	struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
464 					tid_stats.tid_rx_stats[ring_id][tid];
465 
466 	/* check if the destination peer is available in peer table
467 	 * and also check if the source peer and destination peer
468 	 * belong to the same vap and destination peer is not bss peer.
469 	 */
470 
471 	if ((qdf_nbuf_is_da_valid(nbuf) && !qdf_nbuf_is_da_mcbc(nbuf))) {
472 
473 		ast_entry = soc->ast_table[msdu_metadata.da_idx];
474 		if (!ast_entry)
475 			return false;
476 
477 		if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
478 			ast_entry->is_active = TRUE;
479 			return false;
480 		}
481 
482 		da_peer = ast_entry->peer;
483 
484 		if (!da_peer)
485 			return false;
486 		/* The TA peer cannot be the same as the peer (DA) on which the
487 		 * AST entry is present; if it is, this indicates a change in
488 		 * topology and that the AST entries are yet to be updated.
489 		 */
490 		if (da_peer == ta_peer)
491 			return false;
492 
493 		if (da_peer->vdev == ta_peer->vdev && !da_peer->bss_peer) {
494 			len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
495 			is_frag = qdf_nbuf_is_frag(nbuf);
496 			memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
497 
498 			/* If the source or destination peer is in the isolation
499 			 * list, don't forward; instead push to the bridge stack.
500 			 */
501 			if (dp_get_peer_isolation(ta_peer) ||
502 			    dp_get_peer_isolation(da_peer))
503 				return false;
504 
505 			/* linearize the nbuf just before we send to
506 			 * dp_tx_send()
507 			 */
508 			if (qdf_unlikely(is_frag)) {
509 				if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
510 					return false;
511 
512 				nbuf = qdf_nbuf_unshare(nbuf);
513 				if (!nbuf) {
514 					DP_STATS_INC_PKT(ta_peer,
515 							 rx.intra_bss.fail,
516 							 1,
517 							 len);
518 					/* return true even though the pkt is
519 					 * not forwarded. Basically skb_unshare
520 					 * failed and we want to continue with
521 					 * next nbuf.
522 					 */
523 					tid_stats->fail_cnt[INTRABSS_DROP]++;
524 					return true;
525 				}
526 			}
527 
528 			if (!dp_tx_send((struct cdp_soc_t *)soc,
529 					ta_peer->vdev->vdev_id, nbuf)) {
530 				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
531 						 len);
532 				return true;
533 			} else {
534 				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
535 						len);
536 				tid_stats->fail_cnt[INTRABSS_DROP]++;
537 				return false;
538 			}
539 		}
540 	}
541 	/* If it is a broadcast pkt (e.g. ARP) and it is not from its own
542 	 * source, then clone the pkt and send the cloned pkt for
543 	 * intra-BSS forwarding and the original pkt up the network stack.
544 	 * Note (open question): how do we handle multicast pkts? Do we
545 	 * forward all multicast pkts as-is, or let a higher layer module
546 	 * like igmpsnoop decide whether to forward or not, with
547 	 * Mcast enhancement?
548 	 */
549 	else if (qdf_unlikely((qdf_nbuf_is_da_mcbc(nbuf) &&
550 			       !ta_peer->bss_peer))) {
551 		if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf))
552 			goto end;
553 
554 		/* If the source peer is in the isolation list,
555 		 * don't forward; instead push to the bridge stack.
556 		 */
557 		if (dp_get_peer_isolation(ta_peer))
558 			goto end;
559 
560 		nbuf_copy = qdf_nbuf_copy(nbuf);
561 		if (!nbuf_copy)
562 			goto end;
563 
564 		len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
565 		memset(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
566 
567 		/* Set cb->ftype to intrabss FWD */
568 		qdf_nbuf_set_tx_ftype(nbuf_copy, CB_FTYPE_INTRABSS_FWD);
569 		if (dp_tx_send((struct cdp_soc_t *)soc,
570 			       ta_peer->vdev->vdev_id, nbuf_copy)) {
571 			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, len);
572 			tid_stats->fail_cnt[INTRABSS_DROP]++;
573 			qdf_nbuf_free(nbuf_copy);
574 		} else {
575 			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, len);
576 			tid_stats->intrabss_cnt++;
577 		}
578 	}
579 
580 end:
581 	/* return false as we still have to send the original pkt
582 	 * up the stack
583 	 */
584 	return false;
585 }
586 
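/*
 * Return-value contract, with a hedged caller sketch (the caller lives in the
 * rx processing loop, outside this excerpt): "true" means the nbuf was
 * consumed by the intra-BSS path (forwarded, or intentionally dropped as in
 * the unshare-failure case above) and must not be delivered again; "false"
 * means ownership stays with the caller, roughly:
 *
 *	if (dp_rx_intrabss_fwd(soc, peer, rx_tlv_hdr, nbuf, msdu_metadata)) {
 *		nbuf = next;	// consumed, move to the next nbuf
 *		continue;
 *	}
 *	// otherwise fall through and deliver nbuf up the stack
 */
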
587 #ifdef MESH_MODE_SUPPORT
588 
589 /**
590  * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
591  *
592  * @vdev: DP Virtual device handle
593  * @nbuf: Buffer pointer
594  * @rx_tlv_hdr: start of rx tlv header
595  * @peer: pointer to peer
596  *
597  * This function allocates memory for mesh receive stats and fills in the
598  * required stats. It stores the memory address in the skb cb.
599  *
600  * Return: void
601  */
602 
603 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
604 				uint8_t *rx_tlv_hdr, struct dp_peer *peer)
605 {
606 	struct mesh_recv_hdr_s *rx_info = NULL;
607 	uint32_t pkt_type;
608 	uint32_t nss;
609 	uint32_t rate_mcs;
610 	uint32_t bw;
611 	uint8_t primary_chan_num;
612 	uint32_t center_chan_freq;
613 	struct dp_soc *soc;
614 
615 	/* fill recv mesh stats */
616 	rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));
617 
618 	/* upper layers are responsible for freeing this memory */
619 
620 	if (!rx_info) {
621 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
622 			"Memory allocation failed for mesh rx stats");
623 		DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
624 		return;
625 	}
626 
627 	rx_info->rs_flags = MESH_RXHDR_VER1;
628 	if (qdf_nbuf_is_rx_chfrag_start(nbuf))
629 		rx_info->rs_flags |= MESH_RX_FIRST_MSDU;
630 
631 	if (qdf_nbuf_is_rx_chfrag_end(nbuf))
632 		rx_info->rs_flags |= MESH_RX_LAST_MSDU;
633 
634 	if (hal_rx_attn_msdu_get_is_decrypted(rx_tlv_hdr)) {
635 		rx_info->rs_flags |= MESH_RX_DECRYPTED;
636 		rx_info->rs_keyix = hal_rx_msdu_get_keyid(rx_tlv_hdr);
637 		if (vdev->osif_get_key)
638 			vdev->osif_get_key(vdev->osif_vdev,
639 					&rx_info->rs_decryptkey[0],
640 					&peer->mac_addr.raw[0],
641 					rx_info->rs_keyix);
642 	}
643 
644 	rx_info->rs_rssi = hal_rx_msdu_start_get_rssi(rx_tlv_hdr);
645 
646 	soc = vdev->pdev->soc;
647 	primary_chan_num = hal_rx_msdu_start_get_freq(rx_tlv_hdr);
648 	center_chan_freq = hal_rx_msdu_start_get_freq(rx_tlv_hdr) >> 16;
649 
650 	if (soc->cdp_soc.ol_ops && soc->cdp_soc.ol_ops->freq_to_band) {
651 		rx_info->rs_band = soc->cdp_soc.ol_ops->freq_to_band(
652 							soc->ctrl_psoc,
653 							vdev->pdev->pdev_id,
654 							center_chan_freq);
655 	}
656 	rx_info->rs_channel = primary_chan_num;
657 	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
658 	rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
659 	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
660 	nss = hal_rx_msdu_start_nss_get(vdev->pdev->soc->hal_soc, rx_tlv_hdr);
661 	rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
662 				(bw << 24);
663 
664 	qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);
665 
666 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
667 		FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x"),
668 						rx_info->rs_flags,
669 						rx_info->rs_rssi,
670 						rx_info->rs_channel,
671 						rx_info->rs_ratephy1,
672 						rx_info->rs_keyix);
673 
674 }
675 
676 /**
677  * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
678  *
679  * @vdev: DP Virtual device handle
680  * @nbuf: Buffer pointer
681  * @rx_tlv_hdr: start of rx tlv header
682  *
683  * This checks if the received packet matches any filter-out
684  * category and drops the packet if it matches.
685  *
686  * Return: status (QDF_STATUS_SUCCESS indicates drop, failure indicates no drop)
687  */
688 
689 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
690 					uint8_t *rx_tlv_hdr)
691 {
692 	union dp_align_mac_addr mac_addr;
693 	struct dp_soc *soc = vdev->pdev->soc;
694 
695 	if (qdf_unlikely(vdev->mesh_rx_filter)) {
696 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
697 			if (hal_rx_mpdu_get_fr_ds(soc->hal_soc,
698 						  rx_tlv_hdr))
699 				return  QDF_STATUS_SUCCESS;
700 
701 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
702 			if (hal_rx_mpdu_get_to_ds(soc->hal_soc,
703 						  rx_tlv_hdr))
704 				return  QDF_STATUS_SUCCESS;
705 
706 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
707 			if (!hal_rx_mpdu_get_fr_ds(soc->hal_soc,
708 						   rx_tlv_hdr) &&
709 			    !hal_rx_mpdu_get_to_ds(soc->hal_soc,
710 						   rx_tlv_hdr))
711 				return  QDF_STATUS_SUCCESS;
712 
713 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
714 			if (hal_rx_mpdu_get_addr1(soc->hal_soc,
715 						  rx_tlv_hdr,
716 					&mac_addr.raw[0]))
717 				return QDF_STATUS_E_FAILURE;
718 
719 			if (!qdf_mem_cmp(&mac_addr.raw[0],
720 					&vdev->mac_addr.raw[0],
721 					QDF_MAC_ADDR_SIZE))
722 				return  QDF_STATUS_SUCCESS;
723 		}
724 
725 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
726 			if (hal_rx_mpdu_get_addr2(soc->hal_soc,
727 						  rx_tlv_hdr,
728 						  &mac_addr.raw[0]))
729 				return QDF_STATUS_E_FAILURE;
730 
731 			if (!qdf_mem_cmp(&mac_addr.raw[0],
732 					&vdev->mac_addr.raw[0],
733 					QDF_MAC_ADDR_SIZE))
734 				return  QDF_STATUS_SUCCESS;
735 		}
736 	}
737 
738 	return QDF_STATUS_E_FAILURE;
739 }
740 
741 #else
742 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
743 				uint8_t *rx_tlv_hdr, struct dp_peer *peer)
744 {
745 }
746 
747 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
748 					uint8_t *rx_tlv_hdr)
749 {
750 	return QDF_STATUS_E_FAILURE;
751 }
752 
753 #endif
754 
755 #ifdef FEATURE_NAC_RSSI
756 /**
757  * dp_rx_nac_filter(): Function to perform filtering of non-associated
758  * clients
759  * @pdev: DP pdev handle
760  * @rx_pkt_hdr: Rx packet Header
761  *
762  * return: dp_vdev*
763  */
764 static
765 struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
766 		uint8_t *rx_pkt_hdr)
767 {
768 	struct ieee80211_frame *wh;
769 	struct dp_neighbour_peer *peer = NULL;
770 
771 	wh = (struct ieee80211_frame *)rx_pkt_hdr;
772 
773 	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
774 		return NULL;
775 
776 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
777 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
778 				neighbour_peer_list_elem) {
779 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
780 				wh->i_addr2, QDF_MAC_ADDR_SIZE) == 0) {
781 			QDF_TRACE(
782 				QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
783 				FL("NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x"),
784 				peer->neighbour_peers_macaddr.raw[0],
785 				peer->neighbour_peers_macaddr.raw[1],
786 				peer->neighbour_peers_macaddr.raw[2],
787 				peer->neighbour_peers_macaddr.raw[3],
788 				peer->neighbour_peers_macaddr.raw[4],
789 				peer->neighbour_peers_macaddr.raw[5]);
790 
791 				qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
792 
793 			return pdev->monitor_vdev;
794 		}
795 	}
796 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
797 
798 	return NULL;
799 }
800 
801 /**
802  * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
803  * @soc: DP SOC handle
804  * @mpdu: mpdu for which peer is invalid
805  * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
806  * pool_id have the same mapping)
807  *
808  * Return: 0 in all cases (the mpdu is either delivered or freed)
809  */
810 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
811 				   uint8_t mac_id)
812 {
813 	struct dp_invalid_peer_msg msg;
814 	struct dp_vdev *vdev = NULL;
815 	struct dp_pdev *pdev = NULL;
816 	struct ieee80211_frame *wh;
817 	qdf_nbuf_t curr_nbuf, next_nbuf;
818 	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
819 	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
820 
823 	if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) {
824 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
825 			  "Drop decapped frames");
826 		goto free;
827 	}
828 
829 	wh = (struct ieee80211_frame *)rx_pkt_hdr;
830 
831 	if (!DP_FRAME_IS_DATA(wh)) {
832 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
833 			  "NAWDS valid only for data frames");
834 		goto free;
835 	}
836 
837 	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
838 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
839 			"Invalid nbuf length");
840 		goto free;
841 	}
842 
843 	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
844 
845 	if (!pdev || qdf_unlikely(pdev->is_pdev_down)) {
846 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
847 			  "PDEV %s", !pdev ? "not found" : "down");
848 		goto free;
849 	}
850 
851 	if (pdev->filter_neighbour_peers) {
852 		/* Next Hop scenario not yet handled */
853 		vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
854 		if (vdev) {
855 			dp_rx_mon_deliver(soc, pdev->pdev_id,
856 					  pdev->invalid_peer_head_msdu,
857 					  pdev->invalid_peer_tail_msdu);
858 
859 			pdev->invalid_peer_head_msdu = NULL;
860 			pdev->invalid_peer_tail_msdu = NULL;
861 
862 			return 0;
863 		}
864 	}
865 
866 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
867 
868 		if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
869 				QDF_MAC_ADDR_SIZE) == 0) {
870 			goto out;
871 		}
872 	}
873 
874 	if (!vdev) {
875 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
876 			"VDEV not found");
877 		goto free;
878 	}
879 
880 out:
881 	msg.wh = wh;
882 	qdf_nbuf_pull_head(mpdu, RX_PKT_TLVS_LEN);
883 	msg.nbuf = mpdu;
884 	msg.vdev_id = vdev->vdev_id;
885 	if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer)
886 		pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(
887 				(struct cdp_ctrl_objmgr_psoc *)soc->ctrl_psoc,
888 				pdev->pdev_id, &msg);
889 
890 free:
891 	/* Drop and free packet */
892 	curr_nbuf = mpdu;
893 	while (curr_nbuf) {
894 		next_nbuf = qdf_nbuf_next(curr_nbuf);
895 		qdf_nbuf_free(curr_nbuf);
896 		curr_nbuf = next_nbuf;
897 	}
898 
899 	return 0;
900 }
901 
902 /**
903  * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
904  * @soc: DP SOC handle
905  * @mpdu: mpdu for which peer is invalid
906  * @mpdu_done: if an mpdu is completed
907  * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
908  * pool_id have the same mapping)
909  *
910  * Return: None
911  */
912 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
913 					qdf_nbuf_t mpdu, bool mpdu_done,
914 					uint8_t mac_id)
915 {
916 	/* Only trigger the process when mpdu is completed */
917 	if (mpdu_done)
918 		dp_rx_process_invalid_peer(soc, mpdu, mac_id);
919 }
920 #else
921 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
922 				   uint8_t mac_id)
923 {
924 	qdf_nbuf_t curr_nbuf, next_nbuf;
925 	struct dp_pdev *pdev;
926 	struct dp_vdev *vdev = NULL;
927 	struct ieee80211_frame *wh;
928 	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
929 	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
930 
931 	wh = (struct ieee80211_frame *)rx_pkt_hdr;
932 
933 	if (!DP_FRAME_IS_DATA(wh)) {
934 		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
935 				   "only for data frames");
936 		goto free;
937 	}
938 
939 	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
940 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
941 			  "Invalid nbuf length");
942 		goto free;
943 	}
944 
945 	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
946 	if (!pdev) {
947 		QDF_TRACE(QDF_MODULE_ID_DP,
948 			  QDF_TRACE_LEVEL_ERROR,
949 			  "PDEV not found");
950 		goto free;
951 	}
952 
953 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
954 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
955 		if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
956 				QDF_MAC_ADDR_SIZE) == 0) {
957 			qdf_spin_unlock_bh(&pdev->vdev_list_lock);
958 			goto out;
959 		}
960 	}
961 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
962 
963 	if (!vdev) {
964 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
965 			  "VDEV not found");
966 		goto free;
967 	}
968 
969 out:
970 	if (soc->cdp_soc.ol_ops->rx_invalid_peer)
971 		soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh);
972 free:
973 	/* reset the head and tail pointers */
974 	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
975 	if (pdev) {
976 		pdev->invalid_peer_head_msdu = NULL;
977 		pdev->invalid_peer_tail_msdu = NULL;
978 	}
979 
980 	/* Drop and free packet */
981 	curr_nbuf = mpdu;
982 	while (curr_nbuf) {
983 		next_nbuf = qdf_nbuf_next(curr_nbuf);
984 		qdf_nbuf_free(curr_nbuf);
985 		curr_nbuf = next_nbuf;
986 	}
994 
995 	return 0;
996 }
997 
998 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
999 					qdf_nbuf_t mpdu, bool mpdu_done,
1000 					uint8_t mac_id)
1001 {
1002 	/* Process the nbuf */
1003 	dp_rx_process_invalid_peer(soc, mpdu, mac_id);
1004 }
1005 #endif
1006 
1007 #ifdef RECEIVE_OFFLOAD
1008 /**
1009  * dp_rx_print_offload_info() - Print offload info from RX TLV
1010  * @soc: dp soc handle
1011  * @rx_tlv: RX TLV for which offload information is to be printed
1012  *
1013  * Return: None
1014  */
1015 static void dp_rx_print_offload_info(struct dp_soc *soc, uint8_t *rx_tlv)
1016 {
1017 	dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------");
1018 	dp_verbose_debug("lro_eligible 0x%x", HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv));
1019 	dp_verbose_debug("pure_ack 0x%x", HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv));
1020 	dp_verbose_debug("chksum 0x%x", hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
1021 								  rx_tlv));
1022 	dp_verbose_debug("TCP seq num 0x%x", HAL_RX_TLV_GET_TCP_SEQ(rx_tlv));
1023 	dp_verbose_debug("TCP ack num 0x%x", HAL_RX_TLV_GET_TCP_ACK(rx_tlv));
1024 	dp_verbose_debug("TCP window 0x%x", HAL_RX_TLV_GET_TCP_WIN(rx_tlv));
1025 	dp_verbose_debug("TCP protocol 0x%x", HAL_RX_TLV_GET_TCP_PROTO(rx_tlv));
1026 	dp_verbose_debug("TCP offset 0x%x", HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv));
1027 	dp_verbose_debug("toeplitz 0x%x", HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv));
1028 	dp_verbose_debug("---------------------------------------------------------");
1029 }
1030 
1031 /**
1032  * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb
1033  * @soc: DP SOC handle
1034  * @rx_tlv: RX TLV received for the msdu
1035  * @msdu: msdu for which GRO info needs to be filled
1036  * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets
1037  *
1038  * Return: None
1039  */
1040 static
1041 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
1042 			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
1043 {
1044 	if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx))
1045 		return;
1046 
1047 	/* Filling up RX offload info only for TCP packets */
1048 	if (!HAL_RX_TLV_GET_TCP_PROTO(rx_tlv))
1049 		return;
1050 
1051 	*rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1;
1052 
1053 	QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) =
1054 		 HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv);
1055 	QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) =
1056 			HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv);
1057 	QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
1058 			hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
1059 						  rx_tlv);
1060 	QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) =
1061 			 HAL_RX_TLV_GET_TCP_SEQ(rx_tlv);
1062 	QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) =
1063 			 HAL_RX_TLV_GET_TCP_ACK(rx_tlv);
1064 	QDF_NBUF_CB_RX_TCP_WIN(msdu) =
1065 			 HAL_RX_TLV_GET_TCP_WIN(rx_tlv);
1066 	QDF_NBUF_CB_RX_TCP_PROTO(msdu) =
1067 			 HAL_RX_TLV_GET_TCP_PROTO(rx_tlv);
1068 	QDF_NBUF_CB_RX_IPV6_PROTO(msdu) =
1069 			 HAL_RX_TLV_GET_IPV6(rx_tlv);
1070 	QDF_NBUF_CB_RX_TCP_OFFSET(msdu) =
1071 			 HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv);
1072 	QDF_NBUF_CB_RX_FLOW_ID(msdu) =
1073 			 HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv);
1074 
1075 	dp_rx_print_offload_info(soc, rx_tlv);
1076 }
1077 #else
1078 static void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
1079 				qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
1080 {
1081 }
1082 #endif /* RECEIVE_OFFLOAD */
1083 
1084 /**
1085  * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
1086  *
1087  * @nbuf: pointer to msdu.
1088  * @mpdu_len: mpdu length
1089  *
1090  * Return: true if nbuf is the last msdu of the mpdu, else false.
1091  */
1092 static inline bool dp_rx_adjust_nbuf_len(qdf_nbuf_t nbuf, uint16_t *mpdu_len)
1093 {
1094 	bool last_nbuf;
1095 
1096 	if (*mpdu_len > (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN)) {
1097 		qdf_nbuf_set_pktlen(nbuf, RX_DATA_BUFFER_SIZE);
1098 		last_nbuf = false;
1099 	} else {
1100 		qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + RX_PKT_TLVS_LEN));
1101 		last_nbuf = true;
1102 	}
1103 
1104 	*mpdu_len -= (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN);
1105 
1106 	return last_nbuf;
1107 }
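
/*
 * Worked example (a sketch; the buffer constants are build-time configurable,
 * the values below are only assumed for illustration). With
 * RX_DATA_BUFFER_SIZE = 2048 and RX_PKT_TLVS_LEN = 384, each call can account
 * for at most 2048 - 384 = 1664 bytes of MSDU payload:
 *
 *	mpdu_len = 3000
 *	1st call: 3000 > 1664 -> pktlen = 2048, mpdu_len = 1336, last = false
 *	2nd call: 1336 <= 1664 -> pktlen = 1336 + 384 = 1720, last = true
 */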
1108 
1109 /**
1110  * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
1111  *		     multiple nbufs.
1112  * @nbuf: pointer to the first msdu of an amsdu.
1113  *
1114  * This function implements the creation of RX frag_list for cases
1115  * where an MSDU is spread across multiple nbufs.
1116  *
1117  * Return: returns the head nbuf which contains complete frag_list.
1118  */
1119 qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf)
1120 {
1121 	qdf_nbuf_t parent, frag_list, next = NULL;
1122 	uint16_t frag_list_len = 0;
1123 	uint16_t mpdu_len;
1124 	bool last_nbuf;
1125 
1126 	/*
1127 	 * Use msdu len got from REO entry descriptor instead since
1128 	 * there is case the RX PKT TLV is corrupted while msdu_len
1129 	 * from REO descriptor is right for non-raw RX scatter msdu.
1130 	 */
1131 	mpdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
1132 	/*
1133 	 * this is a case where the complete msdu fits in one single nbuf.
1134 	 * in this case HW sets both start and end bit and we only need to
1135 	 * reset these bits for RAW mode simulator to decap the pkt
1136 	 */
1137 	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
1138 					qdf_nbuf_is_rx_chfrag_end(nbuf)) {
1139 		qdf_nbuf_set_pktlen(nbuf, mpdu_len + RX_PKT_TLVS_LEN);
1140 		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
1141 		return nbuf;
1142 	}
1143 
1144 	/*
1145 	 * This is a case where we have multiple msdus (A-MSDU) spread across
1146 	 * multiple nbufs. here we create a fraglist out of these nbufs.
1147 	 *
1148 	 * the moment we encounter a nbuf with continuation bit set we
1149 	 * know for sure we have an MSDU which is spread across multiple
1150 	 * nbufs. We loop through and reap nbufs till we reach last nbuf.
1151 	 */
1152 	parent = nbuf;
1153 	frag_list = nbuf->next;
1154 	nbuf = nbuf->next;
1155 
1156 	/*
1157 	 * set the start bit in the first nbuf we encounter with continuation
1158 	 * bit set. This has the proper mpdu length set as it is the first
1159 	 * msdu of the mpdu. this becomes the parent nbuf and the subsequent
1160 	 * nbufs will form the frag_list of the parent nbuf.
1161 	 */
1162 	qdf_nbuf_set_rx_chfrag_start(parent, 1);
1163 	last_nbuf = dp_rx_adjust_nbuf_len(parent, &mpdu_len);
1164 
1165 	/*
1166 	 * this is where we set the length of the fragments which are
1167 	 * associated to the parent nbuf. We iterate through the frag_list
1168 	 * till we hit the last_nbuf of the list.
1169 	 */
1170 	do {
1171 		last_nbuf = dp_rx_adjust_nbuf_len(nbuf, &mpdu_len);
1172 		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
1173 		frag_list_len += qdf_nbuf_len(nbuf);
1174 
1175 		if (last_nbuf) {
1176 			next = nbuf->next;
1177 			nbuf->next = NULL;
1178 			break;
1179 		}
1180 
1181 		nbuf = nbuf->next;
1182 	} while (!last_nbuf);
1183 
1184 	qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
1185 	qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
1186 	parent->next = next;
1187 
1188 	qdf_nbuf_pull_head(parent, RX_PKT_TLVS_LEN);
1189 	return parent;
1190 }
1191 
1192 /**
1193  * dp_rx_compute_delay() - Compute reap-to-stack and inter-frame delays
1194  *			    and update the per-tid delay stats
1195  *
1196  * @vdev: vdev handle
1197  * @nbuf: rx buffer for which the delays are computed
1198  *
1199  * Return: none
1200  */
1201 void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1202 {
1203 	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
1204 	int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get());
1205 	uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
1206 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
1207 	uint32_t interframe_delay =
1208 		(uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp);
1209 
1210 	dp_update_delay_stats(vdev->pdev, to_stack, tid,
1211 			      CDP_DELAY_STATS_REAP_STACK, ring_id);
1212 	/*
1213 	 * Update interframe delay stats calculated at deliver_data_ol point.
1214 	 * Value of vdev->prev_rx_deliver_tstamp will be 0 for 1st frame, so
1215 	 * interframe delay will not be calculated correctly for the 1st frame.
1216 	 * On the other hand, this helps avoid an extra per-packet check
1217 	 * of vdev->prev_rx_deliver_tstamp.
1218 	 */
1219 	dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
1220 			      CDP_DELAY_STATS_RX_INTERFRAME, ring_id);
1221 	vdev->prev_rx_deliver_tstamp = current_ts;
1222 }
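
/*
 * Illustrative timeline (sketch): if an nbuf is timestamped at reap time and
 * dp_rx_compute_delay() runs 3 ms later, to_stack = 3 is accounted under
 * CDP_DELAY_STATS_REAP_STACK; if the previous frame on this vdev was
 * delivered 10 ms earlier, interframe_delay = 10 is accounted under
 * CDP_DELAY_STATS_RX_INTERFRAME. The first frame after (re)start reports a
 * meaningless interframe value, as noted in the comment above.
 */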
1223 
1224 /**
1225  * dp_rx_drop_nbuf_list() - drop an nbuf list
1226  * @pdev: dp pdev reference
1227  * @buf_list: buffer list to be dropped
1228  *
1229  * Return: int (number of bufs dropped)
1230  */
1231 static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev,
1232 				       qdf_nbuf_t buf_list)
1233 {
1234 	struct cdp_tid_rx_stats *stats = NULL;
1235 	uint8_t tid = 0, ring_id = 0;
1236 	int num_dropped = 0;
1237 	qdf_nbuf_t buf, next_buf;
1238 
1239 	buf = buf_list;
1240 	while (buf) {
1241 		ring_id = QDF_NBUF_CB_RX_CTX_ID(buf);
1242 		next_buf = qdf_nbuf_queue_next(buf);
1243 		tid = qdf_nbuf_get_tid_val(buf);
1244 		if (qdf_likely(pdev)) {
1245 			stats = &pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
1246 			stats->fail_cnt[INVALID_PEER_VDEV]++;
1247 			stats->delivered_to_stack--;
1248 		}
1249 		qdf_nbuf_free(buf);
1250 		buf = next_buf;
1251 		num_dropped++;
1252 	}
1253 
1254 	return num_dropped;
1255 }
1256 
1257 #ifdef PEER_CACHE_RX_PKTS
1258 /**
1259  * dp_rx_flush_rx_cached() - flush cached rx frames
1260  * @peer: peer
1261  * @drop: flag to drop frames or forward to net stack
1262  *
1263  * Return: None
1264  */
1265 void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
1266 {
1267 	struct dp_peer_cached_bufq *bufqi;
1268 	struct dp_rx_cached_buf *cache_buf = NULL;
1269 	ol_txrx_rx_fp data_rx = NULL;
1270 	int num_buff_elem;
1271 	QDF_STATUS status;
1272 
1273 	if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
1274 		qdf_atomic_dec(&peer->flush_in_progress);
1275 		return;
1276 	}
1277 
1278 	qdf_spin_lock_bh(&peer->peer_info_lock);
1279 	if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx)
1280 		data_rx = peer->vdev->osif_rx;
1281 	else
1282 		drop = true;
1283 	qdf_spin_unlock_bh(&peer->peer_info_lock);
1284 
1285 	bufqi = &peer->bufq_info;
1286 
1287 	qdf_spin_lock_bh(&bufqi->bufq_lock);
1288 	qdf_list_remove_front(&bufqi->cached_bufq,
1289 			      (qdf_list_node_t **)&cache_buf);
1290 	while (cache_buf) {
1291 		num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(
1292 								cache_buf->buf);
1293 		bufqi->entries -= num_buff_elem;
1294 		qdf_spin_unlock_bh(&bufqi->bufq_lock);
1295 		if (drop) {
1296 			bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1297 							      cache_buf->buf);
1298 		} else {
1299 			/* Flush the cached frames to OSIF DEV */
1300 			status = data_rx(peer->vdev->osif_vdev, cache_buf->buf);
1301 			if (status != QDF_STATUS_SUCCESS)
1302 				bufqi->dropped = dp_rx_drop_nbuf_list(
1303 							peer->vdev->pdev,
1304 							cache_buf->buf);
1305 		}
1306 		qdf_mem_free(cache_buf);
1307 		cache_buf = NULL;
1308 		qdf_spin_lock_bh(&bufqi->bufq_lock);
1309 		qdf_list_remove_front(&bufqi->cached_bufq,
1310 				      (qdf_list_node_t **)&cache_buf);
1311 	}
1312 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
1313 	qdf_atomic_dec(&peer->flush_in_progress);
1314 }
1315 
1316 /**
1317  * dp_rx_enqueue_rx() - cache rx frames
1318  * @peer: peer
1319  * @rx_buf_list: cache buffer list
1320  *
1321  * Return: None
1322  */
1323 static QDF_STATUS
1324 dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
1325 {
1326 	struct dp_rx_cached_buf *cache_buf;
1327 	struct dp_peer_cached_bufq *bufqi = &peer->bufq_info;
1328 	int num_buff_elem;
1329 
1330 	dp_debug_rl("bufq->curr %d bufq->drops %d", bufqi->entries,
1331 		    bufqi->dropped);
1332 	if (!peer->valid) {
1333 		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1334 						      rx_buf_list);
1335 		return QDF_STATUS_E_INVAL;
1336 	}
1337 
1338 	qdf_spin_lock_bh(&bufqi->bufq_lock);
1339 	if (bufqi->entries >= bufqi->thresh) {
1340 		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1341 						      rx_buf_list);
1342 		qdf_spin_unlock_bh(&bufqi->bufq_lock);
1343 		return QDF_STATUS_E_RESOURCES;
1344 	}
1345 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
1346 
1347 	num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list);
1348 
1349 	cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf));
1350 	if (!cache_buf) {
1351 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1352 			  "Failed to allocate buf to cache rx frames");
1353 		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1354 						      rx_buf_list);
1355 		return QDF_STATUS_E_NOMEM;
1356 	}
1357 
1358 	cache_buf->buf = rx_buf_list;
1359 
1360 	qdf_spin_lock_bh(&bufqi->bufq_lock);
1361 	qdf_list_insert_back(&bufqi->cached_bufq,
1362 			     &cache_buf->node);
1363 	bufqi->entries += num_buff_elem;
1364 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
1365 
1366 	return QDF_STATUS_SUCCESS;
1367 }
1368 
1369 static inline
1370 bool dp_rx_is_peer_cache_bufq_supported(void)
1371 {
1372 	return true;
1373 }
1374 #else
1375 static inline
1376 bool dp_rx_is_peer_cache_bufq_supported(void)
1377 {
1378 	return false;
1379 }
1380 
1381 static inline QDF_STATUS
1382 dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
1383 {
1384 	return QDF_STATUS_SUCCESS;
1385 }
1386 #endif
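
/*
 * Illustrative flow (an assumption about the peer state machinery, which is
 * not part of this file): while a peer is still being registered,
 * dp_rx_deliver_to_stack() below parks frames via dp_rx_enqueue_rx(); once the
 * OS interface callback is ready (or the peer is torn down) the owner flushes
 * the queue, e.g.
 *
 *	dp_rx_flush_rx_cached(peer, false);	// replay frames to the stack
 *	dp_rx_flush_rx_cached(peer, true);	// or drop everything
 */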
1387 
1388 #ifndef DELIVERY_TO_STACK_STATUS_CHECK
1389 /**
1390  * dp_rx_check_delivery_to_stack() - Deliver pkts to network
1391  * using the appropriate call back functions.
1392  * @soc: soc
1393  * @vdev: vdev
1394  * @peer: peer
1395  * @nbuf_head: skb list head
1397  *
1398  * Return: None
1399  */
1400 static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
1401 					  struct dp_vdev *vdev,
1402 					  struct dp_peer *peer,
1403 					  qdf_nbuf_t nbuf_head)
1404 {
1405 	/* Function pointer initialized only when FISA is enabled */
1406 	if (vdev->osif_fisa_rx)
1407 		/* on failure send it via regular path */
1408 		vdev->osif_fisa_rx(soc, vdev, nbuf_head);
1409 	else
1410 		vdev->osif_rx(vdev->osif_vdev, nbuf_head);
1411 }
1412 
1413 #else
1414 /**
1415  * dp_rx_check_delivery_to_stack() - Deliver pkts to network
1416  * using the appropriate call back functions.
1417  * @soc: soc
1418  * @vdev: vdev
1419  * @peer: peer
1420  * @nbuf_head: skb list head
1422  *
1423  * Check the return status of the call back function and drop
1424  * the packets if the return status indicates a failure.
1425  *
1426  * Return: None
1427  */
1428 static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
1429 					  struct dp_vdev *vdev,
1430 					  struct dp_peer *peer,
1431 					  qdf_nbuf_t nbuf_head)
1432 {
1433 	int num_nbuf = 0;
1434 	QDF_STATUS ret_val = QDF_STATUS_E_FAILURE;
1435 
1436 	/* Function pointer initialized only when FISA is enabled */
1437 	if (vdev->osif_fisa_rx)
1438 		/* on failure send it via regular path */
1439 		ret_val = vdev->osif_fisa_rx(soc, vdev, nbuf_head);
1440 	else if (vdev->osif_rx)
1441 		ret_val = vdev->osif_rx(vdev->osif_vdev, nbuf_head);
1442 
1443 	if (!QDF_IS_STATUS_SUCCESS(ret_val)) {
1444 		num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);
1445 		DP_STATS_INC(soc, rx.err.rejected, num_nbuf);
1446 		if (peer)
1447 			DP_STATS_DEC(peer, rx.to_stack.num, num_nbuf);
1448 	}
1449 }
1450 #endif /* ifdef DELIVERY_TO_STACK_STATUS_CHECK */
1451 
1452 void dp_rx_deliver_to_stack(struct dp_soc *soc,
1453 			    struct dp_vdev *vdev,
1454 			    struct dp_peer *peer,
1455 			    qdf_nbuf_t nbuf_head,
1456 			    qdf_nbuf_t nbuf_tail)
1457 {
1458 	int num_nbuf = 0;
1459 
1460 	if (qdf_unlikely(!vdev || vdev->delete.pending)) {
1461 		num_nbuf = dp_rx_drop_nbuf_list(NULL, nbuf_head);
1462 		/*
1463 		 * This is a special case where vdev is invalid,
1464 		 * so we cannot know the pdev to which this packet
1465 		 * belonged. Hence we update the soc rx error stats.
1466 		 */
1467 		DP_STATS_INC(soc, rx.err.invalid_vdev, num_nbuf);
1468 		return;
1469 	}
1470 
1471 	/*
1472 	 * highly unlikely to have a vdev without a registered rx
1473 	 * callback function. if so let us free the nbuf_list.
1474 	 */
1475 	if (qdf_unlikely(!vdev->osif_rx)) {
1476 		if (peer && dp_rx_is_peer_cache_bufq_supported()) {
1477 			dp_rx_enqueue_rx(peer, nbuf_head);
1478 		} else {
1479 			num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev,
1480 							nbuf_head);
1481 			DP_STATS_DEC(peer, rx.to_stack.num, num_nbuf);
1482 		}
1483 		return;
1484 	}
1485 
1486 	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
1487 			(vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
1488 		vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
1489 				&nbuf_tail, peer->mac_addr.raw);
1490 	}
1491 
1492 	dp_rx_check_delivery_to_stack(soc, vdev, peer, nbuf_head);
1493 }
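
/*
 * Caller sketch (an assumption about dp_rx_process(), which lives outside
 * this excerpt): MSDUs are batched into a per-vdev delivery list during
 * reaping and handed over in one shot, e.g.
 *
 *	dp_rx_deliver_to_stack(soc, vdev, peer,
 *			       deliver_list_head, deliver_list_tail);
 *
 * so the checks above (deleted vdev, missing osif_rx, raw/native-wifi decap)
 * run once per batch rather than once per packet.
 */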
1494 
1495 /**
1496  * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
1497  * @nbuf: pointer to the first msdu of an amsdu.
1498  * @rx_tlv_hdr: pointer to the start of RX TLV headers.
1499  *
1500  * The ip_summed field of the skb is set based on whether HW validated the
1501  * IP/TCP/UDP checksum.
1502  *
1503  * Return: void
1504  */
1505 static inline void dp_rx_cksum_offload(struct dp_pdev *pdev,
1506 				       qdf_nbuf_t nbuf,
1507 				       uint8_t *rx_tlv_hdr)
1508 {
1509 	qdf_nbuf_rx_cksum_t cksum = {0};
1510 	bool ip_csum_err = hal_rx_attn_ip_cksum_fail_get(rx_tlv_hdr);
1511 	bool tcp_udp_csum_er = hal_rx_attn_tcp_udp_cksum_fail_get(rx_tlv_hdr);
1512 
1513 	if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) {
1514 		cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
1515 		qdf_nbuf_set_rx_cksum(nbuf, &cksum);
1516 	} else {
1517 		DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err);
1518 		DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er);
1519 	}
1520 }
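
/*
 * Consumer-side effect (hedged; the exact mapping depends on the QDF/OS
 * shim): QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY is expected to translate to
 * skb->ip_summed = CHECKSUM_UNNECESSARY on Linux, so the network stack skips
 * L4 checksum verification. When HW flags an error, no checksum flag is set,
 * the stack re-validates the packet, and the pdev err.ip_csum_err /
 * err.tcp_udp_csum_err counters above record the HW-reported failures.
 */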
1521 
1522 #ifdef VDEV_PEER_PROTOCOL_COUNT
1523 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, peer) \
1524 { \
1525 	qdf_nbuf_t nbuf_local; \
1526 	struct dp_peer *peer_local; \
1527 	struct dp_vdev *vdev_local = vdev_hdl; \
1528 	do { \
1529 		if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \
1530 			break; \
1531 		nbuf_local = nbuf; \
1532 		peer_local = peer; \
1533 		if (qdf_unlikely(qdf_nbuf_is_frag((nbuf_local)))) \
1534 			break; \
1535 		else if (qdf_unlikely(qdf_nbuf_is_raw_frame((nbuf_local)))) \
1536 			break; \
1537 		dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \
1538 						       (nbuf_local), \
1539 						       (peer_local), 0, 1); \
1540 	} while (0); \
1541 }
1542 #else
1543 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, peer)
1544 #endif
1545 
1546 /**
1547  * dp_rx_msdu_stats_update() - update per msdu stats.
1548  * @soc: core txrx main context
1549  * @nbuf: pointer to the first msdu of an amsdu.
1550  * @rx_tlv_hdr: pointer to the start of RX TLV headers.
1551  * @peer: pointer to the peer object.
1552  * @ring_id: reo dest ring number on which pkt is reaped.
1553  * @tid_stats: per tid rx stats.
1554  *
1555  * update all the per msdu stats for that nbuf.
1556  * Return: void
1557  */
1558 static void dp_rx_msdu_stats_update(struct dp_soc *soc,
1559 				    qdf_nbuf_t nbuf,
1560 				    uint8_t *rx_tlv_hdr,
1561 				    struct dp_peer *peer,
1562 				    uint8_t ring_id,
1563 				    struct cdp_tid_rx_stats *tid_stats)
1564 {
1565 	bool is_ampdu, is_not_amsdu;
1566 	uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
1567 	struct dp_vdev *vdev = peer->vdev;
1568 	qdf_ether_header_t *eh;
1569 	uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
1570 
1571 	dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, peer);
1572 	is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
1573 			qdf_nbuf_is_rx_chfrag_end(nbuf);
1574 
1575 	DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1, msdu_len);
1576 	DP_STATS_INCC(peer, rx.non_amsdu_cnt, 1, is_not_amsdu);
1577 	DP_STATS_INCC(peer, rx.amsdu_cnt, 1, !is_not_amsdu);
1578 	DP_STATS_INCC(peer, rx.rx_retries, 1, qdf_nbuf_is_rx_retry_flag(nbuf));
1579 
1580 	tid_stats->msdu_cnt++;
1581 	if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) &&
1582 			 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
1583 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1584 		DP_STATS_INC_PKT(peer, rx.multicast, 1, msdu_len);
1585 		tid_stats->mcast_msdu_cnt++;
1586 		if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
1587 			DP_STATS_INC_PKT(peer, rx.bcast, 1, msdu_len);
1588 			tid_stats->bcast_msdu_cnt++;
1589 		}
1590 	}
1591 
1592 	/*
1593 	 * currently we can return from here as we have similar stats
1594 	 * updated at per ppdu level instead of msdu level
1595 	 */
1596 	if (!soc->process_rx_status)
1597 		return;
1598 
1599 	is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(rx_tlv_hdr);
1600 	DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu);
1601 	DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu));
1602 
1603 	sgi = hal_rx_msdu_start_sgi_get(rx_tlv_hdr);
1604 	mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
1605 	tid = qdf_nbuf_get_tid_val(nbuf);
1606 	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
1607 	reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc,
1608 							      rx_tlv_hdr);
1609 	nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
1610 	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
1611 
1612 	DP_STATS_INCC(peer, rx.rx_mpdu_cnt[mcs], 1,
1613 		      ((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
1614 	DP_STATS_INCC(peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1,
1615 		      ((mcs >= MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
1616 	DP_STATS_INC(peer, rx.bw[bw], 1);
1617 	/*
1618 	 * only if nss > 0 and pkt_type is 11N/AC/AX,
1619 	 * then increase index [nss - 1] in array counter.
1620 	 */
1621 	if (nss > 0 && (pkt_type == DOT11_N ||
1622 			pkt_type == DOT11_AC ||
1623 			pkt_type == DOT11_AX))
1624 		DP_STATS_INC(peer, rx.nss[nss - 1], 1);
1625 
1626 	DP_STATS_INC(peer, rx.sgi_count[sgi], 1);
1627 	DP_STATS_INCC(peer, rx.err.mic_err, 1,
1628 		      hal_rx_mpdu_end_mic_err_get(rx_tlv_hdr));
1629 	DP_STATS_INCC(peer, rx.err.decrypt_err, 1,
1630 		      hal_rx_mpdu_end_decrypt_err_get(rx_tlv_hdr));
1631 
1632 	DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
1633 	DP_STATS_INC(peer, rx.reception_type[reception_type], 1);
1634 
1635 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1636 		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
1637 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1638 		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A)));
1639 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1640 		      ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
1641 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1642 		      ((mcs <= MAX_MCS_11B) && (pkt_type == DOT11_B)));
1643 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1644 		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
1645 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1646 		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_N)));
1647 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1648 		      ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
1649 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1650 		      ((mcs <= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
1651 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1652 		      ((mcs >= MAX_MCS) && (pkt_type == DOT11_AX)));
1653 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1654 		      ((mcs < MAX_MCS) && (pkt_type == DOT11_AX)));
1655 
1656 	if ((soc->process_rx_status) &&
1657 	    hal_rx_attn_first_mpdu_get(rx_tlv_hdr)) {
1658 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
1659 		if (!vdev->pdev)
1660 			return;
1661 
1662 		dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
1663 				     &peer->stats, peer->peer_id,
1664 				     UPDATE_PEER_STATS,
1665 				     vdev->pdev->pdev_id);
1666 #endif
1667 
1668 	}
1669 }
1670 
1671 static inline bool is_sa_da_idx_valid(struct dp_soc *soc,
1672 				      uint8_t *rx_tlv_hdr,
1673 				      qdf_nbuf_t nbuf,
1674 				      struct hal_rx_msdu_metadata msdu_info)
1675 {
1676 	if ((qdf_nbuf_is_sa_valid(nbuf) &&
1677 	    (msdu_info.sa_idx > wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) ||
1678 	    (!qdf_nbuf_is_da_mcbc(nbuf) &&
1679 	     qdf_nbuf_is_da_valid(nbuf) &&
1680 	     (msdu_info.da_idx > wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))))
1681 		return false;
1682 
1683 	return true;
1684 }
1685 
1686 #ifndef WDS_VENDOR_EXTENSION
1687 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
1688 			   struct dp_vdev *vdev,
1689 			   struct dp_peer *peer)
1690 {
1691 	return 1;
1692 }
1693 #endif
1694 
1695 #ifdef RX_DESC_DEBUG_CHECK
1696 /**
1697  * dp_rx_desc_nbuf_sanity_check - Add sanity check to catch REO rx_desc paddr
1698  *				  corruption
1699  *
1700  * @ring_desc: REO ring descriptor
1701  * @rx_desc: Rx descriptor
1702  *
1703  * Return: NONE
1704  */
1705 static inline
1706 void dp_rx_desc_nbuf_sanity_check(hal_ring_desc_t ring_desc,
1707 				  struct dp_rx_desc *rx_desc)
1708 {
1709 	struct hal_buf_info hbi;
1710 
1711 	hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
1712 	/* Sanity check for possible buffer paddr corruption */
1713 	qdf_assert_always((&hbi)->paddr ==
1714 			  qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0));
1715 }
1716 #else
1717 static inline
1718 void dp_rx_desc_nbuf_sanity_check(hal_ring_desc_t ring_desc,
1719 				  struct dp_rx_desc *rx_desc)
1720 {
1721 }
1722 #endif
1723 
1724 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
1725 static inline
1726 bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
1727 {
1728 	bool limit_hit = false;
1729 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
1730 
1731 	limit_hit =
1732 		(num_reaped >= cfg->rx_reap_loop_pkt_limit) ? true : false;
1733 
1734 	if (limit_hit)
1735 		DP_STATS_INC(soc, rx.reap_loop_pkt_limit_hit, 1)
1736 
1737 	return limit_hit;
1738 }
1739 
1740 static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
1741 {
1742 	return soc->wlan_cfg_ctx->rx_enable_eol_data_check;
1743 }
1744 
1745 #else
1746 static inline
1747 bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
1748 {
1749 	return false;
1750 }
1751 
1752 static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
1753 {
1754 	return false;
1755 }
1756 
1757 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
1758 
1759 #ifdef DP_RX_PKT_NO_PEER_DELIVER
1760 /**
1761  * dp_rx_deliver_to_stack_no_peer() - try to deliver rx data even if
1762  *				      no corresponding peer is found
1763  * @soc: core txrx main context
1764  * @nbuf: pkt skb pointer
1765  *
1766  * This function tries to deliver certain special RX frames to the stack
1767  * even when no matching peer is found. For instance, in the LFR case,
1768  * some EAPOL data is sent to the host before peer_map is done.
1769  *
1770  * Return: None
1771  */
1772 static
1773 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
1774 {
1775 	uint16_t peer_id;
1776 	uint8_t vdev_id;
1777 	struct dp_vdev *vdev;
1778 	uint32_t l2_hdr_offset = 0;
1779 	uint16_t msdu_len = 0;
1780 	uint32_t pkt_len = 0;
1781 	uint8_t *rx_tlv_hdr;
1782 	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
1783 				FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;
1784 
1785 	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
1786 	if (peer_id > soc->max_peers)
1787 		goto deliver_fail;
1788 
1789 	vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
1790 	vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
1791 	if (!vdev || vdev->delete.pending || !vdev->osif_rx)
1792 		goto deliver_fail;
1793 
1794 	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf)))
1795 		goto deliver_fail;
1796 
1797 	rx_tlv_hdr = qdf_nbuf_data(nbuf);
1798 	l2_hdr_offset =
1799 		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
1800 
1801 	msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
1802 	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;
1803 	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
1804 
1805 	qdf_nbuf_set_pktlen(nbuf, pkt_len);
1806 	qdf_nbuf_pull_head(nbuf,
1807 			   RX_PKT_TLVS_LEN +
1808 			   l2_hdr_offset);
1809 
1810 	if (dp_rx_is_special_frame(nbuf, frame_mask)) {
1811 		qdf_nbuf_set_exc_frame(nbuf, 1);
1812 		if (QDF_STATUS_SUCCESS !=
1813 		    vdev->osif_rx(vdev->osif_vdev, nbuf))
1814 			goto deliver_fail;
1815 		DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1);
1816 		return;
1817 	}
1818 
1819 deliver_fail:
1820 	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1821 			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
1822 	qdf_nbuf_free(nbuf);
1823 }
1824 #else
1825 static inline
1826 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
1827 {
1828 	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1829 			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
1830 	qdf_nbuf_free(nbuf);
1831 }
1832 #endif
1833 
1834 /**
1835  * dp_rx_srng_get_num_pending() - get number of pending entries
1836  * @hal_soc: hal soc opaque pointer
1837  * @hal_ring_hdl: opaque pointer to the HAL Rx ring
1838  * @num_entries: number of entries in the hal_ring.
1839  * @near_full: pointer to a boolean. This is set if ring is near full.
1840  *
1841  * The function returns the number of entries in a destination ring which are
1842  * yet to be reaped. The function also checks if the ring is near full.
1843  * If more than half of the ring needs to be reaped, the ring is considered
1844  * approaching full.
1845  * The function uses hal_srng_dst_num_valid_locked() to get the number of
1846  * valid entries. It should not be called while holding the SRNG lock; the
1847  * HW head pointer value is synced into cached_hp.
1848  *
1849  * Return: Number of pending entries if any
1850  */
1851 static
1852 uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
1853 				    hal_ring_handle_t hal_ring_hdl,
1854 				    uint32_t num_entries,
1855 				    bool *near_full)
1856 {
1857 	uint32_t num_pending = 0;
1858 
1859 	num_pending = hal_srng_dst_num_valid_locked(hal_soc,
1860 						    hal_ring_hdl,
1861 						    true);
1862 
1863 	if (num_entries && (num_pending >= num_entries >> 1))
1864 		*near_full = true;
1865 	else
1866 		*near_full = false;
1867 
1868 	return num_pending;
1869 }
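/*
 * Usage sketch (this mirrors the end-of-quota check in dp_rx_process()):
 *
 *	bool near_full;
 *	uint32_t num_pending;
 *
 *	num_pending = dp_rx_srng_get_num_pending(hal_soc, hal_ring_hdl,
 *						 num_entries, &near_full);
 *	if (num_pending && !hif_exec_should_yield(scn, intr_id))
 *		goto more_data;
 */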
1870 
1871 #ifdef WLAN_SUPPORT_RX_FISA
1872 void dp_rx_skip_tlvs(qdf_nbuf_t nbuf, uint32_t l3_padding)
1873 {
1874 	QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
1875 	qdf_nbuf_pull_head(nbuf, l3_padding + RX_PKT_TLVS_LEN);
1876 }
1877 
1878 /**
1879  * dp_rx_set_hdr_pad() - set l3 padding in nbuf cb
1880  * @nbuf: pkt skb pointer
1881  * @l3_padding: l3 padding
1882  *
1883  * Return: None
1884  */
1885 static inline
1886 void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
1887 {
1888 	QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
1889 }
1890 #else
1891 void dp_rx_skip_tlvs(qdf_nbuf_t nbuf, uint32_t l3_padding)
1892 {
1893 	qdf_nbuf_pull_head(nbuf, l3_padding + RX_PKT_TLVS_LEN);
1894 }
1895 
1896 static inline
1897 void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
1898 {
1899 }
1900 #endif
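/*
 * Note on the receive buffer layout assumed by dp_rx_skip_tlvs():
 *
 *	| RX TLVs (RX_PKT_TLVS_LEN) | l3_padding | MSDU header + payload |
 *
 * The pull-head drops the TLVs and padding so that nbuf->data points at the
 * MSDU; with WLAN_SUPPORT_RX_FISA the padding length is additionally stashed
 * in the nbuf control block for later use.
 */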
1901 
1902 
1903 /**
1904  * dp_rx_process() - Brain of the Rx processing functionality
1905  *		     Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
1906  * @int_ctx: per interrupt context
1907  * @hal_ring_hdl: opaque pointer to the HAL Rx ring, which will be serviced
1908  * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring.
1909  * @quota: No. of units (packets) that can be serviced in one shot.
1910  *
1911  * This function implements the core of Rx functionality. This is
1912  * expected to handle only non-error frames.
1913  *
1914  * Return: uint32_t: No. of elements processed
1915  */
1916 uint32_t dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
1917 			    uint8_t reo_ring_num, uint32_t quota)
1918 {
1919 	hal_ring_desc_t ring_desc;
1920 	hal_soc_handle_t hal_soc;
1921 	struct dp_rx_desc *rx_desc = NULL;
1922 	qdf_nbuf_t nbuf, next;
1923 	bool near_full;
1924 	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT];
1925 	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT];
1926 	uint32_t num_pending;
1927 	uint32_t rx_bufs_used = 0, rx_buf_cookie;
1928 	uint16_t msdu_len = 0;
1929 	uint16_t peer_id;
1930 	uint8_t vdev_id;
1931 	struct dp_peer *peer;
1932 	struct dp_vdev *vdev;
1933 	uint32_t pkt_len = 0;
1934 	struct hal_rx_mpdu_desc_info mpdu_desc_info;
1935 	struct hal_rx_msdu_desc_info msdu_desc_info;
1936 	enum hal_reo_error_status error;
1937 	uint32_t peer_mdata;
1938 	uint8_t *rx_tlv_hdr;
1939 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT];
1940 	uint8_t mac_id = 0;
1941 	struct dp_pdev *rx_pdev;
1942 	struct dp_srng *dp_rxdma_srng;
1943 	struct rx_desc_pool *rx_desc_pool;
1944 	struct dp_soc *soc = int_ctx->soc;
1945 	uint8_t ring_id = 0;
1946 	uint8_t core_id = 0;
1947 	struct cdp_tid_rx_stats *tid_stats;
1948 	qdf_nbuf_t nbuf_head;
1949 	qdf_nbuf_t nbuf_tail;
1950 	qdf_nbuf_t deliver_list_head;
1951 	qdf_nbuf_t deliver_list_tail;
1952 	uint32_t num_rx_bufs_reaped = 0;
1953 	uint32_t intr_id;
1954 	struct hif_opaque_softc *scn;
1955 	int32_t tid = 0;
1956 	bool is_prev_msdu_last = true;
1957 	uint32_t num_entries_avail = 0;
1958 	uint32_t rx_ol_pkt_cnt = 0;
1959 	uint32_t num_entries = 0;
1960 	struct hal_rx_msdu_metadata msdu_metadata;
1961 	QDF_STATUS status;
1962 
1963 	DP_HIST_INIT();
1964 
1965 	qdf_assert_always(soc && hal_ring_hdl);
1966 	hal_soc = soc->hal_soc;
1967 	qdf_assert_always(hal_soc);
1968 
1969 	scn = soc->hif_handle;
1970 	hif_pm_runtime_mark_dp_rx_busy(scn);
1971 	intr_id = int_ctx->dp_intr_id;
1972 	num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);
1973 
1974 more_data:
1975 	/* reset local variables here to be re-used in the function */
1976 	nbuf_head = NULL;
1977 	nbuf_tail = NULL;
1978 	deliver_list_head = NULL;
1979 	deliver_list_tail = NULL;
1980 	peer = NULL;
1981 	vdev = NULL;
1982 	num_rx_bufs_reaped = 0;
1983 
1984 	qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
1985 	qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
1986 	qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
1987 	qdf_mem_zero(head, sizeof(head));
1988 	qdf_mem_zero(tail, sizeof(tail));
1989 
1990 	if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
1991 
1992 		/*
1993 		 * Need API to convert from hal_ring pointer to
1994 		 * Ring Type / Ring Id combo
1995 		 */
1996 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
1997 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1998 			FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
1999 		goto done;
2000 	}
2001 
2002 	/*
2003 	 * start reaping the buffers from reo ring and queue
2004 	 * them in per vdev queue.
2005 	 * Process the received pkts in a different per vdev loop.
2006 	 */
2007 	while (qdf_likely(quota &&
2008 			  (ring_desc = hal_srng_dst_peek(hal_soc,
2009 							 hal_ring_hdl)))) {
2010 
2011 		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
2012 		ring_id = hal_srng_ring_id_get(hal_ring_hdl);
2013 
2014 		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
2015 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2016 			FL("HAL RING 0x%pK:error %d"), hal_ring_hdl, error);
2017 			DP_STATS_INC(soc, rx.err.hal_reo_error[ring_id], 1);
2018 			/* Don't know how to deal with this -- assert */
2019 			qdf_assert(0);
2020 		}
2021 
2022 		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
2023 
2024 		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
2025 		status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl,
2026 					   ring_desc, rx_desc);
2027 		if (QDF_IS_STATUS_ERROR(status)) {
2028 			if (qdf_unlikely(rx_desc && rx_desc->nbuf)) {
2029 				qdf_assert_always(rx_desc->unmapped);
2030 				qdf_nbuf_unmap_nbytes_single(
2031 							soc->osdev,
2032 							rx_desc->nbuf,
2033 							QDF_DMA_FROM_DEVICE,
2034 							RX_DATA_BUFFER_SIZE);
2035 				rx_desc->unmapped = 1;
2036 				qdf_nbuf_free(rx_desc->nbuf);
2037 				dp_rx_add_to_free_desc_list(
2038 							&head[rx_desc->pool_id],
2039 							&tail[rx_desc->pool_id],
2040 							rx_desc);
2041 			}
2042 			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
2043 			continue;
2044 		}
2045 
2046 		/*
2047 		 * This is an unlikely scenario where the host is reaping
2048 		 * a descriptor which it already reaped just a while ago
2049 		 * but has not yet replenished back to HW.
2050 		 * In this case the host will dump the last 128 descriptors
2051 		 * including the software descriptor rx_desc and assert.
2052 		 */
2053 
2054 		if (qdf_unlikely(!rx_desc->in_use)) {
2055 			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
2056 			dp_info_rl("Reaping rx_desc not in use!");
2057 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
2058 						   ring_desc, rx_desc);
2059 			/* ignore duplicate RX desc and continue to process */
2060 			/* Pop out the descriptor */
2061 			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
2062 			continue;
2063 		}
2064 
2065 		dp_rx_desc_nbuf_sanity_check(ring_desc, rx_desc);
2066 
2067 		if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
2068 			dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
2069 			DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
2070 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
2071 						   ring_desc, rx_desc);
2072 		}
2073 
2074 		/* Get MPDU DESC info */
2075 		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);
2076 
2077 		/* Get MSDU DESC info */
2078 		hal_rx_msdu_desc_info_get(ring_desc, &msdu_desc_info);
2079 
2080 		if (qdf_unlikely(msdu_desc_info.msdu_flags &
2081 				 HAL_MSDU_F_MSDU_CONTINUATION)) {
2082 			/* the previous msdu had its end bit set, so the
2083 			 * current one starts a new MPDU
2084 			 */
2085 			if (is_prev_msdu_last) {
2086 				/* Get number of entries available in HW ring */
2087 				num_entries_avail =
2088 				hal_srng_dst_num_valid(hal_soc,
2089 						       hal_ring_hdl, 1);
2090 
2091 				/* For new MPDU check if we can read complete
2092 				 * MPDU by comparing the number of buffers
2093 				 * available and number of buffers needed to
2094 				 * reap this MPDU
2095 				 */
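				/* Illustration (actual sizes are build-time
				 * config): with a 2048 byte data buffer and,
				 * say, 384 bytes of RX TLVs, a 4000 byte MSDU
				 * needs 4000 / 1664 + 1 = 3 ring entries.
				 */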
2096 				if (((msdu_desc_info.msdu_len /
2097 				     (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN) +
2098 				     1)) > num_entries_avail) {
2099 					DP_STATS_INC(
2100 						soc,
2101 						rx.msdu_scatter_wait_break,
2102 						1);
2103 					break;
2104 				}
2105 				is_prev_msdu_last = false;
2106 			}
2107 
2108 		}
2109 
2110 		core_id = smp_processor_id();
2111 		DP_STATS_INC(soc, rx.ring_packets[core_id][ring_id], 1);
2112 
2113 		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
2114 			qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1);
2115 
2116 		if (qdf_unlikely(mpdu_desc_info.mpdu_flags &
2117 				 HAL_MPDU_F_RAW_AMPDU))
2118 			qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);
2119 
2120 		if (!is_prev_msdu_last &&
2121 		    msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
2122 			is_prev_msdu_last = true;
2123 
2124 		/* Pop out the descriptor*/
2125 		hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
2126 
2127 		rx_bufs_reaped[rx_desc->pool_id]++;
2128 		peer_mdata = mpdu_desc_info.peer_meta_data;
2129 		QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
2130 			DP_PEER_METADATA_PEER_ID_GET(peer_mdata);
2131 		QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) =
2132 			DP_PEER_METADATA_VDEV_ID_GET(peer_mdata);
2133 
2134 		/*
2135 		 * Save the first/last/continuation msdu flags in nbuf->cb,
2136 		 * along with mcbc, is_da_valid, is_sa_valid and the msdu
2137 		 * length. This ensures the info required for per pkt
2138 		 * processing is always in the same cache line.
2139 		 * This helps in improving throughput for smaller pkt
2140 		 * sizes.
2141 		 */
2142 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
2143 			qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);
2144 
2145 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
2146 			qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);
2147 
2148 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
2149 			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);
2150 
2151 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
2152 			qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1);
2153 
2154 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
2155 			qdf_nbuf_set_da_valid(rx_desc->nbuf, 1);
2156 
2157 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
2158 			qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1);
2159 
2160 		qdf_nbuf_set_tid_val(rx_desc->nbuf,
2161 				     HAL_RX_REO_QUEUE_NUMBER_GET(ring_desc));
2162 
2163 		QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len;
2164 
2165 		QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;
2166 
2167 		/*
2168 		 * the unmap is done after the scattered-msdu wait/break
2169 		 * logic above so that the same skb is not unmapped twice.
2170 		 */
2171 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
2172 		qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
2173 					     QDF_DMA_FROM_DEVICE,
2174 					     rx_desc_pool->buf_size);
2175 		rx_desc->unmapped = 1;
2176 		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
2177 
2178 		/*
2179 		 * if continuation bit is set then we have MSDU spread
2180 		 * across multiple buffers, let us not decrement quota
2181 		 * till we reap all buffers of that MSDU.
2182 		 */
2183 		if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)))
2184 			quota -= 1;
2185 
2186 		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
2187 						&tail[rx_desc->pool_id],
2188 						rx_desc);
2189 
2190 		num_rx_bufs_reaped++;
2191 		/*
2192 		 * for the scatter case, allow the break only once the
2193 		 * complete msdu has been received.
2194 		 */
2195 		if (is_prev_msdu_last &&
2196 		    dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped))
2197 			break;
2198 	}
2199 done:
2200 	dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);
2201 
2202 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
2203 		/*
2204 		 * continue with next mac_id if no pkts were reaped
2205 		 * from that pool
2206 		 */
2207 		if (!rx_bufs_reaped[mac_id])
2208 			continue;
2209 
2210 		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
2211 
2212 		rx_desc_pool = &soc->rx_desc_buf[mac_id];
2213 
2214 		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
2215 					rx_desc_pool, rx_bufs_reaped[mac_id],
2216 					&head[mac_id], &tail[mac_id]);
2217 	}
2218 
2219 	dp_verbose_debug("replenished %u\n", rx_bufs_reaped[0]);
2220 	/* Peer can be NULL in case of LFR */
2221 	if (qdf_likely(peer))
2222 		vdev = NULL;
2223 
2224 	/*
2225 	 * BIG loop where each nbuf is dequeued from global queue,
2226 	 * processed and queued back on a per vdev basis. These nbufs
2227 	 * are sent to stack as and when we run out of nbufs
2228 	 * or a new nbuf dequeued from global queue has a different
2229 	 * vdev when compared to previous nbuf.
2230 	 */
2231 	nbuf = nbuf_head;
2232 	while (nbuf) {
2233 		next = nbuf->next;
2234 		rx_tlv_hdr = qdf_nbuf_data(nbuf);
2235 		vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
2236 
2237 		if (deliver_list_head && vdev && (vdev->vdev_id != vdev_id)) {
2238 			dp_rx_deliver_to_stack(soc, vdev, peer,
2239 					       deliver_list_head,
2240 					       deliver_list_tail);
2241 			deliver_list_head = NULL;
2242 			deliver_list_tail = NULL;
2243 		}
2244 
2245 		/* Get TID from struct cb->tid_val, save to tid */
2246 		if (qdf_nbuf_is_rx_chfrag_start(nbuf))
2247 			tid = qdf_nbuf_get_tid_val(nbuf);
2248 
2249 		peer_id =  QDF_NBUF_CB_RX_PEER_ID(nbuf);
2250 		peer = dp_peer_find_by_id(soc, peer_id);
2251 
2252 		if (peer) {
2253 			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
2254 			qdf_dp_trace_set_track(nbuf, QDF_RX);
2255 			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
2256 			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
2257 				QDF_NBUF_RX_PKT_DATA_TRACK;
2258 		}
2259 
2260 		rx_bufs_used++;
2261 
2262 		if (qdf_likely(peer)) {
2263 			vdev = peer->vdev;
2264 		} else {
2265 			nbuf->next = NULL;
2266 			dp_rx_deliver_to_stack_no_peer(soc, nbuf);
2267 			nbuf = next;
2268 			continue;
2269 		}
2270 
2271 		if (qdf_unlikely(!vdev)) {
2272 			qdf_nbuf_free(nbuf);
2273 			nbuf = next;
2274 			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
2275 			dp_peer_unref_del_find_by_id(peer);
2276 			continue;
2277 		}
2278 
2279 		rx_pdev = vdev->pdev;
2280 		DP_RX_TID_SAVE(nbuf, tid);
2281 		if (qdf_unlikely(rx_pdev->delay_stats_flag))
2282 			qdf_nbuf_set_timestamp(nbuf);
2283 
2284 		ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
2285 		tid_stats =
2286 			&rx_pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
2287 
2288 		/*
2289 		 * Check if DMA completed -- msdu_done is the last bit
2290 		 * to be written
2291 		 */
2292 		if (qdf_unlikely(!qdf_nbuf_is_rx_chfrag_cont(nbuf) &&
2293 				 !hal_rx_attn_msdu_done_get(rx_tlv_hdr))) {
2294 			dp_err("MSDU DONE failure");
2295 			DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
2296 			hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
2297 					     QDF_TRACE_LEVEL_INFO);
2298 			tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
2299 			qdf_nbuf_free(nbuf);
2300 			qdf_assert(0);
2301 			nbuf = next;
2302 			continue;
2303 		}
2304 
2305 		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
2306 		/*
2307 		 * First IF condition:
2308 		 * 802.11 Fragmented pkts are reinjected to REO
2309 		 * HW block as SG pkts and for these pkts we only
2310 		 * need to pull the RX TLVS header length.
2311 		 * Second IF condition:
2312 		 * The below condition happens when an MSDU is spread
2313 		 * across multiple buffers. This can happen in two cases:
2314 		 * 1. The nbuf size is smaller than the received msdu.
2315 		 *    ex: the nbuf size was set to 2048 during
2316 		 *        nbuf_alloc, but an msdu of 2304 bytes was
2317 		 *        received, so this msdu is spread
2318 		 *        across 2 nbufs.
2319 		 *
2320 		 * 2. AMSDUs when RAW mode is enabled.
2321 		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
2322 		 *        across 1st nbuf and 2nd nbuf and last MSDU is
2323 		 *        spread across 2nd nbuf and 3rd nbuf.
2324 		 *
2325 		 * for these scenarios let us create a skb frag_list and
2326 		 * append these buffers till the last MSDU of the AMSDU
2327 		 * Third condition:
2328 		 * This is the most likely case, we receive 802.3 pkts
2329 		 * decapsulated by HW, here we need to set the pkt length.
2330 		 */
2331 		hal_rx_msdu_metadata_get(hal_soc, rx_tlv_hdr, &msdu_metadata);
2332 		if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
2333 			bool is_mcbc, is_sa_vld, is_da_vld;
2334 
2335 			is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
2336 								 rx_tlv_hdr);
2337 			is_sa_vld =
2338 				hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
2339 								rx_tlv_hdr);
2340 			is_da_vld =
2341 				hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
2342 								rx_tlv_hdr);
2343 
2344 			qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
2345 			qdf_nbuf_set_da_valid(nbuf, is_da_vld);
2346 			qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);
2347 
2348 			qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
2349 		} else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
2350 			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
2351 			nbuf = dp_rx_sg_create(nbuf);
2352 			next = nbuf->next;
2353 
2354 			if (qdf_nbuf_is_raw_frame(nbuf)) {
2355 				DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
2356 				DP_STATS_INC_PKT(peer, rx.raw, 1, msdu_len);
2357 			} else {
2358 				qdf_nbuf_free(nbuf);
2359 				DP_STATS_INC(soc, rx.err.scatter_msdu, 1);
2360 				dp_info_rl("scatter msdu len %d, dropped",
2361 					   msdu_len);
2362 				nbuf = next;
2363 				dp_peer_unref_del_find_by_id(peer);
2364 				continue;
2365 			}
2366 		} else {
2367 
2368 			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
2369 			pkt_len = msdu_len +
2370 				  msdu_metadata.l3_hdr_pad +
2371 				  RX_PKT_TLVS_LEN;
2372 
2373 			qdf_nbuf_set_pktlen(nbuf, pkt_len);
2374 			dp_rx_skip_tlvs(nbuf, msdu_metadata.l3_hdr_pad);
2375 		}
2376 
2377 		/*
2378 		 * process frame for multipass (multi-passphrase) processing
2379 		 */
2380 		if (qdf_unlikely(vdev->multipass_en)) {
2381 			if (dp_rx_multipass_process(peer, nbuf, tid) == false) {
2382 				DP_STATS_INC(peer, rx.multipass_rx_pkt_drop, 1);
2383 				qdf_nbuf_free(nbuf);
2384 				nbuf = next;
2385 				dp_peer_unref_del_find_by_id(peer);
2386 				continue;
2387 			}
2388 		}
2389 
2390 		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
2391 			QDF_TRACE(QDF_MODULE_ID_DP,
2392 					QDF_TRACE_LEVEL_ERROR,
2393 					FL("Policy Check Drop pkt"));
2394 			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
2395 			/* Drop & free packet */
2396 			qdf_nbuf_free(nbuf);
2397 			/* Statistics */
2398 			nbuf = next;
2399 			dp_peer_unref_del_find_by_id(peer);
2400 			continue;
2401 		}
2402 
2403 		if (qdf_unlikely(peer && (peer->nawds_enabled) &&
2404 				 (qdf_nbuf_is_da_mcbc(nbuf)) &&
2405 				 (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc,
2406 								rx_tlv_hdr) ==
2407 				  false))) {
2408 			tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
2409 			DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
2410 			qdf_nbuf_free(nbuf);
2411 			nbuf = next;
2412 			dp_peer_unref_del_find_by_id(peer);
2413 			continue;
2414 		}
2415 
2416 		if (soc->process_rx_status)
2417 			dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);
2418 
2419 		/* Update the protocol tag in SKB based on CCE metadata */
2420 		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
2421 					  reo_ring_num, false, true);
2422 
2423 		/* Update the flow tag in SKB based on FSE metadata */
2424 		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
2425 
2426 		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer,
2427 					ring_id, tid_stats);
2428 
2429 		if (qdf_unlikely(vdev->mesh_vdev)) {
2430 			if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
2431 					== QDF_STATUS_SUCCESS) {
2432 				QDF_TRACE(QDF_MODULE_ID_DP,
2433 						QDF_TRACE_LEVEL_INFO_MED,
2434 						FL("mesh pkt filtered"));
2435 				tid_stats->fail_cnt[MESH_FILTER_DROP]++;
2436 				DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
2437 					     1);
2438 
2439 				qdf_nbuf_free(nbuf);
2440 				nbuf = next;
2441 				dp_peer_unref_del_find_by_id(peer);
2442 				continue;
2443 			}
2444 			dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
2445 		}
2446 
2447 		if (qdf_likely(vdev->rx_decap_type ==
2448 			       htt_cmn_pkt_type_ethernet) &&
2449 		    qdf_likely(!vdev->mesh_vdev)) {
2450 			/* WDS Destination Address Learning */
2451 			dp_rx_da_learn(soc, rx_tlv_hdr, peer, nbuf);
2452 
2453 			/* Due to a HW issue, sometimes the sa_idx and
2454 			 * da_idx are invalid even though the sa_valid and
2455 			 * da_valid bits are set.
2456 			 *
2457 			 * In this case the value of sa_sw_peer_id is also
2458 			 * reported as 0.
2459 			 *
2460 			 * Drop the packet if sa_idx or da_idx is OOB or
2461 			 * sa_sw_peer_id is 0.
2462 			 */
2463 			if (!is_sa_da_idx_valid(soc, rx_tlv_hdr, nbuf,
2464 						msdu_metadata)) {
2465 				qdf_nbuf_free(nbuf);
2466 				nbuf = next;
2467 				DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
2468 				dp_peer_unref_del_find_by_id(peer);
2469 				continue;
2470 			}
2471 			/* WDS Source Port Learning */
2472 			if (qdf_likely(vdev->wds_enabled))
2473 				dp_rx_wds_srcport_learn(soc,
2474 							rx_tlv_hdr,
2475 							peer,
2476 							nbuf,
2477 							msdu_metadata);
2478 
2479 			/* Intrabss-fwd */
2480 			if (dp_rx_check_ap_bridge(vdev))
2481 				if (dp_rx_intrabss_fwd(soc,
2482 							peer,
2483 							rx_tlv_hdr,
2484 							nbuf,
2485 							msdu_metadata)) {
2486 					nbuf = next;
2487 					dp_peer_unref_del_find_by_id(peer);
2488 					tid_stats->intrabss_cnt++;
2489 					continue; /* Get next desc */
2490 				}
2491 		}
2492 
2493 		dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt);
2494 
2495 		DP_RX_LIST_APPEND(deliver_list_head,
2496 				  deliver_list_tail,
2497 				  nbuf);
2498 		DP_STATS_INC_PKT(peer, rx.to_stack, 1,
2499 				 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
2500 
2501 		tid_stats->delivered_to_stack++;
2502 		nbuf = next;
2503 		dp_peer_unref_del_find_by_id(peer);
2504 	}
2505 
2506 	if (qdf_likely(deliver_list_head)) {
2507 		if (qdf_likely(peer))
2508 			dp_rx_deliver_to_stack(soc, vdev, peer,
2509 					       deliver_list_head,
2510 					       deliver_list_tail);
2511 		else {
2512 			nbuf = deliver_list_head;
2513 			while (nbuf) {
2514 				next = nbuf->next;
2515 				nbuf->next = NULL;
2516 				dp_rx_deliver_to_stack_no_peer(soc, nbuf);
2517 				nbuf = next;
2518 			}
2519 		}
2520 	}
2521 
2522 	if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
2523 		if (quota) {
2524 			num_pending =
2525 				dp_rx_srng_get_num_pending(hal_soc,
2526 							   hal_ring_hdl,
2527 							   num_entries,
2528 							   &near_full);
2529 			if (num_pending) {
2530 				DP_STATS_INC(soc, rx.hp_oos2, 1);
2531 
2532 				if (!hif_exec_should_yield(scn, intr_id))
2533 					goto more_data;
2534 
2535 				if (qdf_unlikely(near_full)) {
2536 					DP_STATS_INC(soc, rx.near_full, 1);
2537 					goto more_data;
2538 				}
2539 			}
2540 		}
2541 
2542 		if (vdev && vdev->osif_fisa_flush)
2543 			vdev->osif_fisa_flush(soc, reo_ring_num);
2544 
2545 		if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
2546 			vdev->osif_gro_flush(vdev->osif_vdev,
2547 					     reo_ring_num);
2548 		}
2549 	}
2550 
2551 	/* Update histogram statistics by looping through pdev's */
2552 	DP_RX_HIST_STATS_PER_PDEV();
2553 
2554 	return rx_bufs_used; /* Assume no scale factor for now */
2555 }
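/*
 * Call-flow note (a sketch, not the exact caller): the interrupt service
 * path dispatches each REO destination ring to dp_rx_process() with the
 * remaining napi/budget quota, roughly as
 *
 *	work_done = dp_rx_process(int_ctx,
 *				  soc->reo_dest_ring[ring].hal_srng,
 *				  ring, remaining_quota);
 *
 * The names outside this file are illustrative only.
 */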
2556 
2557 QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev)
2558 {
2559 	QDF_STATUS ret;
2560 
2561 	if (vdev->osif_rx_flush) {
2562 		ret = vdev->osif_rx_flush(vdev->osif_vdev, vdev->vdev_id);
2563 		if (!QDF_IS_STATUS_SUCCESS(ret)) {
2564 			dp_err("Failed to flush rx pkts for vdev %d\n",
2565 			       vdev->vdev_id);
2566 			return ret;
2567 		}
2568 	}
2569 
2570 	return QDF_STATUS_SUCCESS;
2571 }
2572 
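/**
 * dp_pdev_nbuf_alloc_and_map() - allocate an nbuf and map it for RX DMA
 * @dp_soc: core txrx main context
 * @nbuf: out parameter to hold the allocated nbuf
 * @dp_pdev: core txrx pdev context, used for replenish error stats
 * @rx_desc_pool: rx descriptor pool providing buffer size and alignment
 *
 * Return: QDF_STATUS_SUCCESS on success, an error status if the
 * allocation, DMA map or x86 address check fails
 */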
2573 static QDF_STATUS
2574 dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc, qdf_nbuf_t *nbuf,
2575 			   struct dp_pdev *dp_pdev,
2576 			   struct rx_desc_pool *rx_desc_pool)
2577 {
2578 	qdf_dma_addr_t paddr;
2579 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
2580 
2581 	*nbuf = qdf_nbuf_alloc(dp_soc->osdev, rx_desc_pool->buf_size,
2582 			       RX_BUFFER_RESERVATION,
2583 			       rx_desc_pool->buf_alignment, FALSE);
2584 	if (!(*nbuf)) {
2585 		dp_err("nbuf alloc failed");
2586 		DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
2587 		return ret;
2588 	}
2589 
2590 	ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev, *nbuf,
2591 					 QDF_DMA_FROM_DEVICE,
2592 					 rx_desc_pool->buf_size);
2593 
2594 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
2595 		qdf_nbuf_free(*nbuf);
2596 		dp_err("nbuf map failed");
2597 		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
2598 		return ret;
2599 	}
2600 
2601 	paddr = qdf_nbuf_get_frag_paddr(*nbuf, 0);
2602 
2603 	ret = check_x86_paddr(dp_soc, nbuf, &paddr, rx_desc_pool);
2604 	if (ret == QDF_STATUS_E_FAILURE) {
2605 		qdf_nbuf_unmap_nbytes_single(dp_soc->osdev, *nbuf,
2606 					     QDF_DMA_FROM_DEVICE,
2607 					     rx_desc_pool->buf_size);
2608 		qdf_nbuf_free(*nbuf);
2609 		dp_err("nbuf check x86 failed");
2610 		DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
2611 		return ret;
2612 	}
2613 
2614 	return QDF_STATUS_SUCCESS;
2615 }
2616 
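/**
 * dp_pdev_rx_buffers_attach() - allocate and map RX buffers and program
 *				 them into the RXDMA refill ring at attach
 * @dp_soc: core txrx main context
 * @mac_id: mac (lmac) id owning the refill ring
 * @dp_rxdma_srng: RXDMA refill srng to be populated
 * @rx_desc_pool: rx descriptor pool backing the buffers
 * @num_req_buffers: number of buffers requested
 *
 * Return: QDF_STATUS_SUCCESS if buffers were programmed, otherwise
 * QDF_STATUS_E_FAILURE, QDF_STATUS_E_NOMEM or QDF_STATUS_E_RESOURCES
 */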
2617 QDF_STATUS
2618 dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
2619 			  struct dp_srng *dp_rxdma_srng,
2620 			  struct rx_desc_pool *rx_desc_pool,
2621 			  uint32_t num_req_buffers)
2622 {
2623 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
2624 	hal_ring_handle_t rxdma_srng = dp_rxdma_srng->hal_srng;
2625 	union dp_rx_desc_list_elem_t *next;
2626 	void *rxdma_ring_entry;
2627 	qdf_dma_addr_t paddr;
2628 	qdf_nbuf_t *rx_nbuf_arr;
2629 	uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0;
2630 	uint32_t buffer_index, nbuf_ptrs_per_page;
2631 	qdf_nbuf_t nbuf;
2632 	QDF_STATUS ret;
2633 	int page_idx, total_pages;
2634 	union dp_rx_desc_list_elem_t *desc_list = NULL;
2635 	union dp_rx_desc_list_elem_t *tail = NULL;
2636 	int sync_hw_ptr = 1;
2637 	uint32_t num_entries_avail;
2638 
2639 	if (qdf_unlikely(!rxdma_srng)) {
2640 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
2641 		return QDF_STATUS_E_FAILURE;
2642 	}
2643 
2644 	dp_debug("requested %u RX buffers for driver attach", num_req_buffers);
2645 
2646 	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
2647 	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
2648 						   rxdma_srng,
2649 						   sync_hw_ptr);
2650 	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
2651 
2652 	if (!num_entries_avail) {
2653 		dp_err("Num of available entries is zero, nothing to do");
2654 		return QDF_STATUS_E_NOMEM;
2655 	}
2656 
2657 	if (num_entries_avail < num_req_buffers)
2658 		num_req_buffers = num_entries_avail;
2659 
2660 	nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool,
2661 					    num_req_buffers, &desc_list, &tail);
2662 	if (!nr_descs) {
2663 		dp_err("no free rx_descs in freelist");
2664 		DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
2665 		return QDF_STATUS_E_NOMEM;
2666 	}
2667 
2668 	dp_debug("got %u RX descs for driver attach", nr_descs);
2669 
2670 	/*
2671 	 * Allocate the nbuf pointer array one page at a time.
2672 	 * Each iteration handles as many nbuf pointers as fit in
2673 	 * one page of memory, walking through the total number of
2674 	 * descriptors that need to be allocated page by page. The
2675 	 * same page-sized pointer array is reused across
2676 	 * iterations to index into the nbufs.
2677 	 */
2678 	total_pages = (nr_descs * sizeof(*rx_nbuf_arr)) / PAGE_SIZE;
2679 
2680 	/*
2681 	 * Add an extra page to store the remainder if any
2682 	 */
2683 	if ((nr_descs * sizeof(*rx_nbuf_arr)) % PAGE_SIZE)
2684 		total_pages++;
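	/* e.g. with a 4K page and 8 byte nbuf pointers (typical on 64-bit),
	 * one page holds 512 pointers, so 1200 descriptors would need
	 * total_pages = 2 + 1 = 3 (hypothetical numbers for illustration).
	 */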
2685 	rx_nbuf_arr = qdf_mem_malloc(PAGE_SIZE);
2686 	if (!rx_nbuf_arr) {
2687 		dp_err("failed to allocate nbuf array");
2688 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
2689 		QDF_BUG(0);
2690 		return QDF_STATUS_E_NOMEM;
2691 	}
2692 	nbuf_ptrs_per_page = PAGE_SIZE / sizeof(*rx_nbuf_arr);
2693 
2694 	for (page_idx = 0; page_idx < total_pages; page_idx++) {
2695 		qdf_mem_zero(rx_nbuf_arr, PAGE_SIZE);
2696 
2697 		for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) {
2698 			/*
2699 			 * The last page of buffer pointers may not be fully
2700 			 * used, depending on the number of descriptors. The
2701 			 * check below ensures that only the required number
2702 			 * of buffers is allocated.
2703 			 */
2704 			if (nr_nbuf_total >= nr_descs)
2705 				break;
2706 			ret = dp_pdev_nbuf_alloc_and_map(dp_soc,
2707 							 &rx_nbuf_arr[nr_nbuf],
2708 							 dp_pdev, rx_desc_pool);
2709 			if (QDF_IS_STATUS_ERROR(ret))
2710 				break;
2711 
2712 			nr_nbuf_total++;
2713 		}
2714 
2715 		hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
2716 
2717 		for (buffer_index = 0; buffer_index < nr_nbuf; buffer_index++) {
2718 			rxdma_ring_entry =
2719 				hal_srng_src_get_next(dp_soc->hal_soc,
2720 						      rxdma_srng);
2721 			qdf_assert_always(rxdma_ring_entry);
2722 
2723 			next = desc_list->next;
2724 			nbuf = rx_nbuf_arr[buffer_index];
2725 			paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
2726 
2727 			dp_rx_desc_prep(&desc_list->rx_desc, nbuf);
2728 			desc_list->rx_desc.in_use = 1;
2729 			dp_rx_desc_alloc_dbg_info(&desc_list->rx_desc);
2730 			dp_rx_desc_update_dbg_info(&desc_list->rx_desc,
2731 						   __func__,
2732 						   RX_DESC_REPLENISHED);
2733 
2734 			hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
2735 						     desc_list->rx_desc.cookie,
2736 						     rx_desc_pool->owner);
2737 
2738 			dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, nbuf, true);
2739 
2740 			desc_list = next;
2741 		}
2742 
2743 		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
2744 	}
2745 
2746 	dp_info("filled %u RX buffers for driver attach", nr_nbuf_total);
2747 	qdf_mem_free(rx_nbuf_arr);
2748 
2749 	if (!nr_nbuf_total) {
2750 		dp_err("No nbuf's allocated");
2751 		QDF_BUG(0);
2752 		return QDF_STATUS_E_RESOURCES;
2753 	}
2754 
2755 	/* No need to count the number of bytes received during replenish.
2756 	 * Therefore set replenish.pkts.bytes as 0.
2757 	 */
2758 	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf, 0);
2759 
2760 	return QDF_STATUS_SUCCESS;
2761 }
2762 
2763 /*
2764  * dp_rx_pdev_desc_pool_alloc() -  allocate memory for software rx descriptor
2765  *				   pool
2766  *
2767  * @pdev: core txrx pdev context
2768  *
2769  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
2770  *			QDF_STATUS_E_NOMEM
2771  */
2772 QDF_STATUS
2773 dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev)
2774 {
2775 	struct dp_soc *soc = pdev->soc;
2776 	uint32_t rxdma_entries;
2777 	uint32_t rx_sw_desc_weight;
2778 	struct dp_srng *dp_rxdma_srng;
2779 	struct rx_desc_pool *rx_desc_pool;
2780 	uint32_t status = QDF_STATUS_SUCCESS;
2781 	int mac_for_pdev;
2782 
2783 	mac_for_pdev = pdev->lmac_id;
2784 	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
2785 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2786 			  "nss-wifi<4> skip Rx refill %d", mac_for_pdev);
2787 		return status;
2788 	}
2789 
2790 	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
2791 	rxdma_entries = dp_rxdma_srng->num_entries;
2792 
2793 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
2794 	rx_sw_desc_weight = wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc->wlan_cfg_ctx);
2795 
2796 	status = dp_rx_desc_pool_alloc(soc,
2797 				       rx_sw_desc_weight * rxdma_entries,
2798 				       rx_desc_pool);
2799 	if (status != QDF_STATUS_SUCCESS)
2800 		return status;
2801 
2802 	return status;
2803 }
2804 
2805 /*
2806  * dp_rx_pdev_desc_pool_free() - free software rx descriptor pool
2807  *
2808  * @pdev: core txrx pdev context
2809  */
2810 void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev)
2811 {
2812 	int mac_for_pdev = pdev->lmac_id;
2813 	struct dp_soc *soc = pdev->soc;
2814 	struct rx_desc_pool *rx_desc_pool;
2815 
2816 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
2817 
2818 	dp_rx_desc_pool_free(soc, rx_desc_pool);
2819 }
2820 
2821 /*
2822  * dp_rx_pdev_desc_pool_init() - initialize software rx descriptors
2823  *
2824  * @pdev: core txrx pdev context
2825  *
2826  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
2827  *			QDF_STATUS_E_NOMEM
2828  */
2829 QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev)
2830 {
2831 	int mac_for_pdev = pdev->lmac_id;
2832 	struct dp_soc *soc = pdev->soc;
2833 	uint32_t rxdma_entries;
2834 	uint32_t rx_sw_desc_weight;
2835 	struct dp_srng *dp_rxdma_srng;
2836 	struct rx_desc_pool *rx_desc_pool;
2837 
2838 	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
2839 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2840 			  "nss-wifi<4> skip Rx refill %d", mac_for_pdev);
2841 		return QDF_STATUS_SUCCESS;
2842 	}
2843 
2844 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
2845 	if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_E_NOMEM)
2846 		return QDF_STATUS_E_NOMEM;
2847 
2848 	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
2849 	rxdma_entries = dp_rxdma_srng->num_entries;
2850 
2851 	soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;
2852 
2853 	rx_sw_desc_weight =
2854 	wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc->wlan_cfg_ctx);
2855 
2856 	rx_desc_pool->owner = DP_WBM2SW_RBM;
2857 	rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE;
2858 	rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;
2859 
2860 	dp_rx_desc_pool_init(soc, mac_for_pdev,
2861 			     rx_sw_desc_weight * rxdma_entries,
2862 			     rx_desc_pool);
2863 	return QDF_STATUS_SUCCESS;
2864 }
2865 
2866 /*
2867  * dp_rx_pdev_desc_pool_deinit() - de-initialize software rx descriptor pools
2868  * @pdev: core txrx pdev context
2869  *
2870  * This function resets the freelist of rx descriptors and destroys locks
2871  * associated with this list of descriptors.
2872  */
2873 void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev)
2874 {
2875 	int mac_for_pdev = pdev->lmac_id;
2876 	struct dp_soc *soc = pdev->soc;
2877 	struct rx_desc_pool *rx_desc_pool;
2878 
2879 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
2880 
2881 	dp_rx_desc_pool_deinit(soc, rx_desc_pool);
2882 }
2883 
2884 /*
2885  * dp_rx_pdev_buffers_alloc() - Allocate nbufs (skbs) and replenish RxDMA ring
2886  *
2887  * @pdev: core txrx pdev context
2888  *
2889  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
2890  *			QDF_STATUS_E_NOMEM
2891  */
2892 QDF_STATUS
2893 dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev)
2894 {
2895 	int mac_for_pdev = pdev->lmac_id;
2896 	struct dp_soc *soc = pdev->soc;
2897 	struct dp_srng *dp_rxdma_srng;
2898 	struct rx_desc_pool *rx_desc_pool;
2899 	uint32_t rxdma_entries;
2900 
2901 	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
2902 	rxdma_entries = dp_rxdma_srng->num_entries;
2903 
2904 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
2905 
2906 	return dp_pdev_rx_buffers_attach(soc, mac_for_pdev, dp_rxdma_srng,
2907 					 rx_desc_pool, rxdma_entries - 1);
2908 }
2909 
2910 /*
2911  * dp_rx_pdev_buffers_free() - free nbufs (skbs)
2912  *
2913  * @pdev: core txrx pdev context
2914  */
2915 void
2916 dp_rx_pdev_buffers_free(struct dp_pdev *pdev)
2917 {
2918 	int mac_for_pdev = pdev->lmac_id;
2919 	struct dp_soc *soc = pdev->soc;
2920 	struct rx_desc_pool *rx_desc_pool;
2921 
2922 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
2923 
2924 	dp_rx_desc_nbuf_free(soc, rx_desc_pool);
2925 }
2926 
2927 /*
2928  * dp_rx_nbuf_prepare() - prepare RX nbuf
2929  * @soc: core txrx main context
2930  * @pdev: core txrx pdev context
2931  *
2932  * This function allocates and maps an nbuf for RX DMA usage, retrying on
2933  * failure until the retry count reaches the max threshold or it succeeds.
2934  *
2935  * Return: qdf_nbuf_t pointer if succeeded, NULL if failed.
2936  */
2937 qdf_nbuf_t
2938 dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev)
2939 {
2940 	uint8_t *buf;
2941 	int32_t nbuf_retry_count;
2942 	QDF_STATUS ret;
2943 	qdf_nbuf_t nbuf = NULL;
2944 
2945 	for (nbuf_retry_count = 0; nbuf_retry_count <
2946 		QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD;
2947 			nbuf_retry_count++) {
2948 		/* Allocate a new skb */
2949 		nbuf = qdf_nbuf_alloc(soc->osdev,
2950 					RX_DATA_BUFFER_SIZE,
2951 					RX_BUFFER_RESERVATION,
2952 					RX_DATA_BUFFER_ALIGNMENT,
2953 					FALSE);
2954 
2955 		if (!nbuf) {
2956 			DP_STATS_INC(pdev,
2957 				replenish.nbuf_alloc_fail, 1);
2958 			continue;
2959 		}
2960 
2961 		buf = qdf_nbuf_data(nbuf);
2962 
2963 		memset(buf, 0, RX_DATA_BUFFER_SIZE);
2964 
2965 		ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
2966 						 QDF_DMA_FROM_DEVICE,
2967 						 RX_DATA_BUFFER_SIZE);
2968 
2969 		/* nbuf map failed */
2970 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
2971 			qdf_nbuf_free(nbuf);
2972 			DP_STATS_INC(pdev, replenish.map_err, 1);
2973 			continue;
2974 		}
2975 		/* qdf_nbuf alloc and map succeeded */
2976 		break;
2977 	}
2978 
2979 	/* nbuf alloc or map failed even after all retries */
2980 	if (qdf_unlikely(nbuf_retry_count >=
2981 			QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD))
2982 		return NULL;
2983 
2984 	return nbuf;
2985 }
2986 
2987 #ifdef DP_RX_SPECIAL_FRAME_NEED
2988 bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer,
2989 				 qdf_nbuf_t nbuf, uint32_t frame_mask,
2990 				 uint8_t *rx_tlv_hdr)
2991 {
2992 	uint32_t l2_hdr_offset = 0;
2993 	uint16_t msdu_len = 0;
2994 	uint32_t skip_len;
2995 
2996 	l2_hdr_offset =
2997 		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
2998 
2999 	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
3000 		skip_len = l2_hdr_offset;
3001 	} else {
3002 		msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
3003 		skip_len = l2_hdr_offset + RX_PKT_TLVS_LEN;
3004 		qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);
3005 	}
3006 
3007 	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
3008 	dp_rx_set_hdr_pad(nbuf, l2_hdr_offset);
3009 	qdf_nbuf_pull_head(nbuf, skip_len);
3010 
3011 	if (dp_rx_is_special_frame(nbuf, frame_mask)) {
3012 		qdf_nbuf_set_exc_frame(nbuf, 1);
3013 		dp_rx_deliver_to_stack(soc, peer->vdev, peer,
3014 				       nbuf, NULL);
3015 		return true;
3016 	}
3017 
3018 	return false;
3019 }
3020 #endif
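/*
 * Usage sketch for dp_rx_deliver_special_frame() (illustrative caller on an
 * error/no-peer path; the frame mask bits are the FRAME_MASK_* values used
 * elsewhere in this file):
 *
 *	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_EAPOL;
 *
 *	if (!dp_rx_deliver_special_frame(soc, peer, nbuf, frame_mask,
 *					 rx_tlv_hdr))
 *		qdf_nbuf_free(nbuf);
 */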
3021