xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.c (revision f28396d060cff5c6519f883cb28ae0116ce479f1)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "hal_hw_headers.h"
20 #include "dp_types.h"
21 #include "dp_rx.h"
22 #include "dp_peer.h"
23 #include "hal_rx.h"
24 #include "hal_api.h"
25 #include "qdf_nbuf.h"
26 #ifdef MESH_MODE_SUPPORT
27 #include "if_meta_hdr.h"
28 #endif
29 #include "dp_internal.h"
30 #include "dp_rx_mon.h"
31 #include "dp_ipa.h"
32 #ifdef FEATURE_WDS
33 #include "dp_txrx_wds.h"
34 #endif
35 
36 #ifdef ATH_RX_PRI_SAVE
37 #define DP_RX_TID_SAVE(_nbuf, _tid) \
38 	(qdf_nbuf_set_priority(_nbuf, _tid))
39 #else
40 #define DP_RX_TID_SAVE(_nbuf, _tid)
41 #endif
42 
43 #ifdef DP_RX_DISABLE_NDI_MDNS_FORWARDING
44 static inline
45 bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
46 {
47 	if (ta_peer->vdev->opmode == wlan_op_mode_ndi &&
48 	    qdf_nbuf_is_ipv6_mdns_pkt(nbuf)) {
49 		DP_STATS_INC(ta_peer, rx.intra_bss.mdns_no_fwd, 1);
50 		return false;
51 	}
52 		return true;
53 }
54 #else
55 static inline
56 bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
57 {
58 	return true;
59 }
60 #endif
61 static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
62 {
63 	return vdev->ap_bridge_enabled;
64 }
65 
66 #ifdef DUP_RX_DESC_WAR
67 void dp_rx_dump_info_and_assert(struct dp_soc *soc,
68 				hal_ring_handle_t hal_ring,
69 				hal_ring_desc_t ring_desc,
70 				struct dp_rx_desc *rx_desc)
71 {
72 	void *hal_soc = soc->hal_soc;
73 
74 	hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc);
75 	dp_rx_desc_dump(rx_desc);
76 }
77 #else
78 void dp_rx_dump_info_and_assert(struct dp_soc *soc,
79 				hal_ring_handle_t hal_ring_hdl,
80 				hal_ring_desc_t ring_desc,
81 				struct dp_rx_desc *rx_desc)
82 {
83 	hal_soc_handle_t hal_soc = soc->hal_soc;
84 
85 	dp_rx_desc_dump(rx_desc);
86 	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
87 	hal_srng_dump_ring(hal_soc, hal_ring_hdl);
88 	qdf_assert_always(0);
89 }
90 #endif
91 
92 /*
93  * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
94  *			       called during dp rx initialization
95  *			       and at the end of dp_rx_process.
96  *
97  * @soc: core txrx main context
98  * @mac_id: mac_id which is one of 3 mac_ids
99  * @dp_rxdma_srng: dp rxdma circular ring
100  * @rx_desc_pool: Pointer to free Rx descriptor pool
101  * @num_req_buffers: number of buffer to be replenished
102  * @desc_list: list of descs if called from dp_rx_process
103  *	       or NULL during dp rx initialization or out of buffer
104  *	       interrupt.
105  * @tail: tail of descs list
106  * Return: return success or failure
107  * Return: QDF_STATUS_SUCCESS on success; error status on failure
108 QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
109 				struct dp_srng *dp_rxdma_srng,
110 				struct rx_desc_pool *rx_desc_pool,
111 				uint32_t num_req_buffers,
112 				union dp_rx_desc_list_elem_t **desc_list,
113 				union dp_rx_desc_list_elem_t **tail)
114 {
115 	uint32_t num_alloc_desc;
116 	uint16_t num_desc_to_free = 0;
117 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
118 	uint32_t num_entries_avail;
119 	uint32_t count;
120 	int sync_hw_ptr = 1;
121 	qdf_dma_addr_t paddr;
122 	qdf_nbuf_t rx_netbuf;
123 	void *rxdma_ring_entry;
124 	union dp_rx_desc_list_elem_t *next;
125 	QDF_STATUS ret;
126 	uint16_t buf_size = rx_desc_pool->buf_size;
127 	uint8_t buf_alignment = rx_desc_pool->buf_alignment;
128 
129 	void *rxdma_srng;
130 
131 	rxdma_srng = dp_rxdma_srng->hal_srng;
132 
133 	if (!rxdma_srng) {
134 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
135 				  "rxdma srng not initialized");
136 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
137 		return QDF_STATUS_E_FAILURE;
138 	}
139 
140 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
141 		"requested %d buffers for replenish", num_req_buffers);
142 
143 	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
144 	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
145 						   rxdma_srng,
146 						   sync_hw_ptr);
147 
148 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
149 		"no of available entries in rxdma ring: %d",
150 		num_entries_avail);
151 
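	/*
	 * Note: when the caller did not pass a descriptor list and more than
	 * 3/4 of the ring entries are empty, replenish the full available
	 * count; otherwise cap the request to the available entries and mark
	 * the remainder to be returned to the free list.
	 */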
152 	if (!(*desc_list) && (num_entries_avail >
153 		((dp_rxdma_srng->num_entries * 3) / 4))) {
154 		num_req_buffers = num_entries_avail;
155 	} else if (num_entries_avail < num_req_buffers) {
156 		num_desc_to_free = num_req_buffers - num_entries_avail;
157 		num_req_buffers = num_entries_avail;
158 	}
159 
160 	if (qdf_unlikely(!num_req_buffers)) {
161 		num_desc_to_free = num_req_buffers;
162 		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
163 		goto free_descs;
164 	}
165 
166 	/*
167 	 * if desc_list is NULL, allocate the descs from freelist
168 	 */
169 	if (!(*desc_list)) {
170 		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
171 							  rx_desc_pool,
172 							  num_req_buffers,
173 							  desc_list,
174 							  tail);
175 
176 		if (!num_alloc_desc) {
177 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
178 				"no free rx_descs in freelist");
179 			DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
180 					num_req_buffers);
181 			hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
182 			return QDF_STATUS_E_NOMEM;
183 		}
184 
185 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
186 			"%d rx desc allocated", num_alloc_desc);
187 		num_req_buffers = num_alloc_desc;
188 	}
189 
190 
191 	count = 0;
192 
193 	while (count < num_req_buffers) {
194 		rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
195 					buf_size,
196 					RX_BUFFER_RESERVATION,
197 					buf_alignment,
198 					FALSE);
199 
200 		if (qdf_unlikely(!rx_netbuf)) {
201 			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
202 			break;
203 		}
204 
205 		ret = qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf,
206 					  QDF_DMA_FROM_DEVICE);
207 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
208 			qdf_nbuf_free(rx_netbuf);
209 			DP_STATS_INC(dp_pdev, replenish.map_err, 1);
210 			continue;
211 		}
212 
213 		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);
214 
215 		dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, rx_netbuf, true);
216 		/*
217 		 * Check if the physical address of nbuf->data is
218 		 * less than 0x50000000; if so, free the nbuf and try
219 		 * allocating a new nbuf. We can try up to 100 times.
220 		 * This is a temporary WAR till we fix it properly.
221 		 */
222 		ret = check_x86_paddr(dp_soc, &rx_netbuf, &paddr, rx_desc_pool);
223 		if (ret == QDF_STATUS_E_FAILURE) {
224 			DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
225 			break;
226 		}
227 
228 		count++;
229 
230 		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
231 							 rxdma_srng);
232 		qdf_assert_always(rxdma_ring_entry);
233 
234 		next = (*desc_list)->next;
235 
236 		dp_rx_desc_prep(&((*desc_list)->rx_desc), rx_netbuf);
237 
238 		/* rx_desc.in_use should be zero at this time */
239 		qdf_assert_always((*desc_list)->rx_desc.in_use == 0);
240 
241 		(*desc_list)->rx_desc.in_use = 1;
242 
243 		dp_verbose_debug("rx_netbuf=%pK, buf=%pK, paddr=0x%llx, cookie=%d",
244 				 rx_netbuf, qdf_nbuf_data(rx_netbuf),
245 				 (unsigned long long)paddr,
246 				 (*desc_list)->rx_desc.cookie);
247 
248 		hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
249 						(*desc_list)->rx_desc.cookie,
250 						rx_desc_pool->owner);
251 
252 		*desc_list = next;
253 
254 	}
255 
256 	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
257 
258 	dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u",
259 			 count, num_desc_to_free);
260 
261 	/* No need to count the number of bytes received during replenish.
262 	 * Therefore set replenish.pkts.bytes as 0.
263 	 */
264 	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
265 
266 free_descs:
267 	DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
268 	/*
269 	 * add any available free desc back to the free list
270 	 */
271 	if (*desc_list)
272 		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
273 			mac_id, rx_desc_pool);
274 
275 	return QDF_STATUS_SUCCESS;
276 }
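
/*
 * Illustrative usage (sketch only, not driver code): a reap path such as
 * dp_rx_process() typically hands back the descriptors it just collected
 * per pdev, e.g.
 *
 *	dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, rx_desc_pool,
 *				rx_bufs_reaped[mac_id], &head[mac_id],
 *				&tail[mac_id]);
 *
 * whereas the init and out-of-buffer-interrupt paths pass a NULL *desc_list,
 * so the descriptors are taken from the pool freelist instead.
 */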
277 
278 /*
279  * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
280  *				pkts to RAW mode simulation to
281  *				decapsulate the pkt.
282  *
283  * @vdev: vdev on which RAW mode is enabled
284  * @nbuf_list: list of RAW pkts to process
285  * @peer: peer object from which the pkt is rx
286  *
287  * Return: void
288  */
289 void
290 dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
291 					struct dp_peer *peer)
292 {
293 	qdf_nbuf_t deliver_list_head = NULL;
294 	qdf_nbuf_t deliver_list_tail = NULL;
295 	qdf_nbuf_t nbuf;
296 
297 	nbuf = nbuf_list;
298 	while (nbuf) {
299 		qdf_nbuf_t next = qdf_nbuf_next(nbuf);
300 
301 		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
302 
303 		DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
304 		DP_STATS_INC_PKT(peer, rx.raw, 1, qdf_nbuf_len(nbuf));
305 		/*
306 		 * reset the chfrag_start and chfrag_end bits in nbuf cb
307 		 * as this is a non-amsdu pkt and RAW mode simulation expects
308 		 * these bits to be 0 for a non-amsdu pkt.
309 		 */
310 		if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
311 			 qdf_nbuf_is_rx_chfrag_end(nbuf)) {
312 			qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
313 			qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
314 		}
315 
316 		nbuf = next;
317 	}
318 
319 	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
320 				 &deliver_list_tail, peer->mac_addr.raw);
321 
322 	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
323 }
324 
325 
326 #ifdef DP_LFR
327 /*
328  * In case of LFR, data of a new peer might be sent up
329  * even before peer is added.
330  */
331 static inline struct dp_vdev *
332 dp_get_vdev_from_peer(struct dp_soc *soc,
333 			uint16_t peer_id,
334 			struct dp_peer *peer,
335 			struct hal_rx_mpdu_desc_info mpdu_desc_info)
336 {
337 	struct dp_vdev *vdev;
338 	uint8_t vdev_id;
339 
340 	if (unlikely(!peer)) {
341 		if (peer_id != HTT_INVALID_PEER) {
342 			vdev_id = DP_PEER_METADATA_VDEV_ID_GET(
343 					mpdu_desc_info.peer_meta_data);
344 			QDF_TRACE(QDF_MODULE_ID_DP,
345 				QDF_TRACE_LEVEL_DEBUG,
346 				FL("PeerID %d not found use vdevID %d"),
347 				peer_id, vdev_id);
348 			vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
349 								  vdev_id);
350 		} else {
351 			QDF_TRACE(QDF_MODULE_ID_DP,
352 				QDF_TRACE_LEVEL_DEBUG,
353 				FL("Invalid PeerID %d"),
354 				peer_id);
355 			return NULL;
356 		}
357 	} else {
358 		vdev = peer->vdev;
359 	}
360 	return vdev;
361 }
362 #else
363 static inline struct dp_vdev *
364 dp_get_vdev_from_peer(struct dp_soc *soc,
365 			uint16_t peer_id,
366 			struct dp_peer *peer,
367 			struct hal_rx_mpdu_desc_info mpdu_desc_info)
368 {
369 	if (unlikely(!peer)) {
370 		QDF_TRACE(QDF_MODULE_ID_DP,
371 			QDF_TRACE_LEVEL_DEBUG,
372 			FL("Peer not found for peerID %d"),
373 			peer_id);
374 		return NULL;
375 	} else {
376 		return peer->vdev;
377 	}
378 }
379 #endif
380 
381 #ifndef FEATURE_WDS
382 static void
383 dp_rx_da_learn(struct dp_soc *soc,
384 	       uint8_t *rx_tlv_hdr,
385 	       struct dp_peer *ta_peer,
386 	       qdf_nbuf_t nbuf)
387 {
388 }
389 #endif
390 /*
391  * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic
392  *
393  * @soc: core txrx main context
394  * @ta_peer: source peer entry
395  * @rx_tlv_hdr: start address of rx tlvs
396  * @nbuf: nbuf that has to be intrabss forwarded
 * @msdu_metadata: msdu metadata parsed from the rx TLVs
397  *
398  * Return: bool: true if it is forwarded else false
399  */
400 static bool
401 dp_rx_intrabss_fwd(struct dp_soc *soc,
402 			struct dp_peer *ta_peer,
403 			uint8_t *rx_tlv_hdr,
404 			qdf_nbuf_t nbuf,
405 			struct hal_rx_msdu_metadata msdu_metadata)
406 {
407 	uint16_t len;
408 	uint8_t is_frag;
409 	struct dp_peer *da_peer;
410 	struct dp_ast_entry *ast_entry;
411 	qdf_nbuf_t nbuf_copy;
412 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
413 	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
414 	struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
415 					tid_stats.tid_rx_stats[ring_id][tid];
416 
417 	/* check if the destination peer is available in peer table
418 	 * and also check if the source peer and destination peer
419 	 * belong to the same vap and destination peer is not bss peer.
420 	 */
421 
422 	if ((qdf_nbuf_is_da_valid(nbuf) && !qdf_nbuf_is_da_mcbc(nbuf))) {
423 
424 		ast_entry = soc->ast_table[msdu_metadata.da_idx];
425 		if (!ast_entry)
426 			return false;
427 
428 		if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
429 			ast_entry->is_active = TRUE;
430 			return false;
431 		}
432 
433 		da_peer = ast_entry->peer;
434 
435 		if (!da_peer)
436 			return false;
437 		/* TA peer cannot be same as the peer (DA) on which the AST
438 		 * is present; this indicates a change in topology and that
439 		 * AST entries are yet to be updated.
440 		 */
441 		if (da_peer == ta_peer)
442 			return false;
443 
444 		if (da_peer->vdev == ta_peer->vdev && !da_peer->bss_peer) {
445 			len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
446 			is_frag = qdf_nbuf_is_frag(nbuf);
447 			memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
448 
449 			/* linearize the nbuf just before we send to
450 			 * dp_tx_send()
451 			 */
452 			if (qdf_unlikely(is_frag)) {
453 				if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
454 					return false;
455 
456 				nbuf = qdf_nbuf_unshare(nbuf);
457 				if (!nbuf) {
458 					DP_STATS_INC_PKT(ta_peer,
459 							 rx.intra_bss.fail,
460 							 1,
461 							 len);
462 					/* return true even though the pkt is
463 					 * not forwarded. Basically skb_unshare
464 					 * failed and we want to continue with
465 					 * next nbuf.
466 					 */
467 					tid_stats->fail_cnt[INTRABSS_DROP]++;
468 					return true;
469 				}
470 			}
471 
472 			if (!dp_tx_send((struct cdp_soc_t *)soc,
473 					ta_peer->vdev->vdev_id, nbuf)) {
474 				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
475 						 len);
476 				return true;
477 			} else {
478 				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
479 						len);
480 				tid_stats->fail_cnt[INTRABSS_DROP]++;
481 				return false;
482 			}
483 		}
484 	}
485 	/* If it is a broadcast pkt (e.g. ARP) and it is not its own
486 	 * source, then clone the pkt and send the cloned pkt for
487 	 * intra-BSS forwarding and the original pkt up the network stack.
488 	 * Note: multicast handling is an open question - do we forward
489 	 * all multicast pkts as-is, or let a higher layer module
490 	 * like igmpsnoop decide whether to forward or not with
491 	 * Mcast enhancement?
492 	 */
493 	else if (qdf_unlikely((qdf_nbuf_is_da_mcbc(nbuf) &&
494 			       !ta_peer->bss_peer))) {
495 		if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf))
496 			goto end;
497 
498 		nbuf_copy = qdf_nbuf_copy(nbuf);
499 		if (!nbuf_copy)
500 			goto end;
501 
502 		len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
503 		memset(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
504 
505 		/* Set cb->ftype to intrabss FWD */
506 		qdf_nbuf_set_tx_ftype(nbuf_copy, CB_FTYPE_INTRABSS_FWD);
507 		if (dp_tx_send((struct cdp_soc_t *)soc,
508 			       ta_peer->vdev->vdev_id, nbuf_copy)) {
509 			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, len);
510 			tid_stats->fail_cnt[INTRABSS_DROP]++;
511 			qdf_nbuf_free(nbuf_copy);
512 		} else {
513 			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, len);
514 			tid_stats->intrabss_cnt++;
515 		}
516 	}
517 
518 end:
519 	/* return false as we have to still send the original pkt
520 	 * up the stack
521 	 */
522 	return false;
523 }
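
/*
 * Caller contract for dp_rx_intrabss_fwd(): a true return means the nbuf was
 * consumed by the intra-BSS Tx path (or intentionally dropped on unshare
 * failure), so the Rx path must not deliver it up the stack; a false return
 * means the original nbuf should still be delivered to the network stack.
 */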
524 
525 #ifdef MESH_MODE_SUPPORT
526 
527 /**
528  * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
529  *
530  * @vdev: DP Virtual device handle
531  * @nbuf: Buffer pointer
532  * @rx_tlv_hdr: start of rx tlv header
533  * @peer: pointer to peer
534  *
535  * This function allocates memory for mesh receive stats and fills the
536  * required stats. It stores the memory address in the skb cb.
537  *
538  * Return: void
539  */
540 
541 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
542 				uint8_t *rx_tlv_hdr, struct dp_peer *peer)
543 {
544 	struct mesh_recv_hdr_s *rx_info = NULL;
545 	uint32_t pkt_type;
546 	uint32_t nss;
547 	uint32_t rate_mcs;
548 	uint32_t bw;
549 
550 	/* fill recv mesh stats */
551 	rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));
552 
553 	/* upper layers are responsible for freeing this memory */
554 
555 	if (!rx_info) {
556 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
557 			"Memory allocation failed for mesh rx stats");
558 		DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
559 		return;
560 	}
561 
562 	rx_info->rs_flags = MESH_RXHDR_VER1;
563 	if (qdf_nbuf_is_rx_chfrag_start(nbuf))
564 		rx_info->rs_flags |= MESH_RX_FIRST_MSDU;
565 
566 	if (qdf_nbuf_is_rx_chfrag_end(nbuf))
567 		rx_info->rs_flags |= MESH_RX_LAST_MSDU;
568 
569 	if (hal_rx_attn_msdu_get_is_decrypted(rx_tlv_hdr)) {
570 		rx_info->rs_flags |= MESH_RX_DECRYPTED;
571 		rx_info->rs_keyix = hal_rx_msdu_get_keyid(rx_tlv_hdr);
572 		if (vdev->osif_get_key)
573 			vdev->osif_get_key(vdev->osif_vdev,
574 					&rx_info->rs_decryptkey[0],
575 					&peer->mac_addr.raw[0],
576 					rx_info->rs_keyix);
577 	}
578 
579 	rx_info->rs_rssi = hal_rx_msdu_start_get_rssi(rx_tlv_hdr);
580 	rx_info->rs_channel = hal_rx_msdu_start_get_freq(rx_tlv_hdr);
581 	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
582 	rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
583 	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
584 	nss = hal_rx_msdu_start_nss_get(vdev->pdev->soc->hal_soc, rx_tlv_hdr);
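	/* rs_ratephy1 layout, per the shifts below: bits 0-7 rate_mcs,
	 * bits 8-15 nss, bits 16-23 pkt_type, bits 24-31 bw.
	 */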
585 	rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
586 				(bw << 24);
587 
588 	qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);
589 
590 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
591 		FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x"),
592 						rx_info->rs_flags,
593 						rx_info->rs_rssi,
594 						rx_info->rs_channel,
595 						rx_info->rs_ratephy1,
596 						rx_info->rs_keyix);
597 
598 }
599 
600 /**
601  * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
602  *
603  * @vdev: DP Virtual device handle
604  * @nbuf: Buffer pointer
605  * @rx_tlv_hdr: start of rx tlv header
606  *
607  * This checks if the received packet matches any filter-out
608  * category, so that the packet can be dropped if it matches.
609  *
610  * Return: status (0 indicates drop, 1 indicates no drop)
611  */
612 
613 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
614 					uint8_t *rx_tlv_hdr)
615 {
616 	union dp_align_mac_addr mac_addr;
617 	struct dp_soc *soc = vdev->pdev->soc;
618 
619 	if (qdf_unlikely(vdev->mesh_rx_filter)) {
620 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
621 			if (hal_rx_mpdu_get_fr_ds(soc->hal_soc,
622 						  rx_tlv_hdr))
623 				return  QDF_STATUS_SUCCESS;
624 
625 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
626 			if (hal_rx_mpdu_get_to_ds(soc->hal_soc,
627 						  rx_tlv_hdr))
628 				return  QDF_STATUS_SUCCESS;
629 
630 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
631 			if (!hal_rx_mpdu_get_fr_ds(soc->hal_soc,
632 						   rx_tlv_hdr) &&
633 			    !hal_rx_mpdu_get_to_ds(soc->hal_soc,
634 						   rx_tlv_hdr))
635 				return  QDF_STATUS_SUCCESS;
636 
637 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
638 			if (hal_rx_mpdu_get_addr1(soc->hal_soc,
639 						  rx_tlv_hdr,
640 					&mac_addr.raw[0]))
641 				return QDF_STATUS_E_FAILURE;
642 
643 			if (!qdf_mem_cmp(&mac_addr.raw[0],
644 					&vdev->mac_addr.raw[0],
645 					QDF_MAC_ADDR_SIZE))
646 				return  QDF_STATUS_SUCCESS;
647 		}
648 
649 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
650 			if (hal_rx_mpdu_get_addr2(soc->hal_soc,
651 						  rx_tlv_hdr,
652 						  &mac_addr.raw[0]))
653 				return QDF_STATUS_E_FAILURE;
654 
655 			if (!qdf_mem_cmp(&mac_addr.raw[0],
656 					&vdev->mac_addr.raw[0],
657 					QDF_MAC_ADDR_SIZE))
658 				return  QDF_STATUS_SUCCESS;
659 		}
660 	}
661 
662 	return QDF_STATUS_E_FAILURE;
663 }
664 
665 #else
666 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
667 				uint8_t *rx_tlv_hdr, struct dp_peer *peer)
668 {
669 }
670 
671 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
672 					uint8_t *rx_tlv_hdr)
673 {
674 	return QDF_STATUS_E_FAILURE;
675 }
676 
677 #endif
678 
679 #ifdef FEATURE_NAC_RSSI
680 /**
681  * dp_rx_nac_filter(): Function to perform filtering of non-associated
682  * clients
683  * @pdev: DP pdev handle
684  * @rx_pkt_hdr: Rx packet Header
685  *
686  * return: dp_vdev*
687  */
688 static
689 struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
690 		uint8_t *rx_pkt_hdr)
691 {
692 	struct ieee80211_frame *wh;
693 	struct dp_neighbour_peer *peer = NULL;
694 
695 	wh = (struct ieee80211_frame *)rx_pkt_hdr;
696 
697 	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
698 		return NULL;
699 
700 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
701 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
702 				neighbour_peer_list_elem) {
703 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
704 				wh->i_addr2, QDF_MAC_ADDR_SIZE) == 0) {
705 			QDF_TRACE(
706 				QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
707 				FL("NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x"),
708 				peer->neighbour_peers_macaddr.raw[0],
709 				peer->neighbour_peers_macaddr.raw[1],
710 				peer->neighbour_peers_macaddr.raw[2],
711 				peer->neighbour_peers_macaddr.raw[3],
712 				peer->neighbour_peers_macaddr.raw[4],
713 				peer->neighbour_peers_macaddr.raw[5]);
714 
715 				qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
716 
717 			return pdev->monitor_vdev;
718 		}
719 	}
720 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
721 
722 	return NULL;
723 }
724 
725 /**
726  * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
727  * @soc: DP SOC handle
728  * @mpdu: mpdu for which peer is invalid
729  * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
730  * pool_id have the same mapping)
731  *
732  * Return: 0 in all cases
733  */
734 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
735 				   uint8_t mac_id)
736 {
737 	struct dp_invalid_peer_msg msg;
738 	struct dp_vdev *vdev = NULL;
739 	struct dp_pdev *pdev = NULL;
740 	struct ieee80211_frame *wh;
741 	qdf_nbuf_t curr_nbuf, next_nbuf;
742 	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
743 	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
744 
745 	rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
746 
747 	if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) {
748 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
749 			  "Drop decapped frames");
750 		goto free;
751 	}
752 
753 	wh = (struct ieee80211_frame *)rx_pkt_hdr;
754 
755 	if (!DP_FRAME_IS_DATA(wh)) {
756 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
757 			  "NAWDS valid only for data frames");
758 		goto free;
759 	}
760 
761 	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
762 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
763 			"Invalid nbuf length");
764 		goto free;
765 	}
766 
767 	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
768 
769 	if (!pdev || qdf_unlikely(pdev->is_pdev_down)) {
770 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
771 			  "PDEV %s", !pdev ? "not found" : "down");
772 		goto free;
773 	}
774 
775 	if (pdev->filter_neighbour_peers) {
776 		/* Next Hop scenario not yet handled */
777 		vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
778 		if (vdev) {
779 			dp_rx_mon_deliver(soc, pdev->pdev_id,
780 					  pdev->invalid_peer_head_msdu,
781 					  pdev->invalid_peer_tail_msdu);
782 
783 			pdev->invalid_peer_head_msdu = NULL;
784 			pdev->invalid_peer_tail_msdu = NULL;
785 
786 			return 0;
787 		}
788 	}
789 
790 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
791 
792 		if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
793 				QDF_MAC_ADDR_SIZE) == 0) {
794 			goto out;
795 		}
796 	}
797 
798 	if (!vdev) {
799 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
800 			"VDEV not found");
801 		goto free;
802 	}
803 
804 out:
805 	msg.wh = wh;
806 	qdf_nbuf_pull_head(mpdu, RX_PKT_TLVS_LEN);
807 	msg.nbuf = mpdu;
808 	msg.vdev_id = vdev->vdev_id;
809 	if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer)
810 		pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(
811 				(struct cdp_ctrl_objmgr_psoc *)soc->ctrl_psoc,
812 				pdev->pdev_id, &msg);
813 
814 free:
815 	/* Drop and free packet */
816 	curr_nbuf = mpdu;
817 	while (curr_nbuf) {
818 		next_nbuf = qdf_nbuf_next(curr_nbuf);
819 		qdf_nbuf_free(curr_nbuf);
820 		curr_nbuf = next_nbuf;
821 	}
822 
823 	return 0;
824 }
825 
826 /**
827  * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
828  * @soc: DP SOC handle
829  * @mpdu: mpdu for which peer is invalid
830  * @mpdu_done: if an mpdu is completed
831  * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
832  * pool_id have the same mapping)
833  *
834  * Return: void
835  */
836 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
837 					qdf_nbuf_t mpdu, bool mpdu_done,
838 					uint8_t mac_id)
839 {
840 	/* Only trigger the process when mpdu is completed */
841 	if (mpdu_done)
842 		dp_rx_process_invalid_peer(soc, mpdu, mac_id);
843 }
844 #else
845 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
846 				   uint8_t mac_id)
847 {
848 	qdf_nbuf_t curr_nbuf, next_nbuf;
849 	struct dp_pdev *pdev;
850 	struct dp_vdev *vdev = NULL;
851 	struct ieee80211_frame *wh;
852 	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
853 	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
854 
855 	wh = (struct ieee80211_frame *)rx_pkt_hdr;
856 
857 	if (!DP_FRAME_IS_DATA(wh)) {
858 		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
859 				   "only for data frames");
860 		goto free;
861 	}
862 
863 	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
864 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
865 			  "Invalid nbuf length");
866 		goto free;
867 	}
868 
869 	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
870 	if (!pdev) {
871 		QDF_TRACE(QDF_MODULE_ID_DP,
872 			  QDF_TRACE_LEVEL_ERROR,
873 			  "PDEV not found");
874 		goto free;
875 	}
876 
877 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
878 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
879 		if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
880 				QDF_MAC_ADDR_SIZE) == 0) {
881 			qdf_spin_unlock_bh(&pdev->vdev_list_lock);
882 			goto out;
883 		}
884 	}
885 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
886 
887 	if (!vdev) {
888 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
889 			  "VDEV not found");
890 		goto free;
891 	}
892 
893 out:
894 	if (soc->cdp_soc.ol_ops->rx_invalid_peer)
895 		soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh);
896 free:
897 	/* reset the head and tail pointers */
898 	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
899 	if (pdev) {
900 		pdev->invalid_peer_head_msdu = NULL;
901 		pdev->invalid_peer_tail_msdu = NULL;
902 	}
903 
904 	/* Drop and free packet */
905 	curr_nbuf = mpdu;
906 	while (curr_nbuf) {
907 		next_nbuf = qdf_nbuf_next(curr_nbuf);
908 		qdf_nbuf_free(curr_nbuf);
909 		curr_nbuf = next_nbuf;
910 	}
911 
912 	/* Reset the head and tail pointers */
913 	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
914 	if (pdev) {
915 		pdev->invalid_peer_head_msdu = NULL;
916 		pdev->invalid_peer_tail_msdu = NULL;
917 	}
918 
919 	return 0;
920 }
921 
922 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
923 					qdf_nbuf_t mpdu, bool mpdu_done,
924 					uint8_t mac_id)
925 {
926 	/* Process the nbuf */
927 	dp_rx_process_invalid_peer(soc, mpdu, mac_id);
928 }
929 #endif
930 
931 #ifdef RECEIVE_OFFLOAD
932 /**
933  * dp_rx_print_offload_info() - Print offload info from RX TLV
934  * @soc: dp soc handle
935  * @rx_tlv: RX TLV for which offload information is to be printed
936  *
937  * Return: None
938  */
939 static void dp_rx_print_offload_info(struct dp_soc *soc, uint8_t *rx_tlv)
940 {
941 	dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------");
942 	dp_verbose_debug("lro_eligible 0x%x", HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv));
943 	dp_verbose_debug("pure_ack 0x%x", HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv));
944 	dp_verbose_debug("chksum 0x%x", hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
945 								  rx_tlv));
946 	dp_verbose_debug("TCP seq num 0x%x", HAL_RX_TLV_GET_TCP_SEQ(rx_tlv));
947 	dp_verbose_debug("TCP ack num 0x%x", HAL_RX_TLV_GET_TCP_ACK(rx_tlv));
948 	dp_verbose_debug("TCP window 0x%x", HAL_RX_TLV_GET_TCP_WIN(rx_tlv));
949 	dp_verbose_debug("TCP protocol 0x%x", HAL_RX_TLV_GET_TCP_PROTO(rx_tlv));
950 	dp_verbose_debug("TCP offset 0x%x", HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv));
951 	dp_verbose_debug("toeplitz 0x%x", HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv));
952 	dp_verbose_debug("---------------------------------------------------------");
953 }
954 
955 /**
956  * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb
957  * @soc: DP SOC handle
958  * @rx_tlv: RX TLV received for the msdu
959  * @msdu: msdu for which GRO info needs to be filled
960  * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets
961  *
962  * Return: None
963  */
964 static
965 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
966 			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
967 {
968 	if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx))
969 		return;
970 
971 	/* Filling up RX offload info only for TCP packets */
972 	if (!HAL_RX_TLV_GET_TCP_PROTO(rx_tlv))
973 		return;
974 
975 	*rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1;
976 
977 	QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) =
978 		 HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv);
979 	QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) =
980 			HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv);
981 	QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
982 			hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
983 						  rx_tlv);
984 	QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) =
985 			 HAL_RX_TLV_GET_TCP_SEQ(rx_tlv);
986 	QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) =
987 			 HAL_RX_TLV_GET_TCP_ACK(rx_tlv);
988 	QDF_NBUF_CB_RX_TCP_WIN(msdu) =
989 			 HAL_RX_TLV_GET_TCP_WIN(rx_tlv);
990 	QDF_NBUF_CB_RX_TCP_PROTO(msdu) =
991 			 HAL_RX_TLV_GET_TCP_PROTO(rx_tlv);
992 	QDF_NBUF_CB_RX_IPV6_PROTO(msdu) =
993 			 HAL_RX_TLV_GET_IPV6(rx_tlv);
994 	QDF_NBUF_CB_RX_TCP_OFFSET(msdu) =
995 			 HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv);
996 	QDF_NBUF_CB_RX_FLOW_ID(msdu) =
997 			 HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv);
998 
999 	dp_rx_print_offload_info(soc, rx_tlv);
1000 }
1001 #else
1002 static void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
1003 				qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
1004 {
1005 }
1006 #endif /* RECEIVE_OFFLOAD */
1007 
1008 /**
1009  * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
1010  *
1011  * @nbuf: pointer to msdu.
1012  * @mpdu_len: mpdu length
1013  *
1014  * Return: true if nbuf is the last msdu of the mpdu, else false.
1015  */
1016 static inline bool dp_rx_adjust_nbuf_len(qdf_nbuf_t nbuf, uint16_t *mpdu_len)
1017 {
1018 	bool last_nbuf;
1019 
1020 	if (*mpdu_len > (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN)) {
1021 		qdf_nbuf_set_pktlen(nbuf, RX_DATA_BUFFER_SIZE);
1022 		last_nbuf = false;
1023 	} else {
1024 		qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + RX_PKT_TLVS_LEN));
1025 		last_nbuf = true;
1026 	}
1027 
1028 	*mpdu_len -= (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN);
1029 
1030 	return last_nbuf;
1031 }
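
/*
 * Worked example (illustrative sizes only): with a 2048-byte RX data buffer
 * and a 384-byte RX TLV area, each intermediate nbuf of a scattered MSDU
 * carries 2048 - 384 = 1664 payload bytes, so *mpdu_len shrinks by 1664 per
 * call until the remainder fits in one buffer; that last nbuf is trimmed to
 * (remainder + RX_PKT_TLVS_LEN) and the function reports it as the last one.
 */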
1032 
1033 /**
1034  * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
1035  *		     multiple nbufs.
1036  * @nbuf: pointer to the first msdu of an amsdu.
1037  *
1038  * This function implements the creation of RX frag_list for cases
1039  * where an MSDU is spread across multiple nbufs.
1040  *
1041  * Return: returns the head nbuf which contains complete frag_list.
1042  */
1043 qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf)
1044 {
1045 	qdf_nbuf_t parent, frag_list, next = NULL;
1046 	uint16_t frag_list_len = 0;
1047 	uint16_t mpdu_len;
1048 	bool last_nbuf;
1049 
1050 	/*
1051 	 * Use the msdu len obtained from the REO entry descriptor, since
1052 	 * there are cases where the RX PKT TLV is corrupted while the msdu_len
1053 	 * from the REO descriptor is correct for non-raw RX scatter msdus.
1054 	 */
1055 	mpdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
1056 	/*
1057 	 * this is a case where the complete msdu fits in one single nbuf.
1058 	 * in this case HW sets both start and end bit and we only need to
1059 	 * reset these bits for RAW mode simulator to decap the pkt
1060 	 */
1061 	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
1062 					qdf_nbuf_is_rx_chfrag_end(nbuf)) {
1063 		qdf_nbuf_set_pktlen(nbuf, mpdu_len + RX_PKT_TLVS_LEN);
1064 		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
1065 		return nbuf;
1066 	}
1067 
1068 	/*
1069 	 * This is a case where we have multiple msdus (A-MSDU) spread across
1070 	 * multiple nbufs. here we create a fraglist out of these nbufs.
1071 	 *
1072 	 * the moment we encounter a nbuf with continuation bit set we
1073 	 * know for sure we have an MSDU which is spread across multiple
1074 	 * nbufs. We loop through and reap nbufs till we reach last nbuf.
1075 	 */
1076 	parent = nbuf;
1077 	frag_list = nbuf->next;
1078 	nbuf = nbuf->next;
1079 
1080 	/*
1081 	 * set the start bit in the first nbuf we encounter with continuation
1082 	 * bit set. This has the proper mpdu length set as it is the first
1083 	 * msdu of the mpdu. this becomes the parent nbuf and the subsequent
1084 	 * nbufs will form the frag_list of the parent nbuf.
1085 	 */
1086 	qdf_nbuf_set_rx_chfrag_start(parent, 1);
1087 	last_nbuf = dp_rx_adjust_nbuf_len(parent, &mpdu_len);
1088 
1089 	/*
1090 	 * this is where we set the length of the fragments which are
1091 	 * associated to the parent nbuf. We iterate through the frag_list
1092 	 * till we hit the last_nbuf of the list.
1093 	 */
1094 	do {
1095 		last_nbuf = dp_rx_adjust_nbuf_len(nbuf, &mpdu_len);
1096 		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
1097 		frag_list_len += qdf_nbuf_len(nbuf);
1098 
1099 		if (last_nbuf) {
1100 			next = nbuf->next;
1101 			nbuf->next = NULL;
1102 			break;
1103 		}
1104 
1105 		nbuf = nbuf->next;
1106 	} while (!last_nbuf);
1107 
1108 	qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
1109 	qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
1110 	parent->next = next;
1111 
1112 	qdf_nbuf_pull_head(parent, RX_PKT_TLVS_LEN);
1113 	return parent;
1114 }
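
/*
 * After dp_rx_sg_create() returns, the parent nbuf has its RX TLVs pulled,
 * holds the first fragment of the MSDU, and chains the remaining
 * (also TLV-stripped) buffers as its frag_list; parent->next points at the
 * buffer following the last fragment in the original chain.
 */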
1115 
1116 /**
1117  * dp_rx_compute_delay() - Compute and update the rx delay stats
1118  *			    (reap-to-stack and inter-frame delay)
1119  *
1120  * @vdev: vdev handle
1121  * @nbuf: rx buffer for which the delay is computed
1122  *
1123  * Return: none
1124  */
1125 void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1126 {
1127 	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
1128 	int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get());
1129 	uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
1130 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
1131 	uint32_t interframe_delay =
1132 		(uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp);
1133 
1134 	dp_update_delay_stats(vdev->pdev, to_stack, tid,
1135 			      CDP_DELAY_STATS_REAP_STACK, ring_id);
1136 	/*
1137 	 * Update interframe delay stats calculated at deliver_data_ol point.
1138 	 * Value of vdev->prev_rx_deliver_tstamp will be 0 for 1st frame, so
1139 	 * interframe delay will not be calculated correctly for the 1st frame.
1140 	 * On the other hand, this helps in avoiding an extra per packet check
1141 	 * of vdev->prev_rx_deliver_tstamp.
1142 	 */
1143 	dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
1144 			      CDP_DELAY_STATS_RX_INTERFRAME, ring_id);
1145 	vdev->prev_rx_deliver_tstamp = current_ts;
1146 }
1147 
1148 /**
1149  * dp_rx_drop_nbuf_list() - drop an nbuf list
1150  * @pdev: dp pdev reference
1151  * @buf_list: buffer list to be dropped
1152  *
1153  * Return: int (number of bufs dropped)
1154  */
1155 static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev,
1156 				       qdf_nbuf_t buf_list)
1157 {
1158 	struct cdp_tid_rx_stats *stats = NULL;
1159 	uint8_t tid = 0, ring_id = 0;
1160 	int num_dropped = 0;
1161 	qdf_nbuf_t buf, next_buf;
1162 
1163 	buf = buf_list;
1164 	while (buf) {
1165 		ring_id = QDF_NBUF_CB_RX_CTX_ID(buf);
1166 		next_buf = qdf_nbuf_queue_next(buf);
1167 		tid = qdf_nbuf_get_tid_val(buf);
1168 		if (qdf_likely(pdev)) {
1169 			stats = &pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
1170 			stats->fail_cnt[INVALID_PEER_VDEV]++;
1171 			stats->delivered_to_stack--;
1172 		}
1173 		qdf_nbuf_free(buf);
1174 		buf = next_buf;
1175 		num_dropped++;
1176 	}
1177 
1178 	return num_dropped;
1179 }
1180 
1181 #ifdef PEER_CACHE_RX_PKTS
1182 /**
1183  * dp_rx_flush_rx_cached() - flush cached rx frames
1184  * @peer: peer
1185  * @drop: flag to drop frames or forward to net stack
1186  *
1187  * Return: None
1188  */
1189 void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
1190 {
1191 	struct dp_peer_cached_bufq *bufqi;
1192 	struct dp_rx_cached_buf *cache_buf = NULL;
1193 	ol_txrx_rx_fp data_rx = NULL;
1194 	int num_buff_elem;
1195 	QDF_STATUS status;
1196 
1197 	if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
1198 		qdf_atomic_dec(&peer->flush_in_progress);
1199 		return;
1200 	}
1201 
1202 	qdf_spin_lock_bh(&peer->peer_info_lock);
1203 	if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx)
1204 		data_rx = peer->vdev->osif_rx;
1205 	else
1206 		drop = true;
1207 	qdf_spin_unlock_bh(&peer->peer_info_lock);
1208 
1209 	bufqi = &peer->bufq_info;
1210 
1211 	qdf_spin_lock_bh(&bufqi->bufq_lock);
1212 	qdf_list_remove_front(&bufqi->cached_bufq,
1213 			      (qdf_list_node_t **)&cache_buf);
1214 	while (cache_buf) {
1215 		num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(
1216 								cache_buf->buf);
1217 		bufqi->entries -= num_buff_elem;
1218 		qdf_spin_unlock_bh(&bufqi->bufq_lock);
1219 		if (drop) {
1220 			bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1221 							      cache_buf->buf);
1222 		} else {
1223 			/* Flush the cached frames to OSIF DEV */
1224 			status = data_rx(peer->vdev->osif_vdev, cache_buf->buf);
1225 			if (status != QDF_STATUS_SUCCESS)
1226 				bufqi->dropped = dp_rx_drop_nbuf_list(
1227 							peer->vdev->pdev,
1228 							cache_buf->buf);
1229 		}
1230 		qdf_mem_free(cache_buf);
1231 		cache_buf = NULL;
1232 		qdf_spin_lock_bh(&bufqi->bufq_lock);
1233 		qdf_list_remove_front(&bufqi->cached_bufq,
1234 				      (qdf_list_node_t **)&cache_buf);
1235 	}
1236 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
1237 	qdf_atomic_dec(&peer->flush_in_progress);
1238 }
1239 
1240 /**
1241  * dp_rx_enqueue_rx() - cache rx frames
1242  * @peer: peer
1243  * @rx_buf_list: cache buffer list
1244  *
1245  * Return: QDF_STATUS
1246  */
1247 static QDF_STATUS
1248 dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
1249 {
1250 	struct dp_rx_cached_buf *cache_buf;
1251 	struct dp_peer_cached_bufq *bufqi = &peer->bufq_info;
1252 	int num_buff_elem;
1253 
1254 	dp_debug_rl("bufq->curr %d bufq->drops %d", bufqi->entries,
1255 		    bufqi->dropped);
1256 	if (!peer->valid) {
1257 		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1258 						      rx_buf_list);
1259 		return QDF_STATUS_E_INVAL;
1260 	}
1261 
1262 	qdf_spin_lock_bh(&bufqi->bufq_lock);
1263 	if (bufqi->entries >= bufqi->thresh) {
1264 		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1265 						      rx_buf_list);
1266 		qdf_spin_unlock_bh(&bufqi->bufq_lock);
1267 		return QDF_STATUS_E_RESOURCES;
1268 	}
1269 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
1270 
1271 	num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list);
1272 
1273 	cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf));
1274 	if (!cache_buf) {
1275 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1276 			  "Failed to allocate buf to cache rx frames");
1277 		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1278 						      rx_buf_list);
1279 		return QDF_STATUS_E_NOMEM;
1280 	}
1281 
1282 	cache_buf->buf = rx_buf_list;
1283 
1284 	qdf_spin_lock_bh(&bufqi->bufq_lock);
1285 	qdf_list_insert_back(&bufqi->cached_bufq,
1286 			     &cache_buf->node);
1287 	bufqi->entries += num_buff_elem;
1288 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
1289 
1290 	return QDF_STATUS_SUCCESS;
1291 }
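
/*
 * Together, dp_rx_enqueue_rx() and dp_rx_flush_rx_cached() implement the
 * PEER_CACHE_RX_PKTS scheme: frames that arrive before the peer's OSIF rx
 * callback is usable are parked on peer->bufq_info (bounded by bufqi->thresh)
 * and later either handed to osif_rx or dropped when the cache is flushed.
 */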
1292 
1293 static inline
1294 bool dp_rx_is_peer_cache_bufq_supported(void)
1295 {
1296 	return true;
1297 }
1298 #else
1299 static inline
1300 bool dp_rx_is_peer_cache_bufq_supported(void)
1301 {
1302 	return false;
1303 }
1304 
1305 static inline QDF_STATUS
1306 dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
1307 {
1308 	return QDF_STATUS_SUCCESS;
1309 }
1310 #endif
1311 
1312 void dp_rx_deliver_to_stack(struct dp_soc *soc,
1313 			    struct dp_vdev *vdev,
1314 			    struct dp_peer *peer,
1315 			    qdf_nbuf_t nbuf_head,
1316 			    qdf_nbuf_t nbuf_tail)
1317 {
1318 	int num_nbuf = 0;
1319 
1320 	if (qdf_unlikely(!vdev || vdev->delete.pending)) {
1321 		num_nbuf = dp_rx_drop_nbuf_list(NULL, nbuf_head);
1322 		/*
1323 		 * This is a special case where vdev is invalid,
1324 		 * so we cannot know the pdev to which this packet
1325 		 * belonged. Hence we update the soc rx error stats.
1326 		 */
1327 		DP_STATS_INC(soc, rx.err.invalid_vdev, num_nbuf);
1328 		return;
1329 	}
1330 
1331 	/*
1332 	 * highly unlikely to have a vdev without a registered rx
1333 	 * callback function. if so let us free the nbuf_list.
1334 	 */
1335 	if (qdf_unlikely(!vdev->osif_rx)) {
1336 		if (peer && dp_rx_is_peer_cache_bufq_supported()) {
1337 			dp_rx_enqueue_rx(peer, nbuf_head);
1338 		} else {
1339 			num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev,
1340 							nbuf_head);
1341 			DP_STATS_DEC(peer, rx.to_stack.num, num_nbuf);
1342 		}
1343 		return;
1344 	}
1345 
1346 	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
1347 			(vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
1348 		vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
1349 				&nbuf_tail, peer->mac_addr.raw);
1350 	}
1351 	vdev->osif_rx(vdev->osif_vdev, nbuf_head);
1352 }
1353 
1354 /**
1355  * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
1356  * @nbuf: pointer to the first msdu of an amsdu.
1357  * @rx_tlv_hdr: pointer to the start of RX TLV headers.
1358  *
1359  * The ip_summed field of the skb is set based on whether HW validated the
1360  * IP/TCP/UDP checksum.
1361  *
1362  * Return: void
1363  */
1364 static inline void dp_rx_cksum_offload(struct dp_pdev *pdev,
1365 				       qdf_nbuf_t nbuf,
1366 				       uint8_t *rx_tlv_hdr)
1367 {
1368 	qdf_nbuf_rx_cksum_t cksum = {0};
1369 	bool ip_csum_err = hal_rx_attn_ip_cksum_fail_get(rx_tlv_hdr);
1370 	bool tcp_udp_csum_er = hal_rx_attn_tcp_udp_cksum_fail_get(rx_tlv_hdr);
1371 
1372 	if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) {
1373 		cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
1374 		qdf_nbuf_set_rx_cksum(nbuf, &cksum);
1375 	} else {
1376 		DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err);
1377 		DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er);
1378 	}
1379 }
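
/*
 * Note: the L4 checksum is marked as already verified only when hardware
 * reported both the IP and TCP/UDP checksums as good; on failure the nbuf's
 * checksum state is left untouched (so the stack re-verifies in software) and
 * the corresponding pdev error counters are incremented.
 */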
1380 
1381 #ifdef VDEV_PEER_PROTOCOL_COUNT
1382 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, peer) \
1383 { \
1384 	qdf_nbuf_t nbuf_local; \
1385 	struct dp_peer *peer_local; \
1386 	struct dp_vdev *vdev_local = vdev_hdl; \
1387 	do { \
1388 		if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \
1389 			break; \
1390 		nbuf_local = nbuf; \
1391 		peer_local = peer; \
1392 		if (qdf_unlikely(qdf_nbuf_is_frag((nbuf_local)))) \
1393 			break; \
1394 		else if (qdf_unlikely(qdf_nbuf_is_raw_frame((nbuf_local)))) \
1395 			break; \
1396 		dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \
1397 						       (nbuf_local), \
1398 						       (peer_local), 0, 1); \
1399 	} while (0); \
1400 }
1401 #else
1402 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, peer)
1403 #endif
1404 
1405 /**
1406  * dp_rx_msdu_stats_update() - update per msdu stats.
1407  * @soc: core txrx main context
1408  * @nbuf: pointer to the first msdu of an amsdu.
1409  * @rx_tlv_hdr: pointer to the start of RX TLV headers.
1410  * @peer: pointer to the peer object.
1411  * @ring_id: reo dest ring number on which pkt is reaped.
1412  * @tid_stats: per tid rx stats.
1413  *
1414  * update all the per msdu stats for that nbuf.
1415  * Return: void
1416  */
1417 static void dp_rx_msdu_stats_update(struct dp_soc *soc,
1418 				    qdf_nbuf_t nbuf,
1419 				    uint8_t *rx_tlv_hdr,
1420 				    struct dp_peer *peer,
1421 				    uint8_t ring_id,
1422 				    struct cdp_tid_rx_stats *tid_stats)
1423 {
1424 	bool is_ampdu, is_not_amsdu;
1425 	uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
1426 	struct dp_vdev *vdev = peer->vdev;
1427 	qdf_ether_header_t *eh;
1428 	uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
1429 
1430 	dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, peer);
1431 	is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
1432 			qdf_nbuf_is_rx_chfrag_end(nbuf);
1433 
1434 	DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1, msdu_len);
1435 	DP_STATS_INCC(peer, rx.non_amsdu_cnt, 1, is_not_amsdu);
1436 	DP_STATS_INCC(peer, rx.amsdu_cnt, 1, !is_not_amsdu);
1437 	DP_STATS_INCC(peer, rx.rx_retries, 1, qdf_nbuf_is_rx_retry_flag(nbuf));
1438 
1439 	tid_stats->msdu_cnt++;
1440 	if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) &&
1441 			 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
1442 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1443 		DP_STATS_INC_PKT(peer, rx.multicast, 1, msdu_len);
1444 		tid_stats->mcast_msdu_cnt++;
1445 		if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
1446 			DP_STATS_INC_PKT(peer, rx.bcast, 1, msdu_len);
1447 			tid_stats->bcast_msdu_cnt++;
1448 		}
1449 	}
1450 
1451 	/*
1452 	 * currently we can return from here as we have similar stats
1453 	 * updated at per ppdu level instead of msdu level
1454 	 */
1455 	if (!soc->process_rx_status)
1456 		return;
1457 
1458 	is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(rx_tlv_hdr);
1459 	DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu);
1460 	DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu));
1461 
1462 	sgi = hal_rx_msdu_start_sgi_get(rx_tlv_hdr);
1463 	mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
1464 	tid = qdf_nbuf_get_tid_val(nbuf);
1465 	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
1466 	reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc,
1467 							      rx_tlv_hdr);
1468 	nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
1469 	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
1470 
1471 	DP_STATS_INC(peer, rx.bw[bw], 1);
1472 	/*
1473 	 * Increment the counter at index [nss - 1] only if nss > 0 and
1474 	 * pkt_type is 11N/AC/AX.
1475 	 */
1476 	if (nss > 0 && (pkt_type == DOT11_N ||
1477 			pkt_type == DOT11_AC ||
1478 			pkt_type == DOT11_AX))
1479 		DP_STATS_INC(peer, rx.nss[nss - 1], 1);
1480 
1481 	DP_STATS_INC(peer, rx.sgi_count[sgi], 1);
1482 	DP_STATS_INCC(peer, rx.err.mic_err, 1,
1483 		      hal_rx_mpdu_end_mic_err_get(rx_tlv_hdr));
1484 	DP_STATS_INCC(peer, rx.err.decrypt_err, 1,
1485 		      hal_rx_mpdu_end_decrypt_err_get(rx_tlv_hdr));
1486 
1487 	DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
1488 	DP_STATS_INC(peer, rx.reception_type[reception_type], 1);
1489 
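	/* For each PHY mode below, an MCS beyond that mode's maximum is folded
	 * into the overflow bucket mcs_count[MAX_MCS - 1]; otherwise
	 * mcs_count[mcs] itself is incremented.
	 */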
1490 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1491 		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
1492 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1493 		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A)));
1494 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1495 		      ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
1496 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1497 		      ((mcs <= MAX_MCS_11B) && (pkt_type == DOT11_B)));
1498 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1499 		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
1500 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1501 		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_N)));
1502 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1503 		      ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
1504 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1505 		      ((mcs <= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
1506 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1507 		      ((mcs >= MAX_MCS) && (pkt_type == DOT11_AX)));
1508 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1509 		      ((mcs < MAX_MCS) && (pkt_type == DOT11_AX)));
1510 
1511 	if ((soc->process_rx_status) &&
1512 	    hal_rx_attn_first_mpdu_get(rx_tlv_hdr)) {
1513 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
1514 		if (!vdev->pdev)
1515 			return;
1516 
1517 		dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
1518 				     &peer->stats, peer->peer_ids[0],
1519 				     UPDATE_PEER_STATS,
1520 				     vdev->pdev->pdev_id);
1521 #endif
1522 
1523 	}
1524 }
1525 
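/*
 * is_sa_da_idx_valid() - flag an nbuf as invalid when the SA index (for a
 * valid SA), or the DA index of a valid unicast DA, reported by hardware
 * exceeds the maximum AST index configured for this SoC.
 */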
1526 static inline bool is_sa_da_idx_valid(struct dp_soc *soc,
1527 				      uint8_t *rx_tlv_hdr,
1528 				      qdf_nbuf_t nbuf,
1529 				      struct hal_rx_msdu_metadata msdu_info)
1530 {
1531 	if ((qdf_nbuf_is_sa_valid(nbuf) &&
1532 	    (msdu_info.sa_idx > wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) ||
1533 	    (!qdf_nbuf_is_da_mcbc(nbuf) &&
1534 	     qdf_nbuf_is_da_valid(nbuf) &&
1535 	     (msdu_info.da_idx > wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))))
1536 		return false;
1537 
1538 	return true;
1539 }
1540 
1541 #ifndef WDS_VENDOR_EXTENSION
1542 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
1543 			   struct dp_vdev *vdev,
1544 			   struct dp_peer *peer)
1545 {
1546 	return 1;
1547 }
1548 #endif
1549 
1550 #ifdef RX_DESC_DEBUG_CHECK
1551 /**
1552  * dp_rx_desc_nbuf_sanity_check - Add sanity check to catch REO rx_desc paddr
1553  *				  corruption
1554  *
1555  * @ring_desc: REO ring descriptor
1556  * @rx_desc: Rx descriptor
1557  *
1558  * Return: NONE
1559  */
1560 static inline
1561 void dp_rx_desc_nbuf_sanity_check(hal_ring_desc_t ring_desc,
1562 				  struct dp_rx_desc *rx_desc)
1563 {
1564 	struct hal_buf_info hbi;
1565 
1566 	hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
1567 	/* Sanity check for possible buffer paddr corruption */
1568 	qdf_assert_always((&hbi)->paddr ==
1569 			  qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0));
1570 }
1571 #else
1572 static inline
1573 void dp_rx_desc_nbuf_sanity_check(hal_ring_desc_t ring_desc,
1574 				  struct dp_rx_desc *rx_desc)
1575 {
1576 }
1577 #endif
1578 
1579 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
1580 static inline
1581 bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
1582 {
1583 	bool limit_hit = false;
1584 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
1585 
1586 	limit_hit =
1587 		(num_reaped >= cfg->rx_reap_loop_pkt_limit) ? true : false;
1588 
1589 	if (limit_hit)
1590 		DP_STATS_INC(soc, rx.reap_loop_pkt_limit_hit, 1)
1591 
1592 	return limit_hit;
1593 }
1594 
1595 static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
1596 {
1597 	return soc->wlan_cfg_ctx->rx_enable_eol_data_check;
1598 }
1599 
1600 #else
1601 static inline
1602 bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
1603 {
1604 	return false;
1605 }
1606 
1607 static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
1608 {
1609 	return false;
1610 }
1611 
1612 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
1613 
1614 /**
1615  * dp_is_special_data() - check if the pkt is special, like eapol, dhcp, etc.
1616  *
1617  * @nbuf: pkt skb pointer
1618  *
1619  * Return: true if matched, false if not
1620  */
1621 static inline
1622 bool dp_is_special_data(qdf_nbuf_t nbuf)
1623 {
1624 	if (qdf_nbuf_is_ipv4_arp_pkt(nbuf) ||
1625 	    qdf_nbuf_is_ipv4_dhcp_pkt(nbuf) ||
1626 	    qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
1627 	    qdf_nbuf_is_ipv6_dhcp_pkt(nbuf))
1628 		return true;
1629 	else
1630 		return false;
1631 }
1632 
1633 #ifdef DP_RX_PKT_NO_PEER_DELIVER
1634 /**
1635  * dp_rx_deliver_to_stack_no_peer() - try deliver rx data even if
1636  *				      no corresponding peer is found
1637  * @soc: core txrx main context
1638  * @nbuf: pkt skb pointer
1639  *
1640  * This function will try to deliver some RX special frames to the stack
1641  * even when no matching peer is found. For instance, in the LFR case some
1642  * eapol data may be sent to the host before peer_map is done.
1643  *
1644  * Return: None
1645  */
1646 static inline
1647 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
1648 {
1649 	uint16_t peer_id;
1650 	uint8_t vdev_id;
1651 	struct dp_vdev *vdev;
1652 	uint32_t l2_hdr_offset = 0;
1653 	uint16_t msdu_len = 0;
1654 	uint32_t pkt_len = 0;
1655 	uint8_t *rx_tlv_hdr;
1656 
1657 	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
1658 	if (peer_id > soc->max_peers)
1659 		goto deliver_fail;
1660 
1661 	vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
1662 	vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
1663 	if (!vdev || vdev->delete.pending || !vdev->osif_rx)
1664 		goto deliver_fail;
1665 
1666 	rx_tlv_hdr = qdf_nbuf_data(nbuf);
1667 	l2_hdr_offset =
1668 		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
1669 
1670 	msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
1671 	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;
1672 
1673 	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
1674 		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
1675 	} else {
1676 		qdf_nbuf_set_pktlen(nbuf, pkt_len);
1677 		qdf_nbuf_pull_head(nbuf,
1678 				   RX_PKT_TLVS_LEN +
1679 				   l2_hdr_offset);
1680 	}
1681 
1682 	/* only allow special frames */
1683 	if (!dp_is_special_data(nbuf))
1684 		goto deliver_fail;
1685 
1686 	vdev->osif_rx(vdev->osif_vdev, nbuf);
1687 	DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1);
1688 	return;
1689 
1690 deliver_fail:
1691 	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1692 			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
1693 	qdf_nbuf_free(nbuf);
1694 }
1695 #else
1696 static inline
1697 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
1698 {
1699 	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1700 			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
1701 	qdf_nbuf_free(nbuf);
1702 }
1703 #endif
1704 
1705 /**
1706  * dp_rx_srng_get_num_pending() - get number of pending entries
1707  * @hal_soc: hal soc opaque pointer
1708  * @hal_ring: opaque pointer to the HAL Rx Ring
1709  * @num_entries: number of entries in the hal_ring.
1710  * @near_full: pointer to a boolean. This is set if ring is near full.
1711  *
1712  * The function returns the number of entries in a destination ring which are
1713  * yet to be reaped. The function also checks if the ring is near full.
1714  * If more than half of the ring needs to be reaped, the ring is considered
1715  * approaching full.
1716  * The function uses hal_srng_dst_num_valid_locked to get the number of valid
1717  * entries. It should not be called within a SRNG lock. HW pointer value is
1718  * synced into cached_hp.
1719  *
1720  * Return: Number of pending entries if any
1721  */
1722 static
1723 uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
1724 				    hal_ring_handle_t hal_ring_hdl,
1725 				    uint32_t num_entries,
1726 				    bool *near_full)
1727 {
1728 	uint32_t num_pending = 0;
1729 
1730 	num_pending = hal_srng_dst_num_valid_locked(hal_soc,
1731 						    hal_ring_hdl,
1732 						    true);
1733 
1734 	if (num_entries && (num_pending >= num_entries >> 1))
1735 		*near_full = true;
1736 	else
1737 		*near_full = false;
1738 
1739 	return num_pending;
1740 }
1741 
1742 /**
1743  * dp_rx_process() - Brain of the Rx processing functionality
1744  *		     Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
1745  * @int_ctx: per interrupt context
1746  * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
1747  * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring.
1748  * @quota: No. of units (packets) that can be serviced in one shot.
1749  *
1750  * This function implements the core of Rx functionality. This is
1751  * expected to handle only non-error frames.
1752  *
1753  * Return: uint32_t: No. of elements processed
1754  */
1755 uint32_t dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
1756 			    uint8_t reo_ring_num, uint32_t quota)
1757 {
1758 	hal_ring_desc_t ring_desc;
1759 	hal_soc_handle_t hal_soc;
1760 	struct dp_rx_desc *rx_desc = NULL;
1761 	qdf_nbuf_t nbuf, next;
1762 	bool near_full;
1763 	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT];
1764 	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT];
1765 	uint32_t num_pending;
1766 	uint32_t rx_bufs_used = 0, rx_buf_cookie;
1767 	uint16_t msdu_len = 0;
1768 	uint16_t peer_id;
1769 	uint8_t vdev_id;
1770 	struct dp_peer *peer;
1771 	struct dp_vdev *vdev;
1772 	uint32_t pkt_len = 0;
1773 	struct hal_rx_mpdu_desc_info mpdu_desc_info;
1774 	struct hal_rx_msdu_desc_info msdu_desc_info;
1775 	enum hal_reo_error_status error;
1776 	uint32_t peer_mdata;
1777 	uint8_t *rx_tlv_hdr;
1778 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT];
1779 	uint8_t mac_id = 0;
1780 	struct dp_pdev *rx_pdev;
1781 	struct dp_srng *dp_rxdma_srng;
1782 	struct rx_desc_pool *rx_desc_pool;
1783 	struct dp_soc *soc = int_ctx->soc;
1784 	uint8_t ring_id = 0;
1785 	uint8_t core_id = 0;
1786 	struct cdp_tid_rx_stats *tid_stats;
1787 	qdf_nbuf_t nbuf_head;
1788 	qdf_nbuf_t nbuf_tail;
1789 	qdf_nbuf_t deliver_list_head;
1790 	qdf_nbuf_t deliver_list_tail;
1791 	uint32_t num_rx_bufs_reaped = 0;
1792 	uint32_t intr_id;
1793 	struct hif_opaque_softc *scn;
1794 	int32_t tid = 0;
1795 	bool is_prev_msdu_last = true;
1796 	uint32_t num_entries_avail = 0;
1797 	uint32_t rx_ol_pkt_cnt = 0;
1798 	uint32_t num_entries = 0;
1799 	struct hal_rx_msdu_metadata msdu_metadata;
1800 
1801 	DP_HIST_INIT();
1802 
1803 	qdf_assert_always(soc && hal_ring_hdl);
1804 	hal_soc = soc->hal_soc;
1805 	qdf_assert_always(hal_soc);
1806 
1807 	scn = soc->hif_handle;
1808 	hif_pm_runtime_mark_dp_rx_busy(scn);
1809 	intr_id = int_ctx->dp_intr_id;
1810 	num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);
1811 
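	/*
	 * Reap entries from the REO ring in batches. Execution jumps back
	 * to more_data from the end of this function while quota remains
	 * and the ring still has pending entries.
	 */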
1812 more_data:
1813 	/* reset local variables here to be re-used in the function */
1814 	nbuf_head = NULL;
1815 	nbuf_tail = NULL;
1816 	deliver_list_head = NULL;
1817 	deliver_list_tail = NULL;
1818 	peer = NULL;
1819 	vdev = NULL;
1820 	num_rx_bufs_reaped = 0;
1821 
1822 	qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
1823 	qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
1824 	qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
1825 	qdf_mem_zero(head, sizeof(head));
1826 	qdf_mem_zero(tail, sizeof(tail));
1827 
1828 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
1829 
1830 		/*
1831 		 * Need API to convert from hal_ring pointer to
1832 		 * Ring Type / Ring Id combo
1833 		 */
1834 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
1835 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1836 			FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
1837 		goto done;
1838 	}
1839 
1840 	/*
1841 	 * Start reaping buffers from the REO ring and queue them
1842 	 * into the global nbuf list.
1843 	 * The received pkts are then processed in a separate per-vdev loop.
1844 	 */
1845 	while (qdf_likely(quota &&
1846 			  (ring_desc = hal_srng_dst_peek(hal_soc,
1847 							 hal_ring_hdl)))) {
1848 
1849 		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
1850 		ring_id = hal_srng_ring_id_get(hal_ring_hdl);
1851 
1852 		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
1853 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1854 			FL("HAL RING 0x%pK:error %d"), hal_ring_hdl, error);
1855 			DP_STATS_INC(soc, rx.err.hal_reo_error[ring_id], 1);
1856 			/* Don't know how to deal with this -- assert */
1857 			qdf_assert(0);
1858 		}
1859 
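		/* Convert the SW cookie in the ring descriptor back to the
		 * host-resident rx_desc for this buffer.
		 */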
1860 		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
1861 
1862 		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
1863 		qdf_assert(rx_desc);
1864 
1865 		/*
1866 		 * This is an unlikely scenario where the host is reaping
1867 		 * a descriptor that it has already reaped a short while ago
1868 		 * but has not yet replenished back to HW.
1869 		 * In this case the host dumps the last 128 descriptors,
1870 		 * including the software descriptor rx_desc, and asserts.
1871 		 */
1872 
1873 		if (qdf_unlikely(!rx_desc->in_use)) {
1874 			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
1875 			dp_info_rl("Reaping rx_desc not in use!");
1876 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
1877 						   ring_desc, rx_desc);
1878 			/* ignore duplicate RX desc and continue to process */
1879 			/* Pop out the descriptor */
1880 			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
1881 			continue;
1882 		}
1883 
1884 		if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
1885 			dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
1886 			DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
1887 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
1888 						   ring_desc, rx_desc);
1889 		}
1890 
1891 		dp_rx_desc_nbuf_sanity_check(ring_desc, rx_desc);
1892 
1893 		/* TODO */
1894 		/*
1895 		 * Need a separate API for unmapping based on
1896 		 * physical address
1897 		 */
1898 		qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
1899 					QDF_DMA_FROM_DEVICE);
1900 		rx_desc->unmapped = 1;
1901 
1902 		core_id = smp_processor_id();
1903 		DP_STATS_INC(soc, rx.ring_packets[core_id][ring_id], 1);
1904 
1905 		/* Get MPDU DESC info */
1906 		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);
1907 
1908 		/* Get MSDU DESC info */
1909 		hal_rx_msdu_desc_info_get(ring_desc, &msdu_desc_info);
1910 
1911 		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
1912 			qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1);
1913 
1914 		if (qdf_unlikely(msdu_desc_info.msdu_flags &
1915 				 HAL_MSDU_F_MSDU_CONTINUATION)) {
1916 			/* previous MSDU had the end bit set, so the current
1917 			 * buffer starts a new MPDU
1918 			 */
1919 			if (is_prev_msdu_last) {
1920 				/* Get number of entries available in HW ring */
1921 				num_entries_avail =
1922 				hal_srng_dst_num_valid(hal_soc,
1923 						       hal_ring_hdl, 1);
1924 
1925 				/* For new MPDU check if we can read complete
1926 				 * MPDU by comparing the number of buffers
1927 				 * available and number of buffers needed to
1928 				 * reap this MPDU
1929 				 */
1930 				if (((msdu_desc_info.msdu_len /
1931 				     (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN) +
1932 				     1)) > num_entries_avail) {
1933 					DP_STATS_INC(
1934 						soc,
1935 						rx.msdu_scatter_wait_break,
1936 						1);
1937 					break;
1938 				}
1939 				is_prev_msdu_last = false;
1940 			}
1941 
1942 		}
1943 
1944 		if (qdf_unlikely(mpdu_desc_info.mpdu_flags &
1945 				 HAL_MPDU_F_RAW_AMPDU))
1946 			qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);
1947 
1948 		if (!is_prev_msdu_last &&
1949 		    msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
1950 			is_prev_msdu_last = true;
1951 
1952 		/* Pop out the descriptor*/
1953 		hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
1954 
1955 		rx_bufs_reaped[rx_desc->pool_id]++;
1956 		peer_mdata = mpdu_desc_info.peer_meta_data;
1957 		QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
1958 			DP_PEER_METADATA_PEER_ID_GET(peer_mdata);
1959 		QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) =
1960 			DP_PEER_METADATA_VDEV_ID_GET(peer_mdata);
1961 
1962 		/*
1963 		 * Save the MSDU flags (first, last and continuation) in
1964 		 * nbuf->cb, along with mcbc, is_da_valid, is_sa_valid and
1965 		 * the MSDU length. This keeps the info required for
1966 		 * per-packet processing in the same cache line, which
1967 		 * helps improve throughput for smaller packet
1968 		 * sizes.
1969 		 */
1970 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
1971 			qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);
1972 
1973 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
1974 			qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);
1975 
1976 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
1977 			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);
1978 
1979 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
1980 			qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1);
1981 
1982 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
1983 			qdf_nbuf_set_da_valid(rx_desc->nbuf, 1);
1984 
1985 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
1986 			qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1);
1987 
1988 		qdf_nbuf_set_tid_val(rx_desc->nbuf,
1989 				     HAL_RX_REO_QUEUE_NUMBER_GET(ring_desc));
1990 
1991 		QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len;
1992 
1993 		QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;
1994 
1995 		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
1996 
1997 		/*
1998 		 * If the continuation bit is set, the MSDU is spread
1999 		 * across multiple buffers; do not decrement the quota
2000 		 * until all buffers of that MSDU have been reaped.
2001 		 */
2002 		if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)))
2003 			quota -= 1;
2004 
2005 		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
2006 						&tail[rx_desc->pool_id],
2007 						rx_desc);
2008 
2009 		num_rx_bufs_reaped++;
2010 		if (dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped))
2011 			break;
2012 	}
2013 done:
2014 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
2015 
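	/* Replenish the buffers reaped above back to the RXDMA refill
	 * rings, one refill ring / descriptor pool per mac_id.
	 */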
2016 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
2017 		/*
2018 		 * continue with next mac_id if no pkts were reaped
2019 		 * from that pool
2020 		 */
2021 		if (!rx_bufs_reaped[mac_id])
2022 			continue;
2023 
2024 		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
2025 
2026 		rx_desc_pool = &soc->rx_desc_buf[mac_id];
2027 
2028 		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
2029 					rx_desc_pool, rx_bufs_reaped[mac_id],
2030 					&head[mac_id], &tail[mac_id]);
2031 	}
2032 
2033 	dp_verbose_debug("replenished %u\n", rx_bufs_reaped[0]);
2034 	/* Peer can be NULL in case of LFR */
2035 	if (qdf_likely(peer))
2036 		vdev = NULL;
2037 
2038 	/*
2039 	 * BIG loop where each nbuf is dequeued from the global queue,
2040 	 * processed and queued back on a per-vdev basis. These nbufs
2041 	 * are delivered to the stack when we run out of nbufs
2042 	 * or when a newly dequeued nbuf belongs to a different
2043 	 * vdev than the previous one.
2044 	 */
2045 	nbuf = nbuf_head;
2046 	while (nbuf) {
2047 		next = nbuf->next;
2048 		rx_tlv_hdr = qdf_nbuf_data(nbuf);
2049 		vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
2050 
2051 		if (deliver_list_head && vdev && (vdev->vdev_id != vdev_id)) {
2052 			dp_rx_deliver_to_stack(soc, vdev, peer,
2053 					       deliver_list_head,
2054 					       deliver_list_tail);
2055 			deliver_list_head = NULL;
2056 			deliver_list_tail = NULL;
2057 		}
2058 
2059 		/* Get TID from struct cb->tid_val, save to tid */
2060 		if (qdf_nbuf_is_rx_chfrag_start(nbuf))
2061 			tid = qdf_nbuf_get_tid_val(nbuf);
2062 
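		/* Look up the peer using the peer_id stashed in nbuf->cb
		 * during reaping; the reference taken here is released via
		 * dp_peer_unref_del_find_by_id() on every path that is done
		 * with this nbuf.
		 */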
2063 		peer_id =  QDF_NBUF_CB_RX_PEER_ID(nbuf);
2064 		peer = dp_peer_find_by_id(soc, peer_id);
2065 
2066 		if (peer) {
2067 			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
2068 			qdf_dp_trace_set_track(nbuf, QDF_RX);
2069 			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
2070 			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
2071 				QDF_NBUF_RX_PKT_DATA_TRACK;
2072 		}
2073 
2074 		rx_bufs_used++;
2075 
2076 		if (qdf_likely(peer)) {
2077 			vdev = peer->vdev;
2078 		} else {
2079 			nbuf->next = NULL;
2080 			dp_rx_deliver_to_stack_no_peer(soc, nbuf);
2081 			nbuf = next;
2082 			continue;
2083 		}
2084 
2085 		if (qdf_unlikely(!vdev)) {
2086 			qdf_nbuf_free(nbuf);
2087 			nbuf = next;
2088 			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
2089 			dp_peer_unref_del_find_by_id(peer);
2090 			continue;
2091 		}
2092 
2093 		rx_pdev = vdev->pdev;
2094 		DP_RX_TID_SAVE(nbuf, tid);
2095 		if (qdf_unlikely(rx_pdev->delay_stats_flag))
2096 			qdf_nbuf_set_timestamp(nbuf);
2097 
2098 		ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
2099 		tid_stats =
2100 			&rx_pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
2101 
2102 		/*
2103 		 * Check if DMA completed -- msdu_done is the last bit
2104 		 * to be written
2105 		 */
2106 		if (qdf_unlikely(!qdf_nbuf_is_rx_chfrag_cont(nbuf) &&
2107 				 !hal_rx_attn_msdu_done_get(rx_tlv_hdr))) {
2108 			dp_err("MSDU DONE failure");
2109 			DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
2110 			hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
2111 					     QDF_TRACE_LEVEL_INFO);
2112 			tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
2113 			qdf_nbuf_free(nbuf);
2114 			qdf_assert(0);
2115 			nbuf = next;
2116 			continue;
2117 		}
2118 
2119 		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
2120 		/*
2121 		 * First IF condition:
2122 		 * 802.11 Fragmented pkts are reinjected to REO
2123 		 * HW block as SG pkts and for these pkts we only
2124 		 * need to pull the RX TLVS header length.
2125 		 * Second IF condition:
2126 		 * The below condition happens when an MSDU is spread
2127 		 * across multiple buffers. This can happen in two cases
2128 		 * 1. The nbuf size is smaller than the received msdu.
2129 		 *    ex: we set the nbuf size to 2048 during
2130 		 *        nbuf_alloc, but we received an msdu of
2131 		 *        2304 bytes, so the msdu is spread
2132 		 *        across 2 nbufs.
2133 		 *
2134 		 * 2. AMSDUs when RAW mode is enabled.
2135 		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
2136 		 *        across 1st nbuf and 2nd nbuf and last MSDU is
2137 		 *        spread across 2nd nbuf and 3rd nbuf.
2138 		 *
2139 		 * For these scenarios, create an skb frag_list and
2140 		 * append the buffers till the last MSDU of the AMSDU.
2141 		 * Third condition:
2142 		 * This is the most likely case: we receive 802.3 pkts
2143 		 * decapsulated by HW, and here we only need to set the pkt length.
2144 		 */
2145 		hal_rx_msdu_metadata_get(hal_soc, rx_tlv_hdr, &msdu_metadata);
2146 		if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
2147 			bool is_mcbc, is_sa_vld, is_da_vld;
2148 
2149 			is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
2150 								 rx_tlv_hdr);
2151 			is_sa_vld =
2152 				hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
2153 								rx_tlv_hdr);
2154 			is_da_vld =
2155 				hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
2156 								rx_tlv_hdr);
2157 
2158 			qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
2159 			qdf_nbuf_set_da_valid(nbuf, is_da_vld);
2160 			qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);
2161 
2162 			qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
2163 		} else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
2164 			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
2165 			nbuf = dp_rx_sg_create(nbuf);
2166 			next = nbuf->next;
2167 
2168 			if (qdf_nbuf_is_raw_frame(nbuf)) {
2169 				DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
2170 				DP_STATS_INC_PKT(peer, rx.raw, 1, msdu_len);
2171 			} else {
2172 				qdf_nbuf_free(nbuf);
2173 				DP_STATS_INC(soc, rx.err.scatter_msdu, 1);
2174 				dp_info_rl("scatter msdu len %d, dropped",
2175 					   msdu_len);
2176 				nbuf = next;
2177 				dp_peer_unref_del_find_by_id(peer);
2178 				continue;
2179 			}
2180 		} else {
2181 
2182 			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
2183 			pkt_len = msdu_len +
2184 				  msdu_metadata.l3_hdr_pad +
2185 				  RX_PKT_TLVS_LEN;
2186 
2187 			qdf_nbuf_set_pktlen(nbuf, pkt_len);
2188 			qdf_nbuf_pull_head(nbuf,
2189 					   RX_PKT_TLVS_LEN +
2190 					   msdu_metadata.l3_hdr_pad);
2191 		}
2192 
2193 		/*
2194 		 * Process the frame for multipass processing
2195 		 */
2196 		if (qdf_unlikely(vdev->multipass_en)) {
2197 			if (dp_rx_multipass_process(peer, nbuf, tid) == false) {
2198 				DP_STATS_INC(peer, rx.multipass_rx_pkt_drop, 1);
2199 				qdf_nbuf_free(nbuf);
2200 				nbuf = next;
2201 				dp_peer_unref_del_find_by_id(peer);
2202 				continue;
2203 			}
2204 		}
2205 
2206 		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
2207 			QDF_TRACE(QDF_MODULE_ID_DP,
2208 					QDF_TRACE_LEVEL_ERROR,
2209 					FL("Policy Check Drop pkt"));
2210 			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
2211 			/* Drop & free packet */
2212 			qdf_nbuf_free(nbuf);
2213 			/* Statistics */
2214 			nbuf = next;
2215 			dp_peer_unref_del_find_by_id(peer);
2216 			continue;
2217 		}
2218 
2219 		if (qdf_unlikely(peer && (peer->nawds_enabled) &&
2220 				 (qdf_nbuf_is_da_mcbc(nbuf)) &&
2221 				 (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc,
2222 								rx_tlv_hdr) ==
2223 				  false))) {
2224 			tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
2225 			DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
2226 			qdf_nbuf_free(nbuf);
2227 			nbuf = next;
2228 			dp_peer_unref_del_find_by_id(peer);
2229 			continue;
2230 		}
2231 
2232 		if (soc->process_rx_status)
2233 			dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);
2234 
2235 		/* Update the protocol tag in SKB based on CCE metadata */
2236 		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
2237 					  reo_ring_num, false, true);
2238 
2239 		/* Update the flow tag in SKB based on FSE metadata */
2240 		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
2241 
2242 		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer,
2243 					ring_id, tid_stats);
2244 
2245 		if (qdf_unlikely(vdev->mesh_vdev)) {
2246 			if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
2247 					== QDF_STATUS_SUCCESS) {
2248 				QDF_TRACE(QDF_MODULE_ID_DP,
2249 						QDF_TRACE_LEVEL_INFO_MED,
2250 						FL("mesh pkt filtered"));
2251 				tid_stats->fail_cnt[MESH_FILTER_DROP]++;
2252 				DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
2253 					     1);
2254 
2255 				qdf_nbuf_free(nbuf);
2256 				nbuf = next;
2257 				dp_peer_unref_del_find_by_id(peer);
2258 				continue;
2259 			}
2260 			dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
2261 		}
2262 
2263 		if (qdf_likely(vdev->rx_decap_type ==
2264 			       htt_cmn_pkt_type_ethernet) &&
2265 		    qdf_likely(!vdev->mesh_vdev)) {
2266 			/* WDS Destination Address Learning */
2267 			dp_rx_da_learn(soc, rx_tlv_hdr, peer, nbuf);
2268 
2269 			/* Due to a HW issue, the sa_idx and da_idx are
2270 			 * sometimes invalid even though the sa_valid and
2271 			 * da_valid bits are set.
2272 			 *
2273 			 * In this case the value of sa_sw_peer_id is also
2274 			 * reported as 0.
2275 			 *
2276 			 * Drop the packet if sa_idx or da_idx is out of
2277 			 * bounds, or if sa_sw_peer_id is 0.
2278 			 */
2279 			if (!is_sa_da_idx_valid(soc, rx_tlv_hdr, nbuf,
2280 						msdu_metadata)) {
2281 				qdf_nbuf_free(nbuf);
2282 				nbuf = next;
2283 				DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
2284 				dp_peer_unref_del_find_by_id(peer);
2285 				continue;
2286 			}
2287 			/* WDS Source Port Learning */
2288 			if (qdf_likely(vdev->wds_enabled))
2289 				dp_rx_wds_srcport_learn(soc,
2290 							rx_tlv_hdr,
2291 							peer,
2292 							nbuf,
2293 							msdu_metadata);
2294 
2295 			/* Intrabss-fwd */
2296 			if (dp_rx_check_ap_bridge(vdev))
2297 				if (dp_rx_intrabss_fwd(soc,
2298 							peer,
2299 							rx_tlv_hdr,
2300 							nbuf,
2301 							msdu_metadata)) {
2302 					nbuf = next;
2303 					dp_peer_unref_del_find_by_id(peer);
2304 					tid_stats->intrabss_cnt++;
2305 					continue; /* Get next desc */
2306 				}
2307 		}
2308 
2309 		dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt);
2310 
2311 		DP_RX_LIST_APPEND(deliver_list_head,
2312 				  deliver_list_tail,
2313 				  nbuf);
2314 		DP_STATS_INC_PKT(peer, rx.to_stack, 1,
2315 				 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
2316 
2317 		tid_stats->delivered_to_stack++;
2318 		nbuf = next;
2319 		dp_peer_unref_del_find_by_id(peer);
2320 	}
2321 
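	/* Flush whatever is left on the deliver list. Without a valid peer
	 * the nbufs take the no-peer path, which either delivers selected
	 * special frames or drops them.
	 */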
2322 	if (qdf_likely(deliver_list_head)) {
2323 		if (qdf_likely(peer))
2324 			dp_rx_deliver_to_stack(soc, vdev, peer,
2325 					       deliver_list_head,
2326 					       deliver_list_tail);
2327 		else {
2328 			nbuf = deliver_list_head;
2329 			while (nbuf) {
2330 				next = nbuf->next;
2331 				nbuf->next = NULL;
2332 				dp_rx_deliver_to_stack_no_peer(soc, nbuf);
2333 				nbuf = next;
2334 			}
2335 		}
2336 	}
2337 
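	/*
	 * If quota is still available, check for entries pending on the
	 * ring and re-enter the reap loop unless the HIF layer asks us to
	 * yield; a near-full ring always forces another pass.
	 */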
2338 	if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
2339 		if (quota) {
2340 			num_pending =
2341 				dp_rx_srng_get_num_pending(hal_soc,
2342 							   hal_ring_hdl,
2343 							   num_entries,
2344 							   &near_full);
2345 			if (num_pending) {
2346 				DP_STATS_INC(soc, rx.hp_oos2, 1);
2347 
2348 				if (!hif_exec_should_yield(scn, intr_id))
2349 					goto more_data;
2350 
2351 				if (qdf_unlikely(near_full)) {
2352 					DP_STATS_INC(soc, rx.near_full, 1);
2353 					goto more_data;
2354 				}
2355 			}
2356 		}
2357 
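		/* Flush packets held for GRO aggregation on this REO ring
		 * before leaving the Rx processing context.
		 */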
2358 		if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
2359 			vdev->osif_gro_flush(vdev->osif_vdev,
2360 					     reo_ring_num);
2361 		}
2362 	}
2363 
2364 	/* Update histogram statistics by looping through pdev's */
2365 	DP_RX_HIST_STATS_PER_PDEV();
2366 
2367 	return rx_bufs_used; /* Assume no scale factor for now */
2368 }
2369 
2370 QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev)
2371 {
2372 	QDF_STATUS ret;
2373 
2374 	if (vdev->osif_rx_flush) {
2375 		ret = vdev->osif_rx_flush(vdev->osif_vdev, vdev->vdev_id);
2376 		if (QDF_IS_STATUS_ERROR(ret)) {
2377 			dp_err("Failed to flush rx pkts for vdev %d\n",
2378 			       vdev->vdev_id);
2379 			return ret;
2380 		}
2381 	}
2382 
2383 	return QDF_STATUS_SUCCESS;
2384 }
2385 
2386 /**
2387  * dp_rx_pdev_detach() - detach dp rx
2388  * @pdev: core txrx pdev context
2389  *
2390  * This function detaches DP RX from the main device context
2391  * and frees the DP Rx resources.
2392  *
2393  * Return: void
2394  */
2395 void
2396 dp_rx_pdev_detach(struct dp_pdev *pdev)
2397 {
2398 	uint8_t mac_for_pdev = pdev->lmac_id;
2399 	struct dp_soc *soc = pdev->soc;
2400 	struct rx_desc_pool *rx_desc_pool;
2401 
2402 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
2403 
2404 	if (rx_desc_pool->pool_size != 0) {
2405 		if (!dp_is_soc_reinit(soc))
2406 			dp_rx_desc_nbuf_and_pool_free(soc, mac_for_pdev,
2407 						      rx_desc_pool);
2408 		else
2409 			dp_rx_desc_nbuf_free(soc, rx_desc_pool);
2410 	}
2411 
2412 	return;
2413 }
2414 
2415 static QDF_STATUS
2416 dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc, qdf_nbuf_t *nbuf,
2417 			   struct dp_pdev *dp_pdev,
2418 			   struct rx_desc_pool *rx_desc_pool)
2419 {
2420 	qdf_dma_addr_t paddr;
2421 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
2422 
2423 	*nbuf = qdf_nbuf_alloc(dp_soc->osdev, rx_desc_pool->buf_size,
2424 			       RX_BUFFER_RESERVATION,
2425 			       rx_desc_pool->buf_alignment, FALSE);
2426 	if (!(*nbuf)) {
2427 		dp_err("nbuf alloc failed");
2428 		DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
2429 		return ret;
2430 	}
2431 
2432 	ret = qdf_nbuf_map_single(dp_soc->osdev, *nbuf,
2433 				  QDF_DMA_FROM_DEVICE);
2434 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
2435 		qdf_nbuf_free(*nbuf);
2436 		dp_err("nbuf map failed");
2437 		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
2438 		return ret;
2439 	}
2440 
2441 	paddr = qdf_nbuf_get_frag_paddr(*nbuf, 0);
2442 
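	/* Verify the DMA address is usable by the target (check_x86_paddr()
	 * handles platforms with addressing limitations); on failure unmap
	 * and release the buffer.
	 */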
2443 	ret = check_x86_paddr(dp_soc, nbuf, &paddr, rx_desc_pool);
2444 	if (ret == QDF_STATUS_E_FAILURE) {
2445 		qdf_nbuf_unmap_single(dp_soc->osdev, *nbuf,
2446 				      QDF_DMA_FROM_DEVICE);
2447 		qdf_nbuf_free(*nbuf);
2448 		dp_err("nbuf check x86 failed");
2449 		DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
2450 		return ret;
2451 	}
2452 
2453 	return QDF_STATUS_SUCCESS;
2454 }
2455 
2456 QDF_STATUS
2457 dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
2458 			  struct dp_srng *dp_rxdma_srng,
2459 			  struct rx_desc_pool *rx_desc_pool,
2460 			  uint32_t num_req_buffers)
2461 {
2462 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
2463 	hal_ring_handle_t rxdma_srng = dp_rxdma_srng->hal_srng;
2464 	union dp_rx_desc_list_elem_t *next;
2465 	void *rxdma_ring_entry;
2466 	qdf_dma_addr_t paddr;
2467 	qdf_nbuf_t *rx_nbuf_arr;
2468 	uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0;
2469 	uint32_t buffer_index, nbuf_ptrs_per_page;
2470 	qdf_nbuf_t nbuf;
2471 	QDF_STATUS ret;
2472 	int page_idx, total_pages;
2473 	union dp_rx_desc_list_elem_t *desc_list = NULL;
2474 	union dp_rx_desc_list_elem_t *tail = NULL;
2475 
2476 	if (qdf_unlikely(!rxdma_srng)) {
2477 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
2478 		return QDF_STATUS_E_FAILURE;
2479 	}
2480 
2481 	dp_debug("requested %u RX buffers for driver attach", num_req_buffers);
2482 
2483 	nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool,
2484 					    num_req_buffers, &desc_list, &tail);
2485 	if (!nr_descs) {
2486 		dp_err("no free rx_descs in freelist");
2487 		DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
2488 		return QDF_STATUS_E_NOMEM;
2489 	}
2490 
2491 	dp_debug("got %u RX descs for driver attach", nr_descs);
2492 
2493 	/*
2494 	 * Allocate the nbuf pointer array one page at a time.
2495 	 * Only as many pointers as fit in one page of memory are
2496 	 * kept around; the total set of descriptors is walked
2497 	 * page by page, and the same page-sized pointer array is
2498 	 * reused on every iteration to index the nbufs allocated
2499 	 * for that page.
2500 	 */
2501 	total_pages = (nr_descs * sizeof(*rx_nbuf_arr)) / PAGE_SIZE;
2502 
2503 	/*
2504 	 * Add an extra page to store the remainder if any
2505 	 */
2506 	if ((nr_descs * sizeof(*rx_nbuf_arr)) % PAGE_SIZE)
2507 		total_pages++;
2508 	rx_nbuf_arr = qdf_mem_malloc(PAGE_SIZE);
2509 	if (!rx_nbuf_arr) {
2510 		dp_err("failed to allocate nbuf array");
2511 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
2512 		QDF_BUG(0);
2513 		return QDF_STATUS_E_NOMEM;
2514 	}
2515 	nbuf_ptrs_per_page = PAGE_SIZE / sizeof(*rx_nbuf_arr);
2516 
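	/* For each page-sized batch: allocate and DMA-map the nbufs first,
	 * then program them into the RXDMA refill ring.
	 */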
2517 	for (page_idx = 0; page_idx < total_pages; page_idx++) {
2518 		qdf_mem_zero(rx_nbuf_arr, PAGE_SIZE);
2519 
2520 		for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) {
2521 			/*
2522 			 * The last page of buffer pointers may not be needed
2523 			 * in full, depending on the number of descriptors.
2524 			 * The check below ensures that only the required
2525 			 * number of buffers is allocated.
2526 			 */
2527 			if (nr_nbuf_total >= nr_descs)
2528 				break;
2529 			ret = dp_pdev_nbuf_alloc_and_map(dp_soc,
2530 							 &rx_nbuf_arr[nr_nbuf],
2531 							 dp_pdev, rx_desc_pool);
2532 			if (QDF_IS_STATUS_ERROR(ret))
2533 				break;
2534 
2535 			nr_nbuf_total++;
2536 		}
2537 
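		/* Hand the freshly mapped buffers to HW by writing their
		 * address/cookie pairs into the refill ring within a single
		 * SRNG access window.
		 */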
2538 		hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
2539 
2540 		for (buffer_index = 0; buffer_index < nr_nbuf; buffer_index++) {
2541 			rxdma_ring_entry =
2542 				hal_srng_src_get_next(dp_soc->hal_soc,
2543 						      rxdma_srng);
2544 			qdf_assert_always(rxdma_ring_entry);
2545 
2546 			next = desc_list->next;
2547 			nbuf = rx_nbuf_arr[buffer_index];
2548 			paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
2549 
2550 			dp_rx_desc_prep(&desc_list->rx_desc, nbuf);
2551 			desc_list->rx_desc.in_use = 1;
2552 
2553 			hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
2554 						     desc_list->rx_desc.cookie,
2555 						     rx_desc_pool->owner);
2556 
2557 			dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, nbuf, true);
2558 
2559 			desc_list = next;
2560 		}
2561 
2562 		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
2563 	}
2564 
2565 	dp_info("filled %u RX buffers for driver attach", nr_nbuf_total);
2566 	qdf_mem_free(rx_nbuf_arr);
2567 
2568 	if (!nr_nbuf_total) {
2569 		dp_err("No nbuf's allocated");
2570 		QDF_BUG(0);
2571 		return QDF_STATUS_E_RESOURCES;
2572 	}
2573 
2574 	/* No need to count the number of bytes received during replenish.
2575 	 * Therefore set replenish.pkts.bytes as 0.
2576 	 */
2577 	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf, 0);
2578 
2579 	return QDF_STATUS_SUCCESS;
2580 }
2581 
2582 /**
2583  * dp_rx_pdev_attach() - attach DP RX
2584  * @pdev: core txrx pdev context
2585  *
2586  * This function will attach a DP RX instance into the main
2587  * device (SOC) context. It allocates and initializes the
2588  * DP Rx resources.
2589  *
2590  * Return: QDF_STATUS_SUCCESS: success
2591  *         QDF_STATUS_E_RESOURCES: Error return
2592  */
2593 QDF_STATUS
2594 dp_rx_pdev_attach(struct dp_pdev *pdev)
2595 {
2596 	uint8_t pdev_id = pdev->pdev_id;
2597 	struct dp_soc *soc = pdev->soc;
2598 	uint32_t rxdma_entries;
2599 	uint32_t rx_sw_desc_weight;
2600 	struct dp_srng *dp_rxdma_srng;
2601 	struct rx_desc_pool *rx_desc_pool;
2602 	QDF_STATUS ret_val;
2603 	int mac_for_pdev;
2604 
2605 	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
2606 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2607 			  "nss-wifi<4> skip Rx refill %d", pdev_id);
2608 		return QDF_STATUS_SUCCESS;
2609 	}
2610 
2611 	pdev = soc->pdev_list[pdev_id];
2612 	mac_for_pdev = pdev->lmac_id;
2613 	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
2614 
2615 	rxdma_entries = dp_rxdma_srng->num_entries;
2616 
2617 	soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;
2618 
2619 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
2620 	rx_sw_desc_weight = wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc->wlan_cfg_ctx);
2621 
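	/* The SW Rx descriptor pool is sized as rx_sw_desc_weight times the
	 * refill ring depth, so descriptors remain available while buffers
	 * are in flight.
	 */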
2622 	dp_rx_desc_pool_alloc(soc, mac_for_pdev,
2623 			      rx_sw_desc_weight * rxdma_entries,
2624 			      rx_desc_pool);
2625 
2626 	rx_desc_pool->owner = DP_WBM2SW_RBM;
2627 	rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE;
2628 	rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;
2629 
2630 	/* For Rx buffers, WBM release ring is SW RING 3,for all pdev's */
2631 
2632 	ret_val = dp_rx_fst_attach(soc, pdev);
2633 	if ((ret_val != QDF_STATUS_SUCCESS) &&
2634 	    (ret_val != QDF_STATUS_E_NOSUPPORT)) {
2635 		QDF_TRACE(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_ERROR,
2636 			  "RX Flow Search Table attach failed: pdev %d err %d",
2637 			  pdev_id, ret_val);
2638 		return ret_val;
2639 	}
2640 
2641 	return dp_pdev_rx_buffers_attach(soc, mac_for_pdev, dp_rxdma_srng,
2642 					 rx_desc_pool, rxdma_entries - 1);
2643 }
2644 
2645 /*
2646  * dp_rx_nbuf_prepare() - prepare RX nbuf
2647  * @soc: core txrx main context
2648  * @pdev: core txrx pdev context
2649  *
2650  * This function allocates and maps an nbuf for RX DMA usage, retrying on
2651  * failure until it succeeds or the retry count reaches the max threshold.
2652  *
2653  * Return: qdf_nbuf_t pointer if succeeded, NULL if failed.
2654  */
2655 qdf_nbuf_t
2656 dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev)
2657 {
2658 	uint8_t *buf;
2659 	int32_t nbuf_retry_count;
2660 	QDF_STATUS ret;
2661 	qdf_nbuf_t nbuf = NULL;
2662 
2663 	for (nbuf_retry_count = 0; nbuf_retry_count <
2664 		QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD;
2665 			nbuf_retry_count++) {
2666 		/* Allocate a new skb */
2667 		nbuf = qdf_nbuf_alloc(soc->osdev,
2668 					RX_DATA_BUFFER_SIZE,
2669 					RX_BUFFER_RESERVATION,
2670 					RX_DATA_BUFFER_ALIGNMENT,
2671 					FALSE);
2672 
2673 		if (!nbuf) {
2674 			DP_STATS_INC(pdev,
2675 				replenish.nbuf_alloc_fail, 1);
2676 			continue;
2677 		}
2678 
2679 		buf = qdf_nbuf_data(nbuf);
2680 
2681 		memset(buf, 0, RX_DATA_BUFFER_SIZE);
2682 
2683 		ret = qdf_nbuf_map_single(soc->osdev, nbuf,
2684 				    QDF_DMA_FROM_DEVICE);
2685 
2686 		/* nbuf map failed */
2687 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
2688 			qdf_nbuf_free(nbuf);
2689 			DP_STATS_INC(pdev, replenish.map_err, 1);
2690 			continue;
2691 		}
2692 		/* qdf_nbuf alloc and map succeeded */
2693 		break;
2694 	}
2695 
2696 	/* nbuf allocation or mapping still failed after all retries */
2697 	if (qdf_unlikely(nbuf_retry_count >=
2698 			QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD))
2699 		return NULL;
2700 
2701 	return nbuf;
2702 }
2703