xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.c (revision 11f5a63a6cbdda84849a730de22f0a71e635d58c)
1 /*
2  * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "hal_hw_headers.h"
20 #include "dp_types.h"
21 #include "dp_rx.h"
22 #include "dp_peer.h"
23 #include "hal_rx.h"
24 #include "hal_api.h"
25 #include "qdf_nbuf.h"
26 #ifdef MESH_MODE_SUPPORT
27 #include "if_meta_hdr.h"
28 #endif
29 #include "dp_internal.h"
30 #include "dp_rx_mon.h"
31 #include "dp_ipa.h"
32 #ifdef FEATURE_WDS
33 #include "dp_txrx_wds.h"
34 #endif
35 
36 #ifdef ATH_RX_PRI_SAVE
37 #define DP_RX_TID_SAVE(_nbuf, _tid) \
38 	(qdf_nbuf_set_priority(_nbuf, _tid))
39 #else
40 #define DP_RX_TID_SAVE(_nbuf, _tid)
41 #endif
42 
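/**
 * dp_rx_check_ndi_mdns_fwding() - check if an mDNS packet received on an
 *				   NDI vdev should be intra-BSS forwarded
 * @ta_peer: transmitter (source) peer
 * @nbuf: received frame
 *
 * When DP_RX_DISABLE_NDI_MDNS_FORWARDING is defined, IPv6 mDNS packets
 * received on an NDI vdev are not forwarded and the mdns_no_fwd counter
 * is incremented; otherwise forwarding is always allowed.
 *
 * Return: false if the frame must not be forwarded, true otherwise
 */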
43 #ifdef DP_RX_DISABLE_NDI_MDNS_FORWARDING
44 static inline
45 bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
46 {
47 	if (ta_peer->vdev->opmode == wlan_op_mode_ndi &&
48 	    qdf_nbuf_is_ipv6_mdns_pkt(nbuf)) {
49 		DP_STATS_INC(ta_peer, rx.intra_bss.mdns_no_fwd, 1);
50 		return false;
51 	}
52 		return true;
53 }
54 #else
55 static inline
56 bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
57 {
58 	return true;
59 }
60 #endif
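/**
 * dp_rx_check_ap_bridge() - check if intra-BSS (AP bridge) forwarding is
 *			     enabled on the vdev
 * @vdev: DP vdev handle
 *
 * Return: true if ap_bridge_enabled is set for the vdev
 */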
61 static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
62 {
63 	return vdev->ap_bridge_enabled;
64 }
65 
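/**
 * dp_rx_dump_info_and_assert() - dump the ring descriptor and SW rx
 *				  descriptor for debug
 * @soc: DP SOC handle
 * @hal_ring_hdl: HAL RX ring handle on which the descriptor was reaped
 * @ring_desc: HAL ring descriptor
 * @rx_desc: SW rx descriptor
 *
 * When DUP_RX_DESC_WAR is defined only the descriptors are dumped;
 * otherwise the ring contents are also dumped and the host asserts.
 *
 * Return: void
 */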
66 #ifdef DUP_RX_DESC_WAR
67 void dp_rx_dump_info_and_assert(struct dp_soc *soc,
68 				hal_ring_handle_t hal_ring,
69 				hal_ring_desc_t ring_desc,
70 				struct dp_rx_desc *rx_desc)
71 {
72 	void *hal_soc = soc->hal_soc;
73 
74 	hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc);
75 	dp_rx_desc_dump(rx_desc);
76 }
77 #else
78 void dp_rx_dump_info_and_assert(struct dp_soc *soc,
79 				hal_ring_handle_t hal_ring_hdl,
80 				hal_ring_desc_t ring_desc,
81 				struct dp_rx_desc *rx_desc)
82 {
83 	hal_soc_handle_t hal_soc = soc->hal_soc;
84 
85 	dp_rx_desc_dump(rx_desc);
86 	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
87 	hal_srng_dump_ring(hal_soc, hal_ring_hdl);
88 	qdf_assert_always(0);
89 }
90 #endif
91 
92 /*
93  * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
94  *			       called during dp rx initialization
95  *			       and at the end of dp_rx_process.
96  *
97  * @dp_soc: core txrx main context
98  * @mac_id: mac_id which is one of 3 mac_ids
99  * @dp_rxdma_srng: dp rxdma circular ring
100  * @rx_desc_pool: Pointer to free Rx descriptor pool
101  * @num_req_buffers: number of buffers to be replenished
102  * @desc_list: list of descs if called from dp_rx_process
103  *	       or NULL during dp rx initialization or out of buffer
104  *	       interrupt.
105  * @tail: tail of descs list
106  * Return: QDF_STATUS_SUCCESS on success, error code on failure
107  */
108 QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
109 				struct dp_srng *dp_rxdma_srng,
110 				struct rx_desc_pool *rx_desc_pool,
111 				uint32_t num_req_buffers,
112 				union dp_rx_desc_list_elem_t **desc_list,
113 				union dp_rx_desc_list_elem_t **tail)
114 {
115 	uint32_t num_alloc_desc;
116 	uint16_t num_desc_to_free = 0;
117 	struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(dp_soc, mac_id);
118 	uint32_t num_entries_avail;
119 	uint32_t count;
120 	int sync_hw_ptr = 1;
121 	qdf_dma_addr_t paddr;
122 	qdf_nbuf_t rx_netbuf;
123 	void *rxdma_ring_entry;
124 	union dp_rx_desc_list_elem_t *next;
125 	QDF_STATUS ret;
126 
127 	void *rxdma_srng;
128 
129 	rxdma_srng = dp_rxdma_srng->hal_srng;
130 
131 	if (!rxdma_srng) {
132 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
133 				  "rxdma srng not initialized");
134 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
135 		return QDF_STATUS_E_FAILURE;
136 	}
137 
138 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
139 		"requested %d buffers for replenish", num_req_buffers);
140 
141 	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
142 	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
143 						   rxdma_srng,
144 						   sync_hw_ptr);
145 
146 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
147 		"no of available entries in rxdma ring: %d",
148 		num_entries_avail);
149 
150 	if (!(*desc_list) && (num_entries_avail >
151 		((dp_rxdma_srng->num_entries * 3) / 4))) {
152 		num_req_buffers = num_entries_avail;
153 	} else if (num_entries_avail < num_req_buffers) {
154 		num_desc_to_free = num_req_buffers - num_entries_avail;
155 		num_req_buffers = num_entries_avail;
156 	}
157 
158 	if (qdf_unlikely(!num_req_buffers)) {
159 		num_desc_to_free = num_req_buffers;
160 		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
161 		goto free_descs;
162 	}
163 
164 	/*
165 	 * if desc_list is NULL, allocate the descs from freelist
166 	 */
167 	if (!(*desc_list)) {
168 		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
169 							  rx_desc_pool,
170 							  num_req_buffers,
171 							  desc_list,
172 							  tail);
173 
174 		if (!num_alloc_desc) {
175 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
176 				"no free rx_descs in freelist");
177 			DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
178 					num_req_buffers);
179 			hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
180 			return QDF_STATUS_E_NOMEM;
181 		}
182 
183 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
184 			"%d rx desc allocated", num_alloc_desc);
185 		num_req_buffers = num_alloc_desc;
186 	}
187 
188 
189 	count = 0;
190 
191 	while (count < num_req_buffers) {
192 		rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
193 					RX_BUFFER_SIZE,
194 					RX_BUFFER_RESERVATION,
195 					RX_BUFFER_ALIGNMENT,
196 					FALSE);
197 
198 		if (qdf_unlikely(!rx_netbuf)) {
199 			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
200 			break;
201 		}
202 
203 		ret = qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf,
204 					  QDF_DMA_FROM_DEVICE);
205 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
206 			qdf_nbuf_free(rx_netbuf);
207 			DP_STATS_INC(dp_pdev, replenish.map_err, 1);
208 			continue;
209 		}
210 
211 		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);
212 
213 		dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, rx_netbuf, true);
214 		/*
215 		 * check if the physical address of nbuf->data is
216 		 * less than 0x50000000; if so, free the nbuf and try
217 		 * allocating a new nbuf. We can retry up to 100 times.
218 		 * This is a temporary WAR till we fix it properly.
219 		 */
220 		ret = check_x86_paddr(dp_soc, &rx_netbuf, &paddr, dp_pdev);
221 		if (ret == QDF_STATUS_E_FAILURE) {
222 			DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
223 			break;
224 		}
225 
226 		count++;
227 
228 		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
229 							 rxdma_srng);
230 		qdf_assert_always(rxdma_ring_entry);
231 
232 		next = (*desc_list)->next;
233 
234 		dp_rx_desc_prep(&((*desc_list)->rx_desc), rx_netbuf);
235 
236 		/* rx_desc.in_use should be zero at this time*/
237 		qdf_assert_always((*desc_list)->rx_desc.in_use == 0);
238 
239 		(*desc_list)->rx_desc.in_use = 1;
240 
241 		dp_verbose_debug("rx_netbuf=%pK, buf=%pK, paddr=0x%llx, cookie=%d",
242 				 rx_netbuf, qdf_nbuf_data(rx_netbuf),
243 				 (unsigned long long)paddr,
244 				 (*desc_list)->rx_desc.cookie);
245 
246 		hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
247 						(*desc_list)->rx_desc.cookie,
248 						rx_desc_pool->owner);
249 
250 		*desc_list = next;
251 
252 	}
253 
254 	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
255 
256 	dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u",
257 			 count, num_desc_to_free);
258 
259 	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count,
260 			 (RX_BUFFER_SIZE * count));
261 
262 free_descs:
263 	DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
264 	/*
265 	 * add any available free desc back to the free list
266 	 */
267 	if (*desc_list)
268 		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
269 			mac_id, rx_desc_pool);
270 
271 	return QDF_STATUS_SUCCESS;
272 }
273 
274 /*
275  * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
276  *				pkts to RAW mode simulation to
277  *				decapsulate the pkt.
278  *
279  * @vdev: vdev on which RAW mode is enabled
280  * @nbuf_list: list of RAW pkts to process
281  * @peer: peer object from which the pkt is rx
282  *
283  * Return: void
284  */
285 void
286 dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
287 					struct dp_peer *peer)
288 {
289 	qdf_nbuf_t deliver_list_head = NULL;
290 	qdf_nbuf_t deliver_list_tail = NULL;
291 	qdf_nbuf_t nbuf;
292 
293 	nbuf = nbuf_list;
294 	while (nbuf) {
295 		qdf_nbuf_t next = qdf_nbuf_next(nbuf);
296 
297 		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
298 
299 		DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
300 		DP_STATS_INC_PKT(peer, rx.raw, 1, qdf_nbuf_len(nbuf));
301 		/*
302 		 * reset the chfrag_start and chfrag_end bits in nbuf cb
303 		 * as this is a non-amsdu pkt and RAW mode simulation expects
304 		 * these bits to be 0 for a non-amsdu pkt.
305 		 */
306 		if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
307 			 qdf_nbuf_is_rx_chfrag_end(nbuf)) {
308 			qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
309 			qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
310 		}
311 
312 		nbuf = next;
313 	}
314 
315 	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
316 				 &deliver_list_tail, (struct cdp_peer*) peer);
317 
318 	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
319 }
320 
321 
322 #ifdef DP_LFR
323 /*
324  * In case of LFR, data of a new peer might be sent up
325  * even before peer is added.
326  */
327 static inline struct dp_vdev *
328 dp_get_vdev_from_peer(struct dp_soc *soc,
329 			uint16_t peer_id,
330 			struct dp_peer *peer,
331 			struct hal_rx_mpdu_desc_info mpdu_desc_info)
332 {
333 	struct dp_vdev *vdev;
334 	uint8_t vdev_id;
335 
336 	if (unlikely(!peer)) {
337 		if (peer_id != HTT_INVALID_PEER) {
338 			vdev_id = DP_PEER_METADATA_ID_GET(
339 					mpdu_desc_info.peer_meta_data);
340 			QDF_TRACE(QDF_MODULE_ID_DP,
341 				QDF_TRACE_LEVEL_DEBUG,
342 				FL("PeerID %d not found use vdevID %d"),
343 				peer_id, vdev_id);
344 			vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
345 							vdev_id);
346 		} else {
347 			QDF_TRACE(QDF_MODULE_ID_DP,
348 				QDF_TRACE_LEVEL_DEBUG,
349 				FL("Invalid PeerID %d"),
350 				peer_id);
351 			return NULL;
352 		}
353 	} else {
354 		vdev = peer->vdev;
355 	}
356 	return vdev;
357 }
358 #else
359 static inline struct dp_vdev *
360 dp_get_vdev_from_peer(struct dp_soc *soc,
361 			uint16_t peer_id,
362 			struct dp_peer *peer,
363 			struct hal_rx_mpdu_desc_info mpdu_desc_info)
364 {
365 	if (unlikely(!peer)) {
366 		QDF_TRACE(QDF_MODULE_ID_DP,
367 			QDF_TRACE_LEVEL_DEBUG,
368 			FL("Peer not found for peerID %d"),
369 			peer_id);
370 		return NULL;
371 	} else {
372 		return peer->vdev;
373 	}
374 }
375 #endif
376 
377 #ifndef FEATURE_WDS
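/*
 * dp_rx_da_learn() - no-op stub; the DA-based WDS learning implementation
 * is compiled in only when FEATURE_WDS is enabled.
 */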
378 static void
379 dp_rx_da_learn(struct dp_soc *soc,
380 	       uint8_t *rx_tlv_hdr,
381 	       struct dp_peer *ta_peer,
382 	       qdf_nbuf_t nbuf)
383 {
384 }
385 #endif
386 /*
387  * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic
388  *
389  * @soc: core txrx main context
390  * @ta_peer	: source peer entry
391  * @rx_tlv_hdr	: start address of rx tlvs
392  * @nbuf	: nbuf that has to be intrabss forwarded
393  *
394  * Return: bool: true if it is forwarded else false
395  */
396 static bool
397 dp_rx_intrabss_fwd(struct dp_soc *soc,
398 			struct dp_peer *ta_peer,
399 			uint8_t *rx_tlv_hdr,
400 			qdf_nbuf_t nbuf)
401 {
402 	uint16_t da_idx;
403 	uint16_t len;
404 	uint8_t is_frag;
405 	struct dp_peer *da_peer;
406 	struct dp_ast_entry *ast_entry;
407 	qdf_nbuf_t nbuf_copy;
408 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
409 	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
410 	struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
411 					tid_stats.tid_rx_stats[ring_id][tid];
412 
413 	/* check if the destination peer is available in peer table
414 	 * and also check if the source peer and destination peer
415 	 * belong to the same vap and destination peer is not bss peer.
416 	 */
417 
418 	if ((qdf_nbuf_is_da_valid(nbuf) && !qdf_nbuf_is_da_mcbc(nbuf))) {
419 		da_idx = hal_rx_msdu_end_da_idx_get(soc->hal_soc, rx_tlv_hdr);
420 
421 		ast_entry = soc->ast_table[da_idx];
422 		if (!ast_entry)
423 			return false;
424 
425 		if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
426 			ast_entry->is_active = TRUE;
427 			return false;
428 		}
429 
430 		da_peer = ast_entry->peer;
431 
432 		if (!da_peer)
433 			return false;
434 		/* TA peer cannot be the same as the peer (DA) on which the AST
435 		 * entry is present; this indicates a change in topology and
436 		 * that AST entries are yet to be updated.
437 		 */
438 		if (da_peer == ta_peer)
439 			return false;
440 
441 		if (da_peer->vdev == ta_peer->vdev && !da_peer->bss_peer) {
442 			len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
443 			is_frag = qdf_nbuf_is_frag(nbuf);
444 			memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
445 
446 			/* linearize the nbuf just before we send to
447 			 * dp_tx_send()
448 			 */
449 			if (qdf_unlikely(is_frag)) {
450 				if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
451 					return false;
452 
453 				nbuf = qdf_nbuf_unshare(nbuf);
454 				if (!nbuf) {
455 					DP_STATS_INC_PKT(ta_peer,
456 							 rx.intra_bss.fail,
457 							 1,
458 							 len);
459 					/* return true even though the pkt is
460 					 * not forwarded. Basically skb_unshare
461 					 * failed and we want to continue with
462 					 * next nbuf.
463 					 */
464 					tid_stats->fail_cnt[INTRABSS_DROP]++;
465 					return true;
466 				}
467 			}
468 
469 			if (!dp_tx_send(dp_vdev_to_cdp_vdev(ta_peer->vdev),
470 					nbuf)) {
471 				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
472 						 len);
473 				return true;
474 			} else {
475 				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
476 						len);
477 				tid_stats->fail_cnt[INTRABSS_DROP]++;
478 				return false;
479 			}
480 		}
481 	}
482 	/* if it is a broadcast pkt (e.g. ARP) and it is not from its own
483 	 * source, then clone the pkt and send the cloned pkt for
484 	 * intra-BSS forwarding and the original pkt up the network stack.
485 	 * Note: how do we handle multicast pkts? Do we forward
486 	 * all multicast pkts as is, or let a higher layer module
487 	 * like igmpsnoop decide whether to forward or not with
488 	 * Mcast enhancement?
489 	 */
490 	else if (qdf_unlikely((qdf_nbuf_is_da_mcbc(nbuf) &&
491 			       !ta_peer->bss_peer))) {
492 		if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf))
493 			goto end;
494 
495 		nbuf_copy = qdf_nbuf_copy(nbuf);
496 		if (!nbuf_copy)
497 			goto end;
498 
499 		len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
500 		memset(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
501 
502 		if (dp_tx_send(dp_vdev_to_cdp_vdev(ta_peer->vdev), nbuf_copy)) {
503 			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, len);
504 			tid_stats->fail_cnt[INTRABSS_DROP]++;
505 			qdf_nbuf_free(nbuf_copy);
506 		} else {
507 			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, len);
508 			tid_stats->intrabss_cnt++;
509 		}
510 	}
511 
512 end:
513 	/* return false as we have to still send the original pkt
514 	 * up the stack
515 	 */
516 	return false;
517 }
518 
519 #ifdef MESH_MODE_SUPPORT
520 
521 /**
522  * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
523  *
524  * @vdev: DP Virtual device handle
525  * @nbuf: Buffer pointer
526  * @rx_tlv_hdr: start of rx tlv header
527  * @peer: pointer to peer
528  *
529  * This function allocates memory for the mesh receive stats and fills in
530  * the required stats. The memory address is stored in the skb cb.
531  *
532  * Return: void
533  */
534 
535 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
536 				uint8_t *rx_tlv_hdr, struct dp_peer *peer)
537 {
538 	struct mesh_recv_hdr_s *rx_info = NULL;
539 	uint32_t pkt_type;
540 	uint32_t nss;
541 	uint32_t rate_mcs;
542 	uint32_t bw;
543 
544 	/* fill recv mesh stats */
545 	rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));
546 
547 	/* upper layers are responsible for freeing this memory */
548 
549 	if (!rx_info) {
550 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
551 			"Memory allocation failed for mesh rx stats");
552 		DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
553 		return;
554 	}
555 
556 	rx_info->rs_flags = MESH_RXHDR_VER1;
557 	if (qdf_nbuf_is_rx_chfrag_start(nbuf))
558 		rx_info->rs_flags |= MESH_RX_FIRST_MSDU;
559 
560 	if (qdf_nbuf_is_rx_chfrag_end(nbuf))
561 		rx_info->rs_flags |= MESH_RX_LAST_MSDU;
562 
563 	if (hal_rx_attn_msdu_get_is_decrypted(rx_tlv_hdr)) {
564 		rx_info->rs_flags |= MESH_RX_DECRYPTED;
565 		rx_info->rs_keyix = hal_rx_msdu_get_keyid(rx_tlv_hdr);
566 		if (vdev->osif_get_key)
567 			vdev->osif_get_key(vdev->osif_vdev,
568 					&rx_info->rs_decryptkey[0],
569 					&peer->mac_addr.raw[0],
570 					rx_info->rs_keyix);
571 	}
572 
573 	rx_info->rs_rssi = hal_rx_msdu_start_get_rssi(rx_tlv_hdr);
574 	rx_info->rs_channel = hal_rx_msdu_start_get_freq(rx_tlv_hdr);
575 	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
576 	rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
577 	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
578 	nss = hal_rx_msdu_start_nss_get(vdev->pdev->soc->hal_soc, rx_tlv_hdr);
579 	rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
580 				(bw << 24);
581 
582 	qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);
583 
584 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
585 		FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x"),
586 						rx_info->rs_flags,
587 						rx_info->rs_rssi,
588 						rx_info->rs_channel,
589 						rx_info->rs_ratephy1,
590 						rx_info->rs_keyix);
591 
592 }
593 
594 /**
595  * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
596  *
597  * @vdev: DP Virtual device handle
598  * @nbuf: Buffer pointer
599  * @rx_tlv_hdr: start of rx tlv header
600  *
601  * This checks if the received packet matches any filter-out
602  * category and drops the packet if it matches.
603  *
604  * Return: status (0 indicates drop, 1 indicates no drop)
605  */
606 
607 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
608 					uint8_t *rx_tlv_hdr)
609 {
610 	union dp_align_mac_addr mac_addr;
611 	struct dp_soc *soc = vdev->pdev->soc;
612 
613 	if (qdf_unlikely(vdev->mesh_rx_filter)) {
614 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
615 			if (hal_rx_mpdu_get_fr_ds(soc->hal_soc,
616 						  rx_tlv_hdr))
617 				return  QDF_STATUS_SUCCESS;
618 
619 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
620 			if (hal_rx_mpdu_get_to_ds(soc->hal_soc,
621 						  rx_tlv_hdr))
622 				return  QDF_STATUS_SUCCESS;
623 
624 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
625 			if (!hal_rx_mpdu_get_fr_ds(soc->hal_soc,
626 						   rx_tlv_hdr) &&
627 			    !hal_rx_mpdu_get_to_ds(soc->hal_soc,
628 						   rx_tlv_hdr))
629 				return  QDF_STATUS_SUCCESS;
630 
631 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
632 			if (hal_rx_mpdu_get_addr1(soc->hal_soc,
633 						  rx_tlv_hdr,
634 					&mac_addr.raw[0]))
635 				return QDF_STATUS_E_FAILURE;
636 
637 			if (!qdf_mem_cmp(&mac_addr.raw[0],
638 					&vdev->mac_addr.raw[0],
639 					QDF_MAC_ADDR_SIZE))
640 				return  QDF_STATUS_SUCCESS;
641 		}
642 
643 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
644 			if (hal_rx_mpdu_get_addr2(soc->hal_soc,
645 						  rx_tlv_hdr,
646 						  &mac_addr.raw[0]))
647 				return QDF_STATUS_E_FAILURE;
648 
649 			if (!qdf_mem_cmp(&mac_addr.raw[0],
650 					&vdev->mac_addr.raw[0],
651 					QDF_MAC_ADDR_SIZE))
652 				return  QDF_STATUS_SUCCESS;
653 		}
654 	}
655 
656 	return QDF_STATUS_E_FAILURE;
657 }
658 
659 #else
660 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
661 				uint8_t *rx_tlv_hdr, struct dp_peer *peer)
662 {
663 }
664 
665 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
666 					uint8_t *rx_tlv_hdr)
667 {
668 	return QDF_STATUS_E_FAILURE;
669 }
670 
671 #endif
672 
673 #ifdef FEATURE_NAC_RSSI
674 /**
675  * dp_rx_nac_filter(): Function to perform filtering of non-associated
676  * clients
677  * @pdev: DP pdev handle
678  * @rx_pkt_hdr: Rx packet Header
679  *
680  * return: dp_vdev*
681  */
682 static
683 struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
684 		uint8_t *rx_pkt_hdr)
685 {
686 	struct ieee80211_frame *wh;
687 	struct dp_neighbour_peer *peer = NULL;
688 
689 	wh = (struct ieee80211_frame *)rx_pkt_hdr;
690 
691 	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
692 		return NULL;
693 
694 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
695 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
696 				neighbour_peer_list_elem) {
697 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
698 				wh->i_addr2, QDF_MAC_ADDR_SIZE) == 0) {
699 			QDF_TRACE(
700 				QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
701 				FL("NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x"),
702 				peer->neighbour_peers_macaddr.raw[0],
703 				peer->neighbour_peers_macaddr.raw[1],
704 				peer->neighbour_peers_macaddr.raw[2],
705 				peer->neighbour_peers_macaddr.raw[3],
706 				peer->neighbour_peers_macaddr.raw[4],
707 				peer->neighbour_peers_macaddr.raw[5]);
708 
709 				qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
710 
711 			return pdev->monitor_vdev;
712 		}
713 	}
714 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
715 
716 	return NULL;
717 }
718 
719 /**
720  * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
721  * @soc: DP SOC handle
722  * @mpdu: mpdu for which peer is invalid
723  * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
724  * pool_id have the same mapping)
725  *
726  * return: integer type
727  */
728 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
729 				   uint8_t mac_id)
730 {
731 	struct dp_invalid_peer_msg msg;
732 	struct dp_vdev *vdev = NULL;
733 	struct dp_pdev *pdev = NULL;
734 	struct ieee80211_frame *wh;
735 	qdf_nbuf_t curr_nbuf, next_nbuf;
736 	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
737 	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
738 
739 	rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
740 
741 	if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) {
742 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
743 			  "Drop decapped frames");
744 		goto free;
745 	}
746 
747 	wh = (struct ieee80211_frame *)rx_pkt_hdr;
748 
749 	if (!DP_FRAME_IS_DATA(wh)) {
750 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
751 			  "NAWDS valid only for data frames");
752 		goto free;
753 	}
754 
755 	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
756 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
757 			"Invalid nbuf length");
758 		goto free;
759 	}
760 
761 	pdev = dp_get_pdev_for_mac_id(soc, mac_id);
762 
763 	if (!pdev || qdf_unlikely(pdev->is_pdev_down)) {
764 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
765 			  "PDEV %s", !pdev ? "not found" : "down");
766 		goto free;
767 	}
768 
769 	if (pdev->filter_neighbour_peers) {
770 		/* Next Hop scenario not yet handled */
771 		vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
772 		if (vdev) {
773 			dp_rx_mon_deliver(soc, pdev->pdev_id,
774 					  pdev->invalid_peer_head_msdu,
775 					  pdev->invalid_peer_tail_msdu);
776 
777 			pdev->invalid_peer_head_msdu = NULL;
778 			pdev->invalid_peer_tail_msdu = NULL;
779 
780 			return 0;
781 		}
782 	}
783 
784 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
785 
786 		if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
787 				QDF_MAC_ADDR_SIZE) == 0) {
788 			goto out;
789 		}
790 	}
791 
792 	if (!vdev) {
793 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
794 			"VDEV not found");
795 		goto free;
796 	}
797 
798 out:
799 	msg.wh = wh;
800 	qdf_nbuf_pull_head(mpdu, RX_PKT_TLVS_LEN);
801 	msg.nbuf = mpdu;
802 	msg.vdev_id = vdev->vdev_id;
803 	if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer)
804 		pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(pdev->ctrl_pdev,
805 							&msg);
806 
807 free:
808 	/* Drop and free packet */
809 	curr_nbuf = mpdu;
810 	while (curr_nbuf) {
811 		next_nbuf = qdf_nbuf_next(curr_nbuf);
812 		qdf_nbuf_free(curr_nbuf);
813 		curr_nbuf = next_nbuf;
814 	}
815 
816 	return 0;
817 }
818 
819 /**
820  * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
821  * @soc: DP SOC handle
822  * @mpdu: mpdu for which peer is invalid
823  * @mpdu_done: if an mpdu is completed
824  * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
825  * pool_id have the same mapping)
826  *
827  * Return: void
828  */
829 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
830 					qdf_nbuf_t mpdu, bool mpdu_done,
831 					uint8_t mac_id)
832 {
833 	/* Only trigger the process when mpdu is completed */
834 	if (mpdu_done)
835 		dp_rx_process_invalid_peer(soc, mpdu, mac_id);
836 }
837 #else
838 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
839 				   uint8_t mac_id)
840 {
841 	qdf_nbuf_t curr_nbuf, next_nbuf;
842 	struct dp_pdev *pdev;
843 	struct dp_vdev *vdev = NULL;
844 	struct ieee80211_frame *wh;
845 	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
846 	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
847 
848 	wh = (struct ieee80211_frame *)rx_pkt_hdr;
849 
850 	if (!DP_FRAME_IS_DATA(wh)) {
851 		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
852 				   "only for data frames");
853 		goto free;
854 	}
855 
856 	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
857 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
858 			  "Invalid nbuf length");
859 		goto free;
860 	}
861 
862 	pdev = dp_get_pdev_for_mac_id(soc, mac_id);
863 	if (!pdev) {
864 		QDF_TRACE(QDF_MODULE_ID_DP,
865 			  QDF_TRACE_LEVEL_ERROR,
866 			  "PDEV not found");
867 		goto free;
868 	}
869 
870 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
871 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
872 		if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
873 				QDF_MAC_ADDR_SIZE) == 0) {
874 			qdf_spin_unlock_bh(&pdev->vdev_list_lock);
875 			goto out;
876 		}
877 	}
878 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
879 
880 	if (!vdev) {
881 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
882 			  "VDEV not found");
883 		goto free;
884 	}
885 
886 out:
887 	if (soc->cdp_soc.ol_ops->rx_invalid_peer)
888 		soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh);
889 free:
890 	/* reset the head and tail pointers */
891 	pdev = dp_get_pdev_for_mac_id(soc, mac_id);
892 	if (pdev) {
893 		pdev->invalid_peer_head_msdu = NULL;
894 		pdev->invalid_peer_tail_msdu = NULL;
895 	}
896 
897 	/* Drop and free packet */
898 	curr_nbuf = mpdu;
899 	while (curr_nbuf) {
900 		next_nbuf = qdf_nbuf_next(curr_nbuf);
901 		qdf_nbuf_free(curr_nbuf);
902 		curr_nbuf = next_nbuf;
903 	}
904 
905 	return 0;
906 }
907 
908 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
909 					qdf_nbuf_t mpdu, bool mpdu_done,
910 					uint8_t mac_id)
911 {
912 	/* Process the nbuf */
913 	dp_rx_process_invalid_peer(soc, mpdu, mac_id);
914 }
915 #endif
916 
917 #ifdef RECEIVE_OFFLOAD
918 /**
919  * dp_rx_print_offload_info() - Print offload info from RX TLV
920  * @soc: dp soc handle
921  * @rx_tlv: RX TLV for which offload information is to be printed
922  *
923  * Return: None
924  */
925 static void dp_rx_print_offload_info(struct dp_soc *soc, uint8_t *rx_tlv)
926 {
927 	dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------");
928 	dp_verbose_debug("lro_eligible 0x%x", HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv));
929 	dp_verbose_debug("pure_ack 0x%x", HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv));
930 	dp_verbose_debug("chksum 0x%x", hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
931 								  rx_tlv));
932 	dp_verbose_debug("TCP seq num 0x%x", HAL_RX_TLV_GET_TCP_SEQ(rx_tlv));
933 	dp_verbose_debug("TCP ack num 0x%x", HAL_RX_TLV_GET_TCP_ACK(rx_tlv));
934 	dp_verbose_debug("TCP window 0x%x", HAL_RX_TLV_GET_TCP_WIN(rx_tlv));
935 	dp_verbose_debug("TCP protocol 0x%x", HAL_RX_TLV_GET_TCP_PROTO(rx_tlv));
936 	dp_verbose_debug("TCP offset 0x%x", HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv));
937 	dp_verbose_debug("toeplitz 0x%x", HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv));
938 	dp_verbose_debug("---------------------------------------------------------");
939 }
940 
941 /**
942  * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb
943  * @soc: DP SOC handle
944  * @rx_tlv: RX TLV received for the msdu
945  * @msdu: msdu for which GRO info needs to be filled
946  * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets
947  *
948  * Return: None
949  */
950 static
951 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
952 			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
953 {
954 	if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx))
955 		return;
956 
957 	/* Filling up RX offload info only for TCP packets */
958 	if (!HAL_RX_TLV_GET_TCP_PROTO(rx_tlv))
959 		return;
960 
961 	*rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1;
962 
963 	QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) =
964 		 HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv);
965 	QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) =
966 			HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv);
967 	QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
968 			hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
969 						  rx_tlv);
970 	QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) =
971 			 HAL_RX_TLV_GET_TCP_SEQ(rx_tlv);
972 	QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) =
973 			 HAL_RX_TLV_GET_TCP_ACK(rx_tlv);
974 	QDF_NBUF_CB_RX_TCP_WIN(msdu) =
975 			 HAL_RX_TLV_GET_TCP_WIN(rx_tlv);
976 	QDF_NBUF_CB_RX_TCP_PROTO(msdu) =
977 			 HAL_RX_TLV_GET_TCP_PROTO(rx_tlv);
978 	QDF_NBUF_CB_RX_IPV6_PROTO(msdu) =
979 			 HAL_RX_TLV_GET_IPV6(rx_tlv);
980 	QDF_NBUF_CB_RX_TCP_OFFSET(msdu) =
981 			 HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv);
982 	QDF_NBUF_CB_RX_FLOW_ID(msdu) =
983 			 HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv);
984 
985 	dp_rx_print_offload_info(soc, rx_tlv);
986 }
987 #else
988 static void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
989 				qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
990 {
991 }
992 #endif /* RECEIVE_OFFLOAD */
993 
994 /**
995  * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
996  *
997  * @nbuf: pointer to msdu.
998  * @mpdu_len: mpdu length
999  *
1000  * Return: true if nbuf is the last msdu of the mpdu, else false.
1001  */
1002 static inline bool dp_rx_adjust_nbuf_len(qdf_nbuf_t nbuf, uint16_t *mpdu_len)
1003 {
1004 	bool last_nbuf;
1005 
1006 	if (*mpdu_len > (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN)) {
1007 		qdf_nbuf_set_pktlen(nbuf, RX_BUFFER_SIZE);
1008 		last_nbuf = false;
1009 	} else {
1010 		qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + RX_PKT_TLVS_LEN));
1011 		last_nbuf = true;
1012 	}
1013 
1014 	*mpdu_len -= (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN);
1015 
1016 	return last_nbuf;
1017 }
1018 
1019 /**
1020  * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
1021  *		     multiple nbufs.
1022  * @nbuf: pointer to the first msdu of an amsdu.
1023  * @rx_tlv_hdr: pointer to the start of RX TLV headers.
1024  *
1025  *
1026  * This function implements the creation of RX frag_list for cases
1027  * where an MSDU is spread across multiple nbufs.
1028  *
1029  * Return: returns the head nbuf which contains complete frag_list.
1030  */
1031 qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
1032 {
1033 	qdf_nbuf_t parent, next, frag_list;
1034 	uint16_t frag_list_len = 0;
1035 	uint16_t mpdu_len;
1036 	bool last_nbuf;
1037 
1038 	mpdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
1039 	/*
1040 	 * This is a case where the complete msdu fits in a single nbuf.
1041 	 * In this case HW sets both the start and end bits and we only need
1042 	 * to reset these bits for the RAW mode simulator to decap the pkt.
1043 	 */
1044 	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
1045 					qdf_nbuf_is_rx_chfrag_end(nbuf)) {
1046 		qdf_nbuf_set_pktlen(nbuf, mpdu_len + RX_PKT_TLVS_LEN);
1047 		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
1048 		return nbuf;
1049 	}
1050 
1051 	/*
1052 	 * This is a case where we have multiple msdus (A-MSDU) spread across
1053 	 * multiple nbufs. here we create a fraglist out of these nbufs.
1054 	 *
1055 	 * the moment we encounter a nbuf with continuation bit set we
1056 	 * know for sure we have an MSDU which is spread across multiple
1057 	 * nbufs. We loop through and reap nbufs till we reach last nbuf.
1058 	 */
1059 	parent = nbuf;
1060 	frag_list = nbuf->next;
1061 	nbuf = nbuf->next;
1062 
1063 	/*
1064 	 * set the start bit in the first nbuf we encounter with continuation
1065 	 * bit set. This has the proper mpdu length set as it is the first
1066 	 * msdu of the mpdu. this becomes the parent nbuf and the subsequent
1067 	 * nbufs will form the frag_list of the parent nbuf.
1068 	 */
1069 	qdf_nbuf_set_rx_chfrag_start(parent, 1);
1070 	last_nbuf = dp_rx_adjust_nbuf_len(parent, &mpdu_len);
1071 
1072 	/*
1073 	 * this is where we set the length of the fragments which are
1074 	 * associated to the parent nbuf. We iterate through the frag_list
1075 	 * till we hit the last_nbuf of the list.
1076 	 */
1077 	do {
1078 		last_nbuf = dp_rx_adjust_nbuf_len(nbuf, &mpdu_len);
1079 		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
1080 		frag_list_len += qdf_nbuf_len(nbuf);
1081 
1082 		if (last_nbuf) {
1083 			next = nbuf->next;
1084 			nbuf->next = NULL;
1085 			break;
1086 		}
1087 
1088 		nbuf = nbuf->next;
1089 	} while (!last_nbuf);
1090 
1091 	qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
1092 	qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
1093 	parent->next = next;
1094 
1095 	qdf_nbuf_pull_head(parent, RX_PKT_TLVS_LEN);
1096 	return parent;
1097 }
1098 
1099 /**
1100  * dp_rx_compute_delay() - Compute and fill in all timestamps
1101  *				to pass in correct fields
1102  *
1103  * @vdev: vdev handle
1104  * @nbuf: rx buffer for which the delay stats are computed
1105  *
1106  * Return: none
1107  */
1108 void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1109 {
1110 	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
1111 	int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get());
1112 	uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
1113 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
1114 	uint32_t interframe_delay =
1115 		(uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp);
1116 
1117 	dp_update_delay_stats(vdev->pdev, to_stack, tid,
1118 			      CDP_DELAY_STATS_REAP_STACK, ring_id);
1119 	/*
1120 	 * Update interframe delay stats calculated at deliver_data_ol point.
1121 	 * Value of vdev->prev_rx_deliver_tstamp will be 0 for 1st frame, so
1122 	 * interframe delay will not be calculated correctly for the 1st frame.
1123 	 * On the other hand, this helps avoid an extra per-packet check of
1124 	 * vdev->prev_rx_deliver_tstamp.
1125 	 */
1126 	dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
1127 			      CDP_DELAY_STATS_RX_INTERFRAME, ring_id);
1128 	vdev->prev_rx_deliver_tstamp = current_ts;
1129 }
1130 
1131 /**
1132  * dp_rx_drop_nbuf_list() - drop an nbuf list
1133  * @pdev: dp pdev reference
1134  * @buf_list: buffer list to be dropped
1135  *
1136  * Return: int (number of bufs dropped)
1137  */
1138 static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev,
1139 				       qdf_nbuf_t buf_list)
1140 {
1141 	struct cdp_tid_rx_stats *stats = NULL;
1142 	uint8_t tid = 0, ring_id = 0;
1143 	int num_dropped = 0;
1144 	qdf_nbuf_t buf, next_buf;
1145 
1146 	buf = buf_list;
1147 	while (buf) {
1148 		ring_id = QDF_NBUF_CB_RX_CTX_ID(buf);
1149 		next_buf = qdf_nbuf_queue_next(buf);
1150 		tid = qdf_nbuf_get_tid_val(buf);
1151 		stats = &pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
1152 		stats->fail_cnt[INVALID_PEER_VDEV]++;
1153 		stats->delivered_to_stack--;
1154 		qdf_nbuf_free(buf);
1155 		buf = next_buf;
1156 		num_dropped++;
1157 	}
1158 
1159 	return num_dropped;
1160 }
1161 
1162 #ifdef PEER_CACHE_RX_PKTS
1163 /**
1164  * dp_rx_flush_rx_cached() - flush cached rx frames
1165  * @peer: peer
1166  * @drop: flag to drop frames or forward to net stack
1167  *
1168  * Return: None
1169  */
1170 void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
1171 {
1172 	struct dp_peer_cached_bufq *bufqi;
1173 	struct dp_rx_cached_buf *cache_buf = NULL;
1174 	ol_txrx_rx_fp data_rx = NULL;
1175 	int num_buff_elem;
1176 	QDF_STATUS status;
1177 
1178 	if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
1179 		qdf_atomic_dec(&peer->flush_in_progress);
1180 		return;
1181 	}
1182 
1183 	qdf_spin_lock_bh(&peer->peer_info_lock);
1184 	if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx)
1185 		data_rx = peer->vdev->osif_rx;
1186 	else
1187 		drop = true;
1188 	qdf_spin_unlock_bh(&peer->peer_info_lock);
1189 
1190 	bufqi = &peer->bufq_info;
1191 
1192 	qdf_spin_lock_bh(&bufqi->bufq_lock);
1193 	qdf_list_remove_front(&bufqi->cached_bufq,
1194 			      (qdf_list_node_t **)&cache_buf);
1195 	while (cache_buf) {
1196 		num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(
1197 								cache_buf->buf);
1198 		bufqi->entries -= num_buff_elem;
1199 		qdf_spin_unlock_bh(&bufqi->bufq_lock);
1200 		if (drop) {
1201 			bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1202 							      cache_buf->buf);
1203 		} else {
1204 			/* Flush the cached frames to OSIF DEV */
1205 			status = data_rx(peer->vdev->osif_vdev, cache_buf->buf);
1206 			if (status != QDF_STATUS_SUCCESS)
1207 				bufqi->dropped = dp_rx_drop_nbuf_list(
1208 							peer->vdev->pdev,
1209 							cache_buf->buf);
1210 		}
1211 		qdf_mem_free(cache_buf);
1212 		cache_buf = NULL;
1213 		qdf_spin_lock_bh(&bufqi->bufq_lock);
1214 		qdf_list_remove_front(&bufqi->cached_bufq,
1215 				      (qdf_list_node_t **)&cache_buf);
1216 	}
1217 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
1218 	qdf_atomic_dec(&peer->flush_in_progress);
1219 }
1220 
1221 /**
1222  * dp_rx_enqueue_rx() - cache rx frames
1223  * @peer: peer
1224  * @rx_buf_list: cache buffer list
1225  *
1226  * Return: QDF_STATUS
1227  */
1228 static QDF_STATUS
1229 dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
1230 {
1231 	struct dp_rx_cached_buf *cache_buf;
1232 	struct dp_peer_cached_bufq *bufqi = &peer->bufq_info;
1233 	int num_buff_elem;
1234 
1235 	QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_TXRX, "bufq->curr %d bufq->drops %d",
1236 			   bufqi->entries, bufqi->dropped);
1237 
1238 	if (!peer->valid) {
1239 		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1240 						      rx_buf_list);
1241 		return QDF_STATUS_E_INVAL;
1242 	}
1243 
1244 	qdf_spin_lock_bh(&bufqi->bufq_lock);
1245 	if (bufqi->entries >= bufqi->thresh) {
1246 		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1247 						      rx_buf_list);
1248 		qdf_spin_unlock_bh(&bufqi->bufq_lock);
1249 		return QDF_STATUS_E_RESOURCES;
1250 	}
1251 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
1252 
1253 	num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list);
1254 
1255 	cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf));
1256 	if (!cache_buf) {
1257 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1258 			  "Failed to allocate buf to cache rx frames");
1259 		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1260 						      rx_buf_list);
1261 		return QDF_STATUS_E_NOMEM;
1262 	}
1263 
1264 	cache_buf->buf = rx_buf_list;
1265 
1266 	qdf_spin_lock_bh(&bufqi->bufq_lock);
1267 	qdf_list_insert_back(&bufqi->cached_bufq,
1268 			     &cache_buf->node);
1269 	bufqi->entries += num_buff_elem;
1270 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
1271 
1272 	return QDF_STATUS_SUCCESS;
1273 }
1274 
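/*
 * dp_rx_is_peer_cache_bufq_supported() - true only when PEER_CACHE_RX_PKTS
 * is enabled, i.e. when rx frames can be queued on the peer via
 * dp_rx_enqueue_rx() until the rx callback is available.
 */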
1275 static inline
1276 bool dp_rx_is_peer_cache_bufq_supported(void)
1277 {
1278 	return true;
1279 }
1280 #else
1281 static inline
1282 bool dp_rx_is_peer_cache_bufq_supported(void)
1283 {
1284 	return false;
1285 }
1286 
1287 static inline QDF_STATUS
1288 dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
1289 {
1290 	return QDF_STATUS_SUCCESS;
1291 }
1292 #endif
1293 
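/**
 * dp_rx_deliver_to_stack() - deliver a list of nbufs to the registered
 *			      OSIF rx callback of the vdev
 * @vdev: DP vdev handle
 * @peer: peer owning the frames
 * @nbuf_head: head of the nbuf list
 * @nbuf_tail: tail of the nbuf list
 *
 * If no rx callback is registered the frames are either cached on the
 * peer or dropped. For raw and native-wifi decap types the list is first
 * passed through the RAW mode simulation decap handler.
 *
 * Return: void
 */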
1294 static inline void dp_rx_deliver_to_stack(struct dp_vdev *vdev,
1295 						struct dp_peer *peer,
1296 						qdf_nbuf_t nbuf_head,
1297 						qdf_nbuf_t nbuf_tail)
1298 {
1299 	/*
1300 	 * It is highly unlikely to have a vdev without a registered rx
1301 	 * callback function; if so, cache or free the nbuf_list.
1302 	 */
1303 	if (qdf_unlikely(!vdev->osif_rx)) {
1304 		if (dp_rx_is_peer_cache_bufq_supported())
1305 			dp_rx_enqueue_rx(peer, nbuf_head);
1306 		else
1307 			dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);
1308 
1309 		return;
1310 	}
1311 
1312 	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
1313 			(vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
1314 		vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
1315 				&nbuf_tail, (struct cdp_peer *) peer);
1316 	}
1317 
1318 	vdev->osif_rx(vdev->osif_vdev, nbuf_head);
1319 }
1320 
1321 /**
1322  * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
1323  * @pdev: DP pdev handle
 * @nbuf: pointer to the first msdu of an amsdu.
1324  * @rx_tlv_hdr: pointer to the start of RX TLV headers.
1325  *
1326  * The ipsumed field of the skb is set based on whether HW validated the
1327  * IP/TCP/UDP checksum.
1328  *
1329  * Return: void
1330  */
1331 static inline void dp_rx_cksum_offload(struct dp_pdev *pdev,
1332 				       qdf_nbuf_t nbuf,
1333 				       uint8_t *rx_tlv_hdr)
1334 {
1335 	qdf_nbuf_rx_cksum_t cksum = {0};
1336 	bool ip_csum_err = hal_rx_attn_ip_cksum_fail_get(rx_tlv_hdr);
1337 	bool tcp_udp_csum_er = hal_rx_attn_tcp_udp_cksum_fail_get(rx_tlv_hdr);
1338 
1339 	if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) {
1340 		cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
1341 		qdf_nbuf_set_rx_cksum(nbuf, &cksum);
1342 	} else {
1343 		DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err);
1344 		DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er);
1345 	}
1346 }
1347 
1348 /**
1349  * dp_rx_msdu_stats_update() - update per msdu stats.
1350  * @soc: core txrx main context
1351  * @nbuf: pointer to the first msdu of an amsdu.
1352  * @rx_tlv_hdr: pointer to the start of RX TLV headers.
1353  * @peer: pointer to the peer object.
1354  * @ring_id: reo dest ring number on which pkt is reaped.
1355  * @tid_stats: per tid rx stats.
1356  *
1357  * update all the per msdu stats for that nbuf.
1358  * Return: void
1359  */
1360 static void dp_rx_msdu_stats_update(struct dp_soc *soc,
1361 				    qdf_nbuf_t nbuf,
1362 				    uint8_t *rx_tlv_hdr,
1363 				    struct dp_peer *peer,
1364 				    uint8_t ring_id,
1365 				    struct cdp_tid_rx_stats *tid_stats)
1366 {
1367 	bool is_ampdu, is_not_amsdu;
1368 	uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
1369 	struct dp_vdev *vdev = peer->vdev;
1370 	qdf_ether_header_t *eh;
1371 	uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
1372 
1373 	is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
1374 			qdf_nbuf_is_rx_chfrag_end(nbuf);
1375 
1376 	DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1, msdu_len);
1377 	DP_STATS_INCC(peer, rx.non_amsdu_cnt, 1, is_not_amsdu);
1378 	DP_STATS_INCC(peer, rx.amsdu_cnt, 1, !is_not_amsdu);
1379 	DP_STATS_INCC(peer, rx.rx_retries, 1, qdf_nbuf_is_rx_retry_flag(nbuf));
1380 
1381 	tid_stats->msdu_cnt++;
1382 	if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) &&
1383 			 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
1384 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1385 		DP_STATS_INC_PKT(peer, rx.multicast, 1, msdu_len);
1386 		tid_stats->mcast_msdu_cnt++;
1387 		if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
1388 			DP_STATS_INC_PKT(peer, rx.bcast, 1, msdu_len);
1389 			tid_stats->bcast_msdu_cnt++;
1390 		}
1391 	}
1392 
1393 	/*
1394 	 * currently we can return from here as we have similar stats
1395 	 * updated at per ppdu level instead of msdu level
1396 	 */
1397 	if (!soc->process_rx_status)
1398 		return;
1399 
1400 	is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(rx_tlv_hdr);
1401 	DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu);
1402 	DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu));
1403 
1404 	sgi = hal_rx_msdu_start_sgi_get(rx_tlv_hdr);
1405 	mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
1406 	tid = qdf_nbuf_get_tid_val(nbuf);
1407 	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
1408 	reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc,
1409 							      rx_tlv_hdr);
1410 	nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
1411 	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
1412 
1413 	DP_STATS_INC(peer, rx.bw[bw], 1);
1414 	/*
1415 	 * Only if nss > 0 and pkt_type is 11N/AC/AX,
1416 	 * increase index [nss - 1] in the array counter.
1417 	 */
1418 	if (nss > 0 && (pkt_type == DOT11_N ||
1419 			pkt_type == DOT11_AC ||
1420 			pkt_type == DOT11_AX))
1421 		DP_STATS_INC(peer, rx.nss[nss - 1], 1);
1422 
1423 	DP_STATS_INC(peer, rx.sgi_count[sgi], 1);
1424 	DP_STATS_INCC(peer, rx.err.mic_err, 1,
1425 		      hal_rx_mpdu_end_mic_err_get(rx_tlv_hdr));
1426 	DP_STATS_INCC(peer, rx.err.decrypt_err, 1,
1427 		      hal_rx_mpdu_end_decrypt_err_get(rx_tlv_hdr));
1428 
1429 	DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
1430 	DP_STATS_INC(peer, rx.reception_type[reception_type], 1);
1431 
1432 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1433 		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
1434 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1435 		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A)));
1436 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1437 		      ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
1438 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1439 		      ((mcs <= MAX_MCS_11B) && (pkt_type == DOT11_B)));
1440 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1441 		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
1442 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1443 		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_N)));
1444 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1445 		      ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
1446 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1447 		      ((mcs <= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
1448 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1449 		      ((mcs >= MAX_MCS) && (pkt_type == DOT11_AX)));
1450 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1451 		      ((mcs < MAX_MCS) && (pkt_type == DOT11_AX)));
1452 
1453 	if ((soc->process_rx_status) &&
1454 	    hal_rx_attn_first_mpdu_get(rx_tlv_hdr)) {
1455 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
1456 		if (!vdev->pdev)
1457 			return;
1458 
1459 		dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
1460 				     &peer->stats, peer->peer_ids[0],
1461 				     UPDATE_PEER_STATS,
1462 				     vdev->pdev->pdev_id);
1463 #endif
1464 
1465 	}
1466 }
1467 
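/**
 * is_sa_da_idx_valid() - check that the SA and DA AST indices reported in
 *			  the RX TLVs are within the configured AST table size
 * @soc: DP SOC handle
 * @rx_tlv_hdr: start of the RX TLV headers
 * @nbuf: received frame
 *
 * Return: false if either index exceeds the maximum AST index, else true
 */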
1468 static inline bool is_sa_da_idx_valid(struct dp_soc *soc,
1469 				      uint8_t *rx_tlv_hdr,
1470 				      qdf_nbuf_t nbuf)
1471 {
1472 	if ((qdf_nbuf_is_sa_valid(nbuf) &&
1473 	     (hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr) >
1474 		wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) ||
1475 	    (!qdf_nbuf_is_da_mcbc(nbuf) &&
1476 	     qdf_nbuf_is_da_valid(nbuf) &&
1477 	     (hal_rx_msdu_end_da_idx_get(soc->hal_soc, rx_tlv_hdr) >
1478 	      wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))))
1479 		return false;
1480 
1481 	return true;
1482 }
1483 
1484 #ifndef WDS_VENDOR_EXTENSION
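/*
 * dp_wds_rx_policy_check() - default stub that accepts all frames when the
 * WDS vendor extension policy check is not compiled in.
 */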
1485 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
1486 			   struct dp_vdev *vdev,
1487 			   struct dp_peer *peer)
1488 {
1489 	return 1;
1490 }
1491 #endif
1492 
1493 #ifdef RX_DESC_DEBUG_CHECK
1494 /**
1495  * dp_rx_desc_nbuf_sanity_check - Add sanity check to catch REO rx_desc paddr
1496  *				  corruption
1497  *
1498  * @ring_desc: REO ring descriptor
1499  * @rx_desc: Rx descriptor
1500  *
1501  * Return: NONE
1502  */
1503 static inline
1504 void dp_rx_desc_nbuf_sanity_check(hal_ring_desc_t ring_desc,
1505 				  struct dp_rx_desc *rx_desc)
1506 {
1507 	struct hal_buf_info hbi;
1508 
1509 	hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
1510 	/* Sanity check for possible buffer paddr corruption */
1511 	qdf_assert_always((&hbi)->paddr ==
1512 			  qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0));
1513 }
1514 #else
1515 static inline
1516 void dp_rx_desc_nbuf_sanity_check(hal_ring_desc_t ring_desc,
1517 				  struct dp_rx_desc *rx_desc)
1518 {
1519 }
1520 #endif
1521 
1522 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
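/**
 * dp_rx_reap_loop_pkt_limit_hit() - check if the per-loop reap limit from
 *				     the SOC cfg has been reached
 * @soc: DP SOC handle
 * @num_reaped: number of buffers reaped so far in this loop
 *
 * Return: true if the configured rx_reap_loop_pkt_limit is hit
 */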
1523 static inline
1524 bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
1525 {
1526 	bool limit_hit = false;
1527 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
1528 
1529 	limit_hit =
1530 		(num_reaped >= cfg->rx_reap_loop_pkt_limit) ? true : false;
1531 
1532 	if (limit_hit)
1533 		DP_STATS_INC(soc, rx.reap_loop_pkt_limit_hit, 1)
1534 
1535 	return limit_hit;
1536 }
1537 
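/*
 * dp_rx_enable_eol_data_check() - report whether the end-of-loop check for
 * newly arrived rx data is enabled in the SOC cfg.
 */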
1538 static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
1539 {
1540 	return soc->wlan_cfg_ctx->rx_enable_eol_data_check;
1541 }
1542 
1543 #else
1544 static inline
1545 bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
1546 {
1547 	return false;
1548 }
1549 
1550 static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
1551 {
1552 	return false;
1553 }
1554 
1555 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
1556 
1557 /**
1558  * dp_is_special_data() - check if the pkt is a special frame such as eapol, dhcp, arp, etc.
1559  *
1560  * @nbuf: pkt skb pointer
1561  *
1562  * Return: true if matched, false if not
1563  */
1564 static inline
1565 bool dp_is_special_data(qdf_nbuf_t nbuf)
1566 {
1567 	if (qdf_nbuf_is_ipv4_arp_pkt(nbuf) ||
1568 	    qdf_nbuf_is_ipv4_dhcp_pkt(nbuf) ||
1569 	    qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
1570 	    qdf_nbuf_is_ipv6_dhcp_pkt(nbuf))
1571 		return true;
1572 	else
1573 		return false;
1574 }
1575 
1576 #ifdef DP_RX_PKT_NO_PEER_DELIVER
1577 /**
1578  * dp_rx_deliver_to_stack_no_peer() - try deliver rx data even if
1579  *				      no corresponding peer is found
1580  * @soc: core txrx main context
1581  * @nbuf: pkt skb pointer
1582  *
1583  * This function will try to deliver some RX special frames to stack
1584  * even when no matching peer is found. For instance, in the LFR case, some
1585  * eapol data will be sent to the host before peer_map is done.
1586  *
1587  * Return: None
1588  */
1589 static inline
1590 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
1591 {
1592 	uint32_t peer_mdata;
1593 	uint16_t peer_id;
1594 	uint8_t vdev_id;
1595 	struct dp_vdev *vdev;
1596 	uint32_t l2_hdr_offset = 0;
1597 	uint16_t msdu_len = 0;
1598 	uint32_t pkt_len = 0;
1599 	uint8_t *rx_tlv_hdr;
1600 
1601 	peer_mdata =  QDF_NBUF_CB_RX_PEER_ID(nbuf);
1602 
1603 	peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_mdata);
1604 	if (peer_id > soc->max_peers)
1605 		goto deliver_fail;
1606 
1607 	vdev_id = DP_PEER_METADATA_ID_GET(peer_mdata);
1608 	vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
1609 	if (!vdev || !vdev->osif_rx)
1610 		goto deliver_fail;
1611 
1612 	rx_tlv_hdr = qdf_nbuf_data(nbuf);
1613 	l2_hdr_offset =
1614 		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
1615 
1616 	msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
1617 	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;
1618 
1619 	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
1620 		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
1621 	} else {
1622 		qdf_nbuf_set_pktlen(nbuf, pkt_len);
1623 		qdf_nbuf_pull_head(nbuf,
1624 				   RX_PKT_TLVS_LEN +
1625 				   l2_hdr_offset);
1626 	}
1627 
1628 	/* only allow special frames */
1629 	if (!dp_is_special_data(nbuf))
1630 		goto deliver_fail;
1631 
1632 	vdev->osif_rx(vdev->osif_vdev, nbuf);
1633 	DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1);
1634 	return;
1635 
1636 deliver_fail:
1637 	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1638 			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
1639 	qdf_nbuf_free(nbuf);
1640 }
1641 #else
1642 static inline
1643 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
1644 {
1645 	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1646 			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
1647 	qdf_nbuf_free(nbuf);
1648 }
1649 #endif
1650 
1651 /**
1652  * dp_rx_srng_get_num_pending() - get number of pending entries
1653  * @hal_soc: hal soc opaque pointer
1654  * @hal_ring: opaque pointer to the HAL Rx Ring
1655  * @num_entries: number of entries in the hal_ring.
1656  * @near_full: pointer to a boolean. This is set if ring is near full.
1657  *
1658  * The function returns the number of entries in a destination ring which are
1659  * yet to be reaped. The function also checks if the ring is near full.
1660  * If more than half of the ring needs to be reaped, the ring is considered
1661  * approaching full.
1662  * The function uses hal_srng_dst_num_valid_locked to get the number of valid
1663  * entries. It should not be called within a SRNG lock. HW pointer value is
1664  * synced into cached_hp.
1665  *
1666  * Return: Number of pending entries if any
1667  */
1668 static
1669 uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
1670 				    hal_ring_handle_t hal_ring_hdl,
1671 				    uint32_t num_entries,
1672 				    bool *near_full)
1673 {
1674 	uint32_t num_pending = 0;
1675 
1676 	num_pending = hal_srng_dst_num_valid_locked(hal_soc,
1677 						    hal_ring_hdl,
1678 						    true);
1679 
1680 	if (num_entries && (num_pending >= num_entries >> 1))
1681 		*near_full = true;
1682 	else
1683 		*near_full = false;
1684 
1685 	return num_pending;
1686 }
1687 
1688 /**
1689  * dp_rx_process() - Brain of the Rx processing functionality
1690  *		     Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
1691  * @int_ctx: per interrupt context
1692  * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
1693  * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring.
1694  * @quota: No. of units (packets) that can be serviced in one shot.
1695  *
1696  * This function implements the core of Rx functionality. This is
1697  * expected to handle only non-error frames.
1698  *
1699  * Return: uint32_t: No. of elements processed
1700  */
1701 uint32_t dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
1702 			    uint8_t reo_ring_num, uint32_t quota)
1703 {
1704 	hal_ring_desc_t ring_desc;
1705 	hal_soc_handle_t hal_soc;
1706 	struct dp_rx_desc *rx_desc = NULL;
1707 	qdf_nbuf_t nbuf, next;
1708 	bool near_full;
1709 	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT];
1710 	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT];
1711 	uint32_t num_pending;
1712 	uint32_t rx_bufs_used = 0, rx_buf_cookie;
1713 	uint32_t l2_hdr_offset = 0;
1714 	uint16_t msdu_len = 0;
1715 	uint16_t peer_id;
1716 	struct dp_peer *peer;
1717 	struct dp_vdev *vdev;
1718 	uint32_t pkt_len = 0;
1719 	struct hal_rx_mpdu_desc_info mpdu_desc_info;
1720 	struct hal_rx_msdu_desc_info msdu_desc_info;
1721 	enum hal_reo_error_status error;
1722 	uint32_t peer_mdata;
1723 	uint8_t *rx_tlv_hdr;
1724 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT];
1725 	uint8_t mac_id = 0;
1726 	struct dp_pdev *pdev;
1727 	struct dp_pdev *rx_pdev;
1728 	struct dp_srng *dp_rxdma_srng;
1729 	struct rx_desc_pool *rx_desc_pool;
1730 	struct dp_soc *soc = int_ctx->soc;
1731 	uint8_t ring_id = 0;
1732 	uint8_t core_id = 0;
1733 	struct cdp_tid_rx_stats *tid_stats;
1734 	qdf_nbuf_t nbuf_head;
1735 	qdf_nbuf_t nbuf_tail;
1736 	qdf_nbuf_t deliver_list_head;
1737 	qdf_nbuf_t deliver_list_tail;
1738 	uint32_t num_rx_bufs_reaped = 0;
1739 	uint32_t intr_id;
1740 	struct hif_opaque_softc *scn;
1741 	int32_t tid = 0;
1742 	bool is_prev_msdu_last = true;
1743 	uint32_t num_entries_avail = 0;
1744 	uint32_t rx_ol_pkt_cnt = 0;
1745 	uint32_t num_entries = 0;
1746 
1747 	DP_HIST_INIT();
1748 
1749 	qdf_assert_always(soc && hal_ring_hdl);
1750 	hal_soc = soc->hal_soc;
1751 	qdf_assert_always(hal_soc);
1752 
1753 	scn = soc->hif_handle;
1754 	hif_pm_runtime_mark_dp_rx_busy(scn);
1755 	intr_id = int_ctx->dp_intr_id;
1756 	num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);
1757 
1758 more_data:
1759 	/* reset local variables here to be re-used in the function */
1760 	nbuf_head = NULL;
1761 	nbuf_tail = NULL;
1762 	deliver_list_head = NULL;
1763 	deliver_list_tail = NULL;
1764 	peer = NULL;
1765 	vdev = NULL;
1766 	num_rx_bufs_reaped = 0;
1767 
1768 	qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
1769 	qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
1770 	qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
1771 	qdf_mem_zero(head, sizeof(head));
1772 	qdf_mem_zero(tail, sizeof(tail));
1773 
1774 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
1775 
1776 		/*
1777 		 * Need API to convert from hal_ring pointer to
1778 		 * Ring Type / Ring Id combo
1779 		 */
1780 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
1781 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1782 			FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
1783 		goto done;
1784 	}
1785 
1786 	/*
1787 	 * start reaping the buffers from reo ring and queue
1788 	 * them in per vdev queue.
1789 	 * Process the received pkts in a different per vdev loop.
1790 	 */
1791 	while (qdf_likely(quota &&
1792 			  (ring_desc = hal_srng_dst_peek(hal_soc,
1793 							 hal_ring_hdl)))) {
1794 
1795 		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
1796 		ring_id = hal_srng_ring_id_get(hal_ring_hdl);
1797 
1798 		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
1799 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1800 			FL("HAL RING 0x%pK:error %d"), hal_ring_hdl, error);
1801 			DP_STATS_INC(soc, rx.err.hal_reo_error[ring_id], 1);
1802 			/* Don't know how to deal with this -- assert */
1803 			qdf_assert(0);
1804 		}
1805 
1806 		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
1807 
1808 		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
1809 		qdf_assert(rx_desc);
1810 
1811 		/*
1812 		 * This is an unlikely scenario where the host is reaping
1813 		 * a descriptor which it already reaped just a while ago
1814 		 * but has not yet replenished it back to HW.
1815 		 * In this case the host will dump the last 128 descriptors
1816 		 * including the software descriptor rx_desc and assert.
1817 		 */
1818 
1819 		if (qdf_unlikely(!rx_desc->in_use)) {
1820 			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
1821 			dp_info_rl("Reaping rx_desc not in use!");
1822 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
1823 						   ring_desc, rx_desc);
1824 			/* ignore duplicate RX desc and continue to process */
1825 			/* Pop out the descriptor */
1826 			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
1827 			continue;
1828 		}
1829 
1830 		if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
1831 			dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
1832 			DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
1833 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
1834 						   ring_desc, rx_desc);
1835 		}
1836 
1837 		dp_rx_desc_nbuf_sanity_check(ring_desc, rx_desc);
1838 
1839 		/* TODO */
1840 		/*
1841 		 * Need a separate API for unmapping based on
1842 		 * physical address
1843 		 */
1844 		qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
1845 					QDF_DMA_FROM_DEVICE);
1846 		rx_desc->unmapped = 1;
1847 
1848 		core_id = smp_processor_id();
1849 		DP_STATS_INC(soc, rx.ring_packets[core_id][ring_id], 1);
1850 
1851 		/* Get MPDU DESC info */
1852 		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);
1853 
1854 		/* Get MSDU DESC info */
1855 		hal_rx_msdu_desc_info_get(ring_desc, &msdu_desc_info);
1856 
1857 		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
1858 			qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1);
1859 
1860 		if (qdf_unlikely(mpdu_desc_info.mpdu_flags &
1861 				HAL_MPDU_F_RAW_AMPDU)) {
1862 			/* previous msdu has end bit set, so current one is
1863 			 * the new MPDU
1864 			 */
1865 			if (is_prev_msdu_last) {
1866 				is_prev_msdu_last = false;
1867 				/* Get number of entries available in HW ring */
1868 				num_entries_avail =
1869 				hal_srng_dst_num_valid(hal_soc,
1870 						       hal_ring_hdl, 1);
1871 
1872 				/* For new MPDU check if we can read complete
1873 				 * MPDU by comparing the number of buffers
1874 				 * available and number of buffers needed to
1875 				 * reap this MPDU
1876 				 */
1877 				if (((msdu_desc_info.msdu_len /
1878 				     (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN) + 1)) >
1879 				     num_entries_avail)
1880 					break;
1881 			} else {
1882 				if (msdu_desc_info.msdu_flags &
1883 				    HAL_MSDU_F_LAST_MSDU_IN_MPDU)
1884 					is_prev_msdu_last = true;
1885 			}
1886 			qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);
1887 		}
1888 
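
		/*
		 * Worked example for the "can we reap the whole MPDU" check
		 * above (constants assumed for illustration, not taken from
		 * this file): with RX_BUFFER_SIZE = 2048 and
		 * RX_PKT_TLVS_LEN = 384, each buffer carries
		 * 2048 - 384 = 1664 payload bytes, so an msdu_len of 4000
		 * needs 4000 / 1664 + 1 = 3 buffers. If fewer than 3 ring
		 * entries are valid, the loop breaks here and the MPDU is
		 * reaped on a later pass.
		 */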
1889 		/* Pop out the descriptor*/
1890 		hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
1891 
1892 		rx_bufs_reaped[rx_desc->pool_id]++;
1893 		peer_mdata = mpdu_desc_info.peer_meta_data;
1894 		QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
1895 			DP_PEER_METADATA_PEER_ID_GET(peer_mdata);
1896 
1897 		/*
1898 		 * Save the first, last and continuation MSDU flags in
1899 		 * nbuf->cb; also save mcbc, is_da_valid, is_sa_valid and
1900 		 * the msdu length in nbuf->cb. This ensures the info required for
1901 		 * per pkt processing is always in the same cache line.
1902 		 * This helps in improving throughput for smaller pkt
1903 		 * sizes.
1904 		 */
1905 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
1906 			qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);
1907 
1908 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
1909 			qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);
1910 
1911 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
1912 			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);
1913 
1914 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
1915 			qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1);
1916 
1917 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
1918 			qdf_nbuf_set_da_valid(rx_desc->nbuf, 1);
1919 
1920 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
1921 			qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1);
1922 
1923 		qdf_nbuf_set_tid_val(rx_desc->nbuf,
1924 				     HAL_RX_REO_QUEUE_NUMBER_GET(ring_desc));
1925 
1926 		QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len;
1927 
1928 		QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;
1929 
1930 		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
1931 
1932 		/*
1933 		 * if continuation bit is set then we have MSDU spread
1934 		 * across multiple buffers, let us not decrement quota
1935 		 * till we reap all buffers of that MSDU.
1936 		 */
1937 		if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)))
1938 			quota -= 1;
1939 
1940 		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
1941 						&tail[rx_desc->pool_id],
1942 						rx_desc);
1943 
1944 		num_rx_bufs_reaped++;
1945 		if (dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped))
1946 			break;
1947 	}
1948 done:
1949 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
1950 
1951 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
1952 		/*
1953 		 * continue with next mac_id if no pkts were reaped
1954 		 * from that pool
1955 		 */
1956 		if (!rx_bufs_reaped[mac_id])
1957 			continue;
1958 
1959 		pdev = soc->pdev_list[mac_id];
1960 		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
1961 		rx_desc_pool = &soc->rx_desc_buf[mac_id];
1962 
1963 		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
1964 					rx_desc_pool, rx_bufs_reaped[mac_id],
1965 					&head[mac_id], &tail[mac_id]);
1966 	}
1967 
1968 	dp_verbose_debug("replenished %u\n", rx_bufs_reaped[0]);
1969 	/* Peer can be NULL in case of LFR */
1970 	if (qdf_likely(peer))
1971 		vdev = NULL;
1972 
1973 	/*
1974 	 * BIG loop where each nbuf is dequeued from global queue,
1975 	 * processed and queued back on a per vdev basis. These nbufs
1976 	 * are sent to stack as and when we run out of nbufs
1977 	 * or a new nbuf dequeued from global queue has a different
1978 	 * vdev when compared to previous nbuf.
1979 	 */
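	/*
	 * Illustrative example of the batching described above (a
	 * hypothetical sequence, not traced from real traffic): if the
	 * reaped list holds nbufs for vdevs A, A, B in that order, the two
	 * A nbufs are chained on deliver_list and handed to the stack via
	 * dp_rx_deliver_to_stack() as soon as the B nbuf (different vdev)
	 * is dequeued; B then starts a fresh deliver_list.
	 */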
1980 	nbuf = nbuf_head;
1981 	while (nbuf) {
1982 		next = nbuf->next;
1983 		rx_tlv_hdr = qdf_nbuf_data(nbuf);
1984 		/* Get TID from struct cb->tid_val, save to tid */
1985 		if (qdf_nbuf_is_rx_chfrag_start(nbuf))
1986 			tid = qdf_nbuf_get_tid_val(nbuf);
1987 
1988 		peer_mdata =  QDF_NBUF_CB_RX_PEER_ID(nbuf);
1989 		peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_mdata);
1990 		peer = dp_peer_find_by_id(soc, peer_id);
1991 
1992 		if (peer) {
1993 			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
1994 			qdf_dp_trace_set_track(nbuf, QDF_RX);
1995 			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
1996 			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
1997 				QDF_NBUF_RX_PKT_DATA_TRACK;
1998 		}
1999 
2000 		rx_bufs_used++;
2001 
2002 		if (deliver_list_head && peer && (vdev != peer->vdev)) {
2003 			dp_rx_deliver_to_stack(vdev, peer, deliver_list_head,
2004 					deliver_list_tail);
2005 			deliver_list_head = NULL;
2006 			deliver_list_tail = NULL;
2007 		}
2008 
2009 		if (qdf_likely(peer)) {
2010 			vdev = peer->vdev;
2011 		} else {
2012 			nbuf->next = NULL;
2013 			dp_rx_deliver_to_stack_no_peer(soc, nbuf);
2014 			nbuf = next;
2015 			continue;
2016 		}
2017 
2018 		if (qdf_unlikely(!vdev)) {
2019 			qdf_nbuf_free(nbuf);
2020 			nbuf = next;
2021 			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
2022 			dp_peer_unref_del_find_by_id(peer);
2023 			continue;
2024 		}
2025 
2026 		rx_pdev = vdev->pdev;
2027 		DP_RX_TID_SAVE(nbuf, tid);
2028 		if (qdf_unlikely(rx_pdev->delay_stats_flag))
2029 			qdf_nbuf_set_timestamp(nbuf);
2030 
2031 		ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
2032 		tid_stats =
2033 			&rx_pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
2034 
2035 		/*
2036 		 * Check if DMA completed -- msdu_done is the last bit
2037 		 * to be written
2038 		 */
2039 		if (qdf_unlikely(!qdf_nbuf_is_raw_frame(nbuf) &&
2040 				 !hal_rx_attn_msdu_done_get(rx_tlv_hdr))) {
2041 			dp_err("MSDU DONE failure");
2042 			DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
2043 			hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
2044 					     QDF_TRACE_LEVEL_INFO);
2045 			tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
2046 			qdf_nbuf_free(nbuf);
2047 			qdf_assert(0);
2048 			nbuf = next;
2049 			continue;
2050 		}
2051 
2052 		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
2053 		/*
2054 		 * First IF condition:
2055 		 * 802.11 Fragmented pkts are reinjected to REO
2056 		 * HW block as SG pkts and for these pkts we only
2057 		 * need to pull the RX TLVS header length.
2058 		 * Second IF condition:
2059 		 * The below condition happens when an MSDU is spread
2060 		 * across multiple buffers. This can happen in two cases
2061 		 * 1. The nbuf size is smaller than the received msdu.
2062 		 *    ex: we have set the nbuf size to 2048 during
2063 		 *        nbuf_alloc. but we received an msdu which is
2064 		 *        2304 bytes in size then this msdu is spread
2065 		 *        across 2 nbufs.
2066 		 *
2067 		 * 2. AMSDUs when RAW mode is enabled.
2068 		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
2069 		 *        across 1st nbuf and 2nd nbuf and last MSDU is
2070 		 *        spread across 2nd nbuf and 3rd nbuf.
2071 		 *
2072 		 * For these scenarios let us create an skb frag_list and
2073 		 * append these buffers till the last MSDU of the AMSDU
2074 		 * Third condition:
2075 		 * This is the most likely case, we receive 802.3 pkts
2076 		 * decapsulated by HW, here we need to set the pkt length.
2077 		 */
2078 		if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
2079 			bool is_mcbc, is_sa_vld, is_da_vld;
2080 
2081 			is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
2082 								 rx_tlv_hdr);
2083 			is_sa_vld =
2084 				hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
2085 								rx_tlv_hdr);
2086 			is_da_vld =
2087 				hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
2088 								rx_tlv_hdr);
2089 
2090 			qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
2091 			qdf_nbuf_set_da_valid(nbuf, is_da_vld);
2092 			qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);
2093 
2094 			qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
2095 		} else if (qdf_nbuf_is_raw_frame(nbuf)) {
2096 			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
2097 			nbuf = dp_rx_sg_create(nbuf, rx_tlv_hdr);
2098 
2099 			DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
2100 			DP_STATS_INC_PKT(peer, rx.raw, 1, msdu_len);
2101 
2102 			next = nbuf->next;
2103 		} else {
2104 			l2_hdr_offset =
2105 				hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
2106 								   rx_tlv_hdr);
2107 
2108 			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
2109 			pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;
2110 
2111 			qdf_nbuf_set_pktlen(nbuf, pkt_len);
2112 			qdf_nbuf_pull_head(nbuf,
2113 					   RX_PKT_TLVS_LEN +
2114 					   l2_hdr_offset);
2115 		}
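
		/*
		 * Numeric sketch of the length handling in the else branch
		 * above (values assumed for illustration only): with
		 * msdu_len = 1400, l2_hdr_offset = 2 and RX_PKT_TLVS_LEN
		 * taken as 384, pkt_len = 1400 + 2 + 384 = 1786.
		 * qdf_nbuf_set_pktlen() sets the nbuf length to 1786 and
		 * qdf_nbuf_pull_head() then advances the data pointer past
		 * the 386 bytes of TLVs plus L3 padding, leaving the
		 * 1400-byte decapsulated frame.
		 */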
2116 
2117 		/*
2118 		 * Process the frame for multipass processing
2119 		 */
2120 		if (qdf_unlikely(vdev->multipass_en)) {
2121 			dp_rx_multipass_process(peer, nbuf, tid);
2122 		}
2123 
2124 		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
2125 			QDF_TRACE(QDF_MODULE_ID_DP,
2126 					QDF_TRACE_LEVEL_ERROR,
2127 					FL("Policy Check Drop pkt"));
2128 			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
2129 			/* Drop & free packet */
2130 			qdf_nbuf_free(nbuf);
2131 			/* Statistics */
2132 			nbuf = next;
2133 			dp_peer_unref_del_find_by_id(peer);
2134 			continue;
2135 		}
2136 
2137 		if (qdf_unlikely(peer && (peer->nawds_enabled) &&
2138 				 (qdf_nbuf_is_da_mcbc(nbuf)) &&
2139 				 (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc,
2140 								rx_tlv_hdr) ==
2141 				  false))) {
2142 			tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
2143 			DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
2144 			qdf_nbuf_free(nbuf);
2145 			nbuf = next;
2146 			dp_peer_unref_del_find_by_id(peer);
2147 			continue;
2148 		}
2149 
2150 		if (soc->process_rx_status)
2151 			dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);
2152 
2153 		/* Update the protocol tag in SKB based on CCE metadata */
2154 		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
2155 					  reo_ring_num, false, true);
2156 
2157 		/* Update the flow tag in SKB based on FSE metadata */
2158 		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
2159 
2160 		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer,
2161 					ring_id, tid_stats);
2162 
2163 		if (qdf_unlikely(vdev->mesh_vdev)) {
2164 			if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
2165 					== QDF_STATUS_SUCCESS) {
2166 				QDF_TRACE(QDF_MODULE_ID_DP,
2167 						QDF_TRACE_LEVEL_INFO_MED,
2168 						FL("mesh pkt filtered"));
2169 				tid_stats->fail_cnt[MESH_FILTER_DROP]++;
2170 				DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
2171 					     1);
2172 
2173 				qdf_nbuf_free(nbuf);
2174 				nbuf = next;
2175 				dp_peer_unref_del_find_by_id(peer);
2176 				continue;
2177 			}
2178 			dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
2179 		}
2180 
2181 		if (qdf_likely(vdev->rx_decap_type ==
2182 			       htt_cmn_pkt_type_ethernet) &&
2183 		    qdf_likely(!vdev->mesh_vdev)) {
2184 			/* WDS Destination Address Learning */
2185 			dp_rx_da_learn(soc, rx_tlv_hdr, peer, nbuf);
2186 
2187 			/* Due to a HW issue, sometimes the sa_idx and
2188 			 * da_idx are invalid even though the sa_valid and
2189 			 * da_valid bits are set.
2190 			 *
2191 			 * In this case the value of sa_sw_peer_id is also
2192 			 * seen as 0.
2193 			 *
2194 			 * Drop the packet if sa_idx or da_idx is out of
2195 			 * bounds, or if sa_sw_peer_id is 0.
2196 			 */
2197 			if (!is_sa_da_idx_valid(soc, rx_tlv_hdr, nbuf)) {
2198 				qdf_nbuf_free(nbuf);
2199 				nbuf = next;
2200 				DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
2201 				dp_peer_unref_del_find_by_id(peer);
2202 				continue;
2203 			}
2204 			/* WDS Source Port Learning */
2205 			if (qdf_likely(vdev->wds_enabled))
2206 				dp_rx_wds_srcport_learn(soc, rx_tlv_hdr,
2207 							peer, nbuf);
2208 
2209 			/* Intrabss-fwd */
2210 			if (dp_rx_check_ap_bridge(vdev))
2211 				if (dp_rx_intrabss_fwd(soc,
2212 							peer,
2213 							rx_tlv_hdr,
2214 							nbuf)) {
2215 					nbuf = next;
2216 					dp_peer_unref_del_find_by_id(peer);
2217 					tid_stats->intrabss_cnt++;
2218 					continue; /* Get next desc */
2219 				}
2220 		}
2221 
2222 		dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt);
2223 
2224 		DP_RX_LIST_APPEND(deliver_list_head,
2225 				  deliver_list_tail,
2226 				  nbuf);
2227 		DP_STATS_INC_PKT(peer, rx.to_stack, 1,
2228 				 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
2229 
2230 		tid_stats->delivered_to_stack++;
2231 		nbuf = next;
2232 		dp_peer_unref_del_find_by_id(peer);
2233 	}
2234 
2235 	if (deliver_list_head && peer)
2236 		dp_rx_deliver_to_stack(vdev, peer, deliver_list_head,
2237 				       deliver_list_tail);
2238 
2239 	if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
2240 		if (quota) {
2241 			num_pending =
2242 				dp_rx_srng_get_num_pending(hal_soc,
2243 							   hal_ring_hdl,
2244 							   num_entries,
2245 							   &near_full);
2246 			if (num_pending) {
2247 				DP_STATS_INC(soc, rx.hp_oos2, 1);
2248 
2249 				if (!hif_exec_should_yield(scn, intr_id))
2250 					goto more_data;
2251 
2252 				if (qdf_unlikely(near_full)) {
2253 					DP_STATS_INC(soc, rx.near_full, 1);
2254 					goto more_data;
2255 				}
2256 			}
2257 		}
2258 
2259 		if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
2260 			vdev->osif_gro_flush(vdev->osif_vdev,
2261 					     reo_ring_num);
2262 		}
2263 	}
2264 
2265 	/* Update histogram statistics by looping through pdevs */
2266 	DP_RX_HIST_STATS_PER_PDEV();
2267 
2268 	return rx_bufs_used; /* Assume no scale factor for now */
2269 }
2270 
2271 QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev)
2272 {
2273 	QDF_STATUS ret;
2274 
2275 	if (vdev->osif_rx_flush) {
2276 		ret = vdev->osif_rx_flush(vdev->osif_vdev, vdev->vdev_id);
2277 		if (!ret) {
2278 			dp_err("Failed to flush rx pkts for vdev %d\n",
2279 			       vdev->vdev_id);
2280 			return ret;
2281 		}
2282 	}
2283 
2284 	return QDF_STATUS_SUCCESS;
2285 }
2286 
2287 /**
2288  * dp_rx_pdev_detach() - detach dp rx
2289  * @pdev: core txrx pdev context
2290  *
2291  * This function will detach DP RX from the main device context
2292  * and free DP RX resources.
2293  *
2294  * Return: void
2295  */
2296 void
2297 dp_rx_pdev_detach(struct dp_pdev *pdev)
2298 {
2299 	uint8_t pdev_id = pdev->pdev_id;
2300 	struct dp_soc *soc = pdev->soc;
2301 	struct rx_desc_pool *rx_desc_pool;
2302 
2303 	rx_desc_pool = &soc->rx_desc_buf[pdev_id];
2304 
2305 	if (rx_desc_pool->pool_size != 0) {
2306 		if (!dp_is_soc_reinit(soc))
2307 			dp_rx_desc_nbuf_and_pool_free(soc, pdev_id,
2308 						      rx_desc_pool);
2309 		else
2310 			dp_rx_desc_nbuf_free(soc, rx_desc_pool);
2311 	}
2312 
2313 	return;
2314 }
2315 
2316 static QDF_STATUS
2317 dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc, qdf_nbuf_t *nbuf,
2318 			   struct dp_pdev *dp_pdev)
2319 {
2320 	qdf_dma_addr_t paddr;
2321 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
2322 
2323 	*nbuf = qdf_nbuf_alloc(dp_soc->osdev, RX_BUFFER_SIZE,
2324 			      RX_BUFFER_RESERVATION, RX_BUFFER_ALIGNMENT,
2325 			      FALSE);
2326 	if (!(*nbuf)) {
2327 		dp_err("nbuf alloc failed");
2328 		DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
2329 		return ret;
2330 	}
2331 
2332 	ret = qdf_nbuf_map_single(dp_soc->osdev, *nbuf,
2333 				  QDF_DMA_FROM_DEVICE);
2334 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
2335 		qdf_nbuf_free(*nbuf);
2336 		dp_err("nbuf map failed");
2337 		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
2338 		return ret;
2339 	}
2340 
2341 	paddr = qdf_nbuf_get_frag_paddr(*nbuf, 0);
2342 
2343 	ret = check_x86_paddr(dp_soc, nbuf, &paddr, dp_pdev);
2344 	if (ret == QDF_STATUS_E_FAILURE) {
2345 		qdf_nbuf_unmap_single(dp_soc->osdev, *nbuf,
2346 				      QDF_DMA_FROM_DEVICE);
2347 		qdf_nbuf_free(*nbuf);
2348 		dp_err("nbuf check x86 failed");
2349 		DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
2350 		return ret;
2351 	}
2352 
2353 	return QDF_STATUS_SUCCESS;
2354 }
2355 
2356 QDF_STATUS
2357 dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
2358 			  struct dp_srng *dp_rxdma_srng,
2359 			  struct rx_desc_pool *rx_desc_pool,
2360 			  uint32_t num_req_buffers)
2361 {
2362 	struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(dp_soc, mac_id);
2363 	hal_ring_handle_t rxdma_srng = dp_rxdma_srng->hal_srng;
2364 	union dp_rx_desc_list_elem_t *next;
2365 	void *rxdma_ring_entry;
2366 	qdf_dma_addr_t paddr;
2367 	qdf_nbuf_t *rx_nbuf_arr;
2368 	uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0;
2369 	uint32_t buffer_index, nbuf_ptrs_per_page;
2370 	qdf_nbuf_t nbuf;
2371 	QDF_STATUS ret;
2372 	int page_idx, total_pages;
2373 	union dp_rx_desc_list_elem_t *desc_list = NULL;
2374 	union dp_rx_desc_list_elem_t *tail = NULL;
2375 
2376 	if (qdf_unlikely(!rxdma_srng)) {
2377 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
2378 		return QDF_STATUS_E_FAILURE;
2379 	}
2380 
2381 	dp_debug("requested %u RX buffers for driver attach", num_req_buffers);
2382 
2383 	nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool,
2384 					    num_req_buffers, &desc_list, &tail);
2385 	if (!nr_descs) {
2386 		dp_err("no free rx_descs in freelist");
2387 		DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
2388 		return QDF_STATUS_E_NOMEM;
2389 	}
2390 
2391 	dp_debug("got %u RX descs for driver attach", nr_descs);
2392 
2393 	/*
2394 	 * Try to allocate pointers to the nbuf one page at a time.
2395 	 * Take pointers that can fit in one page of memory and
2396 	 * iterate through the total descriptors that need to be
2397 	 * allocated, in page-sized batches. Reuse the same
2398 	 * page-sized array of pointers across iterations to
2399 	 * hold the nbufs allocated for each batch.
2400 	 */
2401 	total_pages = (nr_descs * sizeof(*rx_nbuf_arr)) / PAGE_SIZE;
2402 
2403 	/*
2404 	 * Add an extra page to store the remainder if any
2405 	 */
2406 	if ((nr_descs * sizeof(*rx_nbuf_arr)) % PAGE_SIZE)
2407 		total_pages++;
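	/*
	 * Worked example of the paging math above (sizes assumed for
	 * illustration): with nr_descs = 1024, 8-byte nbuf pointers and a
	 * 4096-byte PAGE_SIZE, the pointer array needs 8192 bytes, i.e.
	 * total_pages = 2 with no remainder; nbuf_ptrs_per_page, computed
	 * below, works out to 4096 / 8 = 512 entries per iteration.
	 */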
2408 	rx_nbuf_arr = qdf_mem_malloc(PAGE_SIZE);
2409 	if (!rx_nbuf_arr) {
2410 		dp_err("failed to allocate nbuf array");
2411 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
2412 		QDF_BUG(0);
2413 		return QDF_STATUS_E_NOMEM;
2414 	}
2415 	nbuf_ptrs_per_page = PAGE_SIZE / sizeof(*rx_nbuf_arr);
2416 
2417 	for (page_idx = 0; page_idx < total_pages; page_idx++) {
2418 		qdf_mem_zero(rx_nbuf_arr, PAGE_SIZE);
2419 
2420 		for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) {
2421 			/*
2422 			 * The last page of buffer pointers may not be needed
2423 			 * in full, depending on the number of descriptors.
2424 			 * The check below ensures we allocate only the
2425 			 * required number of buffers.
2426 			 */
2427 			if (nr_nbuf_total >= nr_descs)
2428 				break;
2429 			ret = dp_pdev_nbuf_alloc_and_map(dp_soc,
2430 							 &rx_nbuf_arr[nr_nbuf],
2431 							 dp_pdev);
2432 			if (QDF_IS_STATUS_ERROR(ret))
2433 				break;
2434 
2435 			nr_nbuf_total++;
2436 		}
2437 
2438 		hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
2439 
2440 		for (buffer_index = 0; buffer_index < nr_nbuf; buffer_index++) {
2441 			rxdma_ring_entry =
2442 				hal_srng_src_get_next(dp_soc->hal_soc,
2443 						      rxdma_srng);
2444 			qdf_assert_always(rxdma_ring_entry);
2445 
2446 			next = desc_list->next;
2447 			nbuf = rx_nbuf_arr[buffer_index];
2448 			paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
2449 
2450 			dp_rx_desc_prep(&desc_list->rx_desc, nbuf);
2451 			desc_list->rx_desc.in_use = 1;
2452 
2453 			hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
2454 						     desc_list->rx_desc.cookie,
2455 						     rx_desc_pool->owner);
2456 
2457 			dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, nbuf, true);
2458 
2459 			desc_list = next;
2460 		}
2461 
2462 		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
2463 	}
2464 
2465 	dp_info("filled %u RX buffers for driver attach", nr_nbuf_total);
2466 	qdf_mem_free(rx_nbuf_arr);
2467 
2468 	if (!nr_nbuf_total) {
2469 		dp_err("No nbufs allocated");
2470 		QDF_BUG(0);
2471 		return QDF_STATUS_E_RESOURCES;
2472 	}
2473 	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf,
2474 			 RX_BUFFER_SIZE * nr_nbuf_total);
2475 
2476 	return QDF_STATUS_SUCCESS;
2477 }
2478 
2479 /**
2480  * dp_rx_pdev_attach() - attach DP RX
2481  * @pdev: core txrx pdev context
2482  *
2483  * This function will attach a DP RX instance to the main
2484  * device (SOC) context. It will allocate DP RX resources and
2485  * initialize them.
2486  *
2487  * Return: QDF_STATUS_SUCCESS: success
2488  *         QDF_STATUS_E_RESOURCES: Error return
2489  */
2490 QDF_STATUS
2491 dp_rx_pdev_attach(struct dp_pdev *pdev)
2492 {
2493 	uint8_t pdev_id = pdev->pdev_id;
2494 	struct dp_soc *soc = pdev->soc;
2495 	uint32_t rxdma_entries;
2496 	uint32_t rx_sw_desc_weight;
2497 	struct dp_srng *dp_rxdma_srng;
2498 	struct rx_desc_pool *rx_desc_pool;
2499 	QDF_STATUS ret_val;
2500 
2501 
2502 	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
2503 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2504 			  "nss-wifi<4> skip Rx refill %d", pdev_id);
2505 		return QDF_STATUS_SUCCESS;
2506 	}
2507 
2508 	pdev = soc->pdev_list[pdev_id];
2509 	dp_rxdma_srng = &pdev->rx_refill_buf_ring;
2510 	rxdma_entries = dp_rxdma_srng->num_entries;
2511 
2512 	soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;
2513 
2514 	rx_desc_pool = &soc->rx_desc_buf[pdev_id];
2515 	rx_sw_desc_weight = wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc->wlan_cfg_ctx);
2516 
2517 	dp_rx_desc_pool_alloc(soc, pdev_id,
2518 			      rx_sw_desc_weight * rxdma_entries,
2519 			      rx_desc_pool);
2520 
2521 	rx_desc_pool->owner = DP_WBM2SW_RBM;
2522 	/* For Rx buffers, WBM release ring is SW RING 3,for all pdev's */
2523 	/* For Rx buffers, WBM release ring is SW RING 3, for all pdevs */
2524 	ret_val = dp_rx_fst_attach(soc, pdev);
2525 	if ((ret_val != QDF_STATUS_SUCCESS) &&
2526 	    (ret_val != QDF_STATUS_E_NOSUPPORT)) {
2527 		QDF_TRACE(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_ERROR,
2528 			  "RX Flow Search Table attach failed: pdev %d err %d",
2529 			  pdev_id, ret_val);
2530 		return ret_val;
2531 	}
2532 
2533 	return dp_pdev_rx_buffers_attach(soc, pdev_id, dp_rxdma_srng,
2534 					 rx_desc_pool, rxdma_entries - 1);
2535 }
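
/*
 * Sizing sketch for dp_rx_pdev_attach() above (numbers assumed for
 * illustration, not taken from the platform configuration): if the
 * configured SW descriptor weight were 3 and the refill ring had 4096
 * entries, the descriptor pool would be sized at 3 * 4096 = 12288
 * entries, while only rxdma_entries - 1 = 4095 buffers are actually
 * posted at attach time.
 */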
2536 
2537 /*
2538  * dp_rx_nbuf_prepare() - prepare RX nbuf
2539  * @soc: core txrx main context
2540  * @pdev: core txrx pdev context
2541  *
2542  * This function allocates and maps an nbuf for RX DMA usage, retrying
2543  * on failure until it succeeds or the retry limit is reached.
2544  *
2545  * Return: qdf_nbuf_t pointer if succeeded, NULL if failed.
2546  */
2547 qdf_nbuf_t
2548 dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev)
2549 {
2550 	uint8_t *buf;
2551 	int32_t nbuf_retry_count;
2552 	QDF_STATUS ret;
2553 	qdf_nbuf_t nbuf = NULL;
2554 
2555 	for (nbuf_retry_count = 0; nbuf_retry_count <
2556 		QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD;
2557 			nbuf_retry_count++) {
2558 		/* Allocate a new skb */
2559 		nbuf = qdf_nbuf_alloc(soc->osdev,
2560 					RX_BUFFER_SIZE,
2561 					RX_BUFFER_RESERVATION,
2562 					RX_BUFFER_ALIGNMENT,
2563 					FALSE);
2564 
2565 		if (!nbuf) {
2566 			DP_STATS_INC(pdev,
2567 				replenish.nbuf_alloc_fail, 1);
2568 			continue;
2569 		}
2570 
2571 		buf = qdf_nbuf_data(nbuf);
2572 
2573 		memset(buf, 0, RX_BUFFER_SIZE);
2574 
2575 		ret = qdf_nbuf_map_single(soc->osdev, nbuf,
2576 				    QDF_DMA_FROM_DEVICE);
2577 
2578 		/* nbuf map failed */
2579 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
2580 			qdf_nbuf_free(nbuf);
2581 			DP_STATS_INC(pdev, replenish.map_err, 1);
2582 			continue;
2583 		}
2584 		/* qdf_nbuf alloc and map succeeded */
2585 		break;
2586 	}
2587 
2588 	/* nbuf alloc or map still failed after all retries */
2589 	if (qdf_unlikely(nbuf_retry_count >=
2590 			QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD))
2591 		return NULL;
2592 
2593 	return nbuf;
2594 }
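
/*
 * Illustrative usage sketch for dp_rx_nbuf_prepare() (the caller shown is
 * hypothetical; the real callers live elsewhere in the DP code):
 *
 *	qdf_nbuf_t nbuf = dp_rx_nbuf_prepare(soc, pdev);
 *
 *	if (!nbuf)
 *		return QDF_STATUS_E_NOMEM;  // alloc/map retries exhausted
 */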
2595