xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.c (revision 8967ce71a84a76351f8ebf239925d47f7c692f7e)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "hal_hw_headers.h"
20 #include "dp_types.h"
21 #include "dp_rx.h"
22 #include "dp_peer.h"
23 #include "hal_rx.h"
24 #include "hal_api.h"
25 #include "qdf_nbuf.h"
26 #ifdef MESH_MODE_SUPPORT
27 #include "if_meta_hdr.h"
28 #endif
29 #include "dp_internal.h"
30 #include "dp_rx_mon.h"
31 #include "dp_ipa.h"
32 #ifdef FEATURE_WDS
33 #include "dp_txrx_wds.h"
34 #endif
35 
36 #ifdef ATH_RX_PRI_SAVE
37 #define DP_RX_TID_SAVE(_nbuf, _tid) \
38 	(qdf_nbuf_set_priority(_nbuf, _tid))
39 #else
40 #define DP_RX_TID_SAVE(_nbuf, _tid)
41 #endif
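/*
 * Note: when ATH_RX_PRI_SAVE is enabled, DP_RX_TID_SAVE caches the TID in
 * the nbuf priority field; otherwise the macro compiles to a no-op.
 */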
42 
43 #ifdef DP_RX_DISABLE_NDI_MDNS_FORWARDING
44 static inline
45 bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
46 {
47 	if (ta_peer->vdev->opmode == wlan_op_mode_ndi &&
48 	    qdf_nbuf_is_ipv6_mdns_pkt(nbuf)) {
49 		DP_STATS_INC(ta_peer, rx.intra_bss.mdns_no_fwd, 1);
50 		return false;
51 	}
52 		return true;
53 }
54 #else
55 static inline
56 bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
57 {
58 	return true;
59 }
60 #endif
61 static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
62 {
63 	return vdev->ap_bridge_enabled;
64 }
65 
66 #ifdef DUP_RX_DESC_WAR
67 void dp_rx_dump_info_and_assert(struct dp_soc *soc,
68 				hal_ring_handle_t hal_ring,
69 				hal_ring_desc_t ring_desc,
70 				struct dp_rx_desc *rx_desc)
71 {
72 	void *hal_soc = soc->hal_soc;
73 
74 	hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc);
75 	dp_rx_desc_dump(rx_desc);
76 }
77 #else
78 void dp_rx_dump_info_and_assert(struct dp_soc *soc,
79 				hal_ring_handle_t hal_ring_hdl,
80 				hal_ring_desc_t ring_desc,
81 				struct dp_rx_desc *rx_desc)
82 {
83 	hal_soc_handle_t hal_soc = soc->hal_soc;
84 
85 	dp_rx_desc_dump(rx_desc);
86 	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
87 	hal_srng_dump_ring(hal_soc, hal_ring_hdl);
88 	qdf_assert_always(0);
89 }
90 #endif
91 
92 /*
93  * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
94  *			       called during dp rx initialization
95  *			       and at the end of dp_rx_process.
96  *
97  * @soc: core txrx main context
98  * @mac_id: mac_id which is one of 3 mac_ids
99  * @dp_rxdma_srng: dp rxdma circular ring
100  * @rx_desc_pool: Pointer to free Rx descriptor pool
101  * @num_req_buffers: number of buffers to be replenished
102  * @desc_list: list of descs if called from dp_rx_process
103  *	       or NULL during dp rx initialization or out of buffer
104  *	       interrupt.
105  * @tail: tail of descs list
106  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS error code on failure
107  */
108 QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
109 				struct dp_srng *dp_rxdma_srng,
110 				struct rx_desc_pool *rx_desc_pool,
111 				uint32_t num_req_buffers,
112 				union dp_rx_desc_list_elem_t **desc_list,
113 				union dp_rx_desc_list_elem_t **tail)
114 {
115 	uint32_t num_alloc_desc;
116 	uint16_t num_desc_to_free = 0;
117 	struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(dp_soc, mac_id);
118 	uint32_t num_entries_avail;
119 	uint32_t count;
120 	int sync_hw_ptr = 1;
121 	qdf_dma_addr_t paddr;
122 	qdf_nbuf_t rx_netbuf;
123 	void *rxdma_ring_entry;
124 	union dp_rx_desc_list_elem_t *next;
125 	QDF_STATUS ret;
126 
127 	void *rxdma_srng;
128 
129 	rxdma_srng = dp_rxdma_srng->hal_srng;
130 
131 	if (!rxdma_srng) {
132 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
133 				  "rxdma srng not initialized");
134 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
135 		return QDF_STATUS_E_FAILURE;
136 	}
137 
138 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
139 		"requested %d buffers for replenish", num_req_buffers);
140 
141 	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
142 	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
143 						   rxdma_srng,
144 						   sync_hw_ptr);
145 
146 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
147 		"no of available entries in rxdma ring: %d",
148 		num_entries_avail);
149 
150 	if (!(*desc_list) && (num_entries_avail >
151 		((dp_rxdma_srng->num_entries * 3) / 4))) {
152 		num_req_buffers = num_entries_avail;
153 	} else if (num_entries_avail < num_req_buffers) {
154 		num_desc_to_free = num_req_buffers - num_entries_avail;
155 		num_req_buffers = num_entries_avail;
156 	}
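	/*
	 * Sizing note: when no desc_list is passed in and the ring is more
	 * than 3/4 empty, the request is raised to fill every available
	 * entry; when fewer entries are available than requested, the
	 * request is capped and the excess descriptors are returned to the
	 * free list at the free_descs label below.
	 */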
157 
158 	if (qdf_unlikely(!num_req_buffers)) {
159 		num_desc_to_free = num_req_buffers;
160 		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
161 		goto free_descs;
162 	}
163 
164 	/*
165 	 * if desc_list is NULL, allocate the descs from freelist
166 	 */
167 	if (!(*desc_list)) {
168 		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
169 							  rx_desc_pool,
170 							  num_req_buffers,
171 							  desc_list,
172 							  tail);
173 
174 		if (!num_alloc_desc) {
175 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
176 				"no free rx_descs in freelist");
177 			DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
178 					num_req_buffers);
179 			hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
180 			return QDF_STATUS_E_NOMEM;
181 		}
182 
183 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
184 			"%d rx desc allocated", num_alloc_desc);
185 		num_req_buffers = num_alloc_desc;
186 	}
187 
188 
189 	count = 0;
190 
191 	while (count < num_req_buffers) {
192 		rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
193 					RX_BUFFER_SIZE,
194 					RX_BUFFER_RESERVATION,
195 					RX_BUFFER_ALIGNMENT,
196 					FALSE);
197 
198 		if (qdf_unlikely(!rx_netbuf)) {
199 			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
200 			break;
201 		}
202 
203 		ret = qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf,
204 					  QDF_DMA_FROM_DEVICE);
205 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
206 			qdf_nbuf_free(rx_netbuf);
207 			DP_STATS_INC(dp_pdev, replenish.map_err, 1);
208 			continue;
209 		}
210 
211 		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);
212 
213 		dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, rx_netbuf, true);
214 		/*
215 		 * If the physical address of nbuf->data is less than
216 		 * 0x50000000, free the nbuf and try allocating a new
217 		 * nbuf. We can retry up to 100 times. This is a
218 		 * temporary WAR till it is fixed properly.
219 		 */
220 		ret = check_x86_paddr(dp_soc, &rx_netbuf, &paddr, dp_pdev);
221 		if (ret == QDF_STATUS_E_FAILURE) {
222 			DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
223 			break;
224 		}
225 
226 		count++;
227 
228 		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
229 							 rxdma_srng);
230 		qdf_assert_always(rxdma_ring_entry);
231 
232 		next = (*desc_list)->next;
233 
234 		dp_rx_desc_prep(&((*desc_list)->rx_desc), rx_netbuf);
235 
236 		/* rx_desc.in_use should be zero at this time */
237 		qdf_assert_always((*desc_list)->rx_desc.in_use == 0);
238 
239 		(*desc_list)->rx_desc.in_use = 1;
240 
241 		dp_verbose_debug("rx_netbuf=%pK, buf=%pK, paddr=0x%llx, cookie=%d",
242 				 rx_netbuf, qdf_nbuf_data(rx_netbuf),
243 				 (unsigned long long)paddr,
244 				 (*desc_list)->rx_desc.cookie);
245 
246 		hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
247 						(*desc_list)->rx_desc.cookie,
248 						rx_desc_pool->owner);
249 
250 		*desc_list = next;
251 
252 	}
253 
254 	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
255 
256 	dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u",
257 			 count, num_desc_to_free);
258 
259 	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count,
260 			 (RX_BUFFER_SIZE * count));
261 
262 free_descs:
263 	DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
264 	/*
265 	 * add any available free desc back to the free list
266 	 */
267 	if (*desc_list)
268 		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
269 			mac_id, rx_desc_pool);
270 
271 	return QDF_STATUS_SUCCESS;
272 }
273 
274 /*
275  * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
276  *				pkts to RAW mode simulation to
277  *				decapsulate the pkt.
278  *
279  * @vdev: vdev on which RAW mode is enabled
280  * @nbuf_list: list of RAW pkts to process
281  * @peer: peer object from which the pkt is rx
282  *
283  * Return: void
284  */
285 void
286 dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
287 					struct dp_peer *peer)
288 {
289 	qdf_nbuf_t deliver_list_head = NULL;
290 	qdf_nbuf_t deliver_list_tail = NULL;
291 	qdf_nbuf_t nbuf;
292 
293 	nbuf = nbuf_list;
294 	while (nbuf) {
295 		qdf_nbuf_t next = qdf_nbuf_next(nbuf);
296 
297 		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
298 
299 		DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
300 		DP_STATS_INC_PKT(peer, rx.raw, 1, qdf_nbuf_len(nbuf));
301 		/*
302 		 * reset the chfrag_start and chfrag_end bits in nbuf cb
303 		 * as this is a non-amsdu pkt and RAW mode simulation expects
304 		 * these bits to be 0 for non-amsdu pkt.
305 		 */
306 		if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
307 			 qdf_nbuf_is_rx_chfrag_end(nbuf)) {
308 			qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
309 			qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
310 		}
311 
312 		nbuf = next;
313 	}
314 
315 	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
316 				 &deliver_list_tail, (struct cdp_peer*) peer);
317 
318 	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
319 }
320 
321 
322 #ifdef DP_LFR
323 /*
324  * In case of LFR, data of a new peer might be sent up
325  * even before peer is added.
326  */
327 static inline struct dp_vdev *
328 dp_get_vdev_from_peer(struct dp_soc *soc,
329 			uint16_t peer_id,
330 			struct dp_peer *peer,
331 			struct hal_rx_mpdu_desc_info mpdu_desc_info)
332 {
333 	struct dp_vdev *vdev;
334 	uint8_t vdev_id;
335 
336 	if (unlikely(!peer)) {
337 		if (peer_id != HTT_INVALID_PEER) {
338 			vdev_id = DP_PEER_METADATA_VDEV_ID_GET(
339 					mpdu_desc_info.peer_meta_data);
340 			QDF_TRACE(QDF_MODULE_ID_DP,
341 				QDF_TRACE_LEVEL_DEBUG,
342 				FL("PeerID %d not found use vdevID %d"),
343 				peer_id, vdev_id);
344 			vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
345 								  vdev_id);
346 		} else {
347 			QDF_TRACE(QDF_MODULE_ID_DP,
348 				QDF_TRACE_LEVEL_DEBUG,
349 				FL("Invalid PeerID %d"),
350 				peer_id);
351 			return NULL;
352 		}
353 	} else {
354 		vdev = peer->vdev;
355 	}
356 	return vdev;
357 }
358 #else
359 static inline struct dp_vdev *
360 dp_get_vdev_from_peer(struct dp_soc *soc,
361 			uint16_t peer_id,
362 			struct dp_peer *peer,
363 			struct hal_rx_mpdu_desc_info mpdu_desc_info)
364 {
365 	if (unlikely(!peer)) {
366 		QDF_TRACE(QDF_MODULE_ID_DP,
367 			QDF_TRACE_LEVEL_DEBUG,
368 			FL("Peer not found for peerID %d"),
369 			peer_id);
370 		return NULL;
371 	} else {
372 		return peer->vdev;
373 	}
374 }
375 #endif
376 
377 #ifndef FEATURE_WDS
378 static void
379 dp_rx_da_learn(struct dp_soc *soc,
380 	       uint8_t *rx_tlv_hdr,
381 	       struct dp_peer *ta_peer,
382 	       qdf_nbuf_t nbuf)
383 {
384 }
385 #endif
386 /*
387  * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic
388  *
389  * @soc: core txrx main context
390  * @ta_peer	: source peer entry
391  * @rx_tlv_hdr	: start address of rx tlvs
392  * @nbuf	: nbuf that has to be intrabss forwarded
393  *
394  * Return: bool: true if it is forwarded else false
395  */
396 static bool
397 dp_rx_intrabss_fwd(struct dp_soc *soc,
398 			struct dp_peer *ta_peer,
399 			uint8_t *rx_tlv_hdr,
400 			qdf_nbuf_t nbuf)
401 {
402 	uint16_t da_idx;
403 	uint16_t len;
404 	uint8_t is_frag;
405 	struct dp_peer *da_peer;
406 	struct dp_ast_entry *ast_entry;
407 	qdf_nbuf_t nbuf_copy;
408 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
409 	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
410 	struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
411 					tid_stats.tid_rx_stats[ring_id][tid];
412 
413 	/* check if the destination peer is available in peer table
414 	 * and also check if the source peer and destination peer
415 	 * belong to the same vap and destination peer is not bss peer.
416 	 */
417 
418 	if ((qdf_nbuf_is_da_valid(nbuf) && !qdf_nbuf_is_da_mcbc(nbuf))) {
419 		da_idx = hal_rx_msdu_end_da_idx_get(soc->hal_soc, rx_tlv_hdr);
420 
421 		ast_entry = soc->ast_table[da_idx];
422 		if (!ast_entry)
423 			return false;
424 
425 		if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
426 			ast_entry->is_active = TRUE;
427 			return false;
428 		}
429 
430 		da_peer = ast_entry->peer;
431 
432 		if (!da_peer)
433 			return false;
434 		/* TA peer cannot be same as peer(DA) on which AST is present
435 		 * this indicates a change in topology and that AST entries
436 		 * are yet to be updated.
437 		 */
438 		if (da_peer == ta_peer)
439 			return false;
440 
441 		if (da_peer->vdev == ta_peer->vdev && !da_peer->bss_peer) {
442 			len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
443 			is_frag = qdf_nbuf_is_frag(nbuf);
444 			memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
445 
446 			/* linearize the nbuf just before we send to
447 			 * dp_tx_send()
448 			 */
449 			if (qdf_unlikely(is_frag)) {
450 				if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
451 					return false;
452 
453 				nbuf = qdf_nbuf_unshare(nbuf);
454 				if (!nbuf) {
455 					DP_STATS_INC_PKT(ta_peer,
456 							 rx.intra_bss.fail,
457 							 1,
458 							 len);
459 					/* return true even though the pkt is
460 					 * not forwarded. Basically skb_unshare
461 					 * failed and we want to continue with
462 					 * next nbuf.
463 					 */
464 					tid_stats->fail_cnt[INTRABSS_DROP]++;
465 					return true;
466 				}
467 			}
468 
469 			if (!dp_tx_send((struct cdp_soc_t *)soc,
470 					ta_peer->vdev->vdev_id, nbuf)) {
471 				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
472 						 len);
473 				return true;
474 			} else {
475 				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
476 						len);
477 				tid_stats->fail_cnt[INTRABSS_DROP]++;
478 				return false;
479 			}
480 		}
481 	}
482 	/* if it is a broadcast pkt (e.g. ARP) and it is not its own
483 	 * source, then clone the pkt and send the cloned pkt for
484 	 * intra-BSS forwarding and the original pkt up the network stack.
485 	 * Note: how do we handle multicast pkts? Do we forward
486 	 * all multicast pkts as-is, or let a higher layer module
487 	 * like igmpsnoop decide whether to forward or not with
488 	 * Mcast enhancement?
489 	 */
490 	else if (qdf_unlikely((qdf_nbuf_is_da_mcbc(nbuf) &&
491 			       !ta_peer->bss_peer))) {
492 		if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf))
493 			goto end;
494 
495 		nbuf_copy = qdf_nbuf_copy(nbuf);
496 		if (!nbuf_copy)
497 			goto end;
498 
499 		len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
500 		memset(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
501 
502 		/* Set cb->ftype to intrabss FWD */
503 		qdf_nbuf_set_tx_ftype(nbuf_copy, CB_FTYPE_INTRABSS_FWD);
504 		if (dp_tx_send((struct cdp_soc_t *)soc,
505 			       ta_peer->vdev->vdev_id, nbuf_copy)) {
506 			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, len);
507 			tid_stats->fail_cnt[INTRABSS_DROP]++;
508 			qdf_nbuf_free(nbuf_copy);
509 		} else {
510 			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, len);
511 			tid_stats->intrabss_cnt++;
512 		}
513 	}
514 
515 end:
516 	/* return false as we have to still send the original pkt
517 	 * up the stack
518 	 */
519 	return false;
520 }
521 
522 #ifdef MESH_MODE_SUPPORT
523 
524 /**
525  * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
526  *
527  * @vdev: DP Virtual device handle
528  * @nbuf: Buffer pointer
529  * @rx_tlv_hdr: start of rx tlv header
530  * @peer: pointer to peer
531  *
532  * This function allocates memory for mesh receive stats and fills the
533  * required stats. Stores the memory address in skb cb.
534  *
535  * Return: void
536  */
537 
538 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
539 				uint8_t *rx_tlv_hdr, struct dp_peer *peer)
540 {
541 	struct mesh_recv_hdr_s *rx_info = NULL;
542 	uint32_t pkt_type;
543 	uint32_t nss;
544 	uint32_t rate_mcs;
545 	uint32_t bw;
546 
547 	/* fill recv mesh stats */
548 	rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));
549 
550 	/* upper layers are responsible for freeing this memory */
551 
552 	if (!rx_info) {
553 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
554 			"Memory allocation failed for mesh rx stats");
555 		DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
556 		return;
557 	}
558 
559 	rx_info->rs_flags = MESH_RXHDR_VER1;
560 	if (qdf_nbuf_is_rx_chfrag_start(nbuf))
561 		rx_info->rs_flags |= MESH_RX_FIRST_MSDU;
562 
563 	if (qdf_nbuf_is_rx_chfrag_end(nbuf))
564 		rx_info->rs_flags |= MESH_RX_LAST_MSDU;
565 
566 	if (hal_rx_attn_msdu_get_is_decrypted(rx_tlv_hdr)) {
567 		rx_info->rs_flags |= MESH_RX_DECRYPTED;
568 		rx_info->rs_keyix = hal_rx_msdu_get_keyid(rx_tlv_hdr);
569 		if (vdev->osif_get_key)
570 			vdev->osif_get_key(vdev->osif_vdev,
571 					&rx_info->rs_decryptkey[0],
572 					&peer->mac_addr.raw[0],
573 					rx_info->rs_keyix);
574 	}
575 
576 	rx_info->rs_rssi = hal_rx_msdu_start_get_rssi(rx_tlv_hdr);
577 	rx_info->rs_channel = hal_rx_msdu_start_get_freq(rx_tlv_hdr);
578 	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
579 	rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
580 	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
581 	nss = hal_rx_msdu_start_nss_get(vdev->pdev->soc->hal_soc, rx_tlv_hdr);
582 	rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
583 				(bw << 24);
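	/*
	 * rs_ratephy1 layout, as packed above: rate_mcs in bits [7:0],
	 * nss in bits [15:8], pkt_type in bits [23:16], bw in bits [31:24].
	 */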
584 
585 	qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);
586 
587 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
588 		FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x"),
589 						rx_info->rs_flags,
590 						rx_info->rs_rssi,
591 						rx_info->rs_channel,
592 						rx_info->rs_ratephy1,
593 						rx_info->rs_keyix);
594 
595 }
596 
597 /**
598  * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
599  *
600  * @vdev: DP Virtual device handle
601  * @nbuf: Buffer pointer
602  * @rx_tlv_hdr: start of rx tlv header
603  *
604  * This checks if the received packet matches any filter-out
605  * category and drops the packet if it matches.
606  *
607  * Return: status (QDF_STATUS_SUCCESS indicates drop, otherwise no drop)
608  */
609 
610 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
611 					uint8_t *rx_tlv_hdr)
612 {
613 	union dp_align_mac_addr mac_addr;
614 	struct dp_soc *soc = vdev->pdev->soc;
615 
616 	if (qdf_unlikely(vdev->mesh_rx_filter)) {
617 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
618 			if (hal_rx_mpdu_get_fr_ds(soc->hal_soc,
619 						  rx_tlv_hdr))
620 				return  QDF_STATUS_SUCCESS;
621 
622 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
623 			if (hal_rx_mpdu_get_to_ds(soc->hal_soc,
624 						  rx_tlv_hdr))
625 				return  QDF_STATUS_SUCCESS;
626 
627 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
628 			if (!hal_rx_mpdu_get_fr_ds(soc->hal_soc,
629 						   rx_tlv_hdr) &&
630 			    !hal_rx_mpdu_get_to_ds(soc->hal_soc,
631 						   rx_tlv_hdr))
632 				return  QDF_STATUS_SUCCESS;
633 
634 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
635 			if (hal_rx_mpdu_get_addr1(soc->hal_soc,
636 						  rx_tlv_hdr,
637 					&mac_addr.raw[0]))
638 				return QDF_STATUS_E_FAILURE;
639 
640 			if (!qdf_mem_cmp(&mac_addr.raw[0],
641 					&vdev->mac_addr.raw[0],
642 					QDF_MAC_ADDR_SIZE))
643 				return  QDF_STATUS_SUCCESS;
644 		}
645 
646 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
647 			if (hal_rx_mpdu_get_addr2(soc->hal_soc,
648 						  rx_tlv_hdr,
649 						  &mac_addr.raw[0]))
650 				return QDF_STATUS_E_FAILURE;
651 
652 			if (!qdf_mem_cmp(&mac_addr.raw[0],
653 					&vdev->mac_addr.raw[0],
654 					QDF_MAC_ADDR_SIZE))
655 				return  QDF_STATUS_SUCCESS;
656 		}
657 	}
658 
659 	return QDF_STATUS_E_FAILURE;
660 }
661 
662 #else
663 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
664 				uint8_t *rx_tlv_hdr, struct dp_peer *peer)
665 {
666 }
667 
668 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
669 					uint8_t *rx_tlv_hdr)
670 {
671 	return QDF_STATUS_E_FAILURE;
672 }
673 
674 #endif
675 
676 #ifdef FEATURE_NAC_RSSI
677 /**
678  * dp_rx_nac_filter(): Function to perform filtering of non-associated
679  * clients
680  * @pdev: DP pdev handle
681  * @rx_pkt_hdr: Rx packet Header
682  *
683  * return: dp_vdev*
684  */
685 static
686 struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
687 		uint8_t *rx_pkt_hdr)
688 {
689 	struct ieee80211_frame *wh;
690 	struct dp_neighbour_peer *peer = NULL;
691 
692 	wh = (struct ieee80211_frame *)rx_pkt_hdr;
693 
694 	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
695 		return NULL;
696 
697 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
698 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
699 				neighbour_peer_list_elem) {
700 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
701 				wh->i_addr2, QDF_MAC_ADDR_SIZE) == 0) {
702 			QDF_TRACE(
703 				QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
704 				FL("NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x"),
705 				peer->neighbour_peers_macaddr.raw[0],
706 				peer->neighbour_peers_macaddr.raw[1],
707 				peer->neighbour_peers_macaddr.raw[2],
708 				peer->neighbour_peers_macaddr.raw[3],
709 				peer->neighbour_peers_macaddr.raw[4],
710 				peer->neighbour_peers_macaddr.raw[5]);
711 
712 				qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
713 
714 			return pdev->monitor_vdev;
715 		}
716 	}
717 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
718 
719 	return NULL;
720 }
721 
722 /**
723  * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
724  * @soc: DP SOC handle
725  * @mpdu: mpdu for which peer is invalid
726  * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
727  * pool_id have the same mapping)
728  *
729  * return: integer type
730  */
731 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
732 				   uint8_t mac_id)
733 {
734 	struct dp_invalid_peer_msg msg;
735 	struct dp_vdev *vdev = NULL;
736 	struct dp_pdev *pdev = NULL;
737 	struct ieee80211_frame *wh;
738 	qdf_nbuf_t curr_nbuf, next_nbuf;
739 	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
740 	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
741 
742 	rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
743 
744 	if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) {
745 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
746 			  "Drop decapped frames");
747 		goto free;
748 	}
749 
750 	wh = (struct ieee80211_frame *)rx_pkt_hdr;
751 
752 	if (!DP_FRAME_IS_DATA(wh)) {
753 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
754 			  "NAWDS valid only for data frames");
755 		goto free;
756 	}
757 
758 	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
759 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
760 			"Invalid nbuf length");
761 		goto free;
762 	}
763 
764 	pdev = dp_get_pdev_for_mac_id(soc, mac_id);
765 
766 	if (!pdev || qdf_unlikely(pdev->is_pdev_down)) {
767 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
768 			  "PDEV %s", !pdev ? "not found" : "down");
769 		goto free;
770 	}
771 
772 	if (pdev->filter_neighbour_peers) {
773 		/* Next Hop scenario not yet handled */
774 		vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
775 		if (vdev) {
776 			dp_rx_mon_deliver(soc, pdev->pdev_id,
777 					  pdev->invalid_peer_head_msdu,
778 					  pdev->invalid_peer_tail_msdu);
779 
780 			pdev->invalid_peer_head_msdu = NULL;
781 			pdev->invalid_peer_tail_msdu = NULL;
782 
783 			return 0;
784 		}
785 	}
786 
787 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
788 
789 		if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
790 				QDF_MAC_ADDR_SIZE) == 0) {
791 			goto out;
792 		}
793 	}
794 
795 	if (!vdev) {
796 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
797 			"VDEV not found");
798 		goto free;
799 	}
800 
801 out:
802 	msg.wh = wh;
803 	qdf_nbuf_pull_head(mpdu, RX_PKT_TLVS_LEN);
804 	msg.nbuf = mpdu;
805 	msg.vdev_id = vdev->vdev_id;
806 	if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer)
807 		pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(
808 				(struct cdp_ctrl_objmgr_psoc *)soc->ctrl_psoc,
809 				pdev->pdev_id, &msg);
810 
811 free:
812 	/* Drop and free packet */
813 	curr_nbuf = mpdu;
814 	while (curr_nbuf) {
815 		next_nbuf = qdf_nbuf_next(curr_nbuf);
816 		qdf_nbuf_free(curr_nbuf);
817 		curr_nbuf = next_nbuf;
818 	}
819 
820 	return 0;
821 }
822 
823 /**
824  * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
825  * @soc: DP SOC handle
826  * @mpdu: mpdu for which peer is invalid
827  * @mpdu_done: if an mpdu is completed
828  * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
829  * pool_id have the same mapping)
830  *
831  * return: void
832  */
833 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
834 					qdf_nbuf_t mpdu, bool mpdu_done,
835 					uint8_t mac_id)
836 {
837 	/* Only trigger the process when mpdu is completed */
838 	if (mpdu_done)
839 		dp_rx_process_invalid_peer(soc, mpdu, mac_id);
840 }
841 #else
842 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
843 				   uint8_t mac_id)
844 {
845 	qdf_nbuf_t curr_nbuf, next_nbuf;
846 	struct dp_pdev *pdev;
847 	struct dp_vdev *vdev = NULL;
848 	struct ieee80211_frame *wh;
849 	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
850 	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
851 
852 	wh = (struct ieee80211_frame *)rx_pkt_hdr;
853 
854 	if (!DP_FRAME_IS_DATA(wh)) {
855 		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
856 				   "only for data frames");
857 		goto free;
858 	}
859 
860 	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
861 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
862 			  "Invalid nbuf length");
863 		goto free;
864 	}
865 
866 	pdev = dp_get_pdev_for_mac_id(soc, mac_id);
867 	if (!pdev) {
868 		QDF_TRACE(QDF_MODULE_ID_DP,
869 			  QDF_TRACE_LEVEL_ERROR,
870 			  "PDEV not found");
871 		goto free;
872 	}
873 
874 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
875 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
876 		if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
877 				QDF_MAC_ADDR_SIZE) == 0) {
878 			qdf_spin_unlock_bh(&pdev->vdev_list_lock);
879 			goto out;
880 		}
881 	}
882 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
883 
884 	if (!vdev) {
885 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
886 			  "VDEV not found");
887 		goto free;
888 	}
889 
890 out:
891 	if (soc->cdp_soc.ol_ops->rx_invalid_peer)
892 		soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh);
893 free:
894 	/* Drop and free packet */
895 	curr_nbuf = mpdu;
896 	while (curr_nbuf) {
897 		next_nbuf = qdf_nbuf_next(curr_nbuf);
898 		qdf_nbuf_free(curr_nbuf);
899 		curr_nbuf = next_nbuf;
900 	}
901 
902 	/* Reset the head and tail pointers */
903 	pdev = dp_get_pdev_for_mac_id(soc, mac_id);
904 	if (pdev) {
905 		pdev->invalid_peer_head_msdu = NULL;
906 		pdev->invalid_peer_tail_msdu = NULL;
907 	}
908 
909 	return 0;
910 }
911 
912 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
913 					qdf_nbuf_t mpdu, bool mpdu_done,
914 					uint8_t mac_id)
915 {
916 	/* Process the nbuf */
917 	dp_rx_process_invalid_peer(soc, mpdu, mac_id);
918 }
919 #endif
920 
921 #ifdef RECEIVE_OFFLOAD
922 /**
923  * dp_rx_print_offload_info() - Print offload info from RX TLV
924  * @soc: dp soc handle
925  * @rx_tlv: RX TLV for which offload information is to be printed
926  *
927  * Return: None
928  */
929 static void dp_rx_print_offload_info(struct dp_soc *soc, uint8_t *rx_tlv)
930 {
931 	dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------");
932 	dp_verbose_debug("lro_eligible 0x%x", HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv));
933 	dp_verbose_debug("pure_ack 0x%x", HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv));
934 	dp_verbose_debug("chksum 0x%x", hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
935 								  rx_tlv));
936 	dp_verbose_debug("TCP seq num 0x%x", HAL_RX_TLV_GET_TCP_SEQ(rx_tlv));
937 	dp_verbose_debug("TCP ack num 0x%x", HAL_RX_TLV_GET_TCP_ACK(rx_tlv));
938 	dp_verbose_debug("TCP window 0x%x", HAL_RX_TLV_GET_TCP_WIN(rx_tlv));
939 	dp_verbose_debug("TCP protocol 0x%x", HAL_RX_TLV_GET_TCP_PROTO(rx_tlv));
940 	dp_verbose_debug("TCP offset 0x%x", HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv));
941 	dp_verbose_debug("toeplitz 0x%x", HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv));
942 	dp_verbose_debug("---------------------------------------------------------");
943 }
944 
945 /**
946  * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb
947  * @soc: DP SOC handle
948  * @rx_tlv: RX TLV received for the msdu
949  * @msdu: msdu for which GRO info needs to be filled
950  * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets
951  *
952  * Return: None
953  */
954 static
955 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
956 			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
957 {
958 	if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx))
959 		return;
960 
961 	/* Filling up RX offload info only for TCP packets */
962 	if (!HAL_RX_TLV_GET_TCP_PROTO(rx_tlv))
963 		return;
964 
965 	*rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1;
966 
967 	QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) =
968 		 HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv);
969 	QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) =
970 			HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv);
971 	QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
972 			hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
973 						  rx_tlv);
974 	QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) =
975 			 HAL_RX_TLV_GET_TCP_SEQ(rx_tlv);
976 	QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) =
977 			 HAL_RX_TLV_GET_TCP_ACK(rx_tlv);
978 	QDF_NBUF_CB_RX_TCP_WIN(msdu) =
979 			 HAL_RX_TLV_GET_TCP_WIN(rx_tlv);
980 	QDF_NBUF_CB_RX_TCP_PROTO(msdu) =
981 			 HAL_RX_TLV_GET_TCP_PROTO(rx_tlv);
982 	QDF_NBUF_CB_RX_IPV6_PROTO(msdu) =
983 			 HAL_RX_TLV_GET_IPV6(rx_tlv);
984 	QDF_NBUF_CB_RX_TCP_OFFSET(msdu) =
985 			 HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv);
986 	QDF_NBUF_CB_RX_FLOW_ID(msdu) =
987 			 HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv);
988 
989 	dp_rx_print_offload_info(soc, rx_tlv);
990 }
991 #else
992 static void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
993 				qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
994 {
995 }
996 #endif /* RECEIVE_OFFLOAD */
997 
998 /**
999  * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
1000  *
1001  * @nbuf: pointer to msdu.
1002  * @mpdu_len: mpdu length
1003  *
1004  * Return: returns true if nbuf is the last msdu of the mpdu, else returns false.
1005  */
1006 static inline bool dp_rx_adjust_nbuf_len(qdf_nbuf_t nbuf, uint16_t *mpdu_len)
1007 {
1008 	bool last_nbuf;
1009 
1010 	if (*mpdu_len > (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN)) {
1011 		qdf_nbuf_set_pktlen(nbuf, RX_BUFFER_SIZE);
1012 		last_nbuf = false;
1013 	} else {
1014 		qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + RX_PKT_TLVS_LEN));
1015 		last_nbuf = true;
1016 	}
1017 
1018 	*mpdu_len -= (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN);
1019 
1020 	return last_nbuf;
1021 }
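/*
 * Illustrative example (hypothetical sizes): for an mpdu_len of
 * 2 * (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN) + 100, the first two calls set the
 * pktlen to a full buffer and return false, while the third call trims the
 * nbuf to the remaining 100 bytes plus RX_PKT_TLVS_LEN and returns true,
 * marking it as the last nbuf of the mpdu.
 */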
1022 
1023 /**
1024  * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
1025  *		     multiple nbufs.
1026  * @nbuf: pointer to the first msdu of an amsdu.
1027  * @rx_tlv_hdr: pointer to the start of RX TLV headers.
1028  *
1029  *
1030  * This function implements the creation of RX frag_list for cases
1031  * where an MSDU is spread across multiple nbufs.
1032  *
1033  * Return: returns the head nbuf which contains complete frag_list.
1034  */
1035 qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
1036 {
1037 	qdf_nbuf_t parent, frag_list, next = NULL;
1038 	uint16_t frag_list_len = 0;
1039 	uint16_t mpdu_len;
1040 	bool last_nbuf;
1041 
1042 	mpdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
1043 	/*
1044 	 * this is a case where the complete msdu fits in one single nbuf.
1045 	 * in this case HW sets both start and end bit and we only need to
1046 	 * reset these bits for RAW mode simulator to decap the pkt
1047 	 */
1048 	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
1049 					qdf_nbuf_is_rx_chfrag_end(nbuf)) {
1050 		qdf_nbuf_set_pktlen(nbuf, mpdu_len + RX_PKT_TLVS_LEN);
1051 		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
1052 		return nbuf;
1053 	}
1054 
1055 	/*
1056 	 * This is a case where we have multiple msdus (A-MSDU) spread across
1057 	 * multiple nbufs. here we create a fraglist out of these nbufs.
1058 	 *
1059 	 * the moment we encounter a nbuf with continuation bit set we
1060 	 * know for sure we have an MSDU which is spread across multiple
1061 	 * nbufs. We loop through and reap nbufs till we reach last nbuf.
1062 	 */
1063 	parent = nbuf;
1064 	frag_list = nbuf->next;
1065 	nbuf = nbuf->next;
1066 
1067 	/*
1068 	 * set the start bit in the first nbuf we encounter with continuation
1069 	 * bit set. This has the proper mpdu length set as it is the first
1070 	 * msdu of the mpdu. this becomes the parent nbuf and the subsequent
1071 	 * nbufs will form the frag_list of the parent nbuf.
1072 	 */
1073 	qdf_nbuf_set_rx_chfrag_start(parent, 1);
1074 	last_nbuf = dp_rx_adjust_nbuf_len(parent, &mpdu_len);
1075 
1076 	/*
1077 	 * this is where we set the length of the fragments which are
1078 	 * associated to the parent nbuf. We iterate through the frag_list
1079 	 * till we hit the last_nbuf of the list.
1080 	 */
1081 	do {
1082 		last_nbuf = dp_rx_adjust_nbuf_len(nbuf, &mpdu_len);
1083 		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
1084 		frag_list_len += qdf_nbuf_len(nbuf);
1085 
1086 		if (last_nbuf) {
1087 			next = nbuf->next;
1088 			nbuf->next = NULL;
1089 			break;
1090 		}
1091 
1092 		nbuf = nbuf->next;
1093 	} while (!last_nbuf);
1094 
1095 	qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
1096 	qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
1097 	parent->next = next;
1098 
1099 	qdf_nbuf_pull_head(parent, RX_PKT_TLVS_LEN);
1100 	return parent;
1101 }
1102 
1103 /**
1104  * dp_rx_compute_delay() - Compute reap-to-stack and inter-frame delays
1105  *			    and update the per-tid rx delay stats
1106  *
1107  * @vdev: DP vdev handle
1108  * @nbuf: rx buffer for which the delays are computed
1109  *
1110  * Return: none
1111  */
1112 void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1113 {
1114 	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
1115 	int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get());
1116 	uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
1117 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
1118 	uint32_t interframe_delay =
1119 		(uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp);
1120 
1121 	dp_update_delay_stats(vdev->pdev, to_stack, tid,
1122 			      CDP_DELAY_STATS_REAP_STACK, ring_id);
1123 	/*
1124 	 * Update interframe delay stats calculated at deliver_data_ol point.
1125 	 * Value of vdev->prev_rx_deliver_tstamp will be 0 for 1st frame, so
1126 	 * interframe delay will not be calculated correctly for the 1st frame.
1127 	 * On the other side, this will help in avoiding extra per packet check
1128 	 * of vdev->prev_rx_deliver_tstamp.
1129 	 */
1130 	dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
1131 			      CDP_DELAY_STATS_RX_INTERFRAME, ring_id);
1132 	vdev->prev_rx_deliver_tstamp = current_ts;
1133 }
1134 
1135 /**
1136  * dp_rx_drop_nbuf_list() - drop an nbuf list
1137  * @pdev: dp pdev reference
1138  * @buf_list: buffer list to be dropped
1139  *
1140  * Return: int (number of bufs dropped)
1141  */
1142 static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev,
1143 				       qdf_nbuf_t buf_list)
1144 {
1145 	struct cdp_tid_rx_stats *stats = NULL;
1146 	uint8_t tid = 0, ring_id = 0;
1147 	int num_dropped = 0;
1148 	qdf_nbuf_t buf, next_buf;
1149 
1150 	buf = buf_list;
1151 	while (buf) {
1152 		ring_id = QDF_NBUF_CB_RX_CTX_ID(buf);
1153 		next_buf = qdf_nbuf_queue_next(buf);
1154 		tid = qdf_nbuf_get_tid_val(buf);
1155 		stats = &pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
1156 		stats->fail_cnt[INVALID_PEER_VDEV]++;
1157 		stats->delivered_to_stack--;
1158 		qdf_nbuf_free(buf);
1159 		buf = next_buf;
1160 		num_dropped++;
1161 	}
1162 
1163 	return num_dropped;
1164 }
1165 
1166 #ifdef PEER_CACHE_RX_PKTS
1167 /**
1168  * dp_rx_flush_rx_cached() - flush cached rx frames
1169  * @peer: peer
1170  * @drop: flag to drop frames or forward to net stack
1171  *
1172  * Return: None
1173  */
1174 void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
1175 {
1176 	struct dp_peer_cached_bufq *bufqi;
1177 	struct dp_rx_cached_buf *cache_buf = NULL;
1178 	ol_txrx_rx_fp data_rx = NULL;
1179 	int num_buff_elem;
1180 	QDF_STATUS status;
1181 
1182 	if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
1183 		qdf_atomic_dec(&peer->flush_in_progress);
1184 		return;
1185 	}
1186 
1187 	qdf_spin_lock_bh(&peer->peer_info_lock);
1188 	if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx)
1189 		data_rx = peer->vdev->osif_rx;
1190 	else
1191 		drop = true;
1192 	qdf_spin_unlock_bh(&peer->peer_info_lock);
1193 
1194 	bufqi = &peer->bufq_info;
1195 
1196 	qdf_spin_lock_bh(&bufqi->bufq_lock);
1197 	qdf_list_remove_front(&bufqi->cached_bufq,
1198 			      (qdf_list_node_t **)&cache_buf);
1199 	while (cache_buf) {
1200 		num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(
1201 								cache_buf->buf);
1202 		bufqi->entries -= num_buff_elem;
1203 		qdf_spin_unlock_bh(&bufqi->bufq_lock);
1204 		if (drop) {
1205 			bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1206 							      cache_buf->buf);
1207 		} else {
1208 			/* Flush the cached frames to OSIF DEV */
1209 			status = data_rx(peer->vdev->osif_vdev, cache_buf->buf);
1210 			if (status != QDF_STATUS_SUCCESS)
1211 				bufqi->dropped = dp_rx_drop_nbuf_list(
1212 							peer->vdev->pdev,
1213 							cache_buf->buf);
1214 		}
1215 		qdf_mem_free(cache_buf);
1216 		cache_buf = NULL;
1217 		qdf_spin_lock_bh(&bufqi->bufq_lock);
1218 		qdf_list_remove_front(&bufqi->cached_bufq,
1219 				      (qdf_list_node_t **)&cache_buf);
1220 	}
1221 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
1222 	qdf_atomic_dec(&peer->flush_in_progress);
1223 }
1224 
1225 /**
1226  * dp_rx_enqueue_rx() - cache rx frames
1227  * @peer: peer
1228  * @rx_buf_list: cache buffer list
1229  *
1230  * Return: None
1231  */
1232 static QDF_STATUS
1233 dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
1234 {
1235 	struct dp_rx_cached_buf *cache_buf;
1236 	struct dp_peer_cached_bufq *bufqi = &peer->bufq_info;
1237 	int num_buff_elem;
1238 
1239 	QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_TXRX, "bufq->curr %d bufq->drops %d",
1240 			   bufqi->entries, bufqi->dropped);
1241 
1242 	if (!peer->valid) {
1243 		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1244 						      rx_buf_list);
1245 		return QDF_STATUS_E_INVAL;
1246 	}
1247 
1248 	qdf_spin_lock_bh(&bufqi->bufq_lock);
1249 	if (bufqi->entries >= bufqi->thresh) {
1250 		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1251 						      rx_buf_list);
1252 		qdf_spin_unlock_bh(&bufqi->bufq_lock);
1253 		return QDF_STATUS_E_RESOURCES;
1254 	}
1255 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
1256 
1257 	num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list);
1258 
1259 	cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf));
1260 	if (!cache_buf) {
1261 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1262 			  "Failed to allocate buf to cache rx frames");
1263 		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1264 						      rx_buf_list);
1265 		return QDF_STATUS_E_NOMEM;
1266 	}
1267 
1268 	cache_buf->buf = rx_buf_list;
1269 
1270 	qdf_spin_lock_bh(&bufqi->bufq_lock);
1271 	qdf_list_insert_back(&bufqi->cached_bufq,
1272 			     &cache_buf->node);
1273 	bufqi->entries += num_buff_elem;
1274 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
1275 
1276 	return QDF_STATUS_SUCCESS;
1277 }
1278 
1279 static inline
1280 bool dp_rx_is_peer_cache_bufq_supported(void)
1281 {
1282 	return true;
1283 }
1284 #else
1285 static inline
1286 bool dp_rx_is_peer_cache_bufq_supported(void)
1287 {
1288 	return false;
1289 }
1290 
1291 static inline QDF_STATUS
1292 dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
1293 {
1294 	return QDF_STATUS_SUCCESS;
1295 }
1296 #endif
1297 
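/**
 * dp_rx_deliver_to_stack() - deliver an nbuf list to the vdev's registered
 *			      OSIF rx handler, or cache/drop it when no
 *			      handler is registered
 * @vdev: DP vdev on which the nbufs were received
 * @peer: peer to which the nbufs belong
 * @nbuf_head: head of the nbuf list to be delivered
 * @nbuf_tail: tail of the nbuf list to be delivered
 *
 * Return: None
 */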
1298 static inline void dp_rx_deliver_to_stack(struct dp_vdev *vdev,
1299 						struct dp_peer *peer,
1300 						qdf_nbuf_t nbuf_head,
1301 						qdf_nbuf_t nbuf_tail)
1302 {
1303 	/*
1304 	 * highly unlikely to have a vdev without a registered rx callback
1305 	 * function; if so, cache the nbufs when supported or free the list.
1306 	 */
1307 	if (qdf_unlikely(!vdev->osif_rx)) {
1308 		if (dp_rx_is_peer_cache_bufq_supported())
1309 			dp_rx_enqueue_rx(peer, nbuf_head);
1310 		else
1311 			dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);
1312 
1313 		return;
1314 	}
1315 
1316 	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
1317 			(vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
1318 		vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
1319 				&nbuf_tail, (struct cdp_peer *) peer);
1320 	}
1321 
1322 	vdev->osif_rx(vdev->osif_vdev, nbuf_head);
1323 }
1324 
1325 /**
1326  * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
1327  * @nbuf: pointer to the first msdu of an amsdu.
1328  * @rx_tlv_hdr: pointer to the start of RX TLV headers.
1329  *
1330  * The ip_summed field of the skb is set based on whether HW validated the
1331  * IP/TCP/UDP checksum.
1332  *
1333  * Return: void
1334  */
1335 static inline void dp_rx_cksum_offload(struct dp_pdev *pdev,
1336 				       qdf_nbuf_t nbuf,
1337 				       uint8_t *rx_tlv_hdr)
1338 {
1339 	qdf_nbuf_rx_cksum_t cksum = {0};
1340 	bool ip_csum_err = hal_rx_attn_ip_cksum_fail_get(rx_tlv_hdr);
1341 	bool tcp_udp_csum_er = hal_rx_attn_tcp_udp_cksum_fail_get(rx_tlv_hdr);
1342 
1343 	if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) {
1344 		cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
1345 		qdf_nbuf_set_rx_cksum(nbuf, &cksum);
1346 	} else {
1347 		DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err);
1348 		DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er);
1349 	}
1350 }
1351 
1352 /**
1353  * dp_rx_msdu_stats_update() - update per msdu stats.
1354  * @soc: core txrx main context
1355  * @nbuf: pointer to the first msdu of an amsdu.
1356  * @rx_tlv_hdr: pointer to the start of RX TLV headers.
1357  * @peer: pointer to the peer object.
1358  * @ring_id: reo dest ring number on which pkt is reaped.
1359  * @tid_stats: per tid rx stats.
1360  *
1361  * update all the per msdu stats for that nbuf.
1362  * Return: void
1363  */
1364 static void dp_rx_msdu_stats_update(struct dp_soc *soc,
1365 				    qdf_nbuf_t nbuf,
1366 				    uint8_t *rx_tlv_hdr,
1367 				    struct dp_peer *peer,
1368 				    uint8_t ring_id,
1369 				    struct cdp_tid_rx_stats *tid_stats)
1370 {
1371 	bool is_ampdu, is_not_amsdu;
1372 	uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
1373 	struct dp_vdev *vdev = peer->vdev;
1374 	qdf_ether_header_t *eh;
1375 	uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
1376 
1377 	is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
1378 			qdf_nbuf_is_rx_chfrag_end(nbuf);
1379 
1380 	DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1, msdu_len);
1381 	DP_STATS_INCC(peer, rx.non_amsdu_cnt, 1, is_not_amsdu);
1382 	DP_STATS_INCC(peer, rx.amsdu_cnt, 1, !is_not_amsdu);
1383 	DP_STATS_INCC(peer, rx.rx_retries, 1, qdf_nbuf_is_rx_retry_flag(nbuf));
1384 
1385 	tid_stats->msdu_cnt++;
1386 	if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) &&
1387 			 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
1388 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1389 		DP_STATS_INC_PKT(peer, rx.multicast, 1, msdu_len);
1390 		tid_stats->mcast_msdu_cnt++;
1391 		if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
1392 			DP_STATS_INC_PKT(peer, rx.bcast, 1, msdu_len);
1393 			tid_stats->bcast_msdu_cnt++;
1394 		}
1395 	}
1396 
1397 	/*
1398 	 * currently we can return from here as we have similar stats
1399 	 * updated at per ppdu level instead of msdu level
1400 	 */
1401 	if (!soc->process_rx_status)
1402 		return;
1403 
1404 	is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(rx_tlv_hdr);
1405 	DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu);
1406 	DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu));
1407 
1408 	sgi = hal_rx_msdu_start_sgi_get(rx_tlv_hdr);
1409 	mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
1410 	tid = qdf_nbuf_get_tid_val(nbuf);
1411 	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
1412 	reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc,
1413 							      rx_tlv_hdr);
1414 	nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
1415 	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
1416 
1417 	DP_STATS_INC(peer, rx.bw[bw], 1);
1418 	/*
1419 	 * only if nss > 0 and pkt_type is 11N/AC/AX,
1420 	 * then increase index [nss - 1] in array counter.
1421 	 */
1422 	if (nss > 0 && (pkt_type == DOT11_N ||
1423 			pkt_type == DOT11_AC ||
1424 			pkt_type == DOT11_AX))
1425 		DP_STATS_INC(peer, rx.nss[nss - 1], 1);
1426 
1427 	DP_STATS_INC(peer, rx.sgi_count[sgi], 1);
1428 	DP_STATS_INCC(peer, rx.err.mic_err, 1,
1429 		      hal_rx_mpdu_end_mic_err_get(rx_tlv_hdr));
1430 	DP_STATS_INCC(peer, rx.err.decrypt_err, 1,
1431 		      hal_rx_mpdu_end_decrypt_err_get(rx_tlv_hdr));
1432 
1433 	DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
1434 	DP_STATS_INC(peer, rx.reception_type[reception_type], 1);
1435 
1436 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1437 		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
1438 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1439 		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A)));
1440 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1441 		      ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
1442 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1443 		      ((mcs <= MAX_MCS_11B) && (pkt_type == DOT11_B)));
1444 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1445 		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
1446 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1447 		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_N)));
1448 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1449 		      ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
1450 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1451 		      ((mcs <= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
1452 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1453 		      ((mcs >= MAX_MCS) && (pkt_type == DOT11_AX)));
1454 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1455 		      ((mcs < MAX_MCS) && (pkt_type == DOT11_AX)));
1456 
1457 	if ((soc->process_rx_status) &&
1458 	    hal_rx_attn_first_mpdu_get(rx_tlv_hdr)) {
1459 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
1460 		if (!vdev->pdev)
1461 			return;
1462 
1463 		dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
1464 				     &peer->stats, peer->peer_ids[0],
1465 				     UPDATE_PEER_STATS,
1466 				     vdev->pdev->pdev_id);
1467 #endif
1468 
1469 	}
1470 }
1471 
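/**
 * is_sa_da_idx_valid() - check that the SA/DA AST indices reported by HW
 *			  do not exceed the configured maximum AST index
 * @soc: core txrx main context
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf on which the SA/DA valid flags are set
 *
 * Return: false if either index is out of range, true otherwise
 */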
1472 static inline bool is_sa_da_idx_valid(struct dp_soc *soc,
1473 				      uint8_t *rx_tlv_hdr,
1474 				      qdf_nbuf_t nbuf)
1475 {
1476 	if ((qdf_nbuf_is_sa_valid(nbuf) &&
1477 	     (hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr) >
1478 		wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) ||
1479 	    (!qdf_nbuf_is_da_mcbc(nbuf) &&
1480 	     qdf_nbuf_is_da_valid(nbuf) &&
1481 	     (hal_rx_msdu_end_da_idx_get(soc->hal_soc, rx_tlv_hdr) >
1482 	      wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))))
1483 		return false;
1484 
1485 	return true;
1486 }
1487 
1488 #ifndef WDS_VENDOR_EXTENSION
1489 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
1490 			   struct dp_vdev *vdev,
1491 			   struct dp_peer *peer)
1492 {
1493 	return 1;
1494 }
1495 #endif
1496 
1497 #ifdef RX_DESC_DEBUG_CHECK
1498 /**
1499  * dp_rx_desc_nbuf_sanity_check - Add sanity check to catch REO rx_desc paddr
1500  *				  corruption
1501  *
1502  * @ring_desc: REO ring descriptor
1503  * @rx_desc: Rx descriptor
1504  *
1505  * Return: NONE
1506  */
1507 static inline
1508 void dp_rx_desc_nbuf_sanity_check(hal_ring_desc_t ring_desc,
1509 				  struct dp_rx_desc *rx_desc)
1510 {
1511 	struct hal_buf_info hbi;
1512 
1513 	hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
1514 	/* Sanity check for possible buffer paddr corruption */
1515 	qdf_assert_always((&hbi)->paddr ==
1516 			  qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0));
1517 }
1518 #else
1519 static inline
1520 void dp_rx_desc_nbuf_sanity_check(hal_ring_desc_t ring_desc,
1521 				  struct dp_rx_desc *rx_desc)
1522 {
1523 }
1524 #endif
1525 
1526 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
1527 static inline
1528 bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
1529 {
1530 	bool limit_hit = false;
1531 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
1532 
1533 	limit_hit =
1534 		(num_reaped >= cfg->rx_reap_loop_pkt_limit) ? true : false;
1535 
1536 	if (limit_hit)
1537 		DP_STATS_INC(soc, rx.reap_loop_pkt_limit_hit, 1)
1538 
1539 	return limit_hit;
1540 }
1541 
1542 static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
1543 {
1544 	return soc->wlan_cfg_ctx->rx_enable_eol_data_check;
1545 }
1546 
1547 #else
1548 static inline
1549 bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
1550 {
1551 	return false;
1552 }
1553 
1554 static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
1555 {
1556 	return false;
1557 }
1558 
1559 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
1560 
1561 /**
1562  * dp_is_special_data() - check if the pkt is special, like eapol, dhcp, etc.
1563  *
1564  * @nbuf: pkt skb pointer
1565  *
1566  * Return: true if matched, false if not
1567  */
1568 static inline
1569 bool dp_is_special_data(qdf_nbuf_t nbuf)
1570 {
1571 	if (qdf_nbuf_is_ipv4_arp_pkt(nbuf) ||
1572 	    qdf_nbuf_is_ipv4_dhcp_pkt(nbuf) ||
1573 	    qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
1574 	    qdf_nbuf_is_ipv6_dhcp_pkt(nbuf))
1575 		return true;
1576 	else
1577 		return false;
1578 }
1579 
1580 #ifdef DP_RX_PKT_NO_PEER_DELIVER
1581 /**
1582  * dp_rx_deliver_to_stack_no_peer() - try deliver rx data even if
1583  *				      no corresponding peer is found
1584  * @soc: core txrx main context
1585  * @nbuf: pkt skb pointer
1586  *
1587  * This function will try to deliver some RX special frames to the stack
1588  * even when no matching peer is found. For instance, in the LFR case,
1589  * some eapol data will be sent to the host before peer_map is done.
1590  *
1591  * Return: None
1592  */
1593 static inline
1594 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
1595 {
1596 	uint16_t peer_id;
1597 	uint8_t vdev_id;
1598 	struct dp_vdev *vdev;
1599 	uint32_t l2_hdr_offset = 0;
1600 	uint16_t msdu_len = 0;
1601 	uint32_t pkt_len = 0;
1602 	uint8_t *rx_tlv_hdr;
1603 
1604 	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
1605 	if (peer_id > soc->max_peers)
1606 		goto deliver_fail;
1607 
1608 	vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
1609 	vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
1610 	if (!vdev || !vdev->osif_rx)
1611 		goto deliver_fail;
1612 
1613 	rx_tlv_hdr = qdf_nbuf_data(nbuf);
1614 	l2_hdr_offset =
1615 		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
1616 
1617 	msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
1618 	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;
1619 
1620 	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
1621 		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
1622 	} else {
1623 		qdf_nbuf_set_pktlen(nbuf, pkt_len);
1624 		qdf_nbuf_pull_head(nbuf,
1625 				   RX_PKT_TLVS_LEN +
1626 				   l2_hdr_offset);
1627 	}
1628 
1629 	/* only allow special frames */
1630 	if (!dp_is_special_data(nbuf))
1631 		goto deliver_fail;
1632 
1633 	vdev->osif_rx(vdev->osif_vdev, nbuf);
1634 	DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1);
1635 	return;
1636 
1637 deliver_fail:
1638 	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1639 			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
1640 	qdf_nbuf_free(nbuf);
1641 }
1642 #else
1643 static inline
1644 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
1645 {
1646 	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1647 			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
1648 	qdf_nbuf_free(nbuf);
1649 }
1650 #endif
1651 
1652 /**
1653  * dp_rx_srng_get_num_pending() - get number of pending entries
1654  * @hal_soc: hal soc opaque pointer
1655  * @hal_ring: opaque pointer to the HAL Rx Ring
1656  * @num_entries: number of entries in the hal_ring.
1657  * @near_full: pointer to a boolean. This is set if ring is near full.
1658  *
1659  * The function returns the number of entries in a destination ring which are
1660  * yet to be reaped. The function also checks if the ring is near full.
1661  * If more than half of the ring needs to be reaped, the ring is considered
1662  * approaching full.
1663  * The function uses hal_srng_dst_num_valid_locked to get the number of valid
1664  * entries. It should not be called within a SRNG lock. HW pointer value is
1665  * synced into cached_hp.
1666  *
1667  * Return: Number of pending entries if any
1668  */
1669 static
1670 uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
1671 				    hal_ring_handle_t hal_ring_hdl,
1672 				    uint32_t num_entries,
1673 				    bool *near_full)
1674 {
1675 	uint32_t num_pending = 0;
1676 
1677 	num_pending = hal_srng_dst_num_valid_locked(hal_soc,
1678 						    hal_ring_hdl,
1679 						    true);
1680 
1681 	if (num_entries && (num_pending >= num_entries >> 1))
1682 		*near_full = true;
1683 	else
1684 		*near_full = false;
1685 
1686 	return num_pending;
1687 }
1688 
1689 /**
1690  * dp_rx_process() - Brain of the Rx processing functionality
1691  *		     Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
1692  * @int_ctx: per interrupt context
1693  * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
1694  * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring.
1695  * @quota: No. of units (packets) that can be serviced in one shot.
1696  *
1697  * This function implements the core of Rx functionality. This is
1698  * expected to handle only non-error frames.
1699  *
1700  * Return: uint32_t: No. of elements processed
1701  */
1702 uint32_t dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
1703 			    uint8_t reo_ring_num, uint32_t quota)
1704 {
1705 	hal_ring_desc_t ring_desc;
1706 	hal_soc_handle_t hal_soc;
1707 	struct dp_rx_desc *rx_desc = NULL;
1708 	qdf_nbuf_t nbuf, next;
1709 	bool near_full;
1710 	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT];
1711 	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT];
1712 	uint32_t num_pending;
1713 	uint32_t rx_bufs_used = 0, rx_buf_cookie;
1714 	uint32_t l2_hdr_offset = 0;
1715 	uint16_t msdu_len = 0;
1716 	uint16_t peer_id;
1717 	uint8_t vdev_id;
1718 	struct dp_peer *peer;
1719 	struct dp_vdev *vdev;
1720 	uint32_t pkt_len = 0;
1721 	struct hal_rx_mpdu_desc_info mpdu_desc_info;
1722 	struct hal_rx_msdu_desc_info msdu_desc_info;
1723 	enum hal_reo_error_status error;
1724 	uint32_t peer_mdata;
1725 	uint8_t *rx_tlv_hdr;
1726 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT];
1727 	uint8_t mac_id = 0;
1728 	struct dp_pdev *pdev;
1729 	struct dp_pdev *rx_pdev;
1730 	struct dp_srng *dp_rxdma_srng;
1731 	struct rx_desc_pool *rx_desc_pool;
1732 	struct dp_soc *soc = int_ctx->soc;
1733 	uint8_t ring_id = 0;
1734 	uint8_t core_id = 0;
1735 	struct cdp_tid_rx_stats *tid_stats;
1736 	qdf_nbuf_t nbuf_head;
1737 	qdf_nbuf_t nbuf_tail;
1738 	qdf_nbuf_t deliver_list_head;
1739 	qdf_nbuf_t deliver_list_tail;
1740 	uint32_t num_rx_bufs_reaped = 0;
1741 	uint32_t intr_id;
1742 	struct hif_opaque_softc *scn;
1743 	int32_t tid = 0;
1744 	bool is_prev_msdu_last = true;
1745 	uint32_t num_entries_avail = 0;
1746 	uint32_t rx_ol_pkt_cnt = 0;
1747 	uint32_t num_entries = 0;
1748 
1749 	DP_HIST_INIT();
1750 
1751 	qdf_assert_always(soc && hal_ring_hdl);
1752 	hal_soc = soc->hal_soc;
1753 	qdf_assert_always(hal_soc);
1754 
1755 	scn = soc->hif_handle;
1756 	hif_pm_runtime_mark_dp_rx_busy(scn);
1757 	intr_id = int_ctx->dp_intr_id;
1758 	num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);
1759 
1760 more_data:
1761 	/* reset local variables here to be re-used in the function */
1762 	nbuf_head = NULL;
1763 	nbuf_tail = NULL;
1764 	deliver_list_head = NULL;
1765 	deliver_list_tail = NULL;
1766 	peer = NULL;
1767 	vdev = NULL;
1768 	num_rx_bufs_reaped = 0;
1769 
1770 	qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
1771 	qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
1772 	qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
1773 	qdf_mem_zero(head, sizeof(head));
1774 	qdf_mem_zero(tail, sizeof(tail));
1775 
1776 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
1777 
1778 		/*
1779 		 * Need API to convert from hal_ring pointer to
1780 		 * Ring Type / Ring Id combo
1781 		 */
1782 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
1783 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1784 			FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
1785 		goto done;
1786 	}
1787 
1788 	/*
1789 	 * start reaping the buffers from reo ring and queue
1790 	 * them in per vdev queue.
1791 	 * Process the received pkts in a different per vdev loop.
1792 	 */
1793 	while (qdf_likely(quota &&
1794 			  (ring_desc = hal_srng_dst_peek(hal_soc,
1795 							 hal_ring_hdl)))) {
1796 
1797 		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
1798 		ring_id = hal_srng_ring_id_get(hal_ring_hdl);
1799 
1800 		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
1801 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1802 			FL("HAL RING 0x%pK:error %d"), hal_ring_hdl, error);
1803 			DP_STATS_INC(soc, rx.err.hal_reo_error[ring_id], 1);
1804 			/* Don't know how to deal with this -- assert */
1805 			qdf_assert(0);
1806 		}
1807 
1808 		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
1809 
1810 		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
1811 		qdf_assert(rx_desc);
1812 
1813 		/*
1814 		 * This is an unlikely scenario where the host is reaping
1815 		 * a descriptor which it already reaped a while ago but
1816 		 * has not yet replenished back to HW.
1817 		 * In this case the host will dump the last 128 descriptors,
1818 		 * including the software descriptor rx_desc, and assert.
1819 		 */
1820 
1821 		if (qdf_unlikely(!rx_desc->in_use)) {
1822 			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
1823 			dp_info_rl("Reaping rx_desc not in use!");
1824 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
1825 						   ring_desc, rx_desc);
1826 			/* ignore duplicate RX desc and continue to process */
1827 			/* Pop out the descriptor */
1828 			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
1829 			continue;
1830 		}
1831 
1832 		if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
1833 			dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
1834 			DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
1835 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
1836 						   ring_desc, rx_desc);
1837 		}
1838 
1839 		dp_rx_desc_nbuf_sanity_check(ring_desc, rx_desc);
1840 
1841 		/* TODO */
1842 		/*
1843 		 * Need a separate API for unmapping based on
1844 		 * physical address
1845 		 */
1846 		qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
1847 					QDF_DMA_FROM_DEVICE);
1848 		rx_desc->unmapped = 1;
1849 
1850 		core_id = smp_processor_id();
1851 		DP_STATS_INC(soc, rx.ring_packets[core_id][ring_id], 1);
1852 
1853 		/* Get MPDU DESC info */
1854 		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);
1855 
1856 		/* Get MSDU DESC info */
1857 		hal_rx_msdu_desc_info_get(ring_desc, &msdu_desc_info);
1858 
1859 		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
1860 			qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1);
1861 
1862 		if (qdf_unlikely(mpdu_desc_info.mpdu_flags &
1863 				HAL_MPDU_F_RAW_AMPDU)) {
1864 			/* if the previous msdu had its end bit set, the
1865 			 * current one starts a new MPDU
1866 			 */
1867 			if (is_prev_msdu_last) {
1868 				is_prev_msdu_last = false;
1869 				/* Get number of entries available in HW ring */
1870 				num_entries_avail =
1871 				hal_srng_dst_num_valid(hal_soc,
1872 						       hal_ring_hdl, 1);
1873 
1874 				/* For new MPDU check if we can read complete
1875 				 * MPDU by comparing the number of buffers
1876 				 * available and number of buffers needed to
1877 				 * reap this MPDU
1878 				 */
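				/* Illustrative example (hypothetical sizes,
				 * not from this build): with RX_BUFFER_SIZE
				 * 2048 and RX_PKT_TLVS_LEN 384, a 4000-byte
				 * raw MSDU needs 4000 / 1664 + 1 = 3 buffers;
				 * if fewer than 3 entries are valid in the
				 * ring, the reap loop breaks out here and the
				 * MPDU is reaped on a later pass.
				 */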
1879 				if (((msdu_desc_info.msdu_len /
1880 				     (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN) + 1)) >
1881 				     num_entries_avail)
1882 					break;
1883 			} else {
1884 				if (msdu_desc_info.msdu_flags &
1885 				    HAL_MSDU_F_LAST_MSDU_IN_MPDU)
1886 					is_prev_msdu_last = true;
1887 			}
1888 			qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);
1889 		}
1890 
1891 		/* Pop out the descriptor */
1892 		hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
1893 
1894 		rx_bufs_reaped[rx_desc->pool_id]++;
1895 		peer_mdata = mpdu_desc_info.peer_meta_data;
1896 		QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
1897 			DP_PEER_METADATA_PEER_ID_GET(peer_mdata);
1898 		QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) =
1899 			DP_PEER_METADATA_VDEV_ID_GET(peer_mdata);
1900 
1901 		/*
1902 		 * save the msdu flags (first, last and continuation msdu)
1903 		 * in nbuf->cb, and also save mcbc, is_da_valid, is_sa_valid
1904 		 * and length in nbuf->cb. This ensures the info required for
1905 		 * per pkt processing is always in the same cache line.
1906 		 * This helps in improving throughput for smaller pkt
1907 		 * sizes.
1908 		 */
1909 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
1910 			qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);
1911 
1912 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
1913 			qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);
1914 
1915 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
1916 			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);
1917 
1918 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
1919 			qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1);
1920 
1921 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
1922 			qdf_nbuf_set_da_valid(rx_desc->nbuf, 1);
1923 
1924 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
1925 			qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1);
1926 
1927 		qdf_nbuf_set_tid_val(rx_desc->nbuf,
1928 				     HAL_RX_REO_QUEUE_NUMBER_GET(ring_desc));
1929 
1930 		QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len;
1931 
1932 		QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;
1933 
1934 		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
1935 
1936 		/*
1937 		 * if continuation bit is set then we have MSDU spread
1938 		 * across multiple buffers, let us not decrement quota
1939 		 * till we reap all buffers of that MSDU.
1940 		 */
1941 		if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)))
1942 			quota -= 1;
1943 
1944 		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
1945 						&tail[rx_desc->pool_id],
1946 						rx_desc);
1947 
1948 		num_rx_bufs_reaped++;
1949 		if (dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped))
1950 			break;
1951 	}
1952 done:
1953 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
1954 
1955 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
1956 		/*
1957 		 * continue with next mac_id if no pkts were reaped
1958 		 * from that pool
1959 		 */
1960 		if (!rx_bufs_reaped[mac_id])
1961 			continue;
1962 
1963 		pdev = soc->pdev_list[mac_id];
1964 		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
1965 		rx_desc_pool = &soc->rx_desc_buf[mac_id];
1966 
1967 		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
1968 					rx_desc_pool, rx_bufs_reaped[mac_id],
1969 					&head[mac_id], &tail[mac_id]);
1970 	}
1971 
1972 	dp_verbose_debug("replenished %u\n", rx_bufs_reaped[0]);
1973 	/* Peer can be NULL in case of LFR */
1974 	if (qdf_likely(peer))
1975 		vdev = NULL;
1976 
1977 	/*
1978 	 * BIG loop where each nbuf is dequeued from global queue,
1979 	 * processed and queued back on a per vdev basis. These nbufs
1980 	 * are sent to the stack when we run out of nbufs or when a
1981 	 * newly dequeued nbuf belongs to a different vdev than the
1982 	 * previous one.
1983 	 */
1984 	nbuf = nbuf_head;
1985 	while (nbuf) {
1986 		next = nbuf->next;
1987 		rx_tlv_hdr = qdf_nbuf_data(nbuf);
1988 		vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
1989 
1990 		if (deliver_list_head && vdev && (vdev->vdev_id != vdev_id)) {
1991 			dp_rx_deliver_to_stack(vdev, peer, deliver_list_head,
1992 					       deliver_list_tail);
1993 			deliver_list_head = NULL;
1994 			deliver_list_tail = NULL;
1995 		}
1996 
1997 		/* Get TID from struct cb->tid_val, save to tid */
1998 		if (qdf_nbuf_is_rx_chfrag_start(nbuf))
1999 			tid = qdf_nbuf_get_tid_val(nbuf);
2000 
2001 		peer_id =  QDF_NBUF_CB_RX_PEER_ID(nbuf);
2002 		peer = dp_peer_find_by_id(soc, peer_id);
2003 
2004 		if (peer) {
2005 			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
2006 			qdf_dp_trace_set_track(nbuf, QDF_RX);
2007 			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
2008 			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
2009 				QDF_NBUF_RX_PKT_DATA_TRACK;
2010 		}
2011 
2012 		rx_bufs_used++;
2013 
2014 		if (qdf_likely(peer)) {
2015 			vdev = peer->vdev;
2016 		} else {
2017 			nbuf->next = NULL;
2018 			dp_rx_deliver_to_stack_no_peer(soc, nbuf);
2019 			nbuf = next;
2020 			continue;
2021 		}
2022 
2023 		if (qdf_unlikely(!vdev)) {
2024 			qdf_nbuf_free(nbuf);
2025 			nbuf = next;
2026 			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
2027 			dp_peer_unref_del_find_by_id(peer);
2028 			continue;
2029 		}
2030 
2031 		rx_pdev = vdev->pdev;
2032 		DP_RX_TID_SAVE(nbuf, tid);
2033 		if (qdf_unlikely(rx_pdev->delay_stats_flag))
2034 			qdf_nbuf_set_timestamp(nbuf);
2035 
2036 		ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
2037 		tid_stats =
2038 			&rx_pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
2039 
2040 		/*
2041 		 * Check if DMA completed -- msdu_done is the last bit
2042 		 * to be written
2043 		 */
2044 		if (qdf_unlikely(!qdf_nbuf_is_raw_frame(nbuf) &&
2045 				 !hal_rx_attn_msdu_done_get(rx_tlv_hdr))) {
2046 			dp_err("MSDU DONE failure");
2047 			DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
2048 			hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
2049 					     QDF_TRACE_LEVEL_INFO);
2050 			tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
2051 			qdf_nbuf_free(nbuf);
2052 			qdf_assert(0);
2053 			nbuf = next;
2054 			continue;
2055 		}
2056 
2057 		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
2058 		/*
2059 		 * First IF condition:
2060 		 * 802.11 Fragmented pkts are reinjected to REO
2061 		 * HW block as SG pkts and for these pkts we only
2062 		 * need to pull the RX TLVS header length.
2063 		 * Second IF condition:
2064 		 * The below condition happens when an MSDU is spread
2065 		 * across multiple buffers. This can happen in two cases
2066 		 * 1. The nbuf size is smaller than the received msdu.
2067 		 *    ex: the nbuf size is set to 2048 during
2068 		 *        nbuf_alloc, but the received msdu is 2304
2069 		 *        bytes, so the msdu is spread across 2
2070 		 *        nbufs.
2071 		 *
2072 		 * 2. AMSDUs when RAW mode is enabled.
2073 		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
2074 		 *        across 1st nbuf and 2nd nbuf and last MSDU is
2075 		 *        spread across 2nd nbuf and 3rd nbuf.
2076 		 *
2077 		 * for these scenarios let us create a skb frag_list and
2078 		 * append these buffers till the last MSDU of the AMSDU
2079 		 * Third condition:
2080 		 * This is the most likely case, we receive 802.3 pkts
2081 		 * decapsulated by HW, here we need to set the pkt length.
2082 		 */
2083 		if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
2084 			bool is_mcbc, is_sa_vld, is_da_vld;
2085 
2086 			is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
2087 								 rx_tlv_hdr);
2088 			is_sa_vld =
2089 				hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
2090 								rx_tlv_hdr);
2091 			is_da_vld =
2092 				hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
2093 								rx_tlv_hdr);
2094 
2095 			qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
2096 			qdf_nbuf_set_da_valid(nbuf, is_da_vld);
2097 			qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);
2098 
2099 			qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
2100 		} else if (qdf_nbuf_is_raw_frame(nbuf)) {
2101 			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
2102 			nbuf = dp_rx_sg_create(nbuf, rx_tlv_hdr);
2103 
2104 			DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
2105 			DP_STATS_INC_PKT(peer, rx.raw, 1, msdu_len);
2106 
2107 			next = nbuf->next;
2108 		} else {
2109 			l2_hdr_offset =
2110 				hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
2111 								   rx_tlv_hdr);
2112 
2113 			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
2114 			pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;
2115 
2116 			qdf_nbuf_set_pktlen(nbuf, pkt_len);
2117 			qdf_nbuf_pull_head(nbuf,
2118 					   RX_PKT_TLVS_LEN +
2119 					   l2_hdr_offset);
2120 		}
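
		/* Illustrative example (hypothetical sizes): for a plain
		 * 802.3 frame with msdu_len 1500, l2_hdr_offset 2 and
		 * RX_PKT_TLVS_LEN 384, pkt_len is 1500 + 2 + 384 = 1886 and
		 * the pull of 384 + 2 bytes leaves nbuf->data pointing at
		 * the start of the ethernet header.
		 */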
2121 
2122 		/*
2123 		 * process the frame for multipass processing
2124 		 */
2125 		if (qdf_unlikely(vdev->multipass_en)) {
2126 			dp_rx_multipass_process(peer, nbuf, tid);
2127 		}
2128 
2129 		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
2130 			QDF_TRACE(QDF_MODULE_ID_DP,
2131 					QDF_TRACE_LEVEL_ERROR,
2132 					FL("Policy Check Drop pkt"));
2133 			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
2134 			/* Drop & free packet */
2135 			qdf_nbuf_free(nbuf);
2136 			/* Statistics */
2137 			nbuf = next;
2138 			dp_peer_unref_del_find_by_id(peer);
2139 			continue;
2140 		}
2141 
2142 		if (qdf_unlikely(peer && (peer->nawds_enabled) &&
2143 				 (qdf_nbuf_is_da_mcbc(nbuf)) &&
2144 				 (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc,
2145 								rx_tlv_hdr) ==
2146 				  false))) {
2147 			tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
2148 			DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
2149 			qdf_nbuf_free(nbuf);
2150 			nbuf = next;
2151 			dp_peer_unref_del_find_by_id(peer);
2152 			continue;
2153 		}
2154 
2155 		if (soc->process_rx_status)
2156 			dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);
2157 
2158 		/* Update the protocol tag in SKB based on CCE metadata */
2159 		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
2160 					  reo_ring_num, false, true);
2161 
2162 		/* Update the flow tag in SKB based on FSE metadata */
2163 		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
2164 
2165 		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer,
2166 					ring_id, tid_stats);
2167 
2168 		if (qdf_unlikely(vdev->mesh_vdev)) {
2169 			if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
2170 					== QDF_STATUS_SUCCESS) {
2171 				QDF_TRACE(QDF_MODULE_ID_DP,
2172 						QDF_TRACE_LEVEL_INFO_MED,
2173 						FL("mesh pkt filtered"));
2174 				tid_stats->fail_cnt[MESH_FILTER_DROP]++;
2175 				DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
2176 					     1);
2177 
2178 				qdf_nbuf_free(nbuf);
2179 				nbuf = next;
2180 				dp_peer_unref_del_find_by_id(peer);
2181 				continue;
2182 			}
2183 			dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
2184 		}
2185 
2186 		if (qdf_likely(vdev->rx_decap_type ==
2187 			       htt_cmn_pkt_type_ethernet) &&
2188 		    qdf_likely(!vdev->mesh_vdev)) {
2189 			/* WDS Destination Address Learning */
2190 			dp_rx_da_learn(soc, rx_tlv_hdr, peer, nbuf);
2191 
2192 			/* Due to a HW issue, the sa_idx and da_idx are
2193 			 * sometimes invalid even though the sa_valid and
2194 			 * da_valid bits are set.
2195 			 *
2196 			 * In this case the value of sa_sw_peer_id is also
2197 			 * seen as 0.
2198 			 *
2199 			 * Drop the packet if sa_idx or da_idx is out of
2200 			 * bounds, or if sa_sw_peer_id is 0.
2201 			 */
2202 			if (!is_sa_da_idx_valid(soc, rx_tlv_hdr, nbuf)) {
2203 				qdf_nbuf_free(nbuf);
2204 				nbuf = next;
2205 				DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
2206 				dp_peer_unref_del_find_by_id(peer);
2207 				continue;
2208 			}
2209 			/* WDS Source Port Learning */
2210 			if (qdf_likely(vdev->wds_enabled))
2211 				dp_rx_wds_srcport_learn(soc, rx_tlv_hdr,
2212 							peer, nbuf);
2213 
2214 			/* Intrabss-fwd */
2215 			if (dp_rx_check_ap_bridge(vdev))
2216 				if (dp_rx_intrabss_fwd(soc,
2217 							peer,
2218 							rx_tlv_hdr,
2219 							nbuf)) {
2220 					nbuf = next;
2221 					dp_peer_unref_del_find_by_id(peer);
2222 					tid_stats->intrabss_cnt++;
2223 					continue; /* Get next desc */
2224 				}
2225 		}
2226 
2227 		dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt);
2228 
2229 		DP_RX_LIST_APPEND(deliver_list_head,
2230 				  deliver_list_tail,
2231 				  nbuf);
2232 		DP_STATS_INC_PKT(peer, rx.to_stack, 1,
2233 				 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
2234 
2235 		tid_stats->delivered_to_stack++;
2236 		nbuf = next;
2237 		dp_peer_unref_del_find_by_id(peer);
2238 	}
2239 
2240 	if (qdf_likely(deliver_list_head)) {
2241 		if (qdf_likely(peer))
2242 			dp_rx_deliver_to_stack(vdev, peer, deliver_list_head,
2243 					       deliver_list_tail);
2244 		else {
2245 			nbuf = deliver_list_head;
2246 			while (nbuf) {
2247 				next = nbuf->next;
2248 				nbuf->next = NULL;
2249 				dp_rx_deliver_to_stack_no_peer(soc, nbuf);
2250 				nbuf = next;
2251 			}
2252 		}
2253 	}
2254 
2255 	if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
2256 		if (quota) {
2257 			num_pending =
2258 				dp_rx_srng_get_num_pending(hal_soc,
2259 							   hal_ring_hdl,
2260 							   num_entries,
2261 							   &near_full);
2262 			if (num_pending) {
2263 				DP_STATS_INC(soc, rx.hp_oos2, 1);
2264 
2265 				if (!hif_exec_should_yield(scn, intr_id))
2266 					goto more_data;
2267 
2268 				if (qdf_unlikely(near_full)) {
2269 					DP_STATS_INC(soc, rx.near_full, 1);
2270 					goto more_data;
2271 				}
2272 			}
2273 		}
2274 
2275 		if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
2276 			vdev->osif_gro_flush(vdev->osif_vdev,
2277 					     reo_ring_num);
2278 		}
2279 	}
2280 
2281 	/* Update histogram statistics by looping through pdev's */
2282 	DP_RX_HIST_STATS_PER_PDEV();
2283 
2284 	return rx_bufs_used; /* Assume no scale factor for now */
2285 }
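
/*
 * Illustrative (hypothetical) caller sketch, not part of this file. The
 * interrupt/NAPI handler is assumed to walk its REO destination rings and
 * hand each one to dp_rx_process() with the remaining budget, e.g.:
 *
 *	work_done = dp_rx_process(int_ctx,
 *				  soc->reo_dest_ring[ring].hal_srng,
 *				  ring, remaining_quota);
 *	budget -= work_done;
 */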
2286 
2287 QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev)
2288 {
2289 	QDF_STATUS ret;
2290 
2291 	if (vdev->osif_rx_flush) {
2292 		ret = vdev->osif_rx_flush(vdev->osif_vdev, vdev->vdev_id);
2293 		if (!ret) {
2294 			dp_err("Failed to flush rx pkts for vdev %d\n",
2295 			       vdev->vdev_id);
2296 			return ret;
2297 		}
2298 	}
2299 
2300 	return QDF_STATUS_SUCCESS;
2301 }
2302 
2303 /**
2304  * dp_rx_pdev_detach() - detach dp rx
2305  * @pdev: core txrx pdev context
2306  *
2307  * This function will detach DP RX from the main device context
2308  * and free the DP RX resources.
2309  *
2310  * Return: void
2311  */
2312 void
2313 dp_rx_pdev_detach(struct dp_pdev *pdev)
2314 {
2315 	uint8_t pdev_id = pdev->pdev_id;
2316 	struct dp_soc *soc = pdev->soc;
2317 	struct rx_desc_pool *rx_desc_pool;
2318 
2319 	rx_desc_pool = &soc->rx_desc_buf[pdev_id];
2320 
2321 	if (rx_desc_pool->pool_size != 0) {
2322 		if (!dp_is_soc_reinit(soc))
2323 			dp_rx_desc_nbuf_and_pool_free(soc, pdev_id,
2324 						      rx_desc_pool);
2325 		else
2326 			dp_rx_desc_nbuf_free(soc, rx_desc_pool);
2327 	}
2328 
2329 	return;
2330 }
2331 
2332 static QDF_STATUS
2333 dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc, qdf_nbuf_t *nbuf,
2334 			   struct dp_pdev *dp_pdev)
2335 {
2336 	qdf_dma_addr_t paddr;
2337 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
2338 
2339 	*nbuf = qdf_nbuf_alloc(dp_soc->osdev, RX_BUFFER_SIZE,
2340 			      RX_BUFFER_RESERVATION, RX_BUFFER_ALIGNMENT,
2341 			      FALSE);
2342 	if (!(*nbuf)) {
2343 		dp_err("nbuf alloc failed");
2344 		DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
2345 		return ret;
2346 	}
2347 
2348 	ret = qdf_nbuf_map_single(dp_soc->osdev, *nbuf,
2349 				  QDF_DMA_FROM_DEVICE);
2350 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
2351 		qdf_nbuf_free(*nbuf);
2352 		dp_err("nbuf map failed");
2353 		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
2354 		return ret;
2355 	}
2356 
2357 	paddr = qdf_nbuf_get_frag_paddr(*nbuf, 0);
2358 
2359 	ret = check_x86_paddr(dp_soc, nbuf, &paddr, dp_pdev);
2360 	if (ret == QDF_STATUS_E_FAILURE) {
2361 		qdf_nbuf_unmap_single(dp_soc->osdev, *nbuf,
2362 				      QDF_DMA_FROM_DEVICE);
2363 		qdf_nbuf_free(*nbuf);
2364 		dp_err("nbuf check x86 failed");
2365 		DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
2366 		return ret;
2367 	}
2368 
2369 	return QDF_STATUS_SUCCESS;
2370 }
2371 
2372 QDF_STATUS
2373 dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
2374 			  struct dp_srng *dp_rxdma_srng,
2375 			  struct rx_desc_pool *rx_desc_pool,
2376 			  uint32_t num_req_buffers)
2377 {
2378 	struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(dp_soc, mac_id);
2379 	hal_ring_handle_t rxdma_srng = dp_rxdma_srng->hal_srng;
2380 	union dp_rx_desc_list_elem_t *next;
2381 	void *rxdma_ring_entry;
2382 	qdf_dma_addr_t paddr;
2383 	qdf_nbuf_t *rx_nbuf_arr;
2384 	uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0;
2385 	uint32_t buffer_index, nbuf_ptrs_per_page;
2386 	qdf_nbuf_t nbuf;
2387 	QDF_STATUS ret;
2388 	int page_idx, total_pages;
2389 	union dp_rx_desc_list_elem_t *desc_list = NULL;
2390 	union dp_rx_desc_list_elem_t *tail = NULL;
2391 
2392 	if (qdf_unlikely(!rxdma_srng)) {
2393 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
2394 		return QDF_STATUS_E_FAILURE;
2395 	}
2396 
2397 	dp_debug("requested %u RX buffers for driver attach", num_req_buffers);
2398 
2399 	nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool,
2400 					    num_req_buffers, &desc_list, &tail);
2401 	if (!nr_descs) {
2402 		dp_err("no free rx_descs in freelist");
2403 		DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
2404 		return QDF_STATUS_E_NOMEM;
2405 	}
2406 
2407 	dp_debug("got %u RX descs for driver attach", nr_descs);
2408 
2409 	/*
2410 	 * Allocate the nbuf pointer array one page at a time:
2411 	 * take as many nbuf pointers as fit in one page of memory
2412 	 * and walk through the total number of descriptors that
2413 	 * need to be allocated page by page, reusing the same
2414 	 * page-sized array of pointers on each iteration to index
2415 	 * into the nbufs.
2416 	 */
2417 	total_pages = (nr_descs * sizeof(*rx_nbuf_arr)) / PAGE_SIZE;
2418 
2419 	/*
2420 	 * Add an extra page to store the remainder if any
2421 	 */
2422 	if ((nr_descs * sizeof(*rx_nbuf_arr)) % PAGE_SIZE)
2423 		total_pages++;
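
	/* Illustrative example (hypothetical sizes): with a 4096-byte
	 * PAGE_SIZE and 8-byte nbuf pointers, one page holds 512 pointers,
	 * so 1000 descriptors are filled in two passes over the same
	 * page-sized array (512 + 488).
	 */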
2424 	rx_nbuf_arr = qdf_mem_malloc(PAGE_SIZE);
2425 	if (!rx_nbuf_arr) {
2426 		dp_err("failed to allocate nbuf array");
2427 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
2428 		QDF_BUG(0);
2429 		return QDF_STATUS_E_NOMEM;
2430 	}
2431 	nbuf_ptrs_per_page = PAGE_SIZE / sizeof(*rx_nbuf_arr);
2432 
2433 	for (page_idx = 0; page_idx < total_pages; page_idx++) {
2434 		qdf_mem_zero(rx_nbuf_arr, PAGE_SIZE);
2435 
2436 		for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) {
2437 			/*
2438 			 * The last page of buffer pointers may not be fully
2439 			 * used, depending on the number of descriptors. The
2440 			 * check below ensures we allocate only the required
2441 			 * number of buffers.
2442 			 */
2443 			if (nr_nbuf_total >= nr_descs)
2444 				break;
2445 			ret = dp_pdev_nbuf_alloc_and_map(dp_soc,
2446 							 &rx_nbuf_arr[nr_nbuf],
2447 							 dp_pdev);
2448 			if (QDF_IS_STATUS_ERROR(ret))
2449 				break;
2450 
2451 			nr_nbuf_total++;
2452 		}
2453 
2454 		hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
2455 
2456 		for (buffer_index = 0; buffer_index < nr_nbuf; buffer_index++) {
2457 			rxdma_ring_entry =
2458 				hal_srng_src_get_next(dp_soc->hal_soc,
2459 						      rxdma_srng);
2460 			qdf_assert_always(rxdma_ring_entry);
2461 
2462 			next = desc_list->next;
2463 			nbuf = rx_nbuf_arr[buffer_index];
2464 			paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
2465 
2466 			dp_rx_desc_prep(&desc_list->rx_desc, nbuf);
2467 			desc_list->rx_desc.in_use = 1;
2468 
2469 			hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
2470 						     desc_list->rx_desc.cookie,
2471 						     rx_desc_pool->owner);
2472 
2473 			dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, nbuf, true);
2474 
2475 			desc_list = next;
2476 		}
2477 
2478 		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
2479 	}
2480 
2481 	dp_info("filled %u RX buffers for driver attach", nr_nbuf_total);
2482 	qdf_mem_free(rx_nbuf_arr);
2483 
2484 	if (!nr_nbuf_total) {
2485 		dp_err("No nbuf's allocated");
2486 		QDF_BUG(0);
2487 		return QDF_STATUS_E_RESOURCES;
2488 	}
2489 	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf,
2490 			 RX_BUFFER_SIZE * nr_nbuf_total);
2491 
2492 	return QDF_STATUS_SUCCESS;
2493 }
2494 
2495 /**
2496  * dp_rx_pdev_attach() - attach DP RX
2497  * @pdev: core txrx pdev context
2498  *
2499  * This function will attach a DP RX instance to the main
2500  * device (SOC) context and will allocate and initialize the
2501  * DP RX resources.
2502  *
2503  * Return: QDF_STATUS_SUCCESS: success
2504  *         QDF_STATUS_E_RESOURCES: Error return
2505  */
2506 QDF_STATUS
2507 dp_rx_pdev_attach(struct dp_pdev *pdev)
2508 {
2509 	uint8_t pdev_id = pdev->pdev_id;
2510 	struct dp_soc *soc = pdev->soc;
2511 	uint32_t rxdma_entries;
2512 	uint32_t rx_sw_desc_weight;
2513 	struct dp_srng *dp_rxdma_srng;
2514 	struct rx_desc_pool *rx_desc_pool;
2515 	QDF_STATUS ret_val;
2516 
2517 
2518 	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
2519 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2520 			  "nss-wifi<4> skip Rx refil %d", pdev_id);
2521 		return QDF_STATUS_SUCCESS;
2522 	}
2523 
2524 	pdev = soc->pdev_list[pdev_id];
2525 	dp_rxdma_srng = &pdev->rx_refill_buf_ring;
2526 	rxdma_entries = dp_rxdma_srng->num_entries;
2527 
2528 	soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;
2529 
2530 	rx_desc_pool = &soc->rx_desc_buf[pdev_id];
2531 	rx_sw_desc_weight = wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc->wlan_cfg_ctx);
2532 
2533 	dp_rx_desc_pool_alloc(soc, pdev_id,
2534 			      rx_sw_desc_weight * rxdma_entries,
2535 			      rx_desc_pool);
2536 
2537 	rx_desc_pool->owner = DP_WBM2SW_RBM;
2538 	/* For Rx buffers, WBM release ring is SW RING 3, for all pdevs */
2539 
2540 	ret_val = dp_rx_fst_attach(soc, pdev);
2541 	if ((ret_val != QDF_STATUS_SUCCESS) &&
2542 	    (ret_val != QDF_STATUS_E_NOSUPPORT)) {
2543 		QDF_TRACE(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_ERROR,
2544 			  "RX Flow Search Table attach failed: pdev %d err %d",
2545 			  pdev_id, ret_val);
2546 		return ret_val;
2547 	}
2548 
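	/* Replenish one less than the ring size; keeping one entry unused
	 * is assumed here to be what lets a full ring be told apart from
	 * an empty one (head == tail meaning empty).
	 */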
2549 	return dp_pdev_rx_buffers_attach(soc, pdev_id, dp_rxdma_srng,
2550 					 rx_desc_pool, rxdma_entries - 1);
2551 }
2552 
2553 /*
2554  * dp_rx_nbuf_prepare() - prepare RX nbuf
2555  * @soc: core txrx main context
2556  * @pdev: core txrx pdev context
2557  *
2558  * This function allocates and maps an nbuf for RX DMA usage, retrying on
2559  * failure until it succeeds or the retry count reaches the max threshold.
2560  *
2561  * Return: qdf_nbuf_t pointer if succeeded, NULL if failed.
2562  */
2563 qdf_nbuf_t
2564 dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev)
2565 {
2566 	uint8_t *buf;
2567 	int32_t nbuf_retry_count;
2568 	QDF_STATUS ret;
2569 	qdf_nbuf_t nbuf = NULL;
2570 
2571 	for (nbuf_retry_count = 0; nbuf_retry_count <
2572 		QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD;
2573 			nbuf_retry_count++) {
2574 		/* Allocate a new skb */
2575 		nbuf = qdf_nbuf_alloc(soc->osdev,
2576 					RX_BUFFER_SIZE,
2577 					RX_BUFFER_RESERVATION,
2578 					RX_BUFFER_ALIGNMENT,
2579 					FALSE);
2580 
2581 		if (!nbuf) {
2582 			DP_STATS_INC(pdev,
2583 				replenish.nbuf_alloc_fail, 1);
2584 			continue;
2585 		}
2586 
2587 		buf = qdf_nbuf_data(nbuf);
2588 
2589 		memset(buf, 0, RX_BUFFER_SIZE);
2590 
2591 		ret = qdf_nbuf_map_single(soc->osdev, nbuf,
2592 				    QDF_DMA_FROM_DEVICE);
2593 
2594 		/* nbuf map failed */
2595 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
2596 			qdf_nbuf_free(nbuf);
2597 			DP_STATS_INC(pdev, replenish.map_err, 1);
2598 			continue;
2599 		}
2600 		/* qdf_nbuf alloc and map succeeded */
2601 		break;
2602 	}
2603 
2604 	/* nbuf alloc or map still failed after all retries */
2605 	if (qdf_unlikely(nbuf_retry_count >=
2606 			QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD))
2607 		return NULL;
2608 
2609 	return nbuf;
2610 }
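
/*
 * Illustrative (hypothetical) usage sketch, not part of this file:
 *
 *	qdf_nbuf_t nbuf = dp_rx_nbuf_prepare(soc, pdev);
 *
 *	if (!nbuf)
 *		return QDF_STATUS_E_NOMEM;	(alloc/map retries exhausted)
 *
 * On success the returned nbuf is zeroed, DMA-mapped and ready to be
 * posted to an RXDMA ring entry.
 */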
2611