xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.c (revision 297e63ab482d9b116c13ca2c7546ae532e27e27a)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "hal_hw_headers.h"
20 #include "dp_types.h"
21 #include "dp_rx.h"
22 #include "dp_peer.h"
23 #include "hal_rx.h"
24 #include "hal_api.h"
25 #include "qdf_nbuf.h"
26 #ifdef MESH_MODE_SUPPORT
27 #include "if_meta_hdr.h"
28 #endif
29 #include "dp_internal.h"
30 #include "dp_rx_mon.h"
31 #include "dp_ipa.h"
32 #ifdef FEATURE_WDS
33 #include "dp_txrx_wds.h"
34 #endif
35 
36 #ifdef ATH_RX_PRI_SAVE
37 #define DP_RX_TID_SAVE(_nbuf, _tid) \
38 	(qdf_nbuf_set_priority(_nbuf, _tid))
39 #else
40 #define DP_RX_TID_SAVE(_nbuf, _tid)
41 #endif
42 
43 #ifdef DP_RX_DISABLE_NDI_MDNS_FORWARDING
44 static inline
45 bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
46 {
47 	if (ta_peer->vdev->opmode == wlan_op_mode_ndi &&
48 	    qdf_nbuf_is_ipv6_mdns_pkt(nbuf)) {
49 		DP_STATS_INC(ta_peer, rx.intra_bss.mdns_no_fwd, 1);
50 		return false;
51 	}
52 	return true;
53 }
54 #else
55 static inline
56 bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
57 {
58 	return true;
59 }
60 #endif
61 static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
62 {
63 	return vdev->ap_bridge_enabled;
64 }
65 
66 #ifdef DUP_RX_DESC_WAR
67 void dp_rx_dump_info_and_assert(struct dp_soc *soc,
68 				hal_ring_handle_t hal_ring,
69 				hal_ring_desc_t ring_desc,
70 				struct dp_rx_desc *rx_desc)
71 {
72 	void *hal_soc = soc->hal_soc;
73 
74 	hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc);
75 	dp_rx_desc_dump(rx_desc);
76 }
77 #else
78 void dp_rx_dump_info_and_assert(struct dp_soc *soc,
79 				hal_ring_handle_t hal_ring_hdl,
80 				hal_ring_desc_t ring_desc,
81 				struct dp_rx_desc *rx_desc)
82 {
83 	hal_soc_handle_t hal_soc = soc->hal_soc;
84 
85 	dp_rx_desc_dump(rx_desc);
86 	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
87 	hal_srng_dump_ring(hal_soc, hal_ring_hdl);
88 	qdf_assert_always(0);
89 }
90 #endif
91 
92 #ifdef RX_DESC_SANITY_WAR
93 static inline
94 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
95 			     hal_ring_handle_t hal_ring_hdl,
96 			     hal_ring_desc_t ring_desc,
97 			     struct dp_rx_desc *rx_desc)
98 {
99 	uint8_t return_buffer_manager;
100 
101 	if (qdf_unlikely(!rx_desc)) {
102 		/*
103 		 * This is an unlikely case where the cookie obtained
104 		 * from the ring_desc is invalid and hence we are not
105 		 * able to find the corresponding rx_desc
106 		 */
107 		goto fail;
108 	}
109 
110 	return_buffer_manager = hal_rx_ret_buf_manager_get(ring_desc);
111 	if (qdf_unlikely(!(return_buffer_manager == HAL_RX_BUF_RBM_SW1_BM ||
112 			 return_buffer_manager == HAL_RX_BUF_RBM_SW3_BM))) {
113 		goto fail;
114 	}
115 
116 	return QDF_STATUS_SUCCESS;
117 
118 fail:
119 	DP_STATS_INC(soc, rx.err.invalid_cookie, 1);
120 	dp_err("Ring Desc:");
121 	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl,
122 				ring_desc);
123 	return QDF_STATUS_E_NULL_VALUE;
124 
125 }
126 #else
127 static inline
128 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
129 			     hal_ring_handle_t hal_ring_hdl,
130 			     hal_ring_desc_t ring_desc,
131 			     struct dp_rx_desc *rx_desc)
132 {
133 	return QDF_STATUS_SUCCESS;
134 }
135 #endif
136 
137 /*
138  * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
139  *			       called during dp rx initialization
140  *			       and at the end of dp_rx_process.
141  *
142  * @soc: core txrx main context
143  * @mac_id: mac_id which is one of 3 mac_ids
144  * @dp_rxdma_srng: dp rxdma circular ring
145  * @rx_desc_pool: Pointer to free Rx descriptor pool
146  * @num_req_buffers: number of buffers to be replenished
147  * @desc_list: list of descs if called from dp_rx_process
148  *	       or NULL during dp rx initialization or out of buffer
149  *	       interrupt.
150  * @tail: tail of descs list
151  * Return: return success or failure
152  */
153 QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
154 				struct dp_srng *dp_rxdma_srng,
155 				struct rx_desc_pool *rx_desc_pool,
156 				uint32_t num_req_buffers,
157 				union dp_rx_desc_list_elem_t **desc_list,
158 				union dp_rx_desc_list_elem_t **tail)
159 {
160 	uint32_t num_alloc_desc;
161 	uint16_t num_desc_to_free = 0;
162 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
163 	uint32_t num_entries_avail;
164 	uint32_t count;
165 	int sync_hw_ptr = 1;
166 	qdf_dma_addr_t paddr;
167 	qdf_nbuf_t rx_netbuf;
168 	void *rxdma_ring_entry;
169 	union dp_rx_desc_list_elem_t *next;
170 	QDF_STATUS ret;
171 	uint16_t buf_size = rx_desc_pool->buf_size;
172 	uint8_t buf_alignment = rx_desc_pool->buf_alignment;
173 
174 	void *rxdma_srng;
175 
176 	rxdma_srng = dp_rxdma_srng->hal_srng;
177 
178 	if (!rxdma_srng) {
179 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
180 				  "rxdma srng not initialized");
181 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
182 		return QDF_STATUS_E_FAILURE;
183 	}
184 
185 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
186 		"requested %d buffers for replenish", num_req_buffers);
187 
188 	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
189 	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
190 						   rxdma_srng,
191 						   sync_hw_ptr);
192 
193 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
194 		"no of available entries in rxdma ring: %d",
195 		num_entries_avail);
196 
197 	if (!(*desc_list) && (num_entries_avail >
198 		((dp_rxdma_srng->num_entries * 3) / 4))) {
199 		num_req_buffers = num_entries_avail;
200 	} else if (num_entries_avail < num_req_buffers) {
201 		num_desc_to_free = num_req_buffers - num_entries_avail;
202 		num_req_buffers = num_entries_avail;
203 	}
204 
205 	if (qdf_unlikely(!num_req_buffers)) {
206 		num_desc_to_free = num_req_buffers;
207 		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
208 		goto free_descs;
209 	}
210 
211 	/*
212 	 * if desc_list is NULL, allocate the descs from freelist
213 	 */
214 	if (!(*desc_list)) {
215 		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
216 							  rx_desc_pool,
217 							  num_req_buffers,
218 							  desc_list,
219 							  tail);
220 
221 		if (!num_alloc_desc) {
222 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
223 				"no free rx_descs in freelist");
224 			DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
225 					num_req_buffers);
226 			hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
227 			return QDF_STATUS_E_NOMEM;
228 		}
229 
230 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
231 			"%d rx desc allocated", num_alloc_desc);
232 		num_req_buffers = num_alloc_desc;
233 	}
234 
235 
236 	count = 0;
237 
238 	while (count < num_req_buffers) {
239 		rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
240 					buf_size,
241 					RX_BUFFER_RESERVATION,
242 					buf_alignment,
243 					FALSE);
244 
245 		if (qdf_unlikely(!rx_netbuf)) {
246 			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
247 			break;
248 		}
249 
250 		ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev, rx_netbuf,
251 						 QDF_DMA_FROM_DEVICE, buf_size);
252 
253 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
254 			qdf_nbuf_free(rx_netbuf);
255 			DP_STATS_INC(dp_pdev, replenish.map_err, 1);
256 			continue;
257 		}
258 
259 		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);
260 
261 		dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, rx_netbuf, true);
262 		/*
263 		 * Check if the physical address of nbuf->data is
264 		 * less than 0x50000000; if so, free the nbuf and try
265 		 * allocating a new nbuf. We can retry up to 100 times.
266 		 * This is a temporary WAR till we fix it properly.
267 		 */
268 		ret = check_x86_paddr(dp_soc, &rx_netbuf, &paddr, rx_desc_pool);
269 		if (ret == QDF_STATUS_E_FAILURE) {
270 			DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
271 			break;
272 		}
273 
274 		count++;
275 
276 		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
277 							 rxdma_srng);
278 		qdf_assert_always(rxdma_ring_entry);
279 
280 		next = (*desc_list)->next;
281 
282 		dp_rx_desc_prep(&((*desc_list)->rx_desc), rx_netbuf);
283 
284 		/* rx_desc.in_use should be zero at this time */
285 		qdf_assert_always((*desc_list)->rx_desc.in_use == 0);
286 
287 		(*desc_list)->rx_desc.in_use = 1;
288 
289 		dp_verbose_debug("rx_netbuf=%pK, buf=%pK, paddr=0x%llx, cookie=%d",
290 				 rx_netbuf, qdf_nbuf_data(rx_netbuf),
291 				 (unsigned long long)paddr,
292 				 (*desc_list)->rx_desc.cookie);
293 
294 		hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
295 						(*desc_list)->rx_desc.cookie,
296 						rx_desc_pool->owner);
297 
298 		*desc_list = next;
299 
300 	}
301 
302 	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
303 
304 	dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u",
305 			 count, num_desc_to_free);
306 
307 	/* No need to count the number of bytes received during replenish.
308 	 * Therefore set replenish.pkts.bytes as 0.
309 	 */
310 	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
311 
312 free_descs:
313 	DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
314 	/*
315 	 * add any available free desc back to the free list
316 	 */
317 	if (*desc_list)
318 		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
319 			mac_id, rx_desc_pool);
320 
321 	return QDF_STATUS_SUCCESS;
322 }
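/*
 * Usage sketch (illustrative only, not compiled into the driver): a caller
 * that has reaped rx descriptors typically links them onto a head/tail list
 * and hands them back here, much like the tail end of dp_rx_process(). The
 * soc->rx_refill_buf_ring / soc->rx_desc_buf field names are assumptions
 * based on dp_types.h and should be verified against the current tree.
 *
 *	union dp_rx_desc_list_elem_t *head = NULL, *tail = NULL;
 *	uint32_t bufs_reaped;	// filled while reaping the REO dest ring
 *
 *	if (bufs_reaped)
 *		dp_rx_buffers_replenish(soc, mac_id,
 *					&soc->rx_refill_buf_ring[mac_id],
 *					&soc->rx_desc_buf[mac_id],
 *					bufs_reaped, &head, &tail);
 */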
323 
324 /*
325  * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
326  *				pkts to RAW mode simulation to
327  *				decapsulate the pkt.
328  *
329  * @vdev: vdev on which RAW mode is enabled
330  * @nbuf_list: list of RAW pkts to process
331  * @peer: peer object from which the pkt is rx
332  *
333  * Return: void
334  */
335 void
336 dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
337 					struct dp_peer *peer)
338 {
339 	qdf_nbuf_t deliver_list_head = NULL;
340 	qdf_nbuf_t deliver_list_tail = NULL;
341 	qdf_nbuf_t nbuf;
342 
343 	nbuf = nbuf_list;
344 	while (nbuf) {
345 		qdf_nbuf_t next = qdf_nbuf_next(nbuf);
346 
347 		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
348 
349 		DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
350 		DP_STATS_INC_PKT(peer, rx.raw, 1, qdf_nbuf_len(nbuf));
351 		/*
352 		 * reset the chfrag_start and chfrag_end bits in nbuf cb
353 		 * as this is a non-amsdu pkt and RAW mode simulation expects
354 		 * these bits to be 0 for a non-amsdu pkt.
355 		 */
356 		if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
357 			 qdf_nbuf_is_rx_chfrag_end(nbuf)) {
358 			qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
359 			qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
360 		}
361 
362 		nbuf = next;
363 	}
364 
365 	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
366 				 &deliver_list_tail, peer->mac_addr.raw);
367 
368 	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
369 }
370 
371 
372 #ifdef DP_LFR
373 /*
374  * In case of LFR, data of a new peer might be sent up
375  * even before the peer is added.
376  */
377 static inline struct dp_vdev *
378 dp_get_vdev_from_peer(struct dp_soc *soc,
379 			uint16_t peer_id,
380 			struct dp_peer *peer,
381 			struct hal_rx_mpdu_desc_info mpdu_desc_info)
382 {
383 	struct dp_vdev *vdev;
384 	uint8_t vdev_id;
385 
386 	if (unlikely(!peer)) {
387 		if (peer_id != HTT_INVALID_PEER) {
388 			vdev_id = DP_PEER_METADATA_VDEV_ID_GET(
389 					mpdu_desc_info.peer_meta_data);
390 			QDF_TRACE(QDF_MODULE_ID_DP,
391 				QDF_TRACE_LEVEL_DEBUG,
392 				FL("PeerID %d not found use vdevID %d"),
393 				peer_id, vdev_id);
394 			vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
395 								  vdev_id);
396 		} else {
397 			QDF_TRACE(QDF_MODULE_ID_DP,
398 				QDF_TRACE_LEVEL_DEBUG,
399 				FL("Invalid PeerID %d"),
400 				peer_id);
401 			return NULL;
402 		}
403 	} else {
404 		vdev = peer->vdev;
405 	}
406 	return vdev;
407 }
408 #else
409 static inline struct dp_vdev *
410 dp_get_vdev_from_peer(struct dp_soc *soc,
411 			uint16_t peer_id,
412 			struct dp_peer *peer,
413 			struct hal_rx_mpdu_desc_info mpdu_desc_info)
414 {
415 	if (unlikely(!peer)) {
416 		QDF_TRACE(QDF_MODULE_ID_DP,
417 			QDF_TRACE_LEVEL_DEBUG,
418 			FL("Peer not found for peerID %d"),
419 			peer_id);
420 		return NULL;
421 	} else {
422 		return peer->vdev;
423 	}
424 }
425 #endif
426 
427 #ifndef FEATURE_WDS
428 static void
429 dp_rx_da_learn(struct dp_soc *soc,
430 	       uint8_t *rx_tlv_hdr,
431 	       struct dp_peer *ta_peer,
432 	       qdf_nbuf_t nbuf)
433 {
434 }
435 #endif
436 /*
437  * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic
438  *
439  * @soc: core txrx main context
440  * @ta_peer	: source peer entry
441  * @rx_tlv_hdr	: start address of rx tlvs
442  * @nbuf	: nbuf that has to be intrabss forwarded
443  *
444  * Return: bool: true if it is forwarded else false
445  */
446 static bool
447 dp_rx_intrabss_fwd(struct dp_soc *soc,
448 			struct dp_peer *ta_peer,
449 			uint8_t *rx_tlv_hdr,
450 			qdf_nbuf_t nbuf,
451 			struct hal_rx_msdu_metadata msdu_metadata)
452 {
453 	uint16_t len;
454 	uint8_t is_frag;
455 	struct dp_peer *da_peer;
456 	struct dp_ast_entry *ast_entry;
457 	qdf_nbuf_t nbuf_copy;
458 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
459 	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
460 	struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
461 					tid_stats.tid_rx_stats[ring_id][tid];
462 
463 	/* check if the destination peer is available in peer table
464 	 * and also check if the source peer and destination peer
465 	 * belong to the same vap and the destination peer is not a bss peer.
466 	 */
467 
468 	if ((qdf_nbuf_is_da_valid(nbuf) && !qdf_nbuf_is_da_mcbc(nbuf))) {
469 
470 		ast_entry = soc->ast_table[msdu_metadata.da_idx];
471 		if (!ast_entry)
472 			return false;
473 
474 		if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
475 			ast_entry->is_active = TRUE;
476 			return false;
477 		}
478 
479 		da_peer = ast_entry->peer;
480 
481 		if (!da_peer)
482 			return false;
483 		/* TA peer cannot be the same as the peer (DA) on which the AST
484 		 * entry is present; this indicates a change in topology and
485 		 * that the AST entries are yet to be updated.
486 		 */
487 		if (da_peer == ta_peer)
488 			return false;
489 
490 		if (da_peer->vdev == ta_peer->vdev && !da_peer->bss_peer) {
491 			len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
492 			is_frag = qdf_nbuf_is_frag(nbuf);
493 			memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
494 
495 			/* linearize the nbuf just before we send to
496 			 * dp_tx_send()
497 			 */
498 			if (qdf_unlikely(is_frag)) {
499 				if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
500 					return false;
501 
502 				nbuf = qdf_nbuf_unshare(nbuf);
503 				if (!nbuf) {
504 					DP_STATS_INC_PKT(ta_peer,
505 							 rx.intra_bss.fail,
506 							 1,
507 							 len);
508 					/* return true even though the pkt is
509 					 * not forwarded. Basically skb_unshare
510 					 * failed and we want to continue with
511 					 * next nbuf.
512 					 */
513 					tid_stats->fail_cnt[INTRABSS_DROP]++;
514 					return true;
515 				}
516 			}
517 
518 			if (!dp_tx_send((struct cdp_soc_t *)soc,
519 					ta_peer->vdev->vdev_id, nbuf)) {
520 				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
521 						 len);
522 				return true;
523 			} else {
524 				DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
525 						len);
526 				tid_stats->fail_cnt[INTRABSS_DROP]++;
527 				return false;
528 			}
529 		}
530 	}
531 	/* If it is a broadcast pkt (e.g. ARP) and it is not from its own
532 	 * source, then clone the pkt and send the cloned pkt for
533 	 * intra-BSS forwarding and the original pkt up the network stack.
534 	 * Note: how do we handle multicast pkts? Do we forward
535 	 * all multicast pkts as-is, or let a higher layer module
536 	 * like igmpsnoop decide whether to forward or not with
537 	 * Mcast enhancement?
538 	 */
539 	else if (qdf_unlikely((qdf_nbuf_is_da_mcbc(nbuf) &&
540 			       !ta_peer->bss_peer))) {
541 		if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf))
542 			goto end;
543 
544 		nbuf_copy = qdf_nbuf_copy(nbuf);
545 		if (!nbuf_copy)
546 			goto end;
547 
548 		len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
549 		memset(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
550 
551 		/* Set cb->ftype to intrabss FWD */
552 		qdf_nbuf_set_tx_ftype(nbuf_copy, CB_FTYPE_INTRABSS_FWD);
553 		if (dp_tx_send((struct cdp_soc_t *)soc,
554 			       ta_peer->vdev->vdev_id, nbuf_copy)) {
555 			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, len);
556 			tid_stats->fail_cnt[INTRABSS_DROP]++;
557 			qdf_nbuf_free(nbuf_copy);
558 		} else {
559 			DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, len);
560 			tid_stats->intrabss_cnt++;
561 		}
562 	}
563 
564 end:
565 	/* return false as we have to still send the original pkt
566 	 * up the stack
567 	 */
568 	return false;
569 }
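/*
 * Summary of the intra-BSS decision above (descriptive note added for
 * clarity): a valid unicast DA with an AST entry on the same vdev (and not
 * the bss peer) is transmitted directly and true is returned; a multicast/
 * broadcast frame from a non-bss peer is cloned, the clone is transmitted,
 * and false is returned so the original still goes up the stack.
 */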
570 
571 #ifdef MESH_MODE_SUPPORT
572 
573 /**
574  * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
575  *
576  * @vdev: DP Virtual device handle
577  * @nbuf: Buffer pointer
578  * @rx_tlv_hdr: start of rx tlv header
579  * @peer: pointer to peer
580  *
581  * This function allocates memory for mesh receive stats and fills the
582  * required stats. It stores the memory address in the skb cb.
583  *
584  * Return: void
585  */
586 
587 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
588 				uint8_t *rx_tlv_hdr, struct dp_peer *peer)
589 {
590 	struct mesh_recv_hdr_s *rx_info = NULL;
591 	uint32_t pkt_type;
592 	uint32_t nss;
593 	uint32_t rate_mcs;
594 	uint32_t bw;
595 	uint8_t primary_chan_num;
596 	uint32_t center_chan_freq;
597 	struct dp_soc *soc;
598 
599 	/* fill recv mesh stats */
600 	rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));
601 
602 	/* upper layers are responsible for freeing this memory */
603 
604 	if (!rx_info) {
605 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
606 			"Memory allocation failed for mesh rx stats");
607 		DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
608 		return;
609 	}
610 
611 	rx_info->rs_flags = MESH_RXHDR_VER1;
612 	if (qdf_nbuf_is_rx_chfrag_start(nbuf))
613 		rx_info->rs_flags |= MESH_RX_FIRST_MSDU;
614 
615 	if (qdf_nbuf_is_rx_chfrag_end(nbuf))
616 		rx_info->rs_flags |= MESH_RX_LAST_MSDU;
617 
618 	if (hal_rx_attn_msdu_get_is_decrypted(rx_tlv_hdr)) {
619 		rx_info->rs_flags |= MESH_RX_DECRYPTED;
620 		rx_info->rs_keyix = hal_rx_msdu_get_keyid(rx_tlv_hdr);
621 		if (vdev->osif_get_key)
622 			vdev->osif_get_key(vdev->osif_vdev,
623 					&rx_info->rs_decryptkey[0],
624 					&peer->mac_addr.raw[0],
625 					rx_info->rs_keyix);
626 	}
627 
628 	rx_info->rs_rssi = hal_rx_msdu_start_get_rssi(rx_tlv_hdr);
629 
630 	soc = vdev->pdev->soc;
631 	primary_chan_num = hal_rx_msdu_start_get_freq(rx_tlv_hdr);
632 	center_chan_freq = hal_rx_msdu_start_get_freq(rx_tlv_hdr) >> 16;
633 
634 	if (soc->cdp_soc.ol_ops && soc->cdp_soc.ol_ops->freq_to_band) {
635 		rx_info->rs_band = soc->cdp_soc.ol_ops->freq_to_band(
636 							soc->ctrl_psoc,
637 							vdev->pdev->pdev_id,
638 							center_chan_freq);
639 	}
640 	rx_info->rs_channel = primary_chan_num;
641 	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
642 	rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
643 	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
644 	nss = hal_rx_msdu_start_nss_get(vdev->pdev->soc->hal_soc, rx_tlv_hdr);
645 	rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
646 				(bw << 24);
647 
648 	qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);
649 
650 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
651 		FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x"),
652 						rx_info->rs_flags,
653 						rx_info->rs_rssi,
654 						rx_info->rs_channel,
655 						rx_info->rs_ratephy1,
656 						rx_info->rs_keyix);
657 
658 }
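/*
 * Layout note (derived from the packing above): rs_ratephy1 carries
 * rate_mcs in bits [7:0], nss in bits [15:8], pkt_type in bits [23:16]
 * and bw in bits [31:24]. For example, MCS 7 with 2 spatial streams would
 * be encoded as (7 | (2 << 8) | (pkt_type << 16) | (bw << 24)), with the
 * pkt_type/bw values as defined by the HAL headers.
 */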
659 
660 /**
661  * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
662  *
663  * @vdev: DP Virtual device handle
664  * @nbuf: Buffer pointer
665  * @rx_tlv_hdr: start of rx tlv header
666  *
667  * This checks whether the received packet matches any filter-out
668  * category and drops the packet if it matches.
669  *
670  * Return: status (0 indicates drop, 1 indicates no drop)
671  */
672 
673 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
674 					uint8_t *rx_tlv_hdr)
675 {
676 	union dp_align_mac_addr mac_addr;
677 	struct dp_soc *soc = vdev->pdev->soc;
678 
679 	if (qdf_unlikely(vdev->mesh_rx_filter)) {
680 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
681 			if (hal_rx_mpdu_get_fr_ds(soc->hal_soc,
682 						  rx_tlv_hdr))
683 				return  QDF_STATUS_SUCCESS;
684 
685 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
686 			if (hal_rx_mpdu_get_to_ds(soc->hal_soc,
687 						  rx_tlv_hdr))
688 				return  QDF_STATUS_SUCCESS;
689 
690 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
691 			if (!hal_rx_mpdu_get_fr_ds(soc->hal_soc,
692 						   rx_tlv_hdr) &&
693 			    !hal_rx_mpdu_get_to_ds(soc->hal_soc,
694 						   rx_tlv_hdr))
695 				return  QDF_STATUS_SUCCESS;
696 
697 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
698 			if (hal_rx_mpdu_get_addr1(soc->hal_soc,
699 						  rx_tlv_hdr,
700 					&mac_addr.raw[0]))
701 				return QDF_STATUS_E_FAILURE;
702 
703 			if (!qdf_mem_cmp(&mac_addr.raw[0],
704 					&vdev->mac_addr.raw[0],
705 					QDF_MAC_ADDR_SIZE))
706 				return  QDF_STATUS_SUCCESS;
707 		}
708 
709 		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
710 			if (hal_rx_mpdu_get_addr2(soc->hal_soc,
711 						  rx_tlv_hdr,
712 						  &mac_addr.raw[0]))
713 				return QDF_STATUS_E_FAILURE;
714 
715 			if (!qdf_mem_cmp(&mac_addr.raw[0],
716 					&vdev->mac_addr.raw[0],
717 					QDF_MAC_ADDR_SIZE))
718 				return  QDF_STATUS_SUCCESS;
719 		}
720 	}
721 
722 	return QDF_STATUS_E_FAILURE;
723 }
724 
725 #else
726 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
727 				uint8_t *rx_tlv_hdr, struct dp_peer *peer)
728 {
729 }
730 
731 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
732 					uint8_t *rx_tlv_hdr)
733 {
734 	return QDF_STATUS_E_FAILURE;
735 }
736 
737 #endif
738 
739 #ifdef FEATURE_NAC_RSSI
740 /**
741  * dp_rx_nac_filter(): Function to perform filtering of non-associated
742  * clients
743  * @pdev: DP pdev handle
744  * @rx_pkt_hdr: Rx packet Header
745  *
746  * return: dp_vdev*
747  */
748 static
749 struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
750 		uint8_t *rx_pkt_hdr)
751 {
752 	struct ieee80211_frame *wh;
753 	struct dp_neighbour_peer *peer = NULL;
754 
755 	wh = (struct ieee80211_frame *)rx_pkt_hdr;
756 
757 	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
758 		return NULL;
759 
760 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
761 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
762 				neighbour_peer_list_elem) {
763 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
764 				wh->i_addr2, QDF_MAC_ADDR_SIZE) == 0) {
765 			QDF_TRACE(
766 				QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
767 				FL("NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x"),
768 				peer->neighbour_peers_macaddr.raw[0],
769 				peer->neighbour_peers_macaddr.raw[1],
770 				peer->neighbour_peers_macaddr.raw[2],
771 				peer->neighbour_peers_macaddr.raw[3],
772 				peer->neighbour_peers_macaddr.raw[4],
773 				peer->neighbour_peers_macaddr.raw[5]);
774 
775 				qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
776 
777 			return pdev->monitor_vdev;
778 		}
779 	}
780 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
781 
782 	return NULL;
783 }
784 
785 /**
786  * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
787  * @soc: DP SOC handle
788  * @mpdu: mpdu for which peer is invalid
789  * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
790  * pool_id have the same mapping)
791  *
792  * return: integer type
793  */
794 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
795 				   uint8_t mac_id)
796 {
797 	struct dp_invalid_peer_msg msg;
798 	struct dp_vdev *vdev = NULL;
799 	struct dp_pdev *pdev = NULL;
800 	struct ieee80211_frame *wh;
801 	qdf_nbuf_t curr_nbuf, next_nbuf;
802 	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
803 	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
804 
805 	rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
806 
807 	if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) {
808 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
809 			  "Drop decapped frames");
810 		goto free;
811 	}
812 
813 	wh = (struct ieee80211_frame *)rx_pkt_hdr;
814 
815 	if (!DP_FRAME_IS_DATA(wh)) {
816 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
817 			  "NAWDS valid only for data frames");
818 		goto free;
819 	}
820 
821 	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
822 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
823 			"Invalid nbuf length");
824 		goto free;
825 	}
826 
827 	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
828 
829 	if (!pdev || qdf_unlikely(pdev->is_pdev_down)) {
830 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
831 			  "PDEV %s", !pdev ? "not found" : "down");
832 		goto free;
833 	}
834 
835 	if (pdev->filter_neighbour_peers) {
836 		/* Next Hop scenario not yet handled */
837 		vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
838 		if (vdev) {
839 			dp_rx_mon_deliver(soc, pdev->pdev_id,
840 					  pdev->invalid_peer_head_msdu,
841 					  pdev->invalid_peer_tail_msdu);
842 
843 			pdev->invalid_peer_head_msdu = NULL;
844 			pdev->invalid_peer_tail_msdu = NULL;
845 
846 			return 0;
847 		}
848 	}
849 
850 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
851 
852 		if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
853 				QDF_MAC_ADDR_SIZE) == 0) {
854 			goto out;
855 		}
856 	}
857 
858 	if (!vdev) {
859 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
860 			"VDEV not found");
861 		goto free;
862 	}
863 
864 out:
865 	msg.wh = wh;
866 	qdf_nbuf_pull_head(mpdu, RX_PKT_TLVS_LEN);
867 	msg.nbuf = mpdu;
868 	msg.vdev_id = vdev->vdev_id;
869 	if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer)
870 		pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(
871 				(struct cdp_ctrl_objmgr_psoc *)soc->ctrl_psoc,
872 				pdev->pdev_id, &msg);
873 
874 free:
875 	/* Drop and free packet */
876 	curr_nbuf = mpdu;
877 	while (curr_nbuf) {
878 		next_nbuf = qdf_nbuf_next(curr_nbuf);
879 		qdf_nbuf_free(curr_nbuf);
880 		curr_nbuf = next_nbuf;
881 	}
882 
883 	return 0;
884 }
885 
886 /**
887  * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
888  * @soc: DP SOC handle
889  * @mpdu: mpdu for which peer is invalid
890  * @mpdu_done: if an mpdu is completed
891  * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
892  * pool_id have the same mapping)
893  *
894  * return: integer type
895  */
896 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
897 					qdf_nbuf_t mpdu, bool mpdu_done,
898 					uint8_t mac_id)
899 {
900 	/* Only trigger the process when mpdu is completed */
901 	if (mpdu_done)
902 		dp_rx_process_invalid_peer(soc, mpdu, mac_id);
903 }
904 #else
905 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
906 				   uint8_t mac_id)
907 {
908 	qdf_nbuf_t curr_nbuf, next_nbuf;
909 	struct dp_pdev *pdev;
910 	struct dp_vdev *vdev = NULL;
911 	struct ieee80211_frame *wh;
912 	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
913 	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
914 
915 	wh = (struct ieee80211_frame *)rx_pkt_hdr;
916 
917 	if (!DP_FRAME_IS_DATA(wh)) {
918 		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
919 				   "only for data frames");
920 		goto free;
921 	}
922 
923 	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
924 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
925 			  "Invalid nbuf length");
926 		goto free;
927 	}
928 
929 	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
930 	if (!pdev) {
931 		QDF_TRACE(QDF_MODULE_ID_DP,
932 			  QDF_TRACE_LEVEL_ERROR,
933 			  "PDEV not found");
934 		goto free;
935 	}
936 
937 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
938 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
939 		if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
940 				QDF_MAC_ADDR_SIZE) == 0) {
941 			qdf_spin_unlock_bh(&pdev->vdev_list_lock);
942 			goto out;
943 		}
944 	}
945 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
946 
947 	if (!vdev) {
948 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
949 			  "VDEV not found");
950 		goto free;
951 	}
952 
953 out:
954 	if (soc->cdp_soc.ol_ops->rx_invalid_peer)
955 		soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh);
956 free:
957 	/* reset the head and tail pointers */
958 	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
959 	if (pdev) {
960 		pdev->invalid_peer_head_msdu = NULL;
961 		pdev->invalid_peer_tail_msdu = NULL;
962 	}
963 
964 	/* Drop and free packet */
965 	curr_nbuf = mpdu;
966 	while (curr_nbuf) {
967 		next_nbuf = qdf_nbuf_next(curr_nbuf);
968 		qdf_nbuf_free(curr_nbuf);
969 		curr_nbuf = next_nbuf;
970 	}
971 
972 	/* Reset the head and tail pointers */
973 	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
974 	if (pdev) {
975 		pdev->invalid_peer_head_msdu = NULL;
976 		pdev->invalid_peer_tail_msdu = NULL;
977 	}
978 
979 	return 0;
980 }
981 
982 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
983 					qdf_nbuf_t mpdu, bool mpdu_done,
984 					uint8_t mac_id)
985 {
986 	/* Process the nbuf */
987 	dp_rx_process_invalid_peer(soc, mpdu, mac_id);
988 }
989 #endif
990 
991 #ifdef RECEIVE_OFFLOAD
992 /**
993  * dp_rx_print_offload_info() - Print offload info from RX TLV
994  * @soc: dp soc handle
995  * @rx_tlv: RX TLV for which offload information is to be printed
996  *
997  * Return: None
998  */
999 static void dp_rx_print_offload_info(struct dp_soc *soc, uint8_t *rx_tlv)
1000 {
1001 	dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------");
1002 	dp_verbose_debug("lro_eligible 0x%x", HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv));
1003 	dp_verbose_debug("pure_ack 0x%x", HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv));
1004 	dp_verbose_debug("chksum 0x%x", hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
1005 								  rx_tlv));
1006 	dp_verbose_debug("TCP seq num 0x%x", HAL_RX_TLV_GET_TCP_SEQ(rx_tlv));
1007 	dp_verbose_debug("TCP ack num 0x%x", HAL_RX_TLV_GET_TCP_ACK(rx_tlv));
1008 	dp_verbose_debug("TCP window 0x%x", HAL_RX_TLV_GET_TCP_WIN(rx_tlv));
1009 	dp_verbose_debug("TCP protocol 0x%x", HAL_RX_TLV_GET_TCP_PROTO(rx_tlv));
1010 	dp_verbose_debug("TCP offset 0x%x", HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv));
1011 	dp_verbose_debug("toeplitz 0x%x", HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv));
1012 	dp_verbose_debug("---------------------------------------------------------");
1013 }
1014 
1015 /**
1016  * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb
1017  * @soc: DP SOC handle
1018  * @rx_tlv: RX TLV received for the msdu
1019  * @msdu: msdu for which GRO info needs to be filled
1020  * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets
1021  *
1022  * Return: None
1023  */
1024 static
1025 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
1026 			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
1027 {
1028 	if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx))
1029 		return;
1030 
1031 	/* Filling up RX offload info only for TCP packets */
1032 	if (!HAL_RX_TLV_GET_TCP_PROTO(rx_tlv))
1033 		return;
1034 
1035 	*rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1;
1036 
1037 	QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) =
1038 		 HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv);
1039 	QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) =
1040 			HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv);
1041 	QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
1042 			hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
1043 						  rx_tlv);
1044 	QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) =
1045 			 HAL_RX_TLV_GET_TCP_SEQ(rx_tlv);
1046 	QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) =
1047 			 HAL_RX_TLV_GET_TCP_ACK(rx_tlv);
1048 	QDF_NBUF_CB_RX_TCP_WIN(msdu) =
1049 			 HAL_RX_TLV_GET_TCP_WIN(rx_tlv);
1050 	QDF_NBUF_CB_RX_TCP_PROTO(msdu) =
1051 			 HAL_RX_TLV_GET_TCP_PROTO(rx_tlv);
1052 	QDF_NBUF_CB_RX_IPV6_PROTO(msdu) =
1053 			 HAL_RX_TLV_GET_IPV6(rx_tlv);
1054 	QDF_NBUF_CB_RX_TCP_OFFSET(msdu) =
1055 			 HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv);
1056 	QDF_NBUF_CB_RX_FLOW_ID(msdu) =
1057 			 HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv);
1058 
1059 	dp_rx_print_offload_info(soc, rx_tlv);
1060 }
1061 #else
1062 static void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
1063 				qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
1064 {
1065 }
1066 #endif /* RECEIVE_OFFLOAD */
1067 
1068 /**
1069  * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
1070  *
1071  * @nbuf: pointer to msdu.
1072  * @mpdu_len: mpdu length
1073  *
1074  * Return: returns true if nbuf is the last msdu of the mpdu, else returns false.
1075  */
1076 static inline bool dp_rx_adjust_nbuf_len(qdf_nbuf_t nbuf, uint16_t *mpdu_len)
1077 {
1078 	bool last_nbuf;
1079 
1080 	if (*mpdu_len > (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN)) {
1081 		qdf_nbuf_set_pktlen(nbuf, RX_DATA_BUFFER_SIZE);
1082 		last_nbuf = false;
1083 	} else {
1084 		qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + RX_PKT_TLVS_LEN));
1085 		last_nbuf = true;
1086 	}
1087 
1088 	*mpdu_len -= (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN);
1089 
1090 	return last_nbuf;
1091 }
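/*
 * Worked example (illustrative; the actual RX_DATA_BUFFER_SIZE and
 * RX_PKT_TLVS_LEN values are build-time configuration): with a 2048-byte
 * rx buffer and 384 bytes of pkt TLVs, each non-last nbuf carries
 * 2048 - 384 = 1664 bytes of msdu payload. An mpdu_len of 4000 therefore
 * spans three nbufs: the first two are set to the full 2048-byte pktlen,
 * and the last is set to (4000 - 2 * 1664) + 384 = 1056 bytes, at which
 * point last_nbuf is returned as true.
 */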
1092 
1093 /**
1094  * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
1095  *		     multiple nbufs.
1096  * @nbuf: pointer to the first msdu of an amsdu.
1097  *
1098  * This function implements the creation of RX frag_list for cases
1099  * where an MSDU is spread across multiple nbufs.
1100  *
1101  * Return: returns the head nbuf which contains complete frag_list.
1102  */
1103 qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf)
1104 {
1105 	qdf_nbuf_t parent, frag_list, next = NULL;
1106 	uint16_t frag_list_len = 0;
1107 	uint16_t mpdu_len;
1108 	bool last_nbuf;
1109 
1110 	/*
1111 	 * Use the msdu len obtained from the REO entry descriptor instead,
1112 	 * since there are cases where the RX PKT TLV is corrupted while the
1113 	 * msdu_len from the REO descriptor is correct for non-raw RX scatter msdus.
1114 	 */
1115 	mpdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
1116 	/*
1117 	 * This is a case where the complete msdu fits in one single nbuf.
1118 	 * In this case HW sets both start and end bits and we only need to
1119 	 * reset these bits for the RAW mode simulator to decap the pkt.
1120 	 */
1121 	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
1122 					qdf_nbuf_is_rx_chfrag_end(nbuf)) {
1123 		qdf_nbuf_set_pktlen(nbuf, mpdu_len + RX_PKT_TLVS_LEN);
1124 		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
1125 		return nbuf;
1126 	}
1127 
1128 	/*
1129 	 * This is a case where we have multiple msdus (A-MSDU) spread across
1130 	 * multiple nbufs. here we create a fraglist out of these nbufs.
1131 	 *
1132 	 * the moment we encounter a nbuf with continuation bit set we
1133 	 * know for sure we have an MSDU which is spread across multiple
1134 	 * nbufs. We loop through and reap nbufs till we reach last nbuf.
1135 	 */
1136 	parent = nbuf;
1137 	frag_list = nbuf->next;
1138 	nbuf = nbuf->next;
1139 
1140 	/*
1141 	 * set the start bit in the first nbuf we encounter with continuation
1142 	 * bit set. This has the proper mpdu length set as it is the first
1143 	 * msdu of the mpdu. this becomes the parent nbuf and the subsequent
1144 	 * nbufs will form the frag_list of the parent nbuf.
1145 	 */
1146 	qdf_nbuf_set_rx_chfrag_start(parent, 1);
1147 	last_nbuf = dp_rx_adjust_nbuf_len(parent, &mpdu_len);
1148 
1149 	/*
1150 	 * this is where we set the length of the fragments which are
1151 	 * associated to the parent nbuf. We iterate through the frag_list
1152 	 * till we hit the last_nbuf of the list.
1153 	 */
1154 	do {
1155 		last_nbuf = dp_rx_adjust_nbuf_len(nbuf, &mpdu_len);
1156 		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
1157 		frag_list_len += qdf_nbuf_len(nbuf);
1158 
1159 		if (last_nbuf) {
1160 			next = nbuf->next;
1161 			nbuf->next = NULL;
1162 			break;
1163 		}
1164 
1165 		nbuf = nbuf->next;
1166 	} while (!last_nbuf);
1167 
1168 	qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
1169 	qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
1170 	parent->next = next;
1171 
1172 	qdf_nbuf_pull_head(parent, RX_PKT_TLVS_LEN);
1173 	return parent;
1174 }
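/*
 * Resulting layout (descriptive note): for an msdu that arrived as three
 * chained nbufs n1->n2->n3 with the continuation bit set, n1 becomes the
 * parent whose frag_list (ext list) holds n2 and n3, n3->next is cleared,
 * and parent->next is re-linked to whatever followed n3 in the original
 * chain. The pkt TLVs are pulled from every buffer so the parent starts
 * at the msdu payload.
 */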
1175 
1176 /**
1177  * dp_rx_compute_delay() - Compute and fill in all timestamps
1178  *				to pass in correct fields
1179  *
1180  * @vdev: vdev handle
1181  * @nbuf: rx packet buffer
1182  *
1183  * Return: none
1184  */
1185 void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
1186 {
1187 	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
1188 	int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get());
1189 	uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
1190 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
1191 	uint32_t interframe_delay =
1192 		(uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp);
1193 
1194 	dp_update_delay_stats(vdev->pdev, to_stack, tid,
1195 			      CDP_DELAY_STATS_REAP_STACK, ring_id);
1196 	/*
1197 	 * Update interframe delay stats calculated at deliver_data_ol point.
1198 	 * Value of vdev->prev_rx_deliver_tstamp will be 0 for 1st frame, so
1199 	 * the interframe delay will not be calculated correctly for the 1st frame.
1200 	 * On the other hand, this helps in avoiding an extra per-packet check
1201 	 * of vdev->prev_rx_deliver_tstamp.
1202 	 */
1203 	dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
1204 			      CDP_DELAY_STATS_RX_INTERFRAME, ring_id);
1205 	vdev->prev_rx_deliver_tstamp = current_ts;
1206 }
1207 
1208 /**
1209  * dp_rx_drop_nbuf_list() - drop an nbuf list
1210  * @pdev: dp pdev reference
1211  * @buf_list: buffer list to be dropped
1212  *
1213  * Return: int (number of bufs dropped)
1214  */
1215 static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev,
1216 				       qdf_nbuf_t buf_list)
1217 {
1218 	struct cdp_tid_rx_stats *stats = NULL;
1219 	uint8_t tid = 0, ring_id = 0;
1220 	int num_dropped = 0;
1221 	qdf_nbuf_t buf, next_buf;
1222 
1223 	buf = buf_list;
1224 	while (buf) {
1225 		ring_id = QDF_NBUF_CB_RX_CTX_ID(buf);
1226 		next_buf = qdf_nbuf_queue_next(buf);
1227 		tid = qdf_nbuf_get_tid_val(buf);
1228 		if (qdf_likely(pdev)) {
1229 			stats = &pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
1230 			stats->fail_cnt[INVALID_PEER_VDEV]++;
1231 			stats->delivered_to_stack--;
1232 		}
1233 		qdf_nbuf_free(buf);
1234 		buf = next_buf;
1235 		num_dropped++;
1236 	}
1237 
1238 	return num_dropped;
1239 }
1240 
1241 #ifdef PEER_CACHE_RX_PKTS
1242 /**
1243  * dp_rx_flush_rx_cached() - flush cached rx frames
1244  * @peer: peer
1245  * @drop: flag to drop frames or forward to net stack
1246  *
1247  * Return: None
1248  */
1249 void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
1250 {
1251 	struct dp_peer_cached_bufq *bufqi;
1252 	struct dp_rx_cached_buf *cache_buf = NULL;
1253 	ol_txrx_rx_fp data_rx = NULL;
1254 	int num_buff_elem;
1255 	QDF_STATUS status;
1256 
1257 	if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
1258 		qdf_atomic_dec(&peer->flush_in_progress);
1259 		return;
1260 	}
1261 
1262 	qdf_spin_lock_bh(&peer->peer_info_lock);
1263 	if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx)
1264 		data_rx = peer->vdev->osif_rx;
1265 	else
1266 		drop = true;
1267 	qdf_spin_unlock_bh(&peer->peer_info_lock);
1268 
1269 	bufqi = &peer->bufq_info;
1270 
1271 	qdf_spin_lock_bh(&bufqi->bufq_lock);
1272 	qdf_list_remove_front(&bufqi->cached_bufq,
1273 			      (qdf_list_node_t **)&cache_buf);
1274 	while (cache_buf) {
1275 		num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(
1276 								cache_buf->buf);
1277 		bufqi->entries -= num_buff_elem;
1278 		qdf_spin_unlock_bh(&bufqi->bufq_lock);
1279 		if (drop) {
1280 			bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1281 							      cache_buf->buf);
1282 		} else {
1283 			/* Flush the cached frames to OSIF DEV */
1284 			status = data_rx(peer->vdev->osif_vdev, cache_buf->buf);
1285 			if (status != QDF_STATUS_SUCCESS)
1286 				bufqi->dropped = dp_rx_drop_nbuf_list(
1287 							peer->vdev->pdev,
1288 							cache_buf->buf);
1289 		}
1290 		qdf_mem_free(cache_buf);
1291 		cache_buf = NULL;
1292 		qdf_spin_lock_bh(&bufqi->bufq_lock);
1293 		qdf_list_remove_front(&bufqi->cached_bufq,
1294 				      (qdf_list_node_t **)&cache_buf);
1295 	}
1296 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
1297 	qdf_atomic_dec(&peer->flush_in_progress);
1298 }
1299 
1300 /**
1301  * dp_rx_enqueue_rx() - cache rx frames
1302  * @peer: peer
1303  * @rx_buf_list: cache buffer list
1304  *
1305  * Return: QDF_STATUS
1306  */
1307 static QDF_STATUS
1308 dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
1309 {
1310 	struct dp_rx_cached_buf *cache_buf;
1311 	struct dp_peer_cached_bufq *bufqi = &peer->bufq_info;
1312 	int num_buff_elem;
1313 
1314 	dp_debug_rl("bufq->curr %d bufq->drops %d", bufqi->entries,
1315 		    bufqi->dropped);
1316 	if (!peer->valid) {
1317 		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1318 						      rx_buf_list);
1319 		return QDF_STATUS_E_INVAL;
1320 	}
1321 
1322 	qdf_spin_lock_bh(&bufqi->bufq_lock);
1323 	if (bufqi->entries >= bufqi->thresh) {
1324 		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1325 						      rx_buf_list);
1326 		qdf_spin_unlock_bh(&bufqi->bufq_lock);
1327 		return QDF_STATUS_E_RESOURCES;
1328 	}
1329 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
1330 
1331 	num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list);
1332 
1333 	cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf));
1334 	if (!cache_buf) {
1335 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1336 			  "Failed to allocate buf to cache rx frames");
1337 		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
1338 						      rx_buf_list);
1339 		return QDF_STATUS_E_NOMEM;
1340 	}
1341 
1342 	cache_buf->buf = rx_buf_list;
1343 
1344 	qdf_spin_lock_bh(&bufqi->bufq_lock);
1345 	qdf_list_insert_back(&bufqi->cached_bufq,
1346 			     &cache_buf->node);
1347 	bufqi->entries += num_buff_elem;
1348 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
1349 
1350 	return QDF_STATUS_SUCCESS;
1351 }
1352 
1353 static inline
1354 bool dp_rx_is_peer_cache_bufq_supported(void)
1355 {
1356 	return true;
1357 }
1358 #else
1359 static inline
1360 bool dp_rx_is_peer_cache_bufq_supported(void)
1361 {
1362 	return false;
1363 }
1364 
1365 static inline QDF_STATUS
1366 dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
1367 {
1368 	return QDF_STATUS_SUCCESS;
1369 }
1370 #endif
1371 
1372 void dp_rx_deliver_to_stack(struct dp_soc *soc,
1373 			    struct dp_vdev *vdev,
1374 			    struct dp_peer *peer,
1375 			    qdf_nbuf_t nbuf_head,
1376 			    qdf_nbuf_t nbuf_tail)
1377 {
1378 	int num_nbuf = 0;
1379 
1380 	if (qdf_unlikely(!vdev || vdev->delete.pending)) {
1381 		num_nbuf = dp_rx_drop_nbuf_list(NULL, nbuf_head);
1382 		/*
1383 		 * This is a special case where vdev is invalid,
1384 		 * so we cannot know the pdev to which this packet
1385 		 * belonged. Hence we update the soc rx error stats.
1386 		 */
1387 		DP_STATS_INC(soc, rx.err.invalid_vdev, num_nbuf);
1388 		return;
1389 	}
1390 
1391 	/*
1392 	 * It is highly unlikely to have a vdev without a registered rx
1393 	 * callback function. If so, free the nbuf_list.
1394 	 */
1395 	if (qdf_unlikely(!vdev->osif_rx)) {
1396 		if (peer && dp_rx_is_peer_cache_bufq_supported()) {
1397 			dp_rx_enqueue_rx(peer, nbuf_head);
1398 		} else {
1399 			num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev,
1400 							nbuf_head);
1401 			DP_STATS_DEC(peer, rx.to_stack.num, num_nbuf);
1402 		}
1403 		return;
1404 	}
1405 
1406 	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
1407 			(vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
1408 		vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
1409 				&nbuf_tail, peer->mac_addr.raw);
1410 	}
1411 
1412 	/* Function pointer initialized only when FISA is enabled */
1413 	if (vdev->osif_fisa_rx)
1414 		/* on failure send it via regular path */
1415 		vdev->osif_fisa_rx(soc, vdev, nbuf_head);
1416 	else
1417 		vdev->osif_rx(vdev->osif_vdev, nbuf_head);
1418 }
1419 
1420 /**
1421  * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
1422  * @nbuf: pointer to the first msdu of an amsdu.
1423  * @rx_tlv_hdr: pointer to the start of RX TLV headers.
1424  *
1425  * The ipsumed field of the skb is set based on whether HW validated the
1426  * IP/TCP/UDP checksum.
1427  *
1428  * Return: void
1429  */
1430 static inline void dp_rx_cksum_offload(struct dp_pdev *pdev,
1431 				       qdf_nbuf_t nbuf,
1432 				       uint8_t *rx_tlv_hdr)
1433 {
1434 	qdf_nbuf_rx_cksum_t cksum = {0};
1435 	bool ip_csum_err = hal_rx_attn_ip_cksum_fail_get(rx_tlv_hdr);
1436 	bool tcp_udp_csum_er = hal_rx_attn_tcp_udp_cksum_fail_get(rx_tlv_hdr);
1437 
1438 	if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) {
1439 		cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
1440 		qdf_nbuf_set_rx_cksum(nbuf, &cksum);
1441 	} else {
1442 		DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err);
1443 		DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er);
1444 	}
1445 }
1446 
1447 #ifdef VDEV_PEER_PROTOCOL_COUNT
1448 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, peer) \
1449 { \
1450 	qdf_nbuf_t nbuf_local; \
1451 	struct dp_peer *peer_local; \
1452 	struct dp_vdev *vdev_local = vdev_hdl; \
1453 	do { \
1454 		if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \
1455 			break; \
1456 		nbuf_local = nbuf; \
1457 		peer_local = peer; \
1458 		if (qdf_unlikely(qdf_nbuf_is_frag((nbuf_local)))) \
1459 			break; \
1460 		else if (qdf_unlikely(qdf_nbuf_is_raw_frame((nbuf_local)))) \
1461 			break; \
1462 		dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \
1463 						       (nbuf_local), \
1464 						       (peer_local), 0, 1); \
1465 	} while (0); \
1466 }
1467 #else
1468 #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, peer)
1469 #endif
1470 
1471 /**
1472  * dp_rx_msdu_stats_update() - update per msdu stats.
1473  * @soc: core txrx main context
1474  * @nbuf: pointer to the first msdu of an amsdu.
1475  * @rx_tlv_hdr: pointer to the start of RX TLV headers.
1476  * @peer: pointer to the peer object.
1477  * @ring_id: reo dest ring number on which pkt is reaped.
1478  * @tid_stats: per tid rx stats.
1479  *
1480  * update all the per msdu stats for that nbuf.
1481  * Return: void
1482  */
1483 static void dp_rx_msdu_stats_update(struct dp_soc *soc,
1484 				    qdf_nbuf_t nbuf,
1485 				    uint8_t *rx_tlv_hdr,
1486 				    struct dp_peer *peer,
1487 				    uint8_t ring_id,
1488 				    struct cdp_tid_rx_stats *tid_stats)
1489 {
1490 	bool is_ampdu, is_not_amsdu;
1491 	uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
1492 	struct dp_vdev *vdev = peer->vdev;
1493 	qdf_ether_header_t *eh;
1494 	uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
1495 
1496 	dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, peer);
1497 	is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
1498 			qdf_nbuf_is_rx_chfrag_end(nbuf);
1499 
1500 	DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1, msdu_len);
1501 	DP_STATS_INCC(peer, rx.non_amsdu_cnt, 1, is_not_amsdu);
1502 	DP_STATS_INCC(peer, rx.amsdu_cnt, 1, !is_not_amsdu);
1503 	DP_STATS_INCC(peer, rx.rx_retries, 1, qdf_nbuf_is_rx_retry_flag(nbuf));
1504 
1505 	tid_stats->msdu_cnt++;
1506 	if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) &&
1507 			 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
1508 		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
1509 		DP_STATS_INC_PKT(peer, rx.multicast, 1, msdu_len);
1510 		tid_stats->mcast_msdu_cnt++;
1511 		if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
1512 			DP_STATS_INC_PKT(peer, rx.bcast, 1, msdu_len);
1513 			tid_stats->bcast_msdu_cnt++;
1514 		}
1515 	}
1516 
1517 	/*
1518 	 * Currently we can return from here, as similar stats are
1519 	 * updated at the per-ppdu level instead of the msdu level.
1520 	 */
1521 	if (!soc->process_rx_status)
1522 		return;
1523 
1524 	is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(rx_tlv_hdr);
1525 	DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu);
1526 	DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu));
1527 
1528 	sgi = hal_rx_msdu_start_sgi_get(rx_tlv_hdr);
1529 	mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
1530 	tid = qdf_nbuf_get_tid_val(nbuf);
1531 	bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
1532 	reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc,
1533 							      rx_tlv_hdr);
1534 	nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
1535 	pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
1536 
1537 	DP_STATS_INC(peer, rx.bw[bw], 1);
1538 	/*
1539 	 * only if nss > 0 and pkt_type is 11N/AC/AX,
1540 	 * then increase index [nss - 1] in array counter.
1541 	 */
1542 	if (nss > 0 && (pkt_type == DOT11_N ||
1543 			pkt_type == DOT11_AC ||
1544 			pkt_type == DOT11_AX))
1545 		DP_STATS_INC(peer, rx.nss[nss - 1], 1);
1546 
1547 	DP_STATS_INC(peer, rx.sgi_count[sgi], 1);
1548 	DP_STATS_INCC(peer, rx.err.mic_err, 1,
1549 		      hal_rx_mpdu_end_mic_err_get(rx_tlv_hdr));
1550 	DP_STATS_INCC(peer, rx.err.decrypt_err, 1,
1551 		      hal_rx_mpdu_end_decrypt_err_get(rx_tlv_hdr));
1552 
1553 	DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
1554 	DP_STATS_INC(peer, rx.reception_type[reception_type], 1);
1555 
1556 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1557 		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
1558 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1559 		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A)));
1560 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1561 		      ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
1562 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1563 		      ((mcs <= MAX_MCS_11B) && (pkt_type == DOT11_B)));
1564 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1565 		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
1566 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1567 		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_N)));
1568 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1569 		      ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
1570 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1571 		      ((mcs <= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
1572 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
1573 		      ((mcs >= MAX_MCS) && (pkt_type == DOT11_AX)));
1574 	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
1575 		      ((mcs < MAX_MCS) && (pkt_type == DOT11_AX)));
1576 
1577 	if ((soc->process_rx_status) &&
1578 	    hal_rx_attn_first_mpdu_get(rx_tlv_hdr)) {
1579 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
1580 		if (!vdev->pdev)
1581 			return;
1582 
1583 		dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
1584 				     &peer->stats, peer->peer_ids[0],
1585 				     UPDATE_PEER_STATS,
1586 				     vdev->pdev->pdev_id);
1587 #endif
1588 
1589 	}
1590 }
1591 
1592 static inline bool is_sa_da_idx_valid(struct dp_soc *soc,
1593 				      uint8_t *rx_tlv_hdr,
1594 				      qdf_nbuf_t nbuf,
1595 				      struct hal_rx_msdu_metadata msdu_info)
1596 {
1597 	if ((qdf_nbuf_is_sa_valid(nbuf) &&
1598 	    (msdu_info.sa_idx > wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) ||
1599 	    (!qdf_nbuf_is_da_mcbc(nbuf) &&
1600 	     qdf_nbuf_is_da_valid(nbuf) &&
1601 	     (msdu_info.da_idx > wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))))
1602 		return false;
1603 
1604 	return true;
1605 }
1606 
1607 #ifndef WDS_VENDOR_EXTENSION
1608 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
1609 			   struct dp_vdev *vdev,
1610 			   struct dp_peer *peer)
1611 {
1612 	return 1;
1613 }
1614 #endif
1615 
1616 #ifdef RX_DESC_DEBUG_CHECK
1617 /**
1618  * dp_rx_desc_nbuf_sanity_check - Add sanity check to catch REO rx_desc paddr
1619  *				  corruption
1620  *
1621  * @ring_desc: REO ring descriptor
1622  * @rx_desc: Rx descriptor
1623  *
1624  * Return: NONE
1625  */
1626 static inline
1627 void dp_rx_desc_nbuf_sanity_check(hal_ring_desc_t ring_desc,
1628 				  struct dp_rx_desc *rx_desc)
1629 {
1630 	struct hal_buf_info hbi;
1631 
1632 	hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
1633 	/* Sanity check for possible buffer paddr corruption */
1634 	qdf_assert_always((&hbi)->paddr ==
1635 			  qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0));
1636 }
1637 #else
1638 static inline
1639 void dp_rx_desc_nbuf_sanity_check(hal_ring_desc_t ring_desc,
1640 				  struct dp_rx_desc *rx_desc)
1641 {
1642 }
1643 #endif
1644 
1645 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
1646 static inline
1647 bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
1648 {
1649 	bool limit_hit = false;
1650 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
1651 
1652 	limit_hit =
1653 		(num_reaped >= cfg->rx_reap_loop_pkt_limit) ? true : false;
1654 
1655 	if (limit_hit)
1656 		DP_STATS_INC(soc, rx.reap_loop_pkt_limit_hit, 1)
1657 
1658 	return limit_hit;
1659 }
1660 
1661 static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
1662 {
1663 	return soc->wlan_cfg_ctx->rx_enable_eol_data_check;
1664 }
1665 
1666 #else
1667 static inline
1668 bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
1669 {
1670 	return false;
1671 }
1672 
1673 static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
1674 {
1675 	return false;
1676 }
1677 
1678 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
1679 
1680 #ifdef DP_RX_PKT_NO_PEER_DELIVER
1681 /**
1682  * dp_rx_deliver_to_stack_no_peer() - try to deliver rx data even if
1683  *				      no corresponding peer is found
1684  * @soc: core txrx main context
1685  * @nbuf: pkt skb pointer
1686  *
1687  * This function will try to deliver some RX special frames to the stack
1688  * even when no matching peer is found; for instance, in the LFR case, some
1689  * eapol data will be sent to the host before peer_map is done.
1690  *
1691  * Return: None
1692  */
1693 static
1694 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
1695 {
1696 	uint16_t peer_id;
1697 	uint8_t vdev_id;
1698 	struct dp_vdev *vdev;
1699 	uint32_t l2_hdr_offset = 0;
1700 	uint16_t msdu_len = 0;
1701 	uint32_t pkt_len = 0;
1702 	uint8_t *rx_tlv_hdr;
1703 	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
1704 				FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;
1705 
1706 	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
1707 	if (peer_id > soc->max_peers)
1708 		goto deliver_fail;
1709 
1710 	vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
1711 	vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
1712 	if (!vdev || vdev->delete.pending || !vdev->osif_rx)
1713 		goto deliver_fail;
1714 
1715 	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf)))
1716 		goto deliver_fail;
1717 
1718 	rx_tlv_hdr = qdf_nbuf_data(nbuf);
1719 	l2_hdr_offset =
1720 		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
1721 
1722 	msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
1723 	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;
1724 	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
1725 
1726 	qdf_nbuf_set_pktlen(nbuf, pkt_len);
1727 	qdf_nbuf_pull_head(nbuf,
1728 			   RX_PKT_TLVS_LEN +
1729 			   l2_hdr_offset);
1730 
1731 	if (dp_rx_is_special_frame(nbuf, frame_mask)) {
1732 		vdev->osif_rx(vdev->osif_vdev, nbuf);
1733 		DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1);
1734 		return;
1735 	}
1736 
1737 deliver_fail:
1738 	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1739 			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
1740 	qdf_nbuf_free(nbuf);
1741 }
1742 #else
1743 static inline
1744 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
1745 {
1746 	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
1747 			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
1748 	qdf_nbuf_free(nbuf);
1749 }
1750 #endif
1751 
1752 /**
1753  * dp_rx_srng_get_num_pending() - get number of pending entries
1754  * @hal_soc: hal soc opaque pointer
1755  * @hal_ring: opaque pointer to the HAL Rx Ring
1756  * @num_entries: number of entries in the hal_ring.
1757  * @near_full: pointer to a boolean. This is set if ring is near full.
1758  *
1759  * The function returns the number of entries in a destination ring which are
1760  * yet to be reaped. The function also checks if the ring is near full.
1761  * If more than half of the ring needs to be reaped, the ring is considered
1762  * approaching full.
1763  * The function uses hal_srng_dst_num_valid_locked to get the number of valid
1764  * entries. It should not be called while holding the SRNG lock. The HW head
1765  * pointer value is synced into cached_hp.
1766  *
1767  * Return: Number of pending entries if any
1768  */
1769 static
1770 uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
1771 				    hal_ring_handle_t hal_ring_hdl,
1772 				    uint32_t num_entries,
1773 				    bool *near_full)
1774 {
1775 	uint32_t num_pending = 0;
1776 
1777 	num_pending = hal_srng_dst_num_valid_locked(hal_soc,
1778 						    hal_ring_hdl,
1779 						    true);
1780 
1781 	if (num_entries && (num_pending >= num_entries >> 1))
1782 		*near_full = true;
1783 	else
1784 		*near_full = false;
1785 
1786 	return num_pending;
1787 }
1788 
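/*
 * Illustrative sketch (not part of the driver): the caller pattern for
 * dp_rx_srng_get_num_pending(), mirroring the end-of-loop check done in
 * dp_rx_process() below. example_should_reap_again() is a hypothetical
 * helper; a real caller would additionally honour hif_exec_should_yield().
 */
#if 0
static bool example_should_reap_again(hal_soc_handle_t hal_soc,
				      hal_ring_handle_t hal_ring_hdl,
				      uint32_t num_entries)
{
	bool near_full = false;
	uint32_t num_pending;

	num_pending = dp_rx_srng_get_num_pending(hal_soc, hal_ring_hdl,
						 num_entries, &near_full);

	/* anything still pending warrants another pass; a near-full ring
	 * (>= half the entries pending) is drained even when the scheduler
	 * would otherwise prefer that we yield
	 */
	return num_pending > 0 || near_full;
}
#endif
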
1789 #ifdef WLAN_SUPPORT_RX_FISA
1790 void dp_rx_skip_tlvs(qdf_nbuf_t nbuf, uint32_t l3_padding)
1791 {
1792 	QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
1793 	qdf_nbuf_pull_head(nbuf, l3_padding + RX_PKT_TLVS_LEN);
1794 }
1795 
1796 /**
1797  * dp_rx_set_hdr_pad() - set l3 padding in nbuf cb
1798  * @nbuf: pkt skb pointer
1799  * @l3_padding: l3 padding
1800  *
1801  * Return: None
1802  */
1803 static inline
1804 void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
1805 {
1806 	QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
1807 }
1808 #else
1809 void dp_rx_skip_tlvs(qdf_nbuf_t nbuf, uint32_t l3_padding)
1810 {
1811 	qdf_nbuf_pull_head(nbuf, l3_padding + RX_PKT_TLVS_LEN);
1812 }
1813 
1814 static inline
1815 void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
1816 {
1817 }
1818 #endif
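
/*
 * Illustrative sketch (not part of the driver): the intended nbuf length
 * handling when dp_rx_skip_tlvs() is used on a regular (non-frag,
 * non-scattered) 802.3 frame, as in dp_rx_process() below. The helper
 * name example_strip_rx_tlvs() is hypothetical; msdu_len and l3_pad are
 * assumed to have been read from the RX TLVs.
 */
#if 0
static void example_strip_rx_tlvs(qdf_nbuf_t nbuf, uint16_t msdu_len,
				  uint32_t l3_pad)
{
	/* total bytes present in the buffer: TLVs + L3 pad + MSDU payload */
	qdf_nbuf_set_pktlen(nbuf, msdu_len + l3_pad + RX_PKT_TLVS_LEN);

	/* advance the data pointer past the TLVs and the padding; with
	 * WLAN_SUPPORT_RX_FISA the padding is also recorded in the nbuf cb
	 */
	dp_rx_skip_tlvs(nbuf, l3_pad);
}
#endif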
1819 
1820 
1821 /**
1822  * dp_rx_process() - Brain of the Rx processing functionality
1823  *		     Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
1824  * @int_ctx: per interrupt context
1825  * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
1826  * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring.
1827  * @quota: No. of units (packets) that can be serviced in one shot.
1828  *
1829  * This function implements the core of Rx functionality. This is
1830  * expected to handle only non-error frames.
1831  *
1832  * Return: uint32_t: No. of elements processed
1833  */
1834 uint32_t dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
1835 			    uint8_t reo_ring_num, uint32_t quota)
1836 {
1837 	hal_ring_desc_t ring_desc;
1838 	hal_soc_handle_t hal_soc;
1839 	struct dp_rx_desc *rx_desc = NULL;
1840 	qdf_nbuf_t nbuf, next;
1841 	bool near_full;
1842 	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT];
1843 	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT];
1844 	uint32_t num_pending;
1845 	uint32_t rx_bufs_used = 0, rx_buf_cookie;
1846 	uint16_t msdu_len = 0;
1847 	uint16_t peer_id;
1848 	uint8_t vdev_id;
1849 	struct dp_peer *peer;
1850 	struct dp_vdev *vdev;
1851 	uint32_t pkt_len = 0;
1852 	struct hal_rx_mpdu_desc_info mpdu_desc_info;
1853 	struct hal_rx_msdu_desc_info msdu_desc_info;
1854 	enum hal_reo_error_status error;
1855 	uint32_t peer_mdata;
1856 	uint8_t *rx_tlv_hdr;
1857 	uint32_t rx_bufs_reaped[MAX_PDEV_CNT];
1858 	uint8_t mac_id = 0;
1859 	struct dp_pdev *rx_pdev;
1860 	struct dp_srng *dp_rxdma_srng;
1861 	struct rx_desc_pool *rx_desc_pool;
1862 	struct dp_soc *soc = int_ctx->soc;
1863 	uint8_t ring_id = 0;
1864 	uint8_t core_id = 0;
1865 	struct cdp_tid_rx_stats *tid_stats;
1866 	qdf_nbuf_t nbuf_head;
1867 	qdf_nbuf_t nbuf_tail;
1868 	qdf_nbuf_t deliver_list_head;
1869 	qdf_nbuf_t deliver_list_tail;
1870 	uint32_t num_rx_bufs_reaped = 0;
1871 	uint32_t intr_id;
1872 	struct hif_opaque_softc *scn;
1873 	int32_t tid = 0;
1874 	bool is_prev_msdu_last = true;
1875 	uint32_t num_entries_avail = 0;
1876 	uint32_t rx_ol_pkt_cnt = 0;
1877 	uint32_t num_entries = 0;
1878 	struct hal_rx_msdu_metadata msdu_metadata;
1879 	QDF_STATUS status;
1880 
1881 	DP_HIST_INIT();
1882 
1883 	qdf_assert_always(soc && hal_ring_hdl);
1884 	hal_soc = soc->hal_soc;
1885 	qdf_assert_always(hal_soc);
1886 
1887 	scn = soc->hif_handle;
1888 	hif_pm_runtime_mark_dp_rx_busy(scn);
1889 	intr_id = int_ctx->dp_intr_id;
1890 	num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);
1891 
1892 more_data:
1893 	/* reset local variables here to be re-used in the function */
1894 	nbuf_head = NULL;
1895 	nbuf_tail = NULL;
1896 	deliver_list_head = NULL;
1897 	deliver_list_tail = NULL;
1898 	peer = NULL;
1899 	vdev = NULL;
1900 	num_rx_bufs_reaped = 0;
1901 
1902 	qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
1903 	qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
1904 	qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
1905 	qdf_mem_zero(head, sizeof(head));
1906 	qdf_mem_zero(tail, sizeof(tail));
1907 
1908 	if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
1909 
1910 		/*
1911 		 * Need API to convert from hal_ring pointer to
1912 		 * Ring Type / Ring Id combo
1913 		 */
1914 		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
1915 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1916 			FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
1917 		goto done;
1918 	}
1919 
1920 	/*
1921 	 * start reaping the buffers from reo ring and queue
1922 	 * them in per vdev queue.
1923 	 * Process the received pkts in a different per vdev loop.
1924 	 */
1925 	while (qdf_likely(quota &&
1926 			  (ring_desc = hal_srng_dst_peek(hal_soc,
1927 							 hal_ring_hdl)))) {
1928 
1929 		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
1930 		ring_id = hal_srng_ring_id_get(hal_ring_hdl);
1931 
1932 		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
1933 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1934 			FL("HAL RING 0x%pK:error %d"), hal_ring_hdl, error);
1935 			DP_STATS_INC(soc, rx.err.hal_reo_error[ring_id], 1);
1936 			/* Don't know how to deal with this -- assert */
1937 			qdf_assert(0);
1938 		}
1939 
1940 		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
1941 
1942 		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
1943 		status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl,
1944 					   ring_desc, rx_desc);
1945 		if (QDF_IS_STATUS_ERROR(status)) {
1946 			if (qdf_unlikely(rx_desc && rx_desc->nbuf)) {
1947 				qdf_assert_always(rx_desc->unmapped);
1948 				qdf_nbuf_unmap_nbytes_single(
1949 							soc->osdev,
1950 							rx_desc->nbuf,
1951 							QDF_DMA_FROM_DEVICE,
1952 							RX_DATA_BUFFER_SIZE);
1953 				rx_desc->unmapped = 1;
1954 				qdf_nbuf_free(rx_desc->nbuf);
1955 				dp_rx_add_to_free_desc_list(
1956 							&head[rx_desc->pool_id],
1957 							&tail[rx_desc->pool_id],
1958 							rx_desc);
1959 			}
1960 			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
1961 			continue;
1962 		}
1963 
1964 		dp_rx_desc_nbuf_sanity_check(ring_desc, rx_desc);
1965 
1966 		/*
1967 		 * This is an unlikely scenario where the host is reaping
1968 		 * a descriptor which it already reaped just a while ago
1969 		 * but is yet to replenish it back to HW.
1970 		 * In this case host will dump the last 128 descriptors
1971 		 * including the software descriptor rx_desc and assert.
1972 		 */
1973 
1974 		if (qdf_unlikely(!rx_desc->in_use)) {
1975 			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
1976 			dp_info_rl("Reaping rx_desc not in use!");
1977 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
1978 						   ring_desc, rx_desc);
1979 			/* ignore duplicate RX desc and continue to process */
1980 			/* Pop out the descriptor */
1981 			hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
1982 			continue;
1983 		}
1984 
1985 		if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
1986 			dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
1987 			DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
1988 			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
1989 						   ring_desc, rx_desc);
1990 		}
1991 
1992 		/* Get MPDU DESC info */
1993 		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);
1994 
1995 		/* Get MSDU DESC info */
1996 		hal_rx_msdu_desc_info_get(ring_desc, &msdu_desc_info);
1997 
1998 		if (qdf_unlikely(msdu_desc_info.msdu_flags &
1999 				 HAL_MSDU_F_MSDU_CONTINUATION)) {
2000 			/* previous msdu had the last-msdu bit set, so the
2001 			 * current buffer starts a new MPDU
2002 			 */
2003 			if (is_prev_msdu_last) {
2004 				/* Get number of entries available in HW ring */
2005 				num_entries_avail =
2006 				hal_srng_dst_num_valid(hal_soc,
2007 						       hal_ring_hdl, 1);
2008 
2009 				/* For new MPDU check if we can read complete
2010 				 * MPDU by comparing the number of buffers
2011 				 * available and number of buffers needed to
2012 				 * reap this MPDU
2013 				 */
2014 				if (((msdu_desc_info.msdu_len /
2015 				     (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN) +
2016 				     1)) > num_entries_avail) {
2017 					DP_STATS_INC(
2018 						soc,
2019 						rx.msdu_scatter_wait_break,
2020 						1);
2021 					break;
2022 				}
2023 				is_prev_msdu_last = false;
2024 			}
2025 
2026 		}
2027 
2028 		core_id = smp_processor_id();
2029 		DP_STATS_INC(soc, rx.ring_packets[core_id][ring_id], 1);
2030 
2031 		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
2032 			qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1);
2033 
2034 		if (qdf_unlikely(mpdu_desc_info.mpdu_flags &
2035 				 HAL_MPDU_F_RAW_AMPDU))
2036 			qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);
2037 
2038 		if (!is_prev_msdu_last &&
2039 		    msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
2040 			is_prev_msdu_last = true;
2041 
2042 		/* Pop out the descriptor*/
2043 		hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
2044 
2045 		rx_bufs_reaped[rx_desc->pool_id]++;
2046 		peer_mdata = mpdu_desc_info.peer_meta_data;
2047 		QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
2048 			DP_PEER_METADATA_PEER_ID_GET(peer_mdata);
2049 		QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) =
2050 			DP_PEER_METADATA_VDEV_ID_GET(peer_mdata);
2051 
2052 		/*
2053 		 * save msdu flags first, last and continuation msdu in
2054 		 * nbuf->cb, also save mcbc, is_da_valid, is_sa_valid and
2055 		 * length to nbuf->cb. This ensures the info required for
2056 		 * per pkt processing is always in the same cache line.
2057 		 * This helps in improving throughput for smaller pkt
2058 		 * sizes.
2059 		 */
2060 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
2061 			qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);
2062 
2063 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
2064 			qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);
2065 
2066 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
2067 			qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);
2068 
2069 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
2070 			qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1);
2071 
2072 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
2073 			qdf_nbuf_set_da_valid(rx_desc->nbuf, 1);
2074 
2075 		if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
2076 			qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1);
2077 
2078 		qdf_nbuf_set_tid_val(rx_desc->nbuf,
2079 				     HAL_RX_REO_QUEUE_NUMBER_GET(ring_desc));
2080 
2081 		QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len;
2082 
2083 		QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;
2084 
2085 		/*
2086 		 * The unmap is done only after the scattered-msdu wait/break
2087 		 * logic above, so that a double skb unmap cannot happen.
2088 		 */
2089 		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
2090 		qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
2091 					     QDF_DMA_FROM_DEVICE,
2092 					     rx_desc_pool->buf_size);
2093 		rx_desc->unmapped = 1;
2094 		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
2095 
2096 		/*
2097 		 * if continuation bit is set then we have MSDU spread
2098 		 * across multiple buffers, let us not decrement quota
2099 		 * till we reap all buffers of that MSDU.
2100 		 */
2101 		if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)))
2102 			quota -= 1;
2103 
2104 		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
2105 						&tail[rx_desc->pool_id],
2106 						rx_desc);
2107 
2108 		num_rx_bufs_reaped++;
2109 		/*
2110 		 * For the scatter case, allow the break only after the
2111 		 * complete msdu has been received.
2112 		 */
2113 		if (is_prev_msdu_last &&
2114 		    dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped))
2115 			break;
2116 	}
2117 done:
2118 	dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);
2119 
2120 	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
2121 		/*
2122 		 * continue with next mac_id if no pkts were reaped
2123 		 * from that pool
2124 		 */
2125 		if (!rx_bufs_reaped[mac_id])
2126 			continue;
2127 
2128 		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
2129 
2130 		rx_desc_pool = &soc->rx_desc_buf[mac_id];
2131 
2132 		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
2133 					rx_desc_pool, rx_bufs_reaped[mac_id],
2134 					&head[mac_id], &tail[mac_id]);
2135 	}
2136 
2137 	dp_verbose_debug("replenished %u\n", rx_bufs_reaped[0]);
2138 	/* Peer can be NULL in case of LFR */
2139 	if (qdf_likely(peer))
2140 		vdev = NULL;
2141 
2142 	/*
2143 	 * BIG loop where each nbuf is dequeued from global queue,
2144 	 * processed and queued back on a per vdev basis. These nbufs
2145 	 * are sent to stack as and when we run out of nbufs
2146 	 * or a new nbuf dequeued from global queue has a different
2147 	 * vdev when compared to previous nbuf.
2148 	 */
2149 	nbuf = nbuf_head;
2150 	while (nbuf) {
2151 		next = nbuf->next;
2152 		rx_tlv_hdr = qdf_nbuf_data(nbuf);
2153 		vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
2154 
2155 		if (deliver_list_head && vdev && (vdev->vdev_id != vdev_id)) {
2156 			dp_rx_deliver_to_stack(soc, vdev, peer,
2157 					       deliver_list_head,
2158 					       deliver_list_tail);
2159 			deliver_list_head = NULL;
2160 			deliver_list_tail = NULL;
2161 		}
2162 
2163 		/* Get TID from struct cb->tid_val, save to tid */
2164 		if (qdf_nbuf_is_rx_chfrag_start(nbuf))
2165 			tid = qdf_nbuf_get_tid_val(nbuf);
2166 
2167 		peer_id =  QDF_NBUF_CB_RX_PEER_ID(nbuf);
2168 		peer = dp_peer_find_by_id(soc, peer_id);
2169 
2170 		if (peer) {
2171 			QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
2172 			qdf_dp_trace_set_track(nbuf, QDF_RX);
2173 			QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
2174 			QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
2175 				QDF_NBUF_RX_PKT_DATA_TRACK;
2176 		}
2177 
2178 		rx_bufs_used++;
2179 
2180 		if (qdf_likely(peer)) {
2181 			vdev = peer->vdev;
2182 		} else {
2183 			nbuf->next = NULL;
2184 			dp_rx_deliver_to_stack_no_peer(soc, nbuf);
2185 			nbuf = next;
2186 			continue;
2187 		}
2188 
2189 		if (qdf_unlikely(!vdev)) {
2190 			qdf_nbuf_free(nbuf);
2191 			nbuf = next;
2192 			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
2193 			dp_peer_unref_del_find_by_id(peer);
2194 			continue;
2195 		}
2196 
2197 		rx_pdev = vdev->pdev;
2198 		DP_RX_TID_SAVE(nbuf, tid);
2199 		if (qdf_unlikely(rx_pdev->delay_stats_flag))
2200 			qdf_nbuf_set_timestamp(nbuf);
2201 
2202 		ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
2203 		tid_stats =
2204 			&rx_pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
2205 
2206 		/*
2207 		 * Check if DMA completed -- msdu_done is the last bit
2208 		 * to be written
2209 		 */
2210 		if (qdf_unlikely(!qdf_nbuf_is_rx_chfrag_cont(nbuf) &&
2211 				 !hal_rx_attn_msdu_done_get(rx_tlv_hdr))) {
2212 			dp_err("MSDU DONE failure");
2213 			DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
2214 			hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
2215 					     QDF_TRACE_LEVEL_INFO);
2216 			tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
2217 			qdf_nbuf_free(nbuf);
2218 			qdf_assert(0);
2219 			nbuf = next;
2220 			continue;
2221 		}
2222 
2223 		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
2224 		/*
2225 		 * First IF condition:
2226 		 * 802.11 Fragmented pkts are reinjected to REO
2227 		 * HW block as SG pkts and for these pkts we only
2228 		 * need to pull the RX TLVS header length.
2229 		 * Second IF condition:
2230 		 * The below condition happens when an MSDU is spread
2231 		 * across multiple buffers. This can happen in two cases
2232 		 * 1. The nbuf size is smaller than the received msdu.
2233 		 *    ex: we have set the nbuf size to 2048 during
2234 		 *        nbuf_alloc, but we received an msdu of
2235 		 *        2304 bytes, so this msdu is spread
2236 		 *        across 2 nbufs.
2237 		 *
2238 		 * 2. AMSDUs when RAW mode is enabled.
2239 		 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
2240 		 *        across 1st nbuf and 2nd nbuf and last MSDU is
2241 		 *        spread across 2nd nbuf and 3rd nbuf.
2242 		 *
2243 		 * for these scenarios let us create a skb frag_list and
2244 		 * append these buffers till the last MSDU of the AMSDU
2245 		 * Third condition:
2246 		 * This is the most likely case, we receive 802.3 pkts
2247 		 * decapsulated by HW, here we need to set the pkt length.
2248 		 */
2249 		hal_rx_msdu_metadata_get(hal_soc, rx_tlv_hdr, &msdu_metadata);
2250 		if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
2251 			bool is_mcbc, is_sa_vld, is_da_vld;
2252 
2253 			is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
2254 								 rx_tlv_hdr);
2255 			is_sa_vld =
2256 				hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
2257 								rx_tlv_hdr);
2258 			is_da_vld =
2259 				hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
2260 								rx_tlv_hdr);
2261 
2262 			qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
2263 			qdf_nbuf_set_da_valid(nbuf, is_da_vld);
2264 			qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);
2265 
2266 			qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
2267 		} else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
2268 			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
2269 			nbuf = dp_rx_sg_create(nbuf);
2270 			next = nbuf->next;
2271 
2272 			if (qdf_nbuf_is_raw_frame(nbuf)) {
2273 				DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
2274 				DP_STATS_INC_PKT(peer, rx.raw, 1, msdu_len);
2275 			} else {
2276 				qdf_nbuf_free(nbuf);
2277 				DP_STATS_INC(soc, rx.err.scatter_msdu, 1);
2278 				dp_info_rl("scatter msdu len %d, dropped",
2279 					   msdu_len);
2280 				nbuf = next;
2281 				dp_peer_unref_del_find_by_id(peer);
2282 				continue;
2283 			}
2284 		} else {
2285 
2286 			msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
2287 			pkt_len = msdu_len +
2288 				  msdu_metadata.l3_hdr_pad +
2289 				  RX_PKT_TLVS_LEN;
2290 
2291 			qdf_nbuf_set_pktlen(nbuf, pkt_len);
2292 			dp_rx_skip_tlvs(nbuf, msdu_metadata.l3_hdr_pad);
2293 		}
2294 
2295 		/*
2296 		 * process frame for multipass phase processing
2297 		 */
2298 		if (qdf_unlikely(vdev->multipass_en)) {
2299 			if (dp_rx_multipass_process(peer, nbuf, tid) == false) {
2300 				DP_STATS_INC(peer, rx.multipass_rx_pkt_drop, 1);
2301 				qdf_nbuf_free(nbuf);
2302 				nbuf = next;
2303 				dp_peer_unref_del_find_by_id(peer);
2304 				continue;
2305 			}
2306 		}
2307 
2308 		if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
2309 			QDF_TRACE(QDF_MODULE_ID_DP,
2310 					QDF_TRACE_LEVEL_ERROR,
2311 					FL("Policy Check Drop pkt"));
2312 			tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
2313 			/* Drop & free packet */
2314 			qdf_nbuf_free(nbuf);
2315 			/* Statistics */
2316 			nbuf = next;
2317 			dp_peer_unref_del_find_by_id(peer);
2318 			continue;
2319 		}
2320 
2321 		if (qdf_unlikely(peer && (peer->nawds_enabled) &&
2322 				 (qdf_nbuf_is_da_mcbc(nbuf)) &&
2323 				 (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc,
2324 								rx_tlv_hdr) ==
2325 				  false))) {
2326 			tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
2327 			DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
2328 			qdf_nbuf_free(nbuf);
2329 			nbuf = next;
2330 			dp_peer_unref_del_find_by_id(peer);
2331 			continue;
2332 		}
2333 
2334 		if (soc->process_rx_status)
2335 			dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);
2336 
2337 		/* Update the protocol tag in SKB based on CCE metadata */
2338 		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
2339 					  reo_ring_num, false, true);
2340 
2341 		/* Update the flow tag in SKB based on FSE metadata */
2342 		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
2343 
2344 		dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer,
2345 					ring_id, tid_stats);
2346 
2347 		if (qdf_unlikely(vdev->mesh_vdev)) {
2348 			if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
2349 					== QDF_STATUS_SUCCESS) {
2350 				QDF_TRACE(QDF_MODULE_ID_DP,
2351 						QDF_TRACE_LEVEL_INFO_MED,
2352 						FL("mesh pkt filtered"));
2353 				tid_stats->fail_cnt[MESH_FILTER_DROP]++;
2354 				DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
2355 					     1);
2356 
2357 				qdf_nbuf_free(nbuf);
2358 				nbuf = next;
2359 				dp_peer_unref_del_find_by_id(peer);
2360 				continue;
2361 			}
2362 			dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
2363 		}
2364 
2365 		if (qdf_likely(vdev->rx_decap_type ==
2366 			       htt_cmn_pkt_type_ethernet) &&
2367 		    qdf_likely(!vdev->mesh_vdev)) {
2368 			/* WDS Destination Address Learning */
2369 			dp_rx_da_learn(soc, rx_tlv_hdr, peer, nbuf);
2370 
2371 			/* Due to HW issue, sometimes we see that the sa_idx
2372 			 * and da_idx are invalid with sa_valid and da_valid
2373 			 * bits set
2374 			 *
2375 			 * in this case we also see that value of
2376 			 * sa_sw_peer_id is set as 0
2377 			 *
2378 			 * Drop the packet if sa_idx and da_idx OOB or
2379 			 * sa_sw_peerid is 0
2380 			 */
2381 			if (!is_sa_da_idx_valid(soc, rx_tlv_hdr, nbuf,
2382 						msdu_metadata)) {
2383 				qdf_nbuf_free(nbuf);
2384 				nbuf = next;
2385 				DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
2386 				dp_peer_unref_del_find_by_id(peer);
2387 				continue;
2388 			}
2389 			/* WDS Source Port Learning */
2390 			if (qdf_likely(vdev->wds_enabled))
2391 				dp_rx_wds_srcport_learn(soc,
2392 							rx_tlv_hdr,
2393 							peer,
2394 							nbuf,
2395 							msdu_metadata);
2396 
2397 			/* Intrabss-fwd */
2398 			if (dp_rx_check_ap_bridge(vdev))
2399 				if (dp_rx_intrabss_fwd(soc,
2400 							peer,
2401 							rx_tlv_hdr,
2402 							nbuf,
2403 							msdu_metadata)) {
2404 					nbuf = next;
2405 					dp_peer_unref_del_find_by_id(peer);
2406 					tid_stats->intrabss_cnt++;
2407 					continue; /* Get next desc */
2408 				}
2409 		}
2410 
2411 		dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt);
2412 
2413 		DP_RX_LIST_APPEND(deliver_list_head,
2414 				  deliver_list_tail,
2415 				  nbuf);
2416 		DP_STATS_INC_PKT(peer, rx.to_stack, 1,
2417 				 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
2418 
2419 		tid_stats->delivered_to_stack++;
2420 		nbuf = next;
2421 		dp_peer_unref_del_find_by_id(peer);
2422 	}
2423 
2424 	if (qdf_likely(deliver_list_head)) {
2425 		if (qdf_likely(peer))
2426 			dp_rx_deliver_to_stack(soc, vdev, peer,
2427 					       deliver_list_head,
2428 					       deliver_list_tail);
2429 		else {
2430 			nbuf = deliver_list_head;
2431 			while (nbuf) {
2432 				next = nbuf->next;
2433 				nbuf->next = NULL;
2434 				dp_rx_deliver_to_stack_no_peer(soc, nbuf);
2435 				nbuf = next;
2436 			}
2437 		}
2438 	}
2439 
2440 	if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
2441 		if (quota) {
2442 			num_pending =
2443 				dp_rx_srng_get_num_pending(hal_soc,
2444 							   hal_ring_hdl,
2445 							   num_entries,
2446 							   &near_full);
2447 			if (num_pending) {
2448 				DP_STATS_INC(soc, rx.hp_oos2, 1);
2449 
2450 				if (!hif_exec_should_yield(scn, intr_id))
2451 					goto more_data;
2452 
2453 				if (qdf_unlikely(near_full)) {
2454 					DP_STATS_INC(soc, rx.near_full, 1);
2455 					goto more_data;
2456 				}
2457 			}
2458 		}
2459 
2460 		if (vdev && vdev->osif_fisa_flush)
2461 			vdev->osif_fisa_flush(soc, reo_ring_num);
2462 
2463 		if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
2464 			vdev->osif_gro_flush(vdev->osif_vdev,
2465 					     reo_ring_num);
2466 		}
2467 	}
2468 
2469 	/* Update histogram statistics by looping through pdev's */
2470 	DP_RX_HIST_STATS_PER_PDEV();
2471 
2472 	return rx_bufs_used; /* Assume no scale factor for now */
2473 }
2474 
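/*
 * Illustrative sketch (not part of the driver): how a tasklet/NET_RX_SOFTIRQ
 * context could drive dp_rx_process() per REO destination ring with a budget.
 * The function example_rx_poll() is hypothetical, and the reo_dest_ring[] /
 * MAX_REO_DEST_RINGS names are assumed from dp_types.h; the real dispatch
 * lives in the DP interrupt service path.
 */
#if 0
static uint32_t example_rx_poll(struct dp_intr *int_ctx, uint32_t budget)
{
	struct dp_soc *soc = int_ctx->soc;
	uint32_t total = 0;
	uint32_t done;
	uint8_t ring;

	for (ring = 0; ring < MAX_REO_DEST_RINGS && budget; ring++) {
		/* each ring gets whatever budget is left */
		done = dp_rx_process(int_ctx,
				     soc->reo_dest_ring[ring].hal_srng,
				     ring, budget);
		total += done;
		budget = (done > budget) ? 0 : budget - done;
	}

	return total;
}
#endif
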
2475 QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev)
2476 {
2477 	QDF_STATUS ret;
2478 
2479 	if (vdev->osif_rx_flush) {
2480 		ret = vdev->osif_rx_flush(vdev->osif_vdev, vdev->vdev_id);
2481 		if (!QDF_IS_STATUS_SUCCESS(ret)) {
2482 			dp_err("Failed to flush rx pkts for vdev %d\n",
2483 			       vdev->vdev_id);
2484 			return ret;
2485 		}
2486 	}
2487 
2488 	return QDF_STATUS_SUCCESS;
2489 }
2490 
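/**
 * dp_pdev_nbuf_alloc_and_map() - allocate an nbuf and DMA-map it for RX
 * @dp_soc: core txrx main context
 * @nbuf: out parameter for the allocated nbuf
 * @dp_pdev: core txrx pdev context, used only for replenish error stats
 * @rx_desc_pool: rx descriptor pool supplying buffer size and alignment
 *
 * Allocates a buffer of rx_desc_pool->buf_size, maps it for DMA from the
 * device and validates the resulting physical address (check_x86_paddr).
 *
 * Return: QDF_STATUS_SUCCESS on success, an error status on alloc/map failure
 */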
2491 static QDF_STATUS
2492 dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc, qdf_nbuf_t *nbuf,
2493 			   struct dp_pdev *dp_pdev,
2494 			   struct rx_desc_pool *rx_desc_pool)
2495 {
2496 	qdf_dma_addr_t paddr;
2497 	QDF_STATUS ret = QDF_STATUS_E_FAILURE;
2498 
2499 	*nbuf = qdf_nbuf_alloc(dp_soc->osdev, rx_desc_pool->buf_size,
2500 			       RX_BUFFER_RESERVATION,
2501 			       rx_desc_pool->buf_alignment, FALSE);
2502 	if (!(*nbuf)) {
2503 		dp_err("nbuf alloc failed");
2504 		DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
2505 		return ret;
2506 	}
2507 
2508 	ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev, *nbuf,
2509 					 QDF_DMA_FROM_DEVICE,
2510 					 rx_desc_pool->buf_size);
2511 
2512 	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
2513 		qdf_nbuf_free(*nbuf);
2514 		dp_err("nbuf map failed");
2515 		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
2516 		return ret;
2517 	}
2518 
2519 	paddr = qdf_nbuf_get_frag_paddr(*nbuf, 0);
2520 
2521 	ret = check_x86_paddr(dp_soc, nbuf, &paddr, rx_desc_pool);
2522 	if (ret == QDF_STATUS_E_FAILURE) {
2523 		qdf_nbuf_unmap_nbytes_single(dp_soc->osdev, *nbuf,
2524 					     QDF_DMA_FROM_DEVICE,
2525 					     rx_desc_pool->buf_size);
2526 		qdf_nbuf_free(*nbuf);
2527 		dp_err("nbuf check x86 failed");
2528 		DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
2529 		return ret;
2530 	}
2531 
2532 	return QDF_STATUS_SUCCESS;
2533 }
2534 
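/**
 * dp_pdev_rx_buffers_attach() - replenish the RXDMA refill ring at attach
 * @dp_soc: core txrx main context
 * @mac_id: mac (lmac) id
 * @dp_rxdma_srng: RXDMA refill ring to be filled
 * @rx_desc_pool: rx descriptor pool from which descriptors are taken
 * @num_req_buffers: number of buffers requested
 *
 * Allocates and maps up to @num_req_buffers nbufs (bounded by the free
 * descriptors and ring entries available) and posts them to the refill
 * ring, handling the nbuf pointer array one page at a time.
 *
 * Return: QDF_STATUS_SUCCESS on success, an error status otherwise
 */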
2535 QDF_STATUS
2536 dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
2537 			  struct dp_srng *dp_rxdma_srng,
2538 			  struct rx_desc_pool *rx_desc_pool,
2539 			  uint32_t num_req_buffers)
2540 {
2541 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
2542 	hal_ring_handle_t rxdma_srng = dp_rxdma_srng->hal_srng;
2543 	union dp_rx_desc_list_elem_t *next;
2544 	void *rxdma_ring_entry;
2545 	qdf_dma_addr_t paddr;
2546 	qdf_nbuf_t *rx_nbuf_arr;
2547 	uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0;
2548 	uint32_t buffer_index, nbuf_ptrs_per_page;
2549 	qdf_nbuf_t nbuf;
2550 	QDF_STATUS ret;
2551 	int page_idx, total_pages;
2552 	union dp_rx_desc_list_elem_t *desc_list = NULL;
2553 	union dp_rx_desc_list_elem_t *tail = NULL;
2554 	int sync_hw_ptr = 1;
2555 	uint32_t num_entries_avail;
2556 
2557 	if (qdf_unlikely(!rxdma_srng)) {
2558 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
2559 		return QDF_STATUS_E_FAILURE;
2560 	}
2561 
2562 	dp_debug("requested %u RX buffers for driver attach", num_req_buffers);
2563 
2564 	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
2565 	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
2566 						   rxdma_srng,
2567 						   sync_hw_ptr);
2568 	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
2569 
2570 	if (!num_entries_avail) {
2571 		dp_err("Num of available entries is zero, nothing to do");
2572 		return QDF_STATUS_E_NOMEM;
2573 	}
2574 
2575 	if (num_entries_avail < num_req_buffers)
2576 		num_req_buffers = num_entries_avail;
2577 
2578 	nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool,
2579 					    num_req_buffers, &desc_list, &tail);
2580 	if (!nr_descs) {
2581 		dp_err("no free rx_descs in freelist");
2582 		DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
2583 		return QDF_STATUS_E_NOMEM;
2584 	}
2585 
2586 	dp_debug("got %u RX descs for driver attach", nr_descs);
2587 
2588 	/*
2589 	 * Allocate nbuf pointers one page at a time: only as many
2590 	 * pointers as fit in a single page are allocated, and that
2591 	 * same page of pointers is reused on each iteration until
2592 	 * all descriptors have been filled (for example, with a
2593 	 * 4 KB page and 8-byte pointers, 512 nbufs are prepared
2594 	 * per iteration).
2595 	 */
2596 	total_pages = (nr_descs * sizeof(*rx_nbuf_arr)) / PAGE_SIZE;
2597 
2598 	/*
2599 	 * Add an extra page to store the remainder if any
2600 	 */
2601 	if ((nr_descs * sizeof(*rx_nbuf_arr)) % PAGE_SIZE)
2602 		total_pages++;
2603 	rx_nbuf_arr = qdf_mem_malloc(PAGE_SIZE);
2604 	if (!rx_nbuf_arr) {
2605 		dp_err("failed to allocate nbuf array");
2606 		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
2607 		QDF_BUG(0);
2608 		return QDF_STATUS_E_NOMEM;
2609 	}
2610 	nbuf_ptrs_per_page = PAGE_SIZE / sizeof(*rx_nbuf_arr);
2611 
2612 	for (page_idx = 0; page_idx < total_pages; page_idx++) {
2613 		qdf_mem_zero(rx_nbuf_arr, PAGE_SIZE);
2614 
2615 		for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) {
2616 			/*
2617 			 * The last page of buffer pointers may not be required
2618 			 * completely based on the number of descriptors. Below
2619 			 * check will ensure we are allocating only the
2620 			 * required number of descriptors.
2621 			 */
2622 			if (nr_nbuf_total >= nr_descs)
2623 				break;
2624 			ret = dp_pdev_nbuf_alloc_and_map(dp_soc,
2625 							 &rx_nbuf_arr[nr_nbuf],
2626 							 dp_pdev, rx_desc_pool);
2627 			if (QDF_IS_STATUS_ERROR(ret))
2628 				break;
2629 
2630 			nr_nbuf_total++;
2631 		}
2632 
2633 		hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
2634 
2635 		for (buffer_index = 0; buffer_index < nr_nbuf; buffer_index++) {
2636 			rxdma_ring_entry =
2637 				hal_srng_src_get_next(dp_soc->hal_soc,
2638 						      rxdma_srng);
2639 			qdf_assert_always(rxdma_ring_entry);
2640 
2641 			next = desc_list->next;
2642 			nbuf = rx_nbuf_arr[buffer_index];
2643 			paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
2644 
2645 			dp_rx_desc_prep(&desc_list->rx_desc, nbuf);
2646 			desc_list->rx_desc.in_use = 1;
2647 
2648 			hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
2649 						     desc_list->rx_desc.cookie,
2650 						     rx_desc_pool->owner);
2651 
2652 			dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, nbuf, true);
2653 
2654 			desc_list = next;
2655 		}
2656 
2657 		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
2658 	}
2659 
2660 	dp_info("filled %u RX buffers for driver attach", nr_nbuf_total);
2661 	qdf_mem_free(rx_nbuf_arr);
2662 
2663 	if (!nr_nbuf_total) {
2664 		dp_err("No nbuf's allocated");
2665 		QDF_BUG(0);
2666 		return QDF_STATUS_E_RESOURCES;
2667 	}
2668 
2669 	/* No need to count the number of bytes received during replenish.
2670 	 * Therefore set replenish.pkts.bytes as 0.
2671 	 */
2672 	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf, 0);
2673 
2674 	return QDF_STATUS_SUCCESS;
2675 }
2676 
2677 /*
2678  * dp_rx_pdev_desc_pool_alloc() -  allocate memory for software rx descriptor
2679  *				   pool
2680  *
2681  * @pdev: core txrx pdev context
2682  *
2683  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
2684  *			QDF_STATUS_E_NOMEM
2685  */
2686 QDF_STATUS
2687 dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev)
2688 {
2689 	struct dp_soc *soc = pdev->soc;
2690 	uint32_t rxdma_entries;
2691 	uint32_t rx_sw_desc_weight;
2692 	struct dp_srng *dp_rxdma_srng;
2693 	struct rx_desc_pool *rx_desc_pool;
2694 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2695 	int mac_for_pdev;
2696 
2697 	mac_for_pdev = pdev->lmac_id;
2698 	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
2699 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2700 			  "nss-wifi<4> skip Rx refill %d", mac_for_pdev);
2701 		return status;
2702 	}
2703 
2704 	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
2705 	rxdma_entries = dp_rxdma_srng->num_entries;
2706 
2707 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
2708 	rx_sw_desc_weight = wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc->wlan_cfg_ctx);
2709 
2710 	status = dp_rx_desc_pool_alloc(soc,
2711 				       rx_sw_desc_weight * rxdma_entries,
2712 				       rx_desc_pool);
2713 	if (status != QDF_STATUS_SUCCESS)
2714 		return status;
2715 
2716 	return status;
2717 }
2718 
2719 /*
2720  * dp_rx_pdev_desc_pool_free() - free software rx descriptor pool
2721  *
2722  * @pdev: core txrx pdev context
2723  */
2724 void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev)
2725 {
2726 	int mac_for_pdev = pdev->lmac_id;
2727 	struct dp_soc *soc = pdev->soc;
2728 	struct rx_desc_pool *rx_desc_pool;
2729 
2730 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
2731 
2732 	dp_rx_desc_pool_free(soc, rx_desc_pool);
2733 }
2734 
2735 /*
2736  * dp_rx_pdev_desc_pool_init() - initialize software rx descriptors
2737  *
2738  * @pdev: core txrx pdev context
2739  *
2740  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
2741  *			QDF_STATUS_E_NOMEM
2742  */
2743 QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev)
2744 {
2745 	int mac_for_pdev = pdev->lmac_id;
2746 	struct dp_soc *soc = pdev->soc;
2747 	uint32_t rxdma_entries;
2748 	uint32_t rx_sw_desc_weight;
2749 	struct dp_srng *dp_rxdma_srng;
2750 	struct rx_desc_pool *rx_desc_pool;
2751 
2752 	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
2753 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
2754 			  "nss-wifi<4> skip Rx refill %d", mac_for_pdev);
2755 		return QDF_STATUS_SUCCESS;
2756 	}
2757 
2758 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
2759 	if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_E_NOMEM)
2760 		return QDF_STATUS_E_NOMEM;
2761 
2762 	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
2763 	rxdma_entries = dp_rxdma_srng->num_entries;
2764 
2765 	soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;
2766 
2767 	rx_sw_desc_weight =
2768 	wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc->wlan_cfg_ctx);
2769 
2770 	rx_desc_pool->owner = DP_WBM2SW_RBM;
2771 	rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE;
2772 	rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;
2773 
2774 	dp_rx_desc_pool_init(soc, mac_for_pdev,
2775 			     rx_sw_desc_weight * rxdma_entries,
2776 			     rx_desc_pool);
2777 	return QDF_STATUS_SUCCESS;
2778 }
2779 
2780 /*
2781  * dp_rx_pdev_desc_pool_deinit() - de-initialize software rx descriptor pools
2782  * @pdev: core txrx pdev context
2783  *
2784  * This function resets the freelist of rx descriptors and destroys locks
2785  * associated with this list of descriptors.
2786  */
2787 void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev)
2788 {
2789 	int mac_for_pdev = pdev->lmac_id;
2790 	struct dp_soc *soc = pdev->soc;
2791 	struct rx_desc_pool *rx_desc_pool;
2792 
2793 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
2794 
2795 	dp_rx_desc_pool_deinit(soc, rx_desc_pool);
2796 }
2797 
2798 /*
2799  * dp_rx_pdev_buffers_alloc() - Allocate nbufs (skbs) and replenish RxDMA ring
2800  *
2801  * @pdev: core txrx pdev context
2802  *
2803  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
2804  *			QDF_STATUS_E_NOMEM
2805  */
2806 QDF_STATUS
2807 dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev)
2808 {
2809 	int mac_for_pdev = pdev->lmac_id;
2810 	struct dp_soc *soc = pdev->soc;
2811 	struct dp_srng *dp_rxdma_srng;
2812 	struct rx_desc_pool *rx_desc_pool;
2813 	uint32_t rxdma_entries;
2814 
2815 	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
2816 	rxdma_entries = dp_rxdma_srng->num_entries;
2817 
2818 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
2819 
2820 	return dp_pdev_rx_buffers_attach(soc, mac_for_pdev, dp_rxdma_srng,
2821 					 rx_desc_pool, rxdma_entries - 1);
2822 }
2823 
2824 /*
2825  * dp_rx_pdev_buffers_free() - Free nbufs (skbs)
2826  *
2827  * @pdev: core txrx pdev context
2828  */
2829 void
2830 dp_rx_pdev_buffers_free(struct dp_pdev *pdev)
2831 {
2832 	int mac_for_pdev = pdev->lmac_id;
2833 	struct dp_soc *soc = pdev->soc;
2834 	struct rx_desc_pool *rx_desc_pool;
2835 
2836 	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
2837 
2838 	dp_rx_desc_nbuf_free(soc, rx_desc_pool);
2839 }
2840 
2841 /*
2842  * dp_rx_nbuf_prepare() - prepare RX nbuf
2843  * @soc: core txrx main context
2844  * @pdev: core txrx pdev context
2845  *
2846  * This function allocates and maps an nbuf for RX DMA usage, retrying on
2847  * failure until the retry count reaches the max threshold or it succeeds.
2848  *
2849  * Return: qdf_nbuf_t pointer if succeeded, NULL if failed.
2850  */
2851 qdf_nbuf_t
2852 dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev)
2853 {
2854 	uint8_t *buf;
2855 	int32_t nbuf_retry_count;
2856 	QDF_STATUS ret;
2857 	qdf_nbuf_t nbuf = NULL;
2858 
2859 	for (nbuf_retry_count = 0; nbuf_retry_count <
2860 		QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD;
2861 			nbuf_retry_count++) {
2862 		/* Allocate a new skb */
2863 		nbuf = qdf_nbuf_alloc(soc->osdev,
2864 					RX_DATA_BUFFER_SIZE,
2865 					RX_BUFFER_RESERVATION,
2866 					RX_DATA_BUFFER_ALIGNMENT,
2867 					FALSE);
2868 
2869 		if (!nbuf) {
2870 			DP_STATS_INC(pdev,
2871 				replenish.nbuf_alloc_fail, 1);
2872 			continue;
2873 		}
2874 
2875 		buf = qdf_nbuf_data(nbuf);
2876 
2877 		memset(buf, 0, RX_DATA_BUFFER_SIZE);
2878 
2879 		ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
2880 						 QDF_DMA_FROM_DEVICE,
2881 						 RX_DATA_BUFFER_SIZE);
2882 
2883 		/* nbuf map failed */
2884 		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
2885 			qdf_nbuf_free(nbuf);
2886 			DP_STATS_INC(pdev, replenish.map_err, 1);
2887 			continue;
2888 		}
2889 		/* qdf_nbuf alloc and map succeeded */
2890 		break;
2891 	}
2892 
2893 	/* qdf_nbuf still alloc or map failed */
2894 	if (qdf_unlikely(nbuf_retry_count >=
2895 			QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD))
2896 		return NULL;
2897 
2898 	return nbuf;
2899 }
2900 
2901 #ifdef DP_RX_SPECIAL_FRAME_NEED
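/**
 * dp_rx_deliver_special_frame() - deliver a special frame to the stack
 * @soc: core txrx main context
 * @peer: peer the frame was received from
 * @nbuf: pkt skb pointer
 * @frame_mask: bitmask of special frame types (FRAME_MASK_*) to deliver
 * @rx_tlv_hdr: start of the RX TLV header of this nbuf
 *
 * Strips the L3 padding (and, for non-fragmented frames, the RX TLVs as
 * well) and hands the frame to the stack if it matches @frame_mask.
 *
 * Return: true if the frame was delivered, false otherwise
 */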
2902 bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer,
2903 				 qdf_nbuf_t nbuf, uint32_t frame_mask,
2904 				 uint8_t *rx_tlv_hdr)
2905 {
2906 	uint32_t l2_hdr_offset = 0;
2907 	uint16_t msdu_len = 0;
2908 	uint32_t skip_len;
2909 
2910 	l2_hdr_offset =
2911 		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
2912 
2913 	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
2914 		skip_len = l2_hdr_offset;
2915 	} else {
2916 		msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
2917 		skip_len = l2_hdr_offset + RX_PKT_TLVS_LEN;
2918 		qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);
2919 	}
2920 
2921 	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
2922 	dp_rx_set_hdr_pad(nbuf, l2_hdr_offset);
2923 	qdf_nbuf_pull_head(nbuf, skip_len);
2924 
2925 	if (dp_rx_is_special_frame(nbuf, frame_mask)) {
2926 		dp_rx_deliver_to_stack(soc, peer->vdev, peer,
2927 				       nbuf, NULL);
2928 		return true;
2929 	}
2930 
2931 	return false;
2932 }
2933 #endif
2934