1 /*
2  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
3  * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include "hal_hw_headers.h"
19 #include "dp_types.h"
20 #include "dp_rx.h"
21 #include "dp_peer.h"
22 #include "hal_rx.h"
23 #include "hal_api.h"
24 #include "qdf_trace.h"
25 #include "qdf_nbuf.h"
26 #include "hal_api_mon.h"
27 #include "dp_htt.h"
28 #include "dp_mon.h"
29 #include "dp_rx_mon.h"
30 #include "wlan_cfg.h"
31 #include "dp_internal.h"
32 #include "dp_rx_buffer_pool.h"
33 #include <dp_mon_1.0.h>
34 #include <dp_rx_mon_1.0.h>
35 
36 #ifdef WLAN_TX_PKT_CAPTURE_ENH
37 #include "dp_rx_mon_feature.h"
38 #endif
39 
40 /*
41  * PPDU id ranges from 0 to 64k-1. The PPDU id read from the status ring
42  * and the PPDU id read from the destination ring shall track each other.
43  * If the distance between the two PPDU ids is less than 20000, no wrap
44  * around is assumed. Otherwise, a wrap around is assumed.
45  */
46 #define NOT_PPDU_ID_WRAP_AROUND 20000
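/*
 * Illustrative sketch only: the helper name below is hypothetical and is not
 * used elsewhere in this driver. It shows how NOT_PPDU_ID_WRAP_AROUND is
 * applied to the status-ring and destination-ring PPDU ids, mirroring the
 * checks inside dp_rx_mon_mpdu_pop().
 */
static inline bool dp_rx_mon_ppdu_id_wrapped(uint32_t status_ppdu_id,
					     uint32_t dest_ppdu_id)
{
	uint32_t distance = (status_ppdu_id > dest_ppdu_id) ?
			    (status_ppdu_id - dest_ppdu_id) :
			    (dest_ppdu_id - status_ppdu_id);

	/* A small distance means both ids sit in the same 0..64k-1 window
	 * (no wrap); a larger distance implies the PPDU id wrapped around
	 * in one of the rings.
	 */
	return distance >= NOT_PPDU_ID_WRAP_AROUND;
}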
47 /*
48  * The destination ring processing is considered stuck if the destination
49  * ring does not move while the status ring moves 16 PPDUs. As a workaround,
50  * the destination ring processing skips this destination ring PPDU.
51  */
52 #define MON_DEST_RING_STUCK_MAX_CNT 16
53 
54 #ifdef WLAN_TX_PKT_CAPTURE_ENH
55 void
56 dp_handle_tx_capture(struct dp_soc *soc, struct dp_pdev *pdev,
57 		     qdf_nbuf_t mon_mpdu)
58 {
59 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
60 	struct hal_rx_ppdu_info *ppdu_info = &mon_pdev->ppdu_info;
61 
62 	if (mon_pdev->tx_capture_enabled
63 	    == CDP_TX_ENH_CAPTURE_DISABLED)
64 		return;
65 
66 	if ((ppdu_info->sw_frame_group_id ==
67 	      HAL_MPDU_SW_FRAME_GROUP_CTRL_NDPA) ||
68 	     (ppdu_info->sw_frame_group_id ==
69 	      HAL_MPDU_SW_FRAME_GROUP_CTRL_BAR))
70 		dp_handle_tx_capture_from_dest(soc, pdev, mon_mpdu);
71 }
72 
73 #ifdef QCA_MONITOR_PKT_SUPPORT
74 static void
75 dp_tx_capture_get_user_id(struct dp_pdev *dp_pdev, void *rx_desc_tlv)
76 {
77 	struct dp_mon_pdev *mon_pdev = dp_pdev->monitor_pdev;
78 
79 	if (mon_pdev->tx_capture_enabled
80 	    != CDP_TX_ENH_CAPTURE_DISABLED)
81 		mon_pdev->ppdu_info.rx_info.user_id =
82 			hal_rx_hw_desc_mpdu_user_id(dp_pdev->soc->hal_soc,
83 						    rx_desc_tlv);
84 }
85 #endif
86 #else
87 static void
88 dp_tx_capture_get_user_id(struct dp_pdev *dp_pdev, void *rx_desc_tlv)
89 {
90 }
91 #endif
92 
93 #ifdef QCA_MONITOR_PKT_SUPPORT
94 /**
95  * dp_rx_mon_link_desc_return() - Return an MPDU link descriptor to HW
96  *			      (WBM), following error handling
97  * @dp_pdev: core txrx pdev context
98  * @buf_addr_info: opaque pointer to the monitor link descriptor buf addr info
99  * @mac_id: mac id which identifies the monitor link descriptor ring
100  * Return: QDF_STATUS
101  */
102 QDF_STATUS
103 dp_rx_mon_link_desc_return(struct dp_pdev *dp_pdev,
104 	hal_buff_addrinfo_t buf_addr_info, int mac_id)
105 {
106 	struct dp_srng *dp_srng;
107 	hal_ring_handle_t hal_ring_hdl;
108 	hal_soc_handle_t hal_soc;
109 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
110 	void *src_srng_desc;
111 
112 	hal_soc = dp_pdev->soc->hal_soc;
113 
114 	dp_srng = &dp_pdev->soc->rxdma_mon_desc_ring[mac_id];
115 	hal_ring_hdl = dp_srng->hal_srng;
116 
117 	qdf_assert(hal_ring_hdl);
118 
119 	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring_hdl))) {
120 
121 		/* TODO */
122 		/*
123 		 * Need API to convert from hal_ring pointer to
124 		 * Ring Type / Ring Id combo
125 		 */
126 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
127 			"%s %d : \
128 			HAL RING Access For WBM Release SRNG Failed -- %pK",
129 			__func__, __LINE__, hal_ring_hdl);
130 		goto done;
131 	}
132 
133 	src_srng_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
134 
135 	if (qdf_likely(src_srng_desc)) {
136 		/* Return link descriptor through WBM ring (SW2WBM)*/
137 		hal_rx_mon_msdu_link_desc_set(hal_soc,
138 				src_srng_desc, buf_addr_info);
139 		status = QDF_STATUS_SUCCESS;
140 	} else {
141 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
142 			"%s %d -- Monitor Link Desc WBM Release Ring Full",
143 			__func__, __LINE__);
144 	}
145 done:
146 	hal_srng_access_end(hal_soc, hal_ring_hdl);
147 	return status;
148 }
149 
150 /**
151  * dp_rx_mon_mpdu_pop() - Pop the MSDUs of one MPDU from the RXDMA monitor
152  *			      destination ring entry and return its link
153  *			      descriptors to HW (WBM)
154  * @soc: core DP main context
155  * @mac_id: mac id which is one of 3 mac_ids
156  * @rxdma_dst_ring_desc: RXDMA monitor destination ring entry
157  * @head_msdu: head of the msdu list to be popped
158  * @tail_msdu: tail of the msdu list to be popped
159  * @npackets: number of packets to be popped
160  * @ppdu_id: ppdu id of the ppdu being processed
161  * @head: head of the descs list to be freed
162  * @tail: tail of the descs list to be freed
163  *
164  * Return: number of msdus popped for this MPDU
165  */
166 static inline uint32_t
167 dp_rx_mon_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
168 	hal_rxdma_desc_t rxdma_dst_ring_desc, qdf_nbuf_t *head_msdu,
169 	qdf_nbuf_t *tail_msdu, uint32_t *npackets, uint32_t *ppdu_id,
170 	union dp_rx_desc_list_elem_t **head,
171 	union dp_rx_desc_list_elem_t **tail)
172 {
173 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
174 	void *rx_desc_tlv, *first_rx_desc_tlv = NULL;
175 	void *rx_msdu_link_desc;
176 	qdf_nbuf_t msdu;
177 	qdf_nbuf_t last;
178 	struct hal_rx_msdu_list msdu_list;
179 	uint16_t num_msdus;
180 	uint32_t rx_buf_size, rx_pkt_offset;
181 	struct hal_buf_info buf_info;
182 	uint32_t rx_bufs_used = 0;
183 	uint32_t msdu_ppdu_id, msdu_cnt;
184 	uint8_t *data = NULL;
185 	uint32_t i;
186 	uint32_t total_frag_len = 0, frag_len = 0;
187 	bool is_frag, is_first_msdu;
188 	bool drop_mpdu = false, is_frag_non_raw = false;
189 	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
190 	qdf_dma_addr_t buf_paddr = 0;
191 	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
192 	struct cdp_mon_status *rs;
193 	struct dp_mon_pdev *mon_pdev;
194 
195 	if (qdf_unlikely(!dp_pdev)) {
196 		dp_rx_mon_dest_debug("%pK: pdev is null for mac_id = %d", soc, mac_id);
197 		return rx_bufs_used;
198 	}
199 
200 	mon_pdev = dp_pdev->monitor_pdev;
201 	msdu = 0;
202 
203 	last = NULL;
204 
205 	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
206 				     &buf_info, &msdu_cnt);
207 
208 	rs = &mon_pdev->rx_mon_recv_status;
209 	rs->cdp_rs_rxdma_err = false;
210 	if ((hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc) ==
211 		HAL_RX_WBM_RXDMA_PSH_RSN_ERROR)) {
212 		uint8_t rxdma_err =
213 			hal_rx_reo_ent_rxdma_error_code_get(
214 				rxdma_dst_ring_desc);
215 		if (qdf_unlikely((rxdma_err == HAL_RXDMA_ERR_FLUSH_REQUEST) ||
216 		   (rxdma_err == HAL_RXDMA_ERR_MPDU_LENGTH) ||
217 		   (rxdma_err == HAL_RXDMA_ERR_OVERFLOW) ||
218 		   (rxdma_err == HAL_RXDMA_ERR_FCS && mon_pdev->mcopy_mode) ||
219 		   (rxdma_err == HAL_RXDMA_ERR_FCS &&
220 		    mon_pdev->rx_pktlog_cbf))) {
221 			drop_mpdu = true;
222 			mon_pdev->rx_mon_stats.dest_mpdu_drop++;
223 		}
224 		rs->cdp_rs_rxdma_err = true;
225 	}
226 
227 	is_frag = false;
228 	is_first_msdu = true;
229 
230 	do {
231 		/* WAR for duplicate link descriptors received from HW */
232 		if (qdf_unlikely(mon_pdev->mon_last_linkdesc_paddr ==
233 		    buf_info.paddr)) {
234 			mon_pdev->rx_mon_stats.dup_mon_linkdesc_cnt++;
235 			return rx_bufs_used;
236 		}
237 
238 		rx_msdu_link_desc =
239 			dp_rx_cookie_2_mon_link_desc(dp_pdev,
240 						     buf_info, mac_id);
241 
242 		qdf_assert_always(rx_msdu_link_desc);
243 
244 		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
245 				     &msdu_list, &num_msdus);
246 
247 		for (i = 0; i < num_msdus; i++) {
248 			uint16_t l2_hdr_offset;
249 			struct dp_rx_desc *rx_desc = NULL;
250 			struct rx_desc_pool *rx_desc_pool;
251 
252 			rx_desc = dp_rx_get_mon_desc(soc,
253 						     msdu_list.sw_cookie[i]);
254 
255 			qdf_assert_always(rx_desc);
256 
257 			msdu = DP_RX_MON_GET_NBUF_FROM_DESC(rx_desc);
258 			buf_paddr = dp_rx_mon_get_paddr_from_desc(rx_desc);
259 
260 			/* WAR for duplicate buffers received from HW */
261 			if (qdf_unlikely(mon_pdev->mon_last_buf_cookie ==
262 				msdu_list.sw_cookie[i] ||
263 				DP_RX_MON_IS_BUFFER_ADDR_NULL(rx_desc) ||
264 				msdu_list.paddr[i] != buf_paddr ||
265 				!rx_desc->in_use)) {
266 				/* Skip duplicate buffer and drop subsequent
267 				 * buffers in this MPDU
268 				 */
269 				drop_mpdu = true;
270 				mon_pdev->rx_mon_stats.dup_mon_buf_cnt++;
271 				mon_pdev->mon_last_linkdesc_paddr =
272 					buf_info.paddr;
273 				continue;
274 			}
275 
276 			if (rx_desc->unmapped == 0) {
277 				rx_desc_pool = dp_rx_get_mon_desc_pool(soc,
278 								       mac_id,
279 								dp_pdev->pdev_id);
280 				dp_rx_mon_buffer_unmap(soc, rx_desc,
281 						       rx_desc_pool->buf_size);
282 				rx_desc->unmapped = 1;
283 			}
284 
285 			if (dp_rx_buffer_pool_refill(soc, msdu,
286 						     rx_desc->pool_id)) {
287 				drop_mpdu = true;
288 				msdu = NULL;
289 				mon_pdev->mon_last_linkdesc_paddr =
290 					buf_info.paddr;
291 				goto next_msdu;
292 			}
293 
294 			if (drop_mpdu) {
295 				mon_pdev->mon_last_linkdesc_paddr =
296 					buf_info.paddr;
297 				dp_rx_mon_buffer_free(rx_desc);
298 				msdu = NULL;
299 				goto next_msdu;
300 			}
301 
302 			data = dp_rx_mon_get_buffer_data(rx_desc);
303 			rx_desc_tlv = HAL_RX_MON_DEST_GET_DESC(data);
304 
305 			dp_rx_mon_dest_debug("%pK: i=%d, ppdu_id=%x, num_msdus = %u",
306 					     soc, i, *ppdu_id, num_msdus);
307 
308 			if (is_first_msdu) {
309 				if (!hal_rx_mpdu_start_tlv_tag_valid(
310 						soc->hal_soc,
311 						rx_desc_tlv)) {
312 					drop_mpdu = true;
313 					dp_rx_mon_buffer_free(rx_desc);
314 					msdu = NULL;
315 					mon_pdev->mon_last_linkdesc_paddr =
316 						buf_info.paddr;
317 					goto next_msdu;
318 				}
319 
320 				msdu_ppdu_id = hal_rx_hw_desc_get_ppduid_get(
321 						soc->hal_soc,
322 						rx_desc_tlv,
323 						rxdma_dst_ring_desc);
324 				is_first_msdu = false;
325 
326 				dp_rx_mon_dest_debug("%pK: msdu_ppdu_id=%x",
327 						     soc, msdu_ppdu_id);
328 
329 				if (*ppdu_id > msdu_ppdu_id)
330 					dp_rx_mon_dest_debug("%pK: ppdu_id=%d "
331 							     "msdu_ppdu_id=%d", soc,
332 							     *ppdu_id, msdu_ppdu_id);
333 
334 				if ((*ppdu_id < msdu_ppdu_id) && (
335 					(msdu_ppdu_id - *ppdu_id) <
336 						NOT_PPDU_ID_WRAP_AROUND)) {
337 					*ppdu_id = msdu_ppdu_id;
338 					return rx_bufs_used;
339 				} else if ((*ppdu_id > msdu_ppdu_id) && (
340 					(*ppdu_id - msdu_ppdu_id) >
341 						NOT_PPDU_ID_WRAP_AROUND)) {
342 					*ppdu_id = msdu_ppdu_id;
343 					return rx_bufs_used;
344 				}
345 
346 				dp_tx_capture_get_user_id(dp_pdev,
347 							  rx_desc_tlv);
348 
349 				if (*ppdu_id == msdu_ppdu_id)
350 					mon_pdev->rx_mon_stats.ppdu_id_match++;
351 				else
352 					mon_pdev->rx_mon_stats.ppdu_id_mismatch
353 						++;
354 
355 				mon_pdev->mon_last_linkdesc_paddr =
356 					buf_info.paddr;
357 
358 				if (dp_rx_mon_alloc_parent_buffer(head_msdu)
359 				    != QDF_STATUS_SUCCESS) {
360 					DP_STATS_INC(dp_pdev,
361 						     replenish.nbuf_alloc_fail,
362 						     1);
363 					qdf_frag_free(rx_desc_tlv);
364 					dp_rx_mon_dest_debug("failed to allocate parent buffer to hold all frag");
365 					drop_mpdu = true;
366 					goto next_msdu;
367 				}
368 			}
369 
370 			if (hal_rx_desc_is_first_msdu(soc->hal_soc,
371 						      rx_desc_tlv))
372 				hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc,
373 					rx_desc_tlv,
374 					&mon_pdev->ppdu_info.rx_status);
375 
376 			dp_rx_mon_parse_desc_buffer(soc,
377 						    &(msdu_list.msdu_info[i]),
378 						    &is_frag,
379 						    &total_frag_len,
380 						    &frag_len,
381 						    &l2_hdr_offset,
382 						    rx_desc_tlv,
383 						    &first_rx_desc_tlv,
384 						    &is_frag_non_raw, data);
385 			if (!is_frag)
386 				msdu_cnt--;
387 
388 			dp_rx_mon_dest_debug("total_len %u frag_len %u flags %u",
389 					     total_frag_len, frag_len,
390 				      msdu_list.msdu_info[i].msdu_flags);
391 
392 			rx_pkt_offset = dp_rx_mon_get_rx_pkt_tlv_size(soc);
393 
394 			rx_buf_size = rx_pkt_offset + l2_hdr_offset
395 					+ frag_len;
396 
397 			dp_rx_mon_buffer_set_pktlen(msdu, rx_buf_size);
398 #if 0
399 			/* Disabled: packets are seen with msdu_done set to 0 */
400 			/*
401 			 * Check if DMA completed -- msdu_done is the
402 			 * last bit to be written
403 			 */
404 			if (!hal_rx_attn_msdu_done_get(rx_desc_tlv)) {
405 
406 				QDF_TRACE(QDF_MODULE_ID_DP,
407 					  QDF_TRACE_LEVEL_ERROR,
408 					  "%s:%d: Pkt Desc",
409 					  __func__, __LINE__);
410 
411 				QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP,
412 					QDF_TRACE_LEVEL_ERROR,
413 					rx_desc_tlv, 128);
414 
415 				qdf_assert_always(0);
416 			}
417 #endif
418 			dp_rx_mon_dest_debug("%pK: rx_pkt_offset=%d, l2_hdr_offset=%d, msdu_len=%d, frag_len %u",
419 					     soc, rx_pkt_offset, l2_hdr_offset,
420 					     msdu_list.msdu_info[i].msdu_len,
421 					     frag_len);
422 
423 			if (dp_rx_mon_add_msdu_to_list(soc, head_msdu, msdu,
424 						       &last, rx_desc_tlv,
425 						       frag_len, l2_hdr_offset)
426 					!= QDF_STATUS_SUCCESS) {
427 				dp_rx_mon_add_msdu_to_list_failure_handler(rx_desc_tlv,
428 						dp_pdev, &last, head_msdu,
429 						tail_msdu, __func__);
430 				drop_mpdu = true;
431 				goto next_msdu;
432 			}
433 
434 next_msdu:
435 			mon_pdev->mon_last_buf_cookie = msdu_list.sw_cookie[i];
436 			rx_bufs_used++;
437 			dp_rx_add_to_free_desc_list(head,
438 				tail, rx_desc);
439 		}
440 
441 		/*
442 		 * Store the current link buffer into the local
443 		 * structure to be used for release purposes.
444 		 */
445 		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
446 					     buf_info.paddr,
447 					     buf_info.sw_cookie, buf_info.rbm);
448 
449 		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
450 					      &buf_info);
451 		if (dp_rx_monitor_link_desc_return(dp_pdev,
452 						   (hal_buff_addrinfo_t)
453 						   rx_link_buf_info,
454 						   mac_id,
455 						   bm_action)
456 						   != QDF_STATUS_SUCCESS)
457 			dp_err_rl("monitor link desc return failed");
458 	} while (buf_info.paddr && msdu_cnt);
459 
460 	dp_rx_mon_init_tail_msdu(head_msdu, msdu, last, tail_msdu);
461 	dp_rx_mon_remove_raw_frame_fcs_len(soc, head_msdu, tail_msdu);
462 
463 	return rx_bufs_used;
464 }
465 
466 #if !defined(DISABLE_MON_CONFIG) && \
467 	(defined(MON_ENABLE_DROP_FOR_NON_MON_PMAC) || \
468 	 defined(MON_ENABLE_DROP_FOR_MAC))
469 /**
470  * dp_rx_mon_drop_one_mpdu() - Drop one mpdu from one rxdma monitor destination
471  *			       ring.
472  * @pdev: DP pdev handle
473  * @mac_id: MAC id which is being currently processed
474  * @rxdma_dst_ring_desc: RXDMA monitor destination ring entry
475  * @head: HEAD of the rx_desc list to be freed
476  * @tail: TAIL of the rx_desc list to be freed
477  *
478  * Return: Number of msdus which are dropped.
479  */
480 static int dp_rx_mon_drop_one_mpdu(struct dp_pdev *pdev,
481 				   uint32_t mac_id,
482 				   hal_rxdma_desc_t rxdma_dst_ring_desc,
483 				   union dp_rx_desc_list_elem_t **head,
484 				   union dp_rx_desc_list_elem_t **tail)
485 {
486 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
487 	struct dp_soc *soc = pdev->soc;
488 	hal_soc_handle_t hal_soc = soc->hal_soc;
489 	struct hal_buf_info buf_info;
490 	uint32_t msdu_count = 0;
491 	uint32_t rx_bufs_used = 0;
492 	void *rx_msdu_link_desc;
493 	struct hal_rx_msdu_list msdu_list;
494 	uint16_t num_msdus;
495 	qdf_nbuf_t nbuf;
496 	uint32_t i;
497 	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
498 	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
499 	struct rx_desc_pool *rx_desc_pool;
500 
501 	rx_desc_pool = dp_rx_get_mon_desc_pool(soc, mac_id, pdev->pdev_id);
502 	hal_rx_reo_ent_buf_paddr_get(hal_soc, rxdma_dst_ring_desc,
503 				     &buf_info, &msdu_count);
504 
505 	do {
506 		rx_msdu_link_desc = dp_rx_cookie_2_mon_link_desc(pdev,
507 								 buf_info,
508 								 mac_id);
509 		if (qdf_unlikely(!rx_msdu_link_desc)) {
510 			mon_pdev->rx_mon_stats.mon_link_desc_invalid++;
511 			return rx_bufs_used;
512 		}
513 
514 		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
515 				     &msdu_list, &num_msdus);
516 
517 		for (i = 0; i < num_msdus; i++) {
518 			struct dp_rx_desc *rx_desc;
519 			qdf_dma_addr_t buf_paddr;
520 
521 			rx_desc = dp_rx_get_mon_desc(soc,
522 						     msdu_list.sw_cookie[i]);
523 
524 			if (qdf_unlikely(!rx_desc)) {
525 				mon_pdev->rx_mon_stats.
526 						mon_rx_desc_invalid++;
527 				continue;
528 			}
529 
530 			nbuf = DP_RX_MON_GET_NBUF_FROM_DESC(rx_desc);
531 			buf_paddr =
532 				 dp_rx_mon_get_paddr_from_desc(rx_desc);
533 
534 			if (qdf_unlikely(!rx_desc->in_use || !nbuf ||
535 					 msdu_list.paddr[i] !=
536 					 buf_paddr)) {
537 				mon_pdev->rx_mon_stats.
538 						mon_nbuf_sanity_err++;
539 				continue;
540 			}
541 			rx_bufs_used++;
542 
543 			if (!rx_desc->unmapped) {
544 				dp_rx_mon_buffer_unmap(soc, rx_desc,
545 						       rx_desc_pool->buf_size);
546 				rx_desc->unmapped = 1;
547 			}
548 
549 			qdf_nbuf_free(nbuf);
550 			dp_rx_add_to_free_desc_list(head, tail, rx_desc);
551 
552 			if (!(msdu_list.msdu_info[i].msdu_flags &
553 			      HAL_MSDU_F_MSDU_CONTINUATION))
554 				msdu_count--;
555 		}
556 
557 		/*
558 		 * Store the current link buffer into the local
559 		 * structure to be used for release purposes.
560 		 */
561 		hal_rxdma_buff_addr_info_set(soc->hal_soc,
562 					     rx_link_buf_info,
563 					     buf_info.paddr,
564 					     buf_info.sw_cookie,
565 					     buf_info.rbm);
566 
567 		hal_rx_mon_next_link_desc_get(soc->hal_soc,
568 					      rx_msdu_link_desc,
569 					      &buf_info);
570 		if (dp_rx_monitor_link_desc_return(pdev,
571 						   (hal_buff_addrinfo_t)
572 						   rx_link_buf_info,
573 						   mac_id, bm_action) !=
574 		    QDF_STATUS_SUCCESS)
575 			dp_info_rl("monitor link desc return failed");
576 	} while (buf_info.paddr && msdu_count);
577 
578 	return rx_bufs_used;
579 }
580 #endif
581 
582 #if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_NON_MON_PMAC)
583 /**
584  * dp_rx_mon_check_n_drop_mpdu() - Check whether the current MPDU comes from
585  *				   a PMAC other than the one which is being
586  *				   currently processed and, if so, drop the MPDU.
587  * @pdev: DP pdev handle
588  * @mac_id: MAC id which is being currently processed
589  * @rxdma_dst_ring_desc: RXDMA monitor destination ring entry
590  * @head: HEAD of the rx_desc list to be freed
591  * @tail: TAIL of the rx_desc list to be freed
592  * @rx_bufs_dropped: Number of msdus dropped
593  *
594  * Return: QDF_STATUS_SUCCESS, if the mpdu was to be dropped
595  *	   QDF_STATUS_E_INVAL/QDF_STATUS_E_FAILURE, if the mpdu was not dropped
596  */
597 static QDF_STATUS
598 dp_rx_mon_check_n_drop_mpdu(struct dp_pdev *pdev, uint32_t mac_id,
599 			    hal_rxdma_desc_t rxdma_dst_ring_desc,
600 			    union dp_rx_desc_list_elem_t **head,
601 			    union dp_rx_desc_list_elem_t **tail,
602 			    uint32_t *rx_bufs_dropped)
603 {
604 	struct dp_soc *soc = pdev->soc;
605 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
606 	uint32_t lmac_id = DP_MON_INVALID_LMAC_ID;
607 	uint8_t src_link_id;
608 	QDF_STATUS status;
609 
610 	if (mon_pdev->mon_chan_band == REG_BAND_UNKNOWN)
611 		return QDF_STATUS_E_INVAL;
612 
613 	lmac_id = pdev->ch_band_lmac_id_mapping[mon_pdev->mon_chan_band];
614 
615 	status = hal_rx_reo_ent_get_src_link_id(soc->hal_soc,
616 						rxdma_dst_ring_desc,
617 						&src_link_id);
618 	if (QDF_IS_STATUS_ERROR(status))
619 		return QDF_STATUS_E_INVAL;
620 
621 	if (src_link_id == lmac_id)
622 		return QDF_STATUS_E_INVAL;
623 
624 	*rx_bufs_dropped = dp_rx_mon_drop_one_mpdu(pdev, mac_id,
625 						   rxdma_dst_ring_desc,
626 						   head, tail);
627 
628 	return QDF_STATUS_SUCCESS;
629 }
630 #else
631 static inline QDF_STATUS
632 dp_rx_mon_check_n_drop_mpdu(struct dp_pdev *pdev, uint32_t mac_id,
633 			    hal_rxdma_desc_t rxdma_dst_ring_desc,
634 			    union dp_rx_desc_list_elem_t **head,
635 			    union dp_rx_desc_list_elem_t **tail,
636 			    uint32_t *rx_bufs_dropped)
637 {
638 	return QDF_STATUS_E_FAILURE;
639 }
640 #endif
641 
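/*
 * dp_rx_mon_dest_process() - Reap the RXDMA monitor destination ring for the
 * given mac_id: pop the MSDUs of each MPDU via dp_rx_mon_mpdu_pop(), deliver
 * completed MPDUs through dp_rx_mon_deliver(), and replenish the monitor
 * buffer ring with the number of buffers consumed.
 */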
642 void dp_rx_mon_dest_process(struct dp_soc *soc, struct dp_intr *int_ctx,
643 			    uint32_t mac_id, uint32_t quota)
644 {
645 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
646 	uint8_t pdev_id;
647 	hal_rxdma_desc_t rxdma_dst_ring_desc;
648 	hal_soc_handle_t hal_soc;
649 	void *mon_dst_srng;
650 	union dp_rx_desc_list_elem_t *head = NULL;
651 	union dp_rx_desc_list_elem_t *tail = NULL;
652 	uint32_t ppdu_id;
653 	uint32_t rx_bufs_used;
654 	uint32_t mpdu_rx_bufs_used;
655 	int mac_for_pdev = mac_id;
656 	struct cdp_pdev_mon_stats *rx_mon_stats;
657 	struct dp_mon_pdev *mon_pdev;
658 
659 	if (!pdev) {
660 		dp_rx_mon_dest_debug("%pK: pdev is null for mac_id = %d", soc, mac_id);
661 		return;
662 	}
663 
664 	mon_pdev = pdev->monitor_pdev;
665 	mon_dst_srng = dp_rxdma_get_mon_dst_ring(pdev, mac_for_pdev);
666 
667 	if (!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)) {
668 		dp_rx_mon_dest_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK",
669 				   soc, mon_dst_srng);
670 		return;
671 	}
672 
673 	hal_soc = soc->hal_soc;
674 
675 	qdf_assert((hal_soc && pdev));
676 
677 	qdf_spin_lock_bh(&mon_pdev->mon_lock);
678 
679 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, mon_dst_srng))) {
680 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
681 			  "%s %d : HAL Mon Dest Ring access Failed -- %pK",
682 			  __func__, __LINE__, mon_dst_srng);
683 		qdf_spin_unlock_bh(&mon_pdev->mon_lock);
684 		return;
685 	}
686 
687 	pdev_id = pdev->pdev_id;
688 	ppdu_id = mon_pdev->ppdu_info.com_info.ppdu_id;
689 	rx_bufs_used = 0;
690 	rx_mon_stats = &mon_pdev->rx_mon_stats;
691 
692 	while (qdf_likely(rxdma_dst_ring_desc =
693 		hal_srng_dst_peek(hal_soc, mon_dst_srng))) {
694 		qdf_nbuf_t head_msdu, tail_msdu;
695 		uint32_t npackets;
696 		uint32_t rx_bufs_dropped;
697 
698 		rx_bufs_dropped = 0;
699 		head_msdu = (qdf_nbuf_t)NULL;
700 		tail_msdu = (qdf_nbuf_t)NULL;
701 
702 		if (QDF_STATUS_SUCCESS ==
703 		    dp_rx_mon_check_n_drop_mpdu(pdev, mac_id,
704 						rxdma_dst_ring_desc,
705 						&head, &tail,
706 						&rx_bufs_dropped)) {
707 			/* Increment stats */
708 			rx_bufs_used += rx_bufs_dropped;
709 			hal_srng_dst_get_next(hal_soc, mon_dst_srng);
710 			continue;
711 		}
712 
713 		mpdu_rx_bufs_used =
714 			dp_rx_mon_mpdu_pop(soc, mac_id,
715 					   rxdma_dst_ring_desc,
716 					   &head_msdu, &tail_msdu,
717 					   &npackets, &ppdu_id,
718 					   &head, &tail);
719 
720 		rx_bufs_used += mpdu_rx_bufs_used;
721 
722 		if (mpdu_rx_bufs_used)
723 			mon_pdev->mon_dest_ring_stuck_cnt = 0;
724 		else
725 			mon_pdev->mon_dest_ring_stuck_cnt++;
726 
727 		if (mon_pdev->mon_dest_ring_stuck_cnt >
728 		    MON_DEST_RING_STUCK_MAX_CNT) {
729 			dp_info("destination ring stuck");
730 			dp_info("ppdu_id status=%d dest=%d",
731 				mon_pdev->ppdu_info.com_info.ppdu_id, ppdu_id);
732 			rx_mon_stats->mon_rx_dest_stuck++;
733 			mon_pdev->ppdu_info.com_info.ppdu_id = ppdu_id;
734 			continue;
735 		}
736 
737 		if (ppdu_id != mon_pdev->ppdu_info.com_info.ppdu_id) {
738 			rx_mon_stats->stat_ring_ppdu_id_hist[
739 				rx_mon_stats->ppdu_id_hist_idx] =
740 				mon_pdev->ppdu_info.com_info.ppdu_id;
741 			rx_mon_stats->dest_ring_ppdu_id_hist[
742 				rx_mon_stats->ppdu_id_hist_idx] = ppdu_id;
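			/* Advance the history write index: masking with
			 * (MAX_PPDU_ID_HIST - 1) wraps it around the circular
			 * PPDU id history (this assumes MAX_PPDU_ID_HIST is a
			 * power of two).
			 */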
743 			rx_mon_stats->ppdu_id_hist_idx =
744 				(rx_mon_stats->ppdu_id_hist_idx + 1) &
745 					(MAX_PPDU_ID_HIST - 1);
746 			mon_pdev->mon_ppdu_status = DP_PPDU_STATUS_START;
747 			qdf_mem_zero(&mon_pdev->ppdu_info.rx_status,
748 				     sizeof(mon_pdev->ppdu_info.rx_status));
749 			dp_rx_mon_dest_debug("%pK: ppdu_id %x != ppdu_info.com_info.ppdu_id %x",
750 					     soc, ppdu_id,
751 					     mon_pdev->ppdu_info.com_info.ppdu_id);
752 			break;
753 		}
754 
755 		if (qdf_likely((head_msdu) && (tail_msdu))) {
756 			rx_mon_stats->dest_mpdu_done++;
757 			dp_rx_mon_deliver(soc, mac_id, head_msdu, tail_msdu);
758 		}
759 
760 		rxdma_dst_ring_desc =
761 			hal_srng_dst_get_next(hal_soc,
762 					      mon_dst_srng);
763 	}
764 
765 	dp_srng_access_end(int_ctx, soc, mon_dst_srng);
766 
767 	qdf_spin_unlock_bh(&mon_pdev->mon_lock);
768 
769 	if (rx_bufs_used) {
770 		rx_mon_stats->dest_ppdu_done++;
771 		dp_rx_buffers_replenish(soc, mac_id,
772 					dp_rxdma_get_mon_buf_ring(pdev,
773 								  mac_for_pdev),
774 					dp_rx_get_mon_desc_pool(soc, mac_id,
775 								pdev_id),
776 					rx_bufs_used, &head, &tail, false);
777 	}
778 }
779 
780 QDF_STATUS
781 dp_rx_pdev_mon_buf_buffers_alloc(struct dp_pdev *pdev, uint32_t mac_id,
782 				 bool delayed_replenish)
783 {
784 	uint8_t pdev_id = pdev->pdev_id;
785 	struct dp_soc *soc = pdev->soc;
786 	struct dp_srng *mon_buf_ring;
787 	uint32_t num_entries;
788 	struct rx_desc_pool *rx_desc_pool;
789 	QDF_STATUS status = QDF_STATUS_SUCCESS;
790 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
791 
792 	mon_buf_ring = dp_rxdma_get_mon_buf_ring(pdev, mac_id);
793 
794 	num_entries = mon_buf_ring->num_entries;
795 
796 	rx_desc_pool = dp_rx_get_mon_desc_pool(soc, mac_id, pdev_id);
797 
798 	dp_debug("Mon RX Desc Pool[%d] entries=%u", pdev_id, num_entries);
799 
800 	/* Replenish the RXDMA monitor buffer ring with 8 buffers only.
801 	 * delayed_replenish_entries is actually 8, but when we call
802 	 * dp_pdev_rx_buffers_attach() we pass 1 less than that, hence
803 	 * 1 is added to delayed_replenish_entries to ensure we have 8
804 	 * entries. Once the monitor VAP is configured, we replenish
805 	 * the complete RXDMA monitor buffer ring.
806 	 */
807 	if (delayed_replenish) {
808 		num_entries = soc_cfg_ctx->delayed_replenish_entries + 1;
809 		status = dp_pdev_rx_buffers_attach(soc, mac_id, mon_buf_ring,
810 						   rx_desc_pool,
811 						   num_entries - 1);
812 	} else {
813 		union dp_rx_desc_list_elem_t *tail = NULL;
814 		union dp_rx_desc_list_elem_t *desc_list = NULL;
815 
816 		status = dp_rx_buffers_replenish(soc, mac_id,
817 						 mon_buf_ring,
818 						 rx_desc_pool,
819 						 num_entries,
820 						 &desc_list,
821 						 &tail, false);
822 	}
823 
824 	return status;
825 }
826 
827 void
828 dp_rx_pdev_mon_buf_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_id)
829 {
830 	uint8_t pdev_id = pdev->pdev_id;
831 	struct dp_soc *soc = pdev->soc;
832 	struct dp_srng *mon_buf_ring;
833 	uint32_t num_entries;
834 	struct rx_desc_pool *rx_desc_pool;
835 	uint32_t rx_desc_pool_size;
836 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
837 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
838 
839 	mon_buf_ring = &soc->rxdma_mon_buf_ring[mac_id];
840 
841 	num_entries = mon_buf_ring->num_entries;
842 
843 	rx_desc_pool = &soc->rx_desc_mon[mac_id];
844 
845 	/* If descriptor pool is already initialized, do not initialize it */
846 	if (rx_desc_pool->freelist)
847 		return;
848 
849 	dp_debug("Mon RX Desc buf Pool[%d] init entries=%u",
850 		 pdev_id, num_entries);
851 
852 	rx_desc_pool_size = wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc_cfg_ctx) *
853 		num_entries;
854 
855 	rx_desc_pool->owner = HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id);
856 	rx_desc_pool->buf_size = RX_MONITOR_BUFFER_SIZE;
857 	rx_desc_pool->buf_alignment = RX_MONITOR_BUFFER_ALIGNMENT;
858 	/* Enable frag processing if feature is enabled */
859 	dp_rx_enable_mon_dest_frag(rx_desc_pool, true);
860 
861 	dp_rx_desc_pool_init(soc, mac_id, rx_desc_pool_size, rx_desc_pool);
862 
863 	mon_pdev->mon_last_linkdesc_paddr = 0;
864 
865 	mon_pdev->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
866 
867 	/* Attach full monitor mode resources */
868 	dp_full_mon_attach(pdev);
869 }
870 
871 static void
872 dp_rx_pdev_mon_buf_desc_pool_deinit(struct dp_pdev *pdev, uint32_t mac_id)
873 {
874 	uint8_t pdev_id = pdev->pdev_id;
875 	struct dp_soc *soc = pdev->soc;
876 	struct rx_desc_pool *rx_desc_pool;
877 
878 	rx_desc_pool = &soc->rx_desc_mon[mac_id];
879 
880 	dp_debug("Mon RX Desc buf Pool[%d] deinit", pdev_id);
881 
882 	dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_id);
883 
884 	/* Detach full monitor mode resources */
885 	dp_full_mon_detach(pdev);
886 }
887 
888 static void
889 dp_rx_pdev_mon_buf_desc_pool_free(struct dp_pdev *pdev, uint32_t mac_id)
890 {
891 	uint8_t pdev_id = pdev->pdev_id;
892 	struct dp_soc *soc = pdev->soc;
893 	struct rx_desc_pool *rx_desc_pool;
894 
895 	rx_desc_pool = &soc->rx_desc_mon[mac_id];
896 
897 	dp_debug("Mon RX Buf Desc Pool Free pdev[%d]", pdev_id);
898 
899 	dp_rx_desc_pool_free(soc, rx_desc_pool);
900 }
901 
902 void dp_rx_pdev_mon_buf_buffers_free(struct dp_pdev *pdev, uint32_t mac_id)
903 {
904 	uint8_t pdev_id = pdev->pdev_id;
905 	struct dp_soc *soc = pdev->soc;
906 	struct rx_desc_pool *rx_desc_pool;
907 
908 	rx_desc_pool = &soc->rx_desc_mon[mac_id];
909 
910 	dp_debug("Mon RX Buf buffers Free pdev[%d]", pdev_id);
911 
912 	if (rx_desc_pool->rx_mon_dest_frag_enable)
913 		dp_rx_desc_frag_free(soc, rx_desc_pool);
914 	else
915 		dp_rx_desc_nbuf_free(soc, rx_desc_pool);
916 }
917 
918 QDF_STATUS
919 dp_rx_pdev_mon_buf_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_id)
920 {
921 	uint8_t pdev_id = pdev->pdev_id;
922 	struct dp_soc *soc = pdev->soc;
923 	struct dp_srng *mon_buf_ring;
924 	uint32_t num_entries;
925 	struct rx_desc_pool *rx_desc_pool;
926 	uint32_t rx_desc_pool_size;
927 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
928 
929 	mon_buf_ring = &soc->rxdma_mon_buf_ring[mac_id];
930 
931 	num_entries = mon_buf_ring->num_entries;
932 
933 	rx_desc_pool = &soc->rx_desc_mon[mac_id];
934 
935 	dp_debug("Mon RX Desc Pool[%d] entries=%u",
936 		 pdev_id, num_entries);
937 
938 	rx_desc_pool_size = wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc_cfg_ctx) *
939 		num_entries;
940 
941 	if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_SUCCESS)
942 		return QDF_STATUS_SUCCESS;
943 
944 	return dp_rx_desc_pool_alloc(soc, rx_desc_pool_size, rx_desc_pool);
945 }
946 
947 #if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
948 uint32_t
949 dp_mon_dest_srng_drop_for_mac(struct dp_pdev *pdev, uint32_t mac_id)
950 {
951 	struct dp_soc *soc = pdev->soc;
952 	hal_rxdma_desc_t rxdma_dst_ring_desc;
953 	hal_soc_handle_t hal_soc;
954 	void *mon_dst_srng;
955 	union dp_rx_desc_list_elem_t *head = NULL;
956 	union dp_rx_desc_list_elem_t *tail = NULL;
957 	uint32_t rx_bufs_used = 0;
958 	struct rx_desc_pool *rx_desc_pool;
959 	uint32_t reap_cnt = 0;
960 	struct dp_mon_pdev *mon_pdev;
961 
962 	if (qdf_unlikely(!soc || !soc->hal_soc))
963 		return reap_cnt;
964 
965 	mon_dst_srng = dp_rxdma_get_mon_dst_ring(pdev, mac_id);
966 
967 	if (qdf_unlikely(!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)))
968 		return reap_cnt;
969 
970 	hal_soc = soc->hal_soc;
971 	mon_pdev = pdev->monitor_pdev;
972 
973 	qdf_spin_lock_bh(&mon_pdev->mon_lock);
974 
975 	if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_dst_srng))) {
976 		qdf_spin_unlock_bh(&mon_pdev->mon_lock);
977 		return reap_cnt;
978 	}
979 
980 	rx_desc_pool = dp_rx_get_mon_desc_pool(soc, mac_id, pdev->pdev_id);
981 
982 	while ((rxdma_dst_ring_desc =
983 		hal_srng_dst_peek(hal_soc, mon_dst_srng)) &&
984 		reap_cnt < MON_DROP_REAP_LIMIT) {
985 
986 		rx_bufs_used += dp_rx_mon_drop_one_mpdu(pdev, mac_id,
987 							rxdma_dst_ring_desc,
988 							&head, &tail);
989 		reap_cnt++;
990 		rxdma_dst_ring_desc = hal_srng_dst_get_next(hal_soc,
991 							    mon_dst_srng);
992 	}
993 
994 	hal_srng_access_end(hal_soc, mon_dst_srng);
995 
996 	qdf_spin_unlock_bh(&mon_pdev->mon_lock);
997 
998 	if (rx_bufs_used) {
999 		dp_rx_buffers_replenish(soc, mac_id,
1000 					dp_rxdma_get_mon_buf_ring(pdev, mac_id),
1001 					rx_desc_pool,
1002 					rx_bufs_used, &head, &tail, false);
1003 	}
1004 
1005 	return reap_cnt;
1006 }
1007 #endif
1008 
1009 static void
1010 dp_rx_pdev_mon_dest_desc_pool_free(struct dp_pdev *pdev, int mac_for_pdev)
1011 {
1012 	struct dp_soc *soc = pdev->soc;
1013 
1014 	dp_rx_pdev_mon_buf_desc_pool_free(pdev, mac_for_pdev);
1015 	dp_hw_link_desc_pool_banks_free(soc, mac_for_pdev);
1016 }
1017 
1018 static void
1019 dp_rx_pdev_mon_dest_desc_pool_deinit(struct dp_pdev *pdev, int mac_for_pdev)
1020 {
1021 	struct dp_soc *soc = pdev->soc;
1022 
1023 	if (!soc->wlan_cfg_ctx->rxdma1_enable)
1024 		return;
1025 
1026 	dp_rx_pdev_mon_buf_desc_pool_deinit(pdev, mac_for_pdev);
1027 }
1028 
1029 static void
1030 dp_rx_pdev_mon_dest_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_for_pdev)
1031 {
1032 	struct dp_soc *soc = pdev->soc;
1033 
1034 	if (!soc->wlan_cfg_ctx->rxdma1_enable ||
1035 	    !wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx))
1036 		return;
1037 
1038 	dp_rx_pdev_mon_buf_desc_pool_init(pdev, mac_for_pdev);
1039 	dp_link_desc_ring_replenish(soc, mac_for_pdev);
1040 }
1041 
1042 static void
1043 dp_rx_pdev_mon_dest_buffers_free(struct dp_pdev *pdev, int mac_for_pdev)
1044 {
1045 	struct dp_soc *soc = pdev->soc;
1046 
1047 	if (!soc->wlan_cfg_ctx->rxdma1_enable)
1048 		return;
1049 
1050 	dp_rx_pdev_mon_buf_buffers_free(pdev, mac_for_pdev);
1051 }
1052 
1053 static QDF_STATUS
1054 dp_rx_pdev_mon_dest_buffers_alloc(struct dp_pdev *pdev, int mac_for_pdev)
1055 {
1056 	struct dp_soc *soc = pdev->soc;
1057 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
1058 	bool delayed_replenish;
1059 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1060 
1061 	delayed_replenish = soc_cfg_ctx->delayed_replenish_entries ? 1 : 0;
1062 	if (!soc->wlan_cfg_ctx->rxdma1_enable ||
1063 	    !wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx))
1064 		return status;
1065 
1066 	status = dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev,
1067 						  delayed_replenish);
1068 	if (!QDF_IS_STATUS_SUCCESS(status))
1069 		dp_err("dp_rx_pdev_mon_buf_buffers_alloc() failed");
1070 
1071 	return status;
1072 }
1073 
1074 static QDF_STATUS
1075 dp_rx_pdev_mon_dest_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_for_pdev)
1076 {
1077 	struct dp_soc *soc = pdev->soc;
1078 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1079 
1080 	if (!soc->wlan_cfg_ctx->rxdma1_enable ||
1081 	    !wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx))
1082 		return status;
1083 
1084 	/* Allocate sw rx descriptor pool for monitor RxDMA buffer ring */
1085 	status = dp_rx_pdev_mon_buf_desc_pool_alloc(pdev, mac_for_pdev);
1086 	if (!QDF_IS_STATUS_SUCCESS(status)) {
1087 		dp_err("dp_rx_pdev_mon_buf_desc_pool_alloc() failed");
1088 		goto fail;
1089 	}
1090 
1091 	/* Allocate link descriptors for the monitor link descriptor ring */
1092 	status = dp_hw_link_desc_pool_banks_alloc(soc, mac_for_pdev);
1093 	if (!QDF_IS_STATUS_SUCCESS(status)) {
1094 		dp_err("dp_hw_link_desc_pool_banks_alloc() failed");
1095 		goto mon_buf_dealloc;
1096 	}
1097 
1098 	return status;
1099 
1100 mon_buf_dealloc:
1101 	dp_rx_pdev_mon_buf_desc_pool_free(pdev, mac_for_pdev);
1102 fail:
1103 	return status;
1104 }
1105 #else
1106 static void
1107 dp_rx_pdev_mon_dest_desc_pool_free(struct dp_pdev *pdev, int mac_for_pdev)
1108 {
1109 }
1110 
1111 static void
1112 dp_rx_pdev_mon_dest_desc_pool_deinit(struct dp_pdev *pdev, int mac_for_pdev)
1113 {
1114 }
1115 
1116 static void
1117 dp_rx_pdev_mon_dest_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_for_pdev)
1118 {
1119 }
1120 
1121 static void
1122 dp_rx_pdev_mon_dest_buffers_free(struct dp_pdev *pdev, int mac_for_pdev)
1123 {
1124 }
1125 
1126 static QDF_STATUS
1127 dp_rx_pdev_mon_dest_buffers_alloc(struct dp_pdev *pdev, int mac_for_pdev)
1128 {
1129 	return QDF_STATUS_SUCCESS;
1130 }
1131 
1132 static QDF_STATUS
1133 dp_rx_pdev_mon_dest_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_for_pdev)
1134 {
1135 	return QDF_STATUS_SUCCESS;
1136 }
1137 
1138 #if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
1139 uint32_t
1140 dp_mon_dest_srng_drop_for_mac(struct dp_pdev *pdev, uint32_t mac_id)
1141 {
1142 	return 0;
1143 }
1144 #endif
1145 
1146 #if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_NON_MON_PMAC)
1147 static QDF_STATUS
1148 dp_rx_mon_check_n_drop_mpdu(struct dp_pdev *pdev, uint32_t mac_id,
1149 			    hal_rxdma_desc_t rxdma_dst_ring_desc,
1150 			    union dp_rx_desc_list_elem_t **head,
1151 			    union dp_rx_desc_list_elem_t **tail,
1152 			    uint32_t *rx_bufs_dropped)
1153 {
1154 	return QDF_STATUS_E_FAILURE;
1155 }
1156 #endif
1157 #endif
1158 
1159 static void
1160 dp_rx_pdev_mon_cmn_desc_pool_free(struct dp_pdev *pdev, int mac_id)
1161 {
1162 	struct dp_soc *soc = pdev->soc;
1163 	uint8_t pdev_id = pdev->pdev_id;
1164 	int mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id);
1165 
1166 	dp_rx_pdev_mon_status_desc_pool_free(pdev, mac_for_pdev);
1167 	dp_rx_pdev_mon_dest_desc_pool_free(pdev, mac_for_pdev);
1168 }
1169 
1170 void dp_rx_pdev_mon_desc_pool_free(struct dp_pdev *pdev)
1171 {
1172 	int mac_id;
1173 
1174 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++)
1175 		dp_rx_pdev_mon_cmn_desc_pool_free(pdev, mac_id);
1176 }
1177 
1178 static void
1179 dp_rx_pdev_mon_cmn_desc_pool_deinit(struct dp_pdev *pdev, int mac_id)
1180 {
1181 	struct dp_soc *soc = pdev->soc;
1182 	uint8_t pdev_id = pdev->pdev_id;
1183 	int mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id);
1184 
1185 	dp_rx_pdev_mon_status_desc_pool_deinit(pdev, mac_for_pdev);
1186 
1187 	dp_rx_pdev_mon_dest_desc_pool_deinit(pdev, mac_for_pdev);
1188 }
1189 
1190 void
1191 dp_rx_pdev_mon_desc_pool_deinit(struct dp_pdev *pdev)
1192 {
1193 	int mac_id;
1194 
1195 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++)
1196 		dp_rx_pdev_mon_cmn_desc_pool_deinit(pdev, mac_id);
1197 	qdf_spinlock_destroy(&pdev->monitor_pdev->mon_lock);
1198 }
1199 
1200 static void
1201 dp_rx_pdev_mon_cmn_desc_pool_init(struct dp_pdev *pdev, int mac_id)
1202 {
1203 	struct dp_soc *soc = pdev->soc;
1204 	uint32_t mac_for_pdev;
1205 
1206 	mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id);
1207 	dp_rx_pdev_mon_status_desc_pool_init(pdev, mac_for_pdev);
1208 
1209 	dp_rx_pdev_mon_dest_desc_pool_init(pdev, mac_for_pdev);
1210 }
1211 
1212 void
1213 dp_rx_pdev_mon_desc_pool_init(struct dp_pdev *pdev)
1214 {
1215 	int mac_id;
1216 
1217 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++)
1218 		dp_rx_pdev_mon_cmn_desc_pool_init(pdev, mac_id);
1219 	qdf_spinlock_create(&pdev->monitor_pdev->mon_lock);
1220 }
1221 
1222 static void
1223 dp_rx_pdev_mon_cmn_buffers_free(struct dp_pdev *pdev, int mac_id)
1224 {
1225 	uint8_t pdev_id = pdev->pdev_id;
1226 	int mac_for_pdev;
1227 
1228 	mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id, pdev_id);
1229 	dp_rx_pdev_mon_status_buffers_free(pdev, mac_for_pdev);
1230 
1231 	dp_rx_pdev_mon_dest_buffers_free(pdev, mac_for_pdev);
1232 }
1233 
1234 void
1235 dp_rx_pdev_mon_buffers_free(struct dp_pdev *pdev)
1236 {
1237 	int mac_id;
1238 
1239 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++)
1240 		dp_rx_pdev_mon_cmn_buffers_free(pdev, mac_id);
1241 	pdev->monitor_pdev->pdev_mon_init = 0;
1242 }
1243 
1244 QDF_STATUS
1245 dp_rx_pdev_mon_buffers_alloc(struct dp_pdev *pdev)
1246 {
1247 	int mac_id;
1248 	int mac_for_pdev;
1249 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1250 	uint8_t pdev_id = pdev->pdev_id;
1251 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = pdev->soc->wlan_cfg_ctx;
1252 
1253 	for (mac_id = 0; mac_id < soc_cfg_ctx->num_rxdma_status_rings_per_pdev;
1254 	     mac_id++) {
1255 		mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id,
1256 							  pdev_id);
1257 		status = dp_rx_pdev_mon_status_buffers_alloc(pdev,
1258 							     mac_for_pdev);
1259 		if (!QDF_IS_STATUS_SUCCESS(status)) {
1260 			dp_err("dp_rx_pdev_mon_status_buffers_alloc() failed");
1261 			goto mon_status_buf_fail;
1262 		}
1263 	}
1264 
1265 	for (mac_id = 0; mac_id < soc_cfg_ctx->num_rxdma_dst_rings_per_pdev;
1266 	     mac_id++) {
1267 		mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id,
1268 							  pdev_id);
1269 		status = dp_rx_pdev_mon_dest_buffers_alloc(pdev, mac_for_pdev);
1270 		if (!QDF_IS_STATUS_SUCCESS(status))
1271 			goto mon_stat_buf_dealloc;
1272 	}
1273 
1274 	return status;
1275 
1276 mon_stat_buf_dealloc:
1277 	dp_rx_pdev_mon_status_buffers_free(pdev, mac_for_pdev);
1278 mon_status_buf_fail:
1279 	return status;
1280 }
1281 
1282 static QDF_STATUS
1283 dp_rx_pdev_mon_cmn_desc_pool_alloc(struct dp_pdev *pdev, int mac_id)
1284 {
1285 	struct dp_soc *soc = pdev->soc;
1286 	uint8_t pdev_id = pdev->pdev_id;
1287 	uint32_t mac_for_pdev;
1288 	QDF_STATUS status;
1289 
1290 	mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id);
1291 
1292 	/* Allocate sw rx descriptor pool for monitor status ring */
1293 	status = dp_rx_pdev_mon_status_desc_pool_alloc(pdev, mac_for_pdev);
1294 	if (!QDF_IS_STATUS_SUCCESS(status)) {
1295 		dp_err("dp_rx_pdev_mon_status_desc_pool_alloc() failed");
1296 		goto fail;
1297 	}
1298 
1299 	status = dp_rx_pdev_mon_dest_desc_pool_alloc(pdev, mac_for_pdev);
1300 	if (!QDF_IS_STATUS_SUCCESS(status))
1301 		goto mon_status_dealloc;
1302 
1303 	return status;
1304 
1305 mon_status_dealloc:
1306 	dp_rx_pdev_mon_status_desc_pool_free(pdev, mac_for_pdev);
1307 fail:
1308 	return status;
1309 }
1310 
1311 QDF_STATUS
1312 dp_rx_pdev_mon_desc_pool_alloc(struct dp_pdev *pdev)
1313 {
1314 	QDF_STATUS status;
1315 	int mac_id, count;
1316 
1317 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
1318 		status = dp_rx_pdev_mon_cmn_desc_pool_alloc(pdev, mac_id);
1319 		if (!QDF_IS_STATUS_SUCCESS(status)) {
1320 			dp_rx_mon_dest_err("%pK: %d failed\n",
1321 					   pdev->soc, mac_id);
1322 
1323 			for (count = 0; count < mac_id; count++)
1324 				dp_rx_pdev_mon_cmn_desc_pool_free(pdev, count);
1325 
1326 			return status;
1327 		}
1328 	}
1329 	return status;
1330 }
1331 
1332 #ifdef QCA_WIFI_MONITOR_MODE_NO_MSDU_START_TLV_SUPPORT
1333 static inline void
1334 hal_rx_populate_buf_info(struct dp_soc *soc,
1335 			 struct hal_rx_mon_dest_buf_info *buf_info,
1336 			 void *rx_desc)
1337 {
1338 	hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_desc,
1339 				      (uint8_t *)buf_info,
1340 				      sizeof(*buf_info));
1341 }
1342 
1343 static inline uint8_t
1344 hal_rx_frag_msdu_get_l2_hdr_offset(struct dp_soc *soc,
1345 				   struct hal_rx_mon_dest_buf_info *buf_info,
1346 				   void *rx_desc, bool is_first_frag)
1347 {
1348 	if (is_first_frag)
1349 		return buf_info->l2_hdr_pad;
1350 	else
1351 		return DP_RX_MON_RAW_L2_HDR_PAD_BYTE;
1352 }
1353 #else
1354 static inline void
1355 hal_rx_populate_buf_info(struct dp_soc *soc,
1356 			 struct hal_rx_mon_dest_buf_info *buf_info,
1357 			 void *rx_desc)
1358 {
1359 	if (hal_rx_tlv_decap_format_get(soc->hal_soc, rx_desc) ==
1360 	    HAL_HW_RX_DECAP_FORMAT_RAW)
1361 		buf_info->is_decap_raw = 1;
1362 
1363 	if (hal_rx_tlv_mpdu_len_err_get(soc->hal_soc, rx_desc))
1364 		buf_info->mpdu_len_err = 1;
1365 }
1366 
1367 static inline uint8_t
1368 hal_rx_frag_msdu_get_l2_hdr_offset(struct dp_soc *soc,
1369 				   struct hal_rx_mon_dest_buf_info *buf_info,
1370 				   void *rx_desc, bool is_first_frag)
1371 {
1372 	return hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_desc);
1373 }
1374 #endif
1375 
1376 static inline
1377 void dp_rx_msdus_set_payload(struct dp_soc *soc, qdf_nbuf_t msdu,
1378 			     uint8_t l2_hdr_offset)
1379 {
1380 	uint8_t *data;
1381 	uint32_t rx_pkt_offset;
1382 
1383 	data = qdf_nbuf_data(msdu);
1384 	rx_pkt_offset = dp_rx_mon_get_rx_pkt_tlv_size(soc);
1385 	qdf_nbuf_pull_head(msdu, rx_pkt_offset + l2_hdr_offset);
1386 }
1387 
1388 static inline qdf_nbuf_t
1389 dp_rx_mon_restitch_mpdu_from_msdus(struct dp_soc *soc,
1390 				   uint32_t mac_id,
1391 				   qdf_nbuf_t head_msdu,
1392 				   qdf_nbuf_t last_msdu,
1393 				   struct cdp_mon_status *rx_status)
1394 {
1395 	qdf_nbuf_t msdu, mpdu_buf, prev_buf, msdu_orig, head_frag_list;
1396 	uint32_t wifi_hdr_len, sec_hdr_len, msdu_llc_len,
1397 		mpdu_buf_len, decap_hdr_pull_bytes, frag_list_sum_len, dir,
1398 		is_amsdu, is_first_frag, amsdu_pad;
1399 	void *rx_desc;
1400 	char *hdr_desc;
1401 	unsigned char *dest;
1402 	struct ieee80211_frame *wh;
1403 	struct ieee80211_qoscntl *qos;
1404 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1405 	struct dp_mon_pdev *mon_pdev;
1406 	struct hal_rx_mon_dest_buf_info buf_info;
1407 	uint8_t l2_hdr_offset;
1408 
1409 	head_frag_list = NULL;
1410 	mpdu_buf = NULL;
1411 
1412 	if (qdf_unlikely(!dp_pdev)) {
1413 		dp_rx_mon_dest_debug("%pK: pdev is null for mac_id = %d",
1414 				     soc, mac_id);
1415 		return NULL;
1416 	}
1417 
1418 	mon_pdev = dp_pdev->monitor_pdev;
1419 
1420 	/* The nbuf has been pulled just beyond the status and points to the
1421 	 * payload
1422 	 */
1423 	if (!head_msdu)
1424 		goto mpdu_stitch_fail;
1425 
1426 	msdu_orig = head_msdu;
1427 
1428 	rx_desc = qdf_nbuf_data(msdu_orig);
1429 	qdf_mem_zero(&buf_info, sizeof(buf_info));
1430 	hal_rx_populate_buf_info(soc, &buf_info, rx_desc);
1431 
1432 	if (buf_info.mpdu_len_err) {
1433 		/* It looks like there is some issue with the MPDU len err case */
1434 		/* Needs further investigation on whether to drop the packet */
1435 		DP_STATS_INC(dp_pdev, dropped.mon_rx_drop, 1);
1436 		return NULL;
1437 	}
1438 
1439 	rx_desc = qdf_nbuf_data(last_msdu);
1440 
1441 	rx_status->cdp_rs_fcs_err = hal_rx_tlv_mpdu_fcs_err_get(soc->hal_soc,
1442 								rx_desc);
1443 	mon_pdev->ppdu_info.rx_status.rs_fcs_err = rx_status->cdp_rs_fcs_err;
1444 
1445 	/* Fill out the rx_status from the PPDU start and end fields */
1446 	/*   HAL_RX_GET_PPDU_STATUS(soc, mac_id, rx_status); */
1447 
1448 	rx_desc = qdf_nbuf_data(head_msdu);
1449 
1450 	/* Easy case - The MSDU status indicates that this is a non-decapped
1451 	 * packet in RAW mode.
1452 	 */
1453 	if (buf_info.is_decap_raw) {
1454 		/* Note that this path might suffer from headroom unavailability
1455 		 * - but the RX status is usually enough
1456 		 */
1457 
1458 		l2_hdr_offset = hal_rx_frag_msdu_get_l2_hdr_offset(soc,
1459 								   &buf_info,
1460 								   rx_desc,
1461 								   true);
1462 		dp_rx_msdus_set_payload(soc, head_msdu, l2_hdr_offset);
1463 
1464 		dp_rx_mon_dest_debug("%pK: decap format raw head %pK head->next %pK last_msdu %pK last_msdu->next %pK",
1465 				     soc, head_msdu, head_msdu->next,
1466 				     last_msdu, last_msdu->next);
1467 
1468 		mpdu_buf = head_msdu;
1469 
1470 		prev_buf = mpdu_buf;
1471 
1472 		frag_list_sum_len = 0;
1473 		msdu = qdf_nbuf_next(head_msdu);
1474 		is_first_frag = 1;
1475 
1476 		while (msdu) {
1477 			l2_hdr_offset = hal_rx_frag_msdu_get_l2_hdr_offset(
1478 							soc, &buf_info,
1479 							rx_desc, false);
1480 			dp_rx_msdus_set_payload(soc, msdu, l2_hdr_offset);
1481 
1482 			if (is_first_frag) {
1483 				is_first_frag = 0;
1484 				head_frag_list  = msdu;
1485 			}
1486 
1487 			frag_list_sum_len += qdf_nbuf_len(msdu);
1488 
1489 			/* Maintain the linking of the cloned MSDUs */
1490 			qdf_nbuf_set_next_ext(prev_buf, msdu);
1491 
1492 			/* Move to the next */
1493 			prev_buf = msdu;
1494 			msdu = qdf_nbuf_next(msdu);
1495 		}
1496 
1497 		qdf_nbuf_trim_tail(prev_buf, HAL_RX_FCS_LEN);
1498 
1499 		/* If there were more fragments to this RAW frame */
1500 		if (head_frag_list) {
1501 			if (frag_list_sum_len <
1502 				sizeof(struct ieee80211_frame_min_one)) {
1503 				DP_STATS_INC(dp_pdev, dropped.mon_rx_drop, 1);
1504 				return NULL;
1505 			}
1506 			frag_list_sum_len -= HAL_RX_FCS_LEN;
1507 			qdf_nbuf_append_ext_list(mpdu_buf, head_frag_list,
1508 						 frag_list_sum_len);
1509 			qdf_nbuf_set_next(mpdu_buf, NULL);
1510 		}
1511 
1512 		goto mpdu_stitch_done;
1513 	}
1514 
1515 	/* Decap mode:
1516 	 * Calculate the amount of header in decapped packet to knock off based
1517 	 * on the decap type and the corresponding number of raw bytes to copy
1518 	 * status header
1519 	 */
1520 	rx_desc = qdf_nbuf_data(head_msdu);
1521 
1522 	hdr_desc = hal_rx_desc_get_80211_hdr(soc->hal_soc, rx_desc);
1523 
1524 	dp_rx_mon_dest_debug("%pK: decap format not raw", soc);
1525 
1526 	/* Base size */
1527 	wifi_hdr_len = sizeof(struct ieee80211_frame);
1528 	wh = (struct ieee80211_frame *)hdr_desc;
1529 
1530 	dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
1531 
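	/* 4-address (DS-to-DS/WDS) frames carry an extra 6-byte Addr4 field */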
1532 	if (dir == IEEE80211_FC1_DIR_DSTODS)
1533 		wifi_hdr_len += 6;
1534 
1535 	is_amsdu = 0;
1536 	if (wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) {
1537 		qos = (struct ieee80211_qoscntl *)
1538 			(hdr_desc + wifi_hdr_len);
1539 		wifi_hdr_len += 2;
1540 
1541 		is_amsdu = (qos->i_qos[0] & IEEE80211_QOS_AMSDU);
1542 	}
1543 
1544 	/* Calculate security header length based on 'Protected'
1545 	 * and 'EXT_IV' flag
1546 	 */
1547 	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
1548 		char *iv = (char *)wh + wifi_hdr_len;
1549 
1550 		if (iv[3] & KEY_EXTIV)
1551 			sec_hdr_len = 8;
1552 		else
1553 			sec_hdr_len = 4;
1554 	} else {
1555 		sec_hdr_len = 0;
1556 	}
1557 	wifi_hdr_len += sec_hdr_len;
1558 
1559 	/* MSDU related stuff LLC - AMSDU subframe header etc */
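	/* 8 bytes of LLC/SNAP header per MSDU, plus the 14-byte A-MSDU
	 * subframe header (DA + SA + length) when the MPDU is an A-MSDU.
	 */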
1560 	msdu_llc_len = is_amsdu ? (14 + 8) : 8;
1561 
1562 	mpdu_buf_len = wifi_hdr_len + msdu_llc_len;
1563 
1564 	/* "Decap" header to remove from MSDU buffer */
1565 	decap_hdr_pull_bytes = 14;
1566 
1567 	/* Allocate a new nbuf for holding the 802.11 header retrieved from the
1568 	 * status of the now decapped first msdu. Leave enough headroom for
1569 	 * accommodating any radio-tap /prism like PHY header
1570 	 */
1571 	mpdu_buf = qdf_nbuf_alloc(soc->osdev,
1572 				  MAX_MONITOR_HEADER + mpdu_buf_len,
1573 				  MAX_MONITOR_HEADER, 4, FALSE);
1574 
1575 	if (!mpdu_buf)
1576 		goto mpdu_stitch_done;
1577 
1578 	/* Copy the MPDU related header and enc headers into the first buffer
1579 	 * - Note that there can be a 2 byte pad between the header and enc header
1580 	 */
1581 
1582 	prev_buf = mpdu_buf;
1583 	dest = qdf_nbuf_put_tail(prev_buf, wifi_hdr_len);
1584 	if (!dest)
1585 		goto mpdu_stitch_fail;
1586 
1587 	qdf_mem_copy(dest, hdr_desc, wifi_hdr_len);
1588 	hdr_desc += wifi_hdr_len;
1589 
1590 #if 0
1591 	dest = qdf_nbuf_put_tail(prev_buf, sec_hdr_len);
1592 	adf_os_mem_copy(dest, hdr_desc, sec_hdr_len);
1593 	hdr_desc += sec_hdr_len;
1594 #endif
1595 
1596 	/* The first LLC len is copied into the MPDU buffer */
1597 	frag_list_sum_len = 0;
1598 
1599 	msdu_orig = head_msdu;
1600 	is_first_frag = 1;
1601 	amsdu_pad = 0;
1602 
1603 	while (msdu_orig) {
1604 
1605 		/* TODO: intra AMSDU padding - do we need it ??? */
1606 
1607 		msdu = msdu_orig;
1608 
1609 		if (is_first_frag) {
1610 			head_frag_list  = msdu;
1611 		} else {
1612 			/* Reload the hdr ptr only on non-first MSDUs */
1613 			rx_desc = qdf_nbuf_data(msdu_orig);
1614 			hdr_desc = hal_rx_desc_get_80211_hdr(soc->hal_soc,
1615 							     rx_desc);
1616 		}
1617 
1618 		/* Copy this buffer's MSDU related status into the prev buffer */
1619 
1620 		if (is_first_frag)
1621 			is_first_frag = 0;
1622 
1623 		/* Update protocol and flow tag for MSDU */
1624 		dp_rx_mon_update_protocol_flow_tag(soc, dp_pdev,
1625 						   msdu_orig, rx_desc);
1626 
1627 		dest = qdf_nbuf_put_tail(prev_buf,
1628 					 msdu_llc_len + amsdu_pad);
1629 
1630 		if (!dest)
1631 			goto mpdu_stitch_fail;
1632 
1633 		dest += amsdu_pad;
1634 		qdf_mem_copy(dest, hdr_desc, msdu_llc_len);
1635 
1636 		l2_hdr_offset = hal_rx_frag_msdu_get_l2_hdr_offset(soc,
1637 								   &buf_info,
1638 								   rx_desc,
1639 								   true);
1640 		dp_rx_msdus_set_payload(soc, msdu, l2_hdr_offset);
1641 
1642 		/* Push the MSDU buffer beyond the decap header */
1643 		qdf_nbuf_pull_head(msdu, decap_hdr_pull_bytes);
1644 		frag_list_sum_len += msdu_llc_len + qdf_nbuf_len(msdu)
1645 			+ amsdu_pad;
1646 
1647 		/* Set up intra-AMSDU pad to be added to start of next buffer -
1648 		 * AMSDU pad is 4 byte pad on AMSDU subframe
1649 		 */
1650 		amsdu_pad = (msdu_llc_len + qdf_nbuf_len(msdu)) & 0x3;
1651 		amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;
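		/* Example: a subframe with msdu_llc_len + qdf_nbuf_len(msdu)
		 * equal to 70 bytes gives 70 & 0x3 = 2, so amsdu_pad = 2 and
		 * the next A-MSDU subframe starts on a 4-byte boundary (72).
		 */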
1652 
1653 		/* TODO FIXME: How do we handle MSDUs that have a fraglist? We
1654 		 * should probably iterate over all the frags, cloning them
1655 		 * along the way and also updating the prev_buf pointer.
1656 		 */
1657 
1658 		/* Move to the next */
1659 		prev_buf = msdu;
1660 		msdu_orig = qdf_nbuf_next(msdu_orig);
1661 	}
1662 
1663 #if 0
1664 	/* Add in the trailer section - encryption trailer + FCS */
1665 	qdf_nbuf_put_tail(prev_buf, HAL_RX_FCS_LEN);
1666 	frag_list_sum_len += HAL_RX_FCS_LEN;
1667 #endif
1668 
1669 	frag_list_sum_len -= msdu_llc_len;
1670 
1671 	/* TODO: Convert this to suitable adf routines */
1672 	qdf_nbuf_append_ext_list(mpdu_buf, head_frag_list,
1673 				 frag_list_sum_len);
1674 
1675 	dp_rx_mon_dest_debug("%pK: mpdu_buf %pK mpdu_buf->len %u",
1676 			     soc, mpdu_buf, mpdu_buf->len);
1677 
1678 mpdu_stitch_done:
1679 	/* Check if this buffer contains the PPDU end status for TSF */
1680 	/* Need to revisit this code to see where we can get the tsf timestamp */
1681 #if 0
1682 	/* PPDU end TLV will be retrieved from monitor status ring */
1683 	last_mpdu =
1684 		(*(((u_int32_t *)&rx_desc->attention)) &
1685 		RX_ATTENTION_0_LAST_MPDU_MASK) >>
1686 		RX_ATTENTION_0_LAST_MPDU_LSB;
1687 
1688 	if (last_mpdu)
1689 		rx_status->rs_tstamp.tsf = rx_desc->ppdu_end.tsf_timestamp;
1690 
1691 #endif
1692 	return mpdu_buf;
1693 
1694 mpdu_stitch_fail:
1695 	if ((mpdu_buf) && !buf_info.is_decap_raw) {
1696 		dp_rx_mon_dest_err("%pK: mpdu_stitch_fail mpdu_buf %pK",
1697 				   soc, mpdu_buf);
1698 		/* Free the head buffer */
1699 		qdf_nbuf_free(mpdu_buf);
1700 	}
1701 	return NULL;
1702 }
1703 
1704 #ifdef DP_RX_MON_MEM_FRAG
1705 /**
1706  * dp_rx_mon_fraglist_prepare() - Prepare nbuf fraglist from chained skb
1707  *
1708  * @head_msdu: Parent SKB
1709  * @tail_msdu: Last skb in the chained list
1710  *
1711  * Return: Void
1712  */
1713 void dp_rx_mon_fraglist_prepare(qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu)
1714 {
1715 	qdf_nbuf_t msdu, mpdu_buf, prev_buf, head_frag_list;
1716 	uint32_t frag_list_sum_len;
1717 
1718 	dp_err("[%s][%d] decap format raw head %pK head->next %pK last_msdu %pK last_msdu->next %pK",
1719 	       __func__, __LINE__, head_msdu, head_msdu->next,
1720 	       tail_msdu, tail_msdu->next);
1721 
1722 	/* Single skb accommodating an MPDU worth of data */
1723 	if (tail_msdu == head_msdu)
1724 		return;
1725 
1726 	mpdu_buf = head_msdu;
1727 	prev_buf = mpdu_buf;
1728 	frag_list_sum_len = 0;
1729 
1730 	msdu = qdf_nbuf_next(head_msdu);
1731 	/* msdu can't be NULL here as this is the multiple skb case */
1732 
1733 	/* Head frag list to point to second skb */
1734 	head_frag_list  = msdu;
1735 
1736 	while (msdu) {
1737 		frag_list_sum_len += qdf_nbuf_len(msdu);
1738 		prev_buf = msdu;
1739 		msdu = qdf_nbuf_next(msdu);
1740 	}
1741 
1742 	qdf_nbuf_append_ext_list(mpdu_buf, head_frag_list, frag_list_sum_len);
1743 
1744 	/* Make Parent skb next to NULL */
1745 	qdf_nbuf_set_next(mpdu_buf, NULL);
1746 }
1747 
1748 /**
1749  * dp_rx_mon_frag_restitch_mpdu_from_msdus() - Restitch logic to
1750  *      convert to an 802.3 header and adjust the frag memory to point to
1751  *      the dot3 header and payload in case of a Non-Raw frame.
1752  *
1753  * @soc: struct dp_soc *
1754  * @mac_id: MAC id
1755  * @head_msdu: MPDU containing all MSDUs as frags
1756  * @tail_msdu: last skb which accommodates the MPDU info
1757  * @rx_status: struct cdp_mon_status *
1758  *
1759  * Return: Adjusted nbuf containing MPDU worth info.
1760  */
1761 static inline qdf_nbuf_t
1762 dp_rx_mon_frag_restitch_mpdu_from_msdus(struct dp_soc *soc,
1763 					uint32_t mac_id,
1764 					qdf_nbuf_t head_msdu,
1765 					qdf_nbuf_t tail_msdu,
1766 					struct cdp_mon_status *rx_status)
1767 {
1768 	uint32_t wifi_hdr_len, sec_hdr_len, msdu_llc_len,
1769 		mpdu_buf_len, decap_hdr_pull_bytes, dir,
1770 		is_amsdu, amsdu_pad, frag_size, tot_msdu_len;
1771 	qdf_frag_t rx_desc, rx_src_desc, rx_dest_desc, frag_addr;
1772 	char *hdr_desc;
1773 	uint8_t num_frags, frags_iter, l2_hdr_offset;
1774 	struct ieee80211_frame *wh;
1775 	struct ieee80211_qoscntl *qos;
1776 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1777 	int16_t frag_page_offset = 0;
1778 	struct hal_rx_mon_dest_buf_info buf_info;
1779 	uint32_t pad_byte_pholder = 0;
1780 	qdf_nbuf_t msdu_curr;
1781 	uint16_t rx_mon_tlv_size = soc->rx_mon_pkt_tlv_size;
1782 	struct dp_mon_pdev *mon_pdev;
1783 
1784 	if (qdf_unlikely(!dp_pdev)) {
1785 		dp_rx_mon_dest_debug("%pK: pdev is null for mac_id = %d",
1786 				     soc, mac_id);
1787 		return NULL;
1788 	}
1789 
1790 	mon_pdev = dp_pdev->monitor_pdev;
1791 	qdf_mem_zero(&buf_info, sizeof(struct hal_rx_mon_dest_buf_info));
1792 
1793 	if (!head_msdu || !tail_msdu)
1794 		goto mpdu_stitch_fail;
1795 
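	/*
	 * The monitor packet TLVs sit immediately before the payload in the
	 * same buffer, so stepping back rx_mon_pkt_tlv_size bytes from the
	 * first frag address lands on the Rx TLV header.
	 */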
1796 	rx_desc = qdf_nbuf_get_frag_addr(head_msdu, 0) - rx_mon_tlv_size;
1797 
1798 	if (hal_rx_tlv_mpdu_len_err_get(soc->hal_soc, rx_desc)) {
1799 		/* It looks like there is some issue with the MPDU len err */
1800 		/* Need to investigate further whether to drop the packet */
1801 		DP_STATS_INC(dp_pdev, dropped.mon_rx_drop, 1);
1802 		return NULL;
1803 	}
1804 
1805 	/* Look for FCS error */
1806 	num_frags = qdf_nbuf_get_nr_frags(tail_msdu);
1807 	rx_desc = qdf_nbuf_get_frag_addr(tail_msdu, num_frags - 1) -
1808 				rx_mon_tlv_size;
1809 	rx_status->cdp_rs_fcs_err = hal_rx_tlv_mpdu_fcs_err_get(soc->hal_soc,
1810 								rx_desc);
1811 	mon_pdev->ppdu_info.rx_status.rs_fcs_err = rx_status->cdp_rs_fcs_err;
1812 
1813 	rx_desc = qdf_nbuf_get_frag_addr(head_msdu, 0) - rx_mon_tlv_size;
1814 	hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_desc,
1815 				      (uint8_t *)&buf_info,
1816 				      sizeof(buf_info));
1817 
1818 	/* Easy case - The MSDU status indicates that this is a non-decapped
1819 	 * packet in RAW mode.
1820 	 */
1821 	if (buf_info.is_decap_raw == 1) {
1822 		dp_rx_mon_fraglist_prepare(head_msdu, tail_msdu);
1823 		goto mpdu_stitch_done;
1824 	}
1825 
1826 	l2_hdr_offset = DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE;
1827 
1828 	/* Decap mode:
1829 	 * Calculate the amount of header to strip from the decapped packet
1830 	 * based on the decap type, and the corresponding number of raw bytes
1831 	 * to copy from the status header
1832 	 */
1833 	hdr_desc = hal_rx_desc_get_80211_hdr(soc->hal_soc, rx_desc);
1834 
1835 	dp_rx_mon_dest_debug("%pK: decap format not raw", soc);
1836 
1837 	/* Base size */
1838 	wifi_hdr_len = sizeof(struct ieee80211_frame);
1839 	wh = (struct ieee80211_frame *)hdr_desc;
1840 
1841 	dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
1842 
1843 	if (dir == IEEE80211_FC1_DIR_DSTODS)
1844 		wifi_hdr_len += 6;
1845 
1846 	is_amsdu = 0;
1847 	if (wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) {
1848 		qos = (struct ieee80211_qoscntl *)
1849 			(hdr_desc + wifi_hdr_len);
1850 		wifi_hdr_len += 2;
1851 
1852 		is_amsdu = (qos->i_qos[0] & IEEE80211_QOS_AMSDU);
1853 	}
1854 
1855 	/* Calculate security header length based on the 'Protected'
1856 	 * and 'EXT_IV' flags
1857 	 */
1858 	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
1859 		char *iv = (char *)wh + wifi_hdr_len;
1860 
1861 		if (iv[3] & KEY_EXTIV)
1862 			sec_hdr_len = 8;
1863 		else
1864 			sec_hdr_len = 4;
1865 	} else {
1866 		sec_hdr_len = 0;
1867 	}
1868 	wifi_hdr_len += sec_hdr_len;
1869 
1870 	/* MSDU-related headers: LLC/SNAP and A-MSDU subframe header, etc. */
1871 	msdu_llc_len = is_amsdu ? (14 + 8) : 8;
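	/*
	 * The 8 bytes are the LLC/SNAP header; the extra 14 bytes are the
	 * A-MSDU subframe header (DA + SA + length) when the frame is an
	 * A-MSDU.
	 */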
1872 
1873 	mpdu_buf_len = wifi_hdr_len + msdu_llc_len;
1874 
1875 	/* "Decap" header to remove from MSDU buffer */
1876 	decap_hdr_pull_bytes = 14;
1877 
1878 	amsdu_pad = 0;
1879 	tot_msdu_len = 0;
1880 
1881 	/*
1882 	 * Keep the first-MSDU handling outside the loop to avoid
1883 	 * repeated checks
1884 	 */
1885 
1886 	/* Construct src header */
1887 	rx_src_desc = hdr_desc;
1888 
1889 	/*
1890 	 * Update protocol and flow tag for MSDU
1891 	 * update frag index in ctx_idx field.
1892 	 * Reset head pointer data of nbuf before updating.
1893 	 */
1894 	QDF_NBUF_CB_RX_CTX_ID(head_msdu) = 0;
1895 	dp_rx_mon_update_protocol_flow_tag(soc, dp_pdev, head_msdu, rx_desc);
1896 
1897 	/* Construct destination address */
1898 	frag_addr = qdf_nbuf_get_frag_addr(head_msdu, 0);
1899 	frag_size = qdf_nbuf_get_frag_size_by_idx(head_msdu, 0);
1900 	/* We will come here in 2 scenarios:
1901 	 * 1. First MSDU of MPDU with single buffer
1902 	 * 2. First buffer of First MSDU of MPDU with continuation
1903 	 *
1904 	 *  ------------------------------------------------------------
1905 	 * | SINGLE BUFFER (<= RX_MONITOR_BUFFER_SIZE - RX_PKT_TLVS_LEN)|
1906 	 *  ------------------------------------------------------------
1907 	 *
1908 	 *  ------------------------------------------------------------
1909 	 * | First BUFFER with Continuation             | ...           |
1910 	 * | (RX_MONITOR_BUFFER_SIZE - RX_PKT_TLVS_LEN) |               |
1911 	 *  ------------------------------------------------------------
1912 	 */
1913 	pad_byte_pholder =
1914 		(RX_MONITOR_BUFFER_SIZE - soc->rx_pkt_tlv_size) - frag_size;
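	/*
	 * Illustrative sizes only: with a 2048-byte monitor buffer, a
	 * 128-byte packet TLV area and frag_size = 1500, pad_byte_pholder
	 * works out to 420 bytes of unused room at the tail of this buffer.
	 */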
1915 	/* Construct destination address
1916 	 *  --------------------------------------------------------------
1917 	 * | RX_PKT_TLV | L2_HDR_PAD   |   Decap HDR   |      Payload     |
1918 	 * |            |                              /                  |
1919 	 * |            >Frag address points here     /                   |
1920 	 * |            \                            /                    |
1921 	 * |             \ These bytes must be      /                     |
1922 	 * |              \  removed to frame pkt  /                      |
1923 	 * |               -----------------------                        |
1924 	 * |                                      |                       |
1925 	 * |                                      |                       |
1926 	 * |  WIFI + LLC HDR will be added here <-|                       |
1927 	 * |        |                             |                       |
1928 	 * |         >Dest addr will point        |                       |
1929 	 * |            somewhere in this area    |                       |
1930 	 *  --------------------------------------------------------------
1931 	 */
1932 	rx_dest_desc =
1933 		(frag_addr + decap_hdr_pull_bytes + l2_hdr_offset) -
1934 					mpdu_buf_len;
1935 	/* Add WIFI and LLC header for 1st MSDU of MPDU */
1936 	qdf_mem_copy(rx_dest_desc, rx_src_desc, mpdu_buf_len);
1937 
1938 	frag_page_offset =
1939 		(decap_hdr_pull_bytes + l2_hdr_offset) - mpdu_buf_len;
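	/*
	 * Illustrative: for a 26-byte 802.11 QoS header plus an 8-byte LLC
	 * header (mpdu_buf_len = 34) and the assumed 2-byte L2 pad, this is
	 * (14 + 2) - 34 = -18, i.e. the frag offset moves back 18 bytes so
	 * the header copied above becomes part of the frag.
	 */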
1940 
1941 	qdf_nbuf_move_frag_page_offset(head_msdu, 0, frag_page_offset);
1942 
1943 	frag_size = qdf_nbuf_get_frag_size_by_idx(head_msdu, 0);
1944 
1945 	if (buf_info.first_buffer && buf_info.last_buffer) {
1946 		/* MSDU with single buffer */
1947 		amsdu_pad = frag_size & 0x3;
1948 		amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;
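		/*
		 * Illustrative: a 1502-byte frag gives amsdu_pad = 2, so two
		 * zeroed pad bytes are appended below when pad_byte_pholder
		 * has room for them.
		 */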
1949 		if (amsdu_pad && (amsdu_pad <= pad_byte_pholder)) {
1950 			char *frag_addr_temp;
1951 
1952 			qdf_nbuf_trim_add_frag_size(head_msdu, 0, amsdu_pad,
1953 						    0);
1954 			frag_addr_temp =
1955 				(char *)qdf_nbuf_get_frag_addr(head_msdu, 0);
1956 			frag_addr_temp = (frag_addr_temp +
1957 				qdf_nbuf_get_frag_size_by_idx(head_msdu, 0)) -
1958 					amsdu_pad;
1959 			qdf_mem_zero(frag_addr_temp, amsdu_pad);
1960 			amsdu_pad = 0;
1961 		}
1962 	} else {
1963 		/*
1964 		 * First buffer of a continuation frame, hence amsdu
1965 		 * padding doesn't need to be added here.
1966 		 * Increase tot_msdu_len so that the amsdu_pad byte
1967 		 * will be calculated for the last frame of the MSDU
1968 		 */
1969 		tot_msdu_len = frag_size;
1970 		amsdu_pad = 0;
1971 	}
1972 
1973 	/* Here the amsdu_pad byte may be non-zero if the 1st buffer was a
1974 	 * single-buffer MSDU that didn't have a placeholder to adjust the
1975 	 * amsdu padding byte at the end,
1976 	 * so don't initialize it to ZERO here
1977 	 */
1978 	pad_byte_pholder = 0;
1979 	for (msdu_curr = head_msdu; msdu_curr;) {
1980 		/* frags_iter starts from 0 for the second skb onwards */
1981 		if (msdu_curr == head_msdu)
1982 			frags_iter = 1;
1983 		else
1984 			frags_iter = 0;
1985 
1986 		num_frags = qdf_nbuf_get_nr_frags(msdu_curr);
1987 
1988 		for (; frags_iter < num_frags; frags_iter++) {
1989 		/* Construct destination address
1990 		 *  ----------------------------------------------------------
1991 		 * | RX_PKT_TLV | L2_HDR_PAD   |   Decap HDR | Payload | Pad  |
1992 		 * |            | (First buffer)             |         |      |
1993 		 * |            |                            /        /       |
1994 		 * |            >Frag address points here   /        /        |
1995 		 * |            \                          /        /         |
1996 		 * |             \ These bytes must be    /        /          |
1997 		 * |              \  removed to frame pkt/        /           |
1998 		 * |               ----------------------        /            |
1999 		 * |                                     |     /     Add      |
2000 		 * |                                     |    /   amsdu pad   |
2001 		 * |   LLC HDR will be added here      <-|    |   Byte for    |
2002 		 * |        |                            |    |   last frame  |
2003 		 * |         >Dest addr will point       |    |    if space   |
2004 		 * |            somewhere in this area   |    |    available  |
2005 		 * |  And amsdu_pad will be created if   |    |               |
2006 		 * | didn't get added in last buffer     |    |               |
2007 		 * |       (First Buffer)                |    |               |
2008 		 *  ----------------------------------------------------------
2009 		 */
2010 			frag_addr =
2011 				qdf_nbuf_get_frag_addr(msdu_curr, frags_iter);
2012 			rx_desc = frag_addr - rx_mon_tlv_size;
2013 
2014 			/*
2015 			 * Update protocol and flow tag for MSDU
2016 			 * update frag index in ctx_idx field
2017 			 */
2018 			QDF_NBUF_CB_RX_CTX_ID(msdu_curr) = frags_iter;
2019 			dp_rx_mon_update_protocol_flow_tag(soc, dp_pdev,
2020 							   msdu_curr, rx_desc);
2021 
2022 			/* Read buffer info from stored data in tlvs */
2023 			hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_desc,
2024 						      (uint8_t *)&buf_info,
2025 						      sizeof(buf_info));
2026 
2027 			frag_size = qdf_nbuf_get_frag_size_by_idx(msdu_curr,
2028 								  frags_iter);
2029 
2030 			/* If middle buffer, don't add any header */
2031 			if ((!buf_info.first_buffer) &&
2032 			    (!buf_info.last_buffer)) {
2033 				tot_msdu_len += frag_size;
2034 				amsdu_pad = 0;
2035 				pad_byte_pholder = 0;
2036 				continue;
2037 			}
2038 
2039 			/* Calculate whether the current buffer has a
2040 			 * placeholder to accommodate the amsdu pad byte
2041 			 */
2042 			pad_byte_pholder =
2043 				(RX_MONITOR_BUFFER_SIZE - soc->rx_pkt_tlv_size)
2044 				- frag_size;
2045 			/*
2046 			 * We will come here only under three conditions:
2047 			 * 1. MSDU with a single buffer
2048 			 * 2. First buffer in case the MSDU is spread across
2049 			 *    multiple buffers
2050 			 * 3. Last buffer in case the MSDU is spread across
2051 			 *    multiple buffers
2052 			 *
2053 			 *         First buffer | Last buffer
2054 			 * Case 1:      1       |     1
2055 			 * Case 2:      1       |     0
2056 			 * Case 3:      0       |     1
2057 			 *
2058 			 * Only in the 3rd case is the l2_hdr_padding byte zero;
2059 			 * in the other cases it is 2 bytes.
2060 			 */
2061 			if (buf_info.first_buffer)
2062 				l2_hdr_offset =
2063 					DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE;
2064 			else
2065 				l2_hdr_offset = DP_RX_MON_RAW_L2_HDR_PAD_BYTE;
2066 
2067 			if (buf_info.first_buffer) {
2068 				/* Src addr from where llc header needs to be copied */
2069 				rx_src_desc =
2070 					hal_rx_desc_get_80211_hdr(soc->hal_soc,
2071 								  rx_desc);
2072 
2073 				/* Size of buffer with llc header */
2074 				frag_size = frag_size -
2075 					(l2_hdr_offset + decap_hdr_pull_bytes);
2076 				frag_size += msdu_llc_len;
2077 
2078 				/* Construct destination address */
2079 				rx_dest_desc = frag_addr +
2080 					decap_hdr_pull_bytes + l2_hdr_offset;
2081 				rx_dest_desc = rx_dest_desc - (msdu_llc_len);
2082 
2083 				qdf_mem_copy(rx_dest_desc, rx_src_desc,
2084 					     msdu_llc_len);
2085 
2086 				/*
2087 				 * Calculate new page offset and create hole
2088 				 * if amsdu_pad required.
2089 				 */
2090 				frag_page_offset = l2_hdr_offset +
2091 						decap_hdr_pull_bytes;
2092 				frag_page_offset = frag_page_offset -
2093 						(msdu_llc_len + amsdu_pad);
2094 
2095 				qdf_nbuf_move_frag_page_offset(msdu_curr,
2096 							       frags_iter,
2097 							       frag_page_offset);
2098 
2099 				tot_msdu_len = frag_size;
2100 				/*
2101 				 * No amsdu padding required for first frame of
2102 				 * continuation buffer
2103 				 */
2104 				if (!buf_info.last_buffer) {
2105 					amsdu_pad = 0;
2106 					continue;
2107 				}
2108 			} else {
2109 				tot_msdu_len += frag_size;
2110 			}
2111 
2112 			/* We will reach this point in only two cases:
2113 			 * 1. Single buffer MSDU
2114 			 * 2. Last buffer of MSDU in case of multiple buf MSDU
2115 			 */
2116 
2117 			/* Check size of buffer if amsdu padding required */
2118 			amsdu_pad = tot_msdu_len & 0x3;
2119 			amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;
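			/*
			 * Illustrative: an MSDU totalling 2805 bytes across
			 * its buffers needs amsdu_pad = 3 so that the next
			 * A-MSDU subframe starts on a 4-byte boundary.
			 */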
2120 
2121 			/* Create placeholder if current buffer can
2122 			 * accommodate padding.
2123 			 */
2124 			if (amsdu_pad && (amsdu_pad <= pad_byte_pholder)) {
2125 				char *frag_addr_temp;
2126 
2127 				qdf_nbuf_trim_add_frag_size(msdu_curr,
2128 							    frags_iter,
2129 							    amsdu_pad, 0);
2130 				frag_addr_temp = (char *)qdf_nbuf_get_frag_addr(msdu_curr,
2131 										frags_iter);
2132 				frag_addr_temp = (frag_addr_temp +
2133 					qdf_nbuf_get_frag_size_by_idx(msdu_curr, frags_iter)) -
2134 					amsdu_pad;
2135 				qdf_mem_zero(frag_addr_temp, amsdu_pad);
2136 				amsdu_pad = 0;
2137 			}
2138 
2139 			/* reset tot_msdu_len */
2140 			tot_msdu_len = 0;
2141 		}
2142 		msdu_curr = qdf_nbuf_next(msdu_curr);
2143 	}
2144 
2145 	dp_rx_mon_fraglist_prepare(head_msdu, tail_msdu);
2146 
2147 	dp_rx_mon_dest_debug("%pK: head_msdu %pK head_msdu->len %u",
2148 			     soc, head_msdu, head_msdu->len);
2149 
2150 mpdu_stitch_done:
2151 	return head_msdu;
2152 
2153 mpdu_stitch_fail:
2154 	dp_rx_mon_dest_err("%pK: mpdu_stitch_fail head_msdu %pK",
2155 			   soc, head_msdu);
2156 	return NULL;
2157 }
2158 #endif
2159 
2160 #ifdef DP_RX_MON_MEM_FRAG
2161 qdf_nbuf_t dp_rx_mon_restitch_mpdu(struct dp_soc *soc, uint32_t mac_id,
2162 				   qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu,
2163 				   struct cdp_mon_status *rs)
2164 {
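	/* A non-zero frag count means the monitor data was attached as page
	 * frags, so take the frag-based restitch path; otherwise fall back
	 * to the skb-chain based restitch.
	 */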
2165 	if (qdf_nbuf_get_nr_frags(head_msdu))
2166 		return dp_rx_mon_frag_restitch_mpdu_from_msdus(soc, mac_id,
2167 							       head_msdu,
2168 							       tail_msdu, rs);
2169 	else
2170 		return dp_rx_mon_restitch_mpdu_from_msdus(soc, mac_id,
2171 							  head_msdu,
2172 							  tail_msdu, rs);
2173 }
2174 #else
2175 qdf_nbuf_t dp_rx_mon_restitch_mpdu(struct dp_soc *soc, uint32_t mac_id,
2176 				   qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu,
2177 				   struct cdp_mon_status *rs)
2178 {
2179 	return dp_rx_mon_restitch_mpdu_from_msdus(soc, mac_id, head_msdu,
2180 						  tail_msdu, rs);
2181 }
2182 #endif
2183 
2184 #ifdef DP_RX_MON_MEM_FRAG
2185 #if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\
2186 	defined(WLAN_SUPPORT_RX_FLOW_TAG)
2187 void dp_rx_mon_update_pf_tag_to_buf_headroom(struct dp_soc *soc,
2188 					     qdf_nbuf_t nbuf)
2189 {
2190 	qdf_nbuf_t ext_list;
2191 
2192 	if (qdf_unlikely(!soc)) {
2193 		dp_err("Soc[%pK] Null. Can't update pftag to nbuf headroom\n",
2194 		       soc);
2195 		qdf_assert_always(0);
2196 	}
2197 
2198 	if (!wlan_cfg_is_rx_mon_protocol_flow_tag_enabled(soc->wlan_cfg_ctx))
2199 		return;
2200 
2201 	if (qdf_unlikely(!nbuf))
2202 		return;
2203 
2204 	/* Return if it didn't come from the mon path */
2205 	if (!qdf_nbuf_get_nr_frags(nbuf))
2206 		return;
2207 
2208 	/* Headroom must be double the PF tag size as we first copy it to the head */
2209 	if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < (DP_RX_MON_TOT_PF_TAG_LEN * 2))) {
2210 		dp_err("Nbuf avail Headroom[%d] < 2 * DP_RX_MON_TOT_PF_TAG_LEN[%lu]",
2211 		       qdf_nbuf_headroom(nbuf), DP_RX_MON_TOT_PF_TAG_LEN);
2212 		return;
2213 	}
2214 
2215 	qdf_nbuf_push_head(nbuf, DP_RX_MON_TOT_PF_TAG_LEN);
2216 	qdf_mem_copy(qdf_nbuf_data(nbuf), qdf_nbuf_head(nbuf),
2217 		     DP_RX_MON_TOT_PF_TAG_LEN);
2218 	qdf_nbuf_pull_head(nbuf, DP_RX_MON_TOT_PF_TAG_LEN);
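	/*
	 * Net effect: the protocol/flow tag bytes stored at the skb head are
	 * duplicated into the headroom immediately preceding the data, while
	 * the data pointer itself ends up unchanged.
	 */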
2219 
2220 	ext_list = qdf_nbuf_get_ext_list(nbuf);
2221 	while (ext_list) {
2222 		/* Headroom must be double the PF tag size
2223 		 * as we first copy it to the head
2224 		 */
2225 		if (qdf_unlikely(qdf_nbuf_headroom(ext_list) < (DP_RX_MON_TOT_PF_TAG_LEN * 2))) {
2226 			dp_err("Fraglist Nbuf avail Headroom[%d] < 2 * DP_RX_MON_TOT_PF_TAG_LEN[%lu]",
2227 			       qdf_nbuf_headroom(ext_list),
2228 			       DP_RX_MON_TOT_PF_TAG_LEN);
2229 			ext_list = qdf_nbuf_queue_next(ext_list);
2230 			continue;
2231 		}
2232 		qdf_nbuf_push_head(ext_list, DP_RX_MON_TOT_PF_TAG_LEN);
2233 		qdf_mem_copy(qdf_nbuf_data(ext_list), qdf_nbuf_head(ext_list),
2234 			     DP_RX_MON_TOT_PF_TAG_LEN);
2235 		qdf_nbuf_pull_head(ext_list, DP_RX_MON_TOT_PF_TAG_LEN);
2236 		ext_list = qdf_nbuf_queue_next(ext_list);
2237 	}
2238 }
2239 #endif
2240 #endif
2241 
2242 #ifdef QCA_MONITOR_PKT_SUPPORT
2243 QDF_STATUS dp_mon_htt_dest_srng_setup(struct dp_soc *soc,
2244 				      struct dp_pdev *pdev,
2245 				      int mac_id,
2246 				      int mac_for_pdev)
2247 {
2248 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2249 
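	/* Targets with a dedicated monitor RXDMA (rxdma1) need the monitor
	 * buffer, destination and descriptor rings registered with the
	 * target via HTT SRNG setup messages; others have nothing to do here.
	 */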
2250 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
2251 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
2252 					soc->rxdma_mon_buf_ring[mac_id]
2253 					.hal_srng,
2254 					RXDMA_MONITOR_BUF);
2255 
2256 		if (status != QDF_STATUS_SUCCESS) {
2257 			dp_mon_err("Failed to send htt srng setup message for Rxdma mon buf ring");
2258 			return status;
2259 		}
2260 
2261 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
2262 					soc->rxdma_mon_dst_ring[mac_id]
2263 					.hal_srng,
2264 					RXDMA_MONITOR_DST);
2265 
2266 		if (status != QDF_STATUS_SUCCESS) {
2267 			dp_mon_err("Failed to send htt srng setup message for Rxdma mon dst ring");
2268 			return status;
2269 		}
2270 
2271 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
2272 					soc->rxdma_mon_desc_ring[mac_id]
2273 					.hal_srng,
2274 					RXDMA_MONITOR_DESC);
2275 
2276 		if (status != QDF_STATUS_SUCCESS) {
2277 			dp_mon_err("Failed to send htt srng setup message for Rxdma mon desc ring");
2278 			return status;
2279 		}
2280 	}
2281 
2282 	return status;
2283 }
2284 #endif /* QCA_MONITOR_PKT_SUPPORT */
2285 
2286 #ifdef QCA_MONITOR_PKT_SUPPORT
2287 void dp_mon_dest_rings_deinit(struct dp_pdev *pdev, int lmac_id)
2288 {
2289 	struct dp_soc *soc = pdev->soc;
2290 
2291 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
2292 		dp_srng_deinit(soc, &soc->rxdma_mon_buf_ring[lmac_id],
2293 			       RXDMA_MONITOR_BUF, 0);
2294 		dp_srng_deinit(soc, &soc->rxdma_mon_dst_ring[lmac_id],
2295 			       RXDMA_MONITOR_DST, 0);
2296 		dp_srng_deinit(soc, &soc->rxdma_mon_desc_ring[lmac_id],
2297 			       RXDMA_MONITOR_DESC, 0);
2298 	}
2299 }
2300 
2301 void dp_mon_dest_rings_free(struct dp_pdev *pdev, int lmac_id)
2302 {
2303 	struct dp_soc *soc = pdev->soc;
2304 
2305 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
2306 		dp_srng_free(soc, &soc->rxdma_mon_buf_ring[lmac_id]);
2307 		dp_srng_free(soc, &soc->rxdma_mon_dst_ring[lmac_id]);
2308 		dp_srng_free(soc, &soc->rxdma_mon_desc_ring[lmac_id]);
2309 	}
2310 }
2311 
2312 QDF_STATUS dp_mon_dest_rings_init(struct dp_pdev *pdev, int lmac_id)
2313 {
2314 	struct dp_soc *soc = pdev->soc;
2315 
2316 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
2317 		if (dp_srng_init(soc, &soc->rxdma_mon_buf_ring[lmac_id],
2318 				 RXDMA_MONITOR_BUF, 0, lmac_id)) {
2319 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_buf_ring", soc);
2320 			goto fail1;
2321 		}
2322 
2323 		if (dp_srng_init(soc, &soc->rxdma_mon_dst_ring[lmac_id],
2324 				 RXDMA_MONITOR_DST, 0, lmac_id)) {
2325 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_dst_ring", soc);
2326 			goto fail1;
2327 		}
2328 
2329 		if (dp_srng_init(soc, &soc->rxdma_mon_desc_ring[lmac_id],
2330 				 RXDMA_MONITOR_DESC, 0, lmac_id)) {
2331 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_desc_ring", soc);
2332 			goto fail1;
2333 		}
2334 	}
2335 	return QDF_STATUS_SUCCESS;
2336 
2337 fail1:
2338 	return QDF_STATUS_E_NOMEM;
2339 }
2340 
2341 QDF_STATUS dp_mon_dest_rings_alloc(struct dp_pdev *pdev, int lmac_id)
2342 {
2343 	int entries;
2344 	struct dp_soc *soc = pdev->soc;
2345 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx = pdev->wlan_cfg_ctx;
2346 
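	/* Ring depths come from the per-pdev wlan_cfg context so each
	 * monitor ring can be sized independently through configuration.
	 */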
2347 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
2348 		entries = wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
2349 		if (dp_srng_alloc(soc, &soc->rxdma_mon_buf_ring[lmac_id],
2350 				  RXDMA_MONITOR_BUF, entries, 0)) {
2351 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_buf_ring", soc);
2352 			goto fail1;
2353 		}
2354 		entries = wlan_cfg_get_dma_rx_mon_dest_ring_size(pdev_cfg_ctx);
2355 		if (dp_srng_alloc(soc, &soc->rxdma_mon_dst_ring[lmac_id],
2356 				  RXDMA_MONITOR_DST, entries, 0)) {
2357 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_dst_ring", soc);
2358 			goto fail1;
2359 		}
2360 		entries = wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
2361 		if (dp_srng_alloc(soc, &soc->rxdma_mon_desc_ring[lmac_id],
2362 				  RXDMA_MONITOR_DESC, entries, 0)) {
2363 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_desc_ring", soc);
2364 			goto fail1;
2365 		}
2366 	}
2367 	return QDF_STATUS_SUCCESS;
2368 
2369 fail1:
2370 	return QDF_STATUS_E_NOMEM;
2371 }
2372 #endif
2373