xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/monitor/1.0/dp_rx_mon_dest_1.0.c (revision 70a19e16789e308182f63b15c75decec7bf0b342)
1 /*
2  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
3  * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include "hal_hw_headers.h"
19 #include "dp_types.h"
20 #include "dp_rx.h"
21 #include "dp_peer.h"
22 #include "hal_rx.h"
23 #include "hal_api.h"
24 #include "qdf_trace.h"
25 #include "qdf_nbuf.h"
26 #include "hal_api_mon.h"
27 #include "dp_htt.h"
28 #include "dp_mon.h"
29 #include "dp_rx_mon.h"
30 #include "wlan_cfg.h"
31 #include "dp_internal.h"
32 #include "dp_rx_buffer_pool.h"
33 #include <dp_mon_1.0.h>
34 #include <dp_rx_mon_1.0.h>
35 
36 #ifdef WLAN_TX_PKT_CAPTURE_ENH
37 #include "dp_rx_mon_feature.h"
38 #endif
39 
40 /*
41  * PPDU id ranges from 0 to 64k-1. The PPDU id read from the status ring and
42  * the PPDU id read from the destination ring shall track each other. If the
43  * distance between the two ppdu ids is less than 20000, no wrap-around is
44  * assumed. Otherwise, a wrap-around is assumed.
45  */
46 #define NOT_PPDU_ID_WRAP_AROUND 20000
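/*
 * Illustrative example (not from the original source): if the status ring
 * last reported ppdu_id 65000 and the destination ring now reports
 * ppdu_id 200, the raw distance is 64800 (> 20000), so the id is treated
 * as having wrapped around rather than as one ring running far behind.
 */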
47 /*
48  * The destination ring processing is considered stuck if the destination
49  * ring is not moving while the status ring moves 16 ppdus. As a workaround,
50  * destination ring processing skips this destination ring ppdu.
51  */
52 #define MON_DEST_RING_STUCK_MAX_CNT 16
53 
54 #ifdef WLAN_TX_PKT_CAPTURE_ENH
55 void
56 dp_handle_tx_capture(struct dp_soc *soc, struct dp_pdev *pdev,
57 		     qdf_nbuf_t mon_mpdu)
58 {
59 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
60 	struct hal_rx_ppdu_info *ppdu_info = &mon_pdev->ppdu_info;
61 
62 	if (mon_pdev->tx_capture_enabled
63 	    == CDP_TX_ENH_CAPTURE_DISABLED)
64 		return;
65 
66 	if ((ppdu_info->sw_frame_group_id ==
67 	      HAL_MPDU_SW_FRAME_GROUP_CTRL_NDPA) ||
68 	     (ppdu_info->sw_frame_group_id ==
69 	      HAL_MPDU_SW_FRAME_GROUP_CTRL_BAR))
70 		dp_handle_tx_capture_from_dest(soc, pdev, mon_mpdu);
71 }
72 
73 #ifdef QCA_MONITOR_PKT_SUPPORT
74 static void
75 dp_tx_capture_get_user_id(struct dp_pdev *dp_pdev, void *rx_desc_tlv)
76 {
77 	struct dp_mon_pdev *mon_pdev = dp_pdev->monitor_pdev;
78 
79 	if (mon_pdev->tx_capture_enabled
80 	    != CDP_TX_ENH_CAPTURE_DISABLED)
81 		mon_pdev->ppdu_info.rx_info.user_id =
82 			hal_rx_hw_desc_mpdu_user_id(dp_pdev->soc->hal_soc,
83 						    rx_desc_tlv);
84 }
85 #endif
86 #else
87 static void
88 dp_tx_capture_get_user_id(struct dp_pdev *dp_pdev, void *rx_desc_tlv)
89 {
90 }
91 #endif
92 
93 #ifdef QCA_MONITOR_PKT_SUPPORT
94 /**
95  * dp_rx_mon_link_desc_return() - Return an MPDU link descriptor to HW
96  *			      (WBM), following error handling
97  *
98  * @dp_pdev: core txrx pdev context
99  * @buf_addr_info: void pointer to monitor link descriptor buf addr info
100  * @mac_id: mac_id for which the link desc is released.
101  *
102  * Return: QDF_STATUS
103  */
104 QDF_STATUS
105 dp_rx_mon_link_desc_return(struct dp_pdev *dp_pdev,
106 	hal_buff_addrinfo_t buf_addr_info, int mac_id)
107 {
108 	struct dp_srng *dp_srng;
109 	hal_ring_handle_t hal_ring_hdl;
110 	hal_soc_handle_t hal_soc;
111 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
112 	void *src_srng_desc;
113 
114 	hal_soc = dp_pdev->soc->hal_soc;
115 
116 	dp_srng = &dp_pdev->soc->rxdma_mon_desc_ring[mac_id];
117 	hal_ring_hdl = dp_srng->hal_srng;
118 
119 	qdf_assert(hal_ring_hdl);
120 
121 	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring_hdl))) {
122 
123 		/* TODO */
124 		/*
125 		 * Need API to convert from hal_ring pointer to
126 		 * Ring Type / Ring Id combo
127 		 */
128 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
129 			"%s %d : HAL RING Access For WBM Release SRNG Failed"
130 			" -- %pK",
131 			__func__, __LINE__, hal_ring_hdl);
132 		goto done;
133 	}
134 
135 	src_srng_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
136 
137 	if (qdf_likely(src_srng_desc)) {
138 		/* Return link descriptor through WBM ring (SW2WBM)*/
139 		hal_rx_mon_msdu_link_desc_set(hal_soc,
140 				src_srng_desc, buf_addr_info);
141 		status = QDF_STATUS_SUCCESS;
142 	} else {
143 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
144 			"%s %d -- Monitor Link Desc WBM Release Ring Full",
145 			__func__, __LINE__);
146 	}
147 done:
148 	hal_srng_access_end(hal_soc, hal_ring_hdl);
149 	return status;
150 }
151 
152 /**
153  * dp_rx_mon_mpdu_pop() - Pop the MSDUs of one MPDU from the monitor
154  *			      destination ring, following error handling
155  *
156  * @soc: core DP main context
157  * @mac_id: mac id which is one of 3 mac_ids
158  * @rxdma_dst_ring_desc: rxdma monitor destination ring entry
159  * @head_msdu: head of msdu list to be popped
160  * @tail_msdu: tail of msdu list to be popped
161  * @npackets: number of packets to be popped
162  * @ppdu_id: ppdu id of the ppdu being processed
163  * @head: head of descs list to be freed
164  * @tail: tail of descs list to be freed
165  *
166  * Return: number of msdu in MPDU to be popped
167  */
168 static inline uint32_t
169 dp_rx_mon_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
170 	hal_rxdma_desc_t rxdma_dst_ring_desc, qdf_nbuf_t *head_msdu,
171 	qdf_nbuf_t *tail_msdu, uint32_t *npackets, uint32_t *ppdu_id,
172 	union dp_rx_desc_list_elem_t **head,
173 	union dp_rx_desc_list_elem_t **tail)
174 {
175 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
176 	void *rx_desc_tlv, *first_rx_desc_tlv = NULL;
177 	void *rx_msdu_link_desc;
178 	qdf_nbuf_t msdu;
179 	qdf_nbuf_t last;
180 	struct hal_rx_msdu_list msdu_list;
181 	uint16_t num_msdus;
182 	uint32_t rx_buf_size, rx_pkt_offset;
183 	struct hal_buf_info buf_info;
184 	uint32_t rx_bufs_used = 0;
185 	uint32_t msdu_ppdu_id, msdu_cnt;
186 	uint8_t *data = NULL;
187 	uint32_t i;
188 	uint32_t total_frag_len = 0, frag_len = 0;
189 	bool is_frag, is_first_msdu;
190 	bool drop_mpdu = false, is_frag_non_raw = false;
191 	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
192 	qdf_dma_addr_t buf_paddr = 0;
193 	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
194 	struct cdp_mon_status *rs;
195 	struct dp_mon_pdev *mon_pdev;
196 
197 	if (qdf_unlikely(!dp_pdev)) {
198 		dp_rx_mon_dest_debug("%pK: pdev is null for mac_id = %d", soc, mac_id);
199 		return rx_bufs_used;
200 	}
201 
202 	mon_pdev = dp_pdev->monitor_pdev;
203 	msdu = 0;
204 
205 	last = NULL;
206 
207 	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
208 				     &buf_info, &msdu_cnt);
209 
210 	rs = &mon_pdev->rx_mon_recv_status;
211 	rs->cdp_rs_rxdma_err = false;
212 	if ((hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc) ==
213 		HAL_RX_WBM_RXDMA_PSH_RSN_ERROR)) {
214 		uint8_t rxdma_err =
215 			hal_rx_reo_ent_rxdma_error_code_get(
216 				rxdma_dst_ring_desc);
217 		if (qdf_unlikely((rxdma_err == HAL_RXDMA_ERR_FLUSH_REQUEST) ||
218 		   (rxdma_err == HAL_RXDMA_ERR_MPDU_LENGTH) ||
219 		   (rxdma_err == HAL_RXDMA_ERR_OVERFLOW) ||
220 		   (rxdma_err == HAL_RXDMA_ERR_FCS && mon_pdev->mcopy_mode) ||
221 		   (rxdma_err == HAL_RXDMA_ERR_FCS &&
222 		    mon_pdev->rx_pktlog_cbf))) {
223 			drop_mpdu = true;
224 			mon_pdev->rx_mon_stats.dest_mpdu_drop++;
225 		}
226 		rs->cdp_rs_rxdma_err = true;
227 	}
228 
229 	is_frag = false;
230 	is_first_msdu = true;
231 
232 	do {
233 		/* WAR for duplicate link descriptors received from HW */
234 		if (qdf_unlikely(mon_pdev->mon_last_linkdesc_paddr ==
235 		    buf_info.paddr)) {
236 			mon_pdev->rx_mon_stats.dup_mon_linkdesc_cnt++;
237 			return rx_bufs_used;
238 		}
239 
240 		rx_msdu_link_desc =
241 			dp_rx_cookie_2_mon_link_desc(dp_pdev,
242 						     buf_info, mac_id);
243 
244 		qdf_assert_always(rx_msdu_link_desc);
245 
246 		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
247 				     &msdu_list, &num_msdus);
248 
249 		for (i = 0; i < num_msdus; i++) {
250 			uint16_t l2_hdr_offset;
251 			struct dp_rx_desc *rx_desc = NULL;
252 			struct rx_desc_pool *rx_desc_pool;
253 
254 			rx_desc = dp_rx_get_mon_desc(soc,
255 						     msdu_list.sw_cookie[i]);
256 
257 			qdf_assert_always(rx_desc);
258 
259 			msdu = DP_RX_MON_GET_NBUF_FROM_DESC(rx_desc);
260 			buf_paddr = dp_rx_mon_get_paddr_from_desc(rx_desc);
261 
262 			/* WAR for duplicate buffers received from HW */
263 			if (qdf_unlikely(mon_pdev->mon_last_buf_cookie ==
264 				msdu_list.sw_cookie[i] ||
265 				DP_RX_MON_IS_BUFFER_ADDR_NULL(rx_desc) ||
266 				msdu_list.paddr[i] != buf_paddr ||
267 				!rx_desc->in_use)) {
268 				/* Skip duplicate buffer and drop subsequent
269 				 * buffers in this MPDU
270 				 */
271 				drop_mpdu = true;
272 				mon_pdev->rx_mon_stats.dup_mon_buf_cnt++;
273 				mon_pdev->mon_last_linkdesc_paddr =
274 					buf_info.paddr;
275 				continue;
276 			}
277 
278 			if (rx_desc->unmapped == 0) {
279 				rx_desc_pool = dp_rx_get_mon_desc_pool(soc,
280 								       mac_id,
281 								dp_pdev->pdev_id);
282 				dp_rx_mon_buffer_unmap(soc, rx_desc,
283 						       rx_desc_pool->buf_size);
284 				rx_desc->unmapped = 1;
285 			}
286 
287 			if (dp_rx_buffer_pool_refill(soc, msdu,
288 						     rx_desc->pool_id)) {
289 				drop_mpdu = true;
290 				msdu = NULL;
291 				mon_pdev->mon_last_linkdesc_paddr =
292 					buf_info.paddr;
293 				goto next_msdu;
294 			}
295 
296 			if (drop_mpdu) {
297 				mon_pdev->mon_last_linkdesc_paddr =
298 					buf_info.paddr;
299 				dp_rx_mon_buffer_free(rx_desc);
300 				msdu = NULL;
301 				goto next_msdu;
302 			}
303 
304 			data = dp_rx_mon_get_buffer_data(rx_desc);
305 			rx_desc_tlv = HAL_RX_MON_DEST_GET_DESC(data);
306 
307 			dp_rx_mon_dest_debug("%pK: i=%d, ppdu_id=%x, num_msdus = %u",
308 					     soc, i, *ppdu_id, num_msdus);
309 
310 			if (is_first_msdu) {
311 				if (!hal_rx_mpdu_start_tlv_tag_valid(
312 						soc->hal_soc,
313 						rx_desc_tlv)) {
314 					drop_mpdu = true;
315 					dp_rx_mon_buffer_free(rx_desc);
316 					msdu = NULL;
317 					mon_pdev->mon_last_linkdesc_paddr =
318 						buf_info.paddr;
319 					goto next_msdu;
320 				}
321 
322 				msdu_ppdu_id = hal_rx_hw_desc_get_ppduid_get(
323 						soc->hal_soc,
324 						rx_desc_tlv,
325 						rxdma_dst_ring_desc);
326 				is_first_msdu = false;
327 
328 				dp_rx_mon_dest_debug("%pK: msdu_ppdu_id=%x",
329 						     soc, msdu_ppdu_id);
330 
331 				if (*ppdu_id > msdu_ppdu_id)
332 					dp_rx_mon_dest_debug("%pK: ppdu_id=%d "
333 							     "msdu_ppdu_id=%d", soc,
334 							     *ppdu_id, msdu_ppdu_id);
335 
336 				if ((*ppdu_id < msdu_ppdu_id) && (
337 					(msdu_ppdu_id - *ppdu_id) <
338 						NOT_PPDU_ID_WRAP_AROUND)) {
339 					*ppdu_id = msdu_ppdu_id;
340 					return rx_bufs_used;
341 				} else if ((*ppdu_id > msdu_ppdu_id) && (
342 					(*ppdu_id - msdu_ppdu_id) >
343 						NOT_PPDU_ID_WRAP_AROUND)) {
344 					*ppdu_id = msdu_ppdu_id;
345 					return rx_bufs_used;
346 				}
347 
348 				dp_tx_capture_get_user_id(dp_pdev,
349 							  rx_desc_tlv);
350 
351 				if (*ppdu_id == msdu_ppdu_id)
352 					mon_pdev->rx_mon_stats.ppdu_id_match++;
353 				else
354 					mon_pdev->rx_mon_stats.ppdu_id_mismatch
355 						++;
356 
357 				mon_pdev->mon_last_linkdesc_paddr =
358 					buf_info.paddr;
359 
360 				if (dp_rx_mon_alloc_parent_buffer(head_msdu)
361 				    != QDF_STATUS_SUCCESS) {
362 					DP_STATS_INC(dp_pdev,
363 						     replenish.nbuf_alloc_fail,
364 						     1);
365 					qdf_frag_free(rx_desc_tlv);
366 					dp_rx_mon_dest_debug("failed to allocate parent buffer to hold all frag");
367 					drop_mpdu = true;
368 					goto next_msdu;
369 				}
370 			}
371 
372 			if (hal_rx_desc_is_first_msdu(soc->hal_soc,
373 						      rx_desc_tlv))
374 				hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc,
375 					rx_desc_tlv,
376 					&mon_pdev->ppdu_info.rx_status);
377 
378 			dp_rx_mon_parse_desc_buffer(soc,
379 						    &(msdu_list.msdu_info[i]),
380 						    &is_frag,
381 						    &total_frag_len,
382 						    &frag_len,
383 						    &l2_hdr_offset,
384 						    rx_desc_tlv,
385 						    &first_rx_desc_tlv,
386 						    &is_frag_non_raw, data);
387 			if (!is_frag)
388 				msdu_cnt--;
389 
390 			dp_rx_mon_dest_debug("total_len %u frag_len %u flags %u",
391 					     total_frag_len, frag_len,
392 				      msdu_list.msdu_info[i].msdu_flags);
393 
394 			rx_pkt_offset = dp_rx_mon_get_rx_pkt_tlv_size(soc);
395 
396 			rx_buf_size = rx_pkt_offset + l2_hdr_offset
397 					+ frag_len;
398 
399 			dp_rx_mon_buffer_set_pktlen(msdu, rx_buf_size);
400 #if 0
401 			/* Disabled: packets were seen with msdu_done set to 0 */
402 			/*
403 			 * Check if DMA completed -- msdu_done is the
404 			 * last bit to be written
405 			 */
406 			if (!hal_rx_attn_msdu_done_get(rx_desc_tlv)) {
407 
408 				QDF_TRACE(QDF_MODULE_ID_DP,
409 					  QDF_TRACE_LEVEL_ERROR,
410 					  "%s:%d: Pkt Desc",
411 					  __func__, __LINE__);
412 
413 				QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP,
414 					QDF_TRACE_LEVEL_ERROR,
415 					rx_desc_tlv, 128);
416 
417 				qdf_assert_always(0);
418 			}
419 #endif
420 			dp_rx_mon_dest_debug("%pK: rx_pkt_offset=%d, l2_hdr_offset=%d, msdu_len=%d, frag_len %u",
421 					     soc, rx_pkt_offset, l2_hdr_offset,
422 					     msdu_list.msdu_info[i].msdu_len,
423 					     frag_len);
424 
425 			if (dp_rx_mon_add_msdu_to_list(soc, head_msdu, msdu,
426 						       &last, rx_desc_tlv,
427 						       frag_len, l2_hdr_offset)
428 					!= QDF_STATUS_SUCCESS) {
429 				dp_rx_mon_add_msdu_to_list_failure_handler(rx_desc_tlv,
430 						dp_pdev, &last, head_msdu,
431 						tail_msdu, __func__);
432 				drop_mpdu = true;
433 				goto next_msdu;
434 			}
435 
436 next_msdu:
437 			mon_pdev->mon_last_buf_cookie = msdu_list.sw_cookie[i];
438 			rx_bufs_used++;
439 			dp_rx_add_to_free_desc_list(head,
440 				tail, rx_desc);
441 		}
442 
443 		/*
444 		 * Store the current link buffer into the local
445 		 * structure to be used for release.
446 		 */
447 		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
448 					     buf_info.paddr,
449 					     buf_info.sw_cookie, buf_info.rbm);
450 
451 		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
452 					      &buf_info);
453 		if (dp_rx_monitor_link_desc_return(dp_pdev,
454 						   (hal_buff_addrinfo_t)
455 						   rx_link_buf_info,
456 						   mac_id,
457 						   bm_action)
458 						   != QDF_STATUS_SUCCESS)
459 			dp_err_rl("monitor link desc return failed");
460 	} while (buf_info.paddr && msdu_cnt);
461 
462 	dp_rx_mon_init_tail_msdu(head_msdu, msdu, last, tail_msdu);
463 	dp_rx_mon_remove_raw_frame_fcs_len(soc, head_msdu, tail_msdu);
464 
465 	return rx_bufs_used;
466 }
467 
468 #if !defined(DISABLE_MON_CONFIG) && \
469 	(defined(MON_ENABLE_DROP_FOR_NON_MON_PMAC) || \
470 	 defined(MON_ENABLE_DROP_FOR_MAC))
471 /**
472  * dp_rx_mon_drop_one_mpdu() - Drop one mpdu from one rxdma monitor destination
473  *			       ring.
474  * @pdev: DP pdev handle
475  * @mac_id: MAC id which is being currently processed
476  * @rxdma_dst_ring_desc: RXDMA monitor destination ring entry
477  * @head: HEAD of the rx_desc list to be freed
478  * @tail: TAIL of the rx_desc list to be freed
479  *
480  * Return: Number of msdus which are dropped.
481  */
482 static int dp_rx_mon_drop_one_mpdu(struct dp_pdev *pdev,
483 				   uint32_t mac_id,
484 				   hal_rxdma_desc_t rxdma_dst_ring_desc,
485 				   union dp_rx_desc_list_elem_t **head,
486 				   union dp_rx_desc_list_elem_t **tail)
487 {
488 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
489 	struct dp_soc *soc = pdev->soc;
490 	hal_soc_handle_t hal_soc = soc->hal_soc;
491 	struct hal_buf_info buf_info;
492 	uint32_t msdu_count = 0;
493 	uint32_t rx_bufs_used = 0;
494 	void *rx_msdu_link_desc;
495 	struct hal_rx_msdu_list msdu_list;
496 	uint16_t num_msdus;
497 	qdf_nbuf_t nbuf;
498 	uint32_t i;
499 	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
500 	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
501 	struct rx_desc_pool *rx_desc_pool;
502 
503 	rx_desc_pool = dp_rx_get_mon_desc_pool(soc, mac_id, pdev->pdev_id);
504 	hal_rx_reo_ent_buf_paddr_get(hal_soc, rxdma_dst_ring_desc,
505 				     &buf_info, &msdu_count);
506 
507 	do {
508 		rx_msdu_link_desc = dp_rx_cookie_2_mon_link_desc(pdev,
509 								 buf_info,
510 								 mac_id);
511 		if (qdf_unlikely(!rx_msdu_link_desc)) {
512 			mon_pdev->rx_mon_stats.mon_link_desc_invalid++;
513 			return rx_bufs_used;
514 		}
515 
516 		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
517 				     &msdu_list, &num_msdus);
518 
519 		for (i = 0; i < num_msdus; i++) {
520 			struct dp_rx_desc *rx_desc;
521 			qdf_dma_addr_t buf_paddr;
522 
523 			rx_desc = dp_rx_get_mon_desc(soc,
524 						     msdu_list.sw_cookie[i]);
525 
526 			if (qdf_unlikely(!rx_desc)) {
527 				mon_pdev->rx_mon_stats.
528 						mon_rx_desc_invalid++;
529 				continue;
530 			}
531 
532 			nbuf = DP_RX_MON_GET_NBUF_FROM_DESC(rx_desc);
533 			buf_paddr =
534 				 dp_rx_mon_get_paddr_from_desc(rx_desc);
535 
536 			if (qdf_unlikely(!rx_desc->in_use || !nbuf ||
537 					 msdu_list.paddr[i] !=
538 					 buf_paddr)) {
539 				mon_pdev->rx_mon_stats.
540 						mon_nbuf_sanity_err++;
541 				continue;
542 			}
543 			rx_bufs_used++;
544 
545 			if (!rx_desc->unmapped) {
546 				dp_rx_mon_buffer_unmap(soc, rx_desc,
547 						       rx_desc_pool->buf_size);
548 				rx_desc->unmapped = 1;
549 			}
550 
551 			qdf_nbuf_free(nbuf);
552 			dp_rx_add_to_free_desc_list(head, tail, rx_desc);
553 
554 			if (!(msdu_list.msdu_info[i].msdu_flags &
555 			      HAL_MSDU_F_MSDU_CONTINUATION))
556 				msdu_count--;
557 		}
558 
559 		/*
560 		 * Store the current link buffer into the local
561 		 * structure to be used for release.
562 		 */
563 		hal_rxdma_buff_addr_info_set(soc->hal_soc,
564 					     rx_link_buf_info,
565 					     buf_info.paddr,
566 					     buf_info.sw_cookie,
567 					     buf_info.rbm);
568 
569 		hal_rx_mon_next_link_desc_get(soc->hal_soc,
570 					      rx_msdu_link_desc,
571 					      &buf_info);
572 		if (dp_rx_monitor_link_desc_return(pdev,
573 						   (hal_buff_addrinfo_t)
574 						   rx_link_buf_info,
575 						   mac_id, bm_action) !=
576 		    QDF_STATUS_SUCCESS)
577 			dp_info_rl("monitor link desc return failed");
578 	} while (buf_info.paddr && msdu_count);
579 
580 	return rx_bufs_used;
581 }
582 #endif
583 
584 #if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_NON_MON_PMAC)
585 /**
586  * dp_rx_mon_check_n_drop_mpdu() - Check whether the current MPDU is from a
587  *				   PMAC other than the one currently being
588  *				   processed and, if so, drop the MPDU.
589  * @pdev: DP pdev handle
590  * @mac_id: MAC id which is being currently processed
591  * @rxdma_dst_ring_desc: RXDMA monitor destination ring entry
592  * @head: HEAD of the rx_desc list to be freed
593  * @tail: TAIL of the rx_desc list to be freed
594  * @rx_bufs_dropped: Number of msdus dropped
595  *
596  * Return: QDF_STATUS_SUCCESS, if the mpdu was dropped
597  *	   QDF_STATUS_E_INVAL/QDF_STATUS_E_FAILURE, if the mpdu was not dropped
598  */
599 static QDF_STATUS
600 dp_rx_mon_check_n_drop_mpdu(struct dp_pdev *pdev, uint32_t mac_id,
601 			    hal_rxdma_desc_t rxdma_dst_ring_desc,
602 			    union dp_rx_desc_list_elem_t **head,
603 			    union dp_rx_desc_list_elem_t **tail,
604 			    uint32_t *rx_bufs_dropped)
605 {
606 	struct dp_soc *soc = pdev->soc;
607 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
608 	uint32_t lmac_id = DP_MON_INVALID_LMAC_ID;
609 	uint8_t src_link_id;
610 	QDF_STATUS status;
611 
612 	if (mon_pdev->mon_chan_band == REG_BAND_UNKNOWN)
613 		goto drop_mpdu;
614 
615 	lmac_id = pdev->ch_band_lmac_id_mapping[mon_pdev->mon_chan_band];
616 
617 	status = hal_rx_reo_ent_get_src_link_id(soc->hal_soc,
618 						rxdma_dst_ring_desc,
619 						&src_link_id);
620 	if (QDF_IS_STATUS_ERROR(status))
621 		return QDF_STATUS_E_INVAL;
622 
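	/*
	 * The entry was sourced by the PMAC currently being processed; do not
	 * drop it here, leave it for regular destination ring processing.
	 */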
623 	if (src_link_id == lmac_id)
624 		return QDF_STATUS_E_INVAL;
625 
626 drop_mpdu:
627 	*rx_bufs_dropped = dp_rx_mon_drop_one_mpdu(pdev, mac_id,
628 						   rxdma_dst_ring_desc,
629 						   head, tail);
630 
631 	return QDF_STATUS_SUCCESS;
632 }
633 #else
634 static inline QDF_STATUS
635 dp_rx_mon_check_n_drop_mpdu(struct dp_pdev *pdev, uint32_t mac_id,
636 			    hal_rxdma_desc_t rxdma_dst_ring_desc,
637 			    union dp_rx_desc_list_elem_t **head,
638 			    union dp_rx_desc_list_elem_t **tail,
639 			    uint32_t *rx_bufs_dropped)
640 {
641 	return QDF_STATUS_E_FAILURE;
642 }
643 #endif
644 
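/**
 * dp_rx_mon_dest_process() - Reap entries from the monitor destination ring
 *			      for the given MAC, pop the MSDUs of each MPDU,
 *			      deliver completed MPDUs and replenish the
 *			      monitor buffer ring with the buffers consumed
 * @soc: core DP main context
 * @int_ctx: interrupt context
 * @mac_id: MAC id which is being currently processed
 * @quota: processing quota for this invocation
 */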
645 void dp_rx_mon_dest_process(struct dp_soc *soc, struct dp_intr *int_ctx,
646 			    uint32_t mac_id, uint32_t quota)
647 {
648 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
649 	uint8_t pdev_id;
650 	hal_rxdma_desc_t rxdma_dst_ring_desc;
651 	hal_soc_handle_t hal_soc;
652 	void *mon_dst_srng;
653 	union dp_rx_desc_list_elem_t *head = NULL;
654 	union dp_rx_desc_list_elem_t *tail = NULL;
655 	uint32_t ppdu_id;
656 	uint32_t rx_bufs_used;
657 	uint32_t mpdu_rx_bufs_used;
658 	int mac_for_pdev = mac_id;
659 	struct cdp_pdev_mon_stats *rx_mon_stats;
660 	struct dp_mon_pdev *mon_pdev;
661 
662 	if (!pdev) {
663 		dp_rx_mon_dest_debug("%pK: pdev is null for mac_id = %d", soc, mac_id);
664 		return;
665 	}
666 
667 	mon_pdev = pdev->monitor_pdev;
668 	mon_dst_srng = dp_rxdma_get_mon_dst_ring(pdev, mac_for_pdev);
669 
670 	if (!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)) {
671 		dp_rx_mon_dest_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK",
672 				   soc, mon_dst_srng);
673 		return;
674 	}
675 
676 	hal_soc = soc->hal_soc;
677 
678 	qdf_assert((hal_soc && pdev));
679 
680 	qdf_spin_lock_bh(&mon_pdev->mon_lock);
681 
682 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, mon_dst_srng))) {
683 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
684 			  "%s %d : HAL Mon Dest Ring access Failed -- %pK",
685 			  __func__, __LINE__, mon_dst_srng);
686 		qdf_spin_unlock_bh(&mon_pdev->mon_lock);
687 		return;
688 	}
689 
690 	pdev_id = pdev->pdev_id;
691 	ppdu_id = mon_pdev->ppdu_info.com_info.ppdu_id;
692 	rx_bufs_used = 0;
693 	rx_mon_stats = &mon_pdev->rx_mon_stats;
694 
695 	while (qdf_likely(rxdma_dst_ring_desc =
696 		hal_srng_dst_peek(hal_soc, mon_dst_srng))) {
697 		qdf_nbuf_t head_msdu, tail_msdu;
698 		uint32_t npackets;
699 		uint32_t rx_bufs_dropped;
700 
701 		rx_bufs_dropped = 0;
702 		head_msdu = (qdf_nbuf_t)NULL;
703 		tail_msdu = (qdf_nbuf_t)NULL;
704 
705 		if (QDF_STATUS_SUCCESS ==
706 		    dp_rx_mon_check_n_drop_mpdu(pdev, mac_id,
707 						rxdma_dst_ring_desc,
708 						&head, &tail,
709 						&rx_bufs_dropped)) {
710 			/* Increment stats */
711 			rx_bufs_used += rx_bufs_dropped;
712 			hal_srng_dst_get_next(hal_soc, mon_dst_srng);
713 			continue;
714 		}
715 
716 		mpdu_rx_bufs_used =
717 			dp_rx_mon_mpdu_pop(soc, mac_id,
718 					   rxdma_dst_ring_desc,
719 					   &head_msdu, &tail_msdu,
720 					   &npackets, &ppdu_id,
721 					   &head, &tail);
722 
723 		rx_bufs_used += mpdu_rx_bufs_used;
724 
725 		if (mpdu_rx_bufs_used)
726 			mon_pdev->mon_dest_ring_stuck_cnt = 0;
727 		else
728 			mon_pdev->mon_dest_ring_stuck_cnt++;
729 
730 		if (mon_pdev->mon_dest_ring_stuck_cnt >
731 		    MON_DEST_RING_STUCK_MAX_CNT) {
732 			dp_info("destination ring stuck");
733 			dp_info("ppdu_id status=%d dest=%d",
734 				mon_pdev->ppdu_info.com_info.ppdu_id, ppdu_id);
735 			rx_mon_stats->mon_rx_dest_stuck++;
736 			mon_pdev->ppdu_info.com_info.ppdu_id = ppdu_id;
737 			continue;
738 		}
739 
740 		if (ppdu_id != mon_pdev->ppdu_info.com_info.ppdu_id) {
741 			rx_mon_stats->stat_ring_ppdu_id_hist[
742 				rx_mon_stats->ppdu_id_hist_idx] =
743 				mon_pdev->ppdu_info.com_info.ppdu_id;
744 			rx_mon_stats->dest_ring_ppdu_id_hist[
745 				rx_mon_stats->ppdu_id_hist_idx] = ppdu_id;
746 			rx_mon_stats->ppdu_id_hist_idx =
747 				(rx_mon_stats->ppdu_id_hist_idx + 1) &
748 					(MAX_PPDU_ID_HIST - 1);
749 			mon_pdev->mon_ppdu_status = DP_PPDU_STATUS_START;
750 			qdf_mem_zero(&mon_pdev->ppdu_info.rx_status,
751 				     sizeof(mon_pdev->ppdu_info.rx_status));
752 			dp_rx_mon_dest_debug("%pK: ppdu_id %x != ppdu_info.com_info.ppdu_id %x",
753 					     soc, ppdu_id,
754 					     mon_pdev->ppdu_info.com_info.ppdu_id);
755 			break;
756 		}
757 
758 		if (qdf_likely((head_msdu) && (tail_msdu))) {
759 			rx_mon_stats->dest_mpdu_done++;
760 			dp_rx_mon_deliver(soc, mac_id, head_msdu, tail_msdu);
761 		}
762 
763 		rxdma_dst_ring_desc =
764 			hal_srng_dst_get_next(hal_soc,
765 					      mon_dst_srng);
766 	}
767 
768 	dp_srng_access_end(int_ctx, soc, mon_dst_srng);
769 
770 	qdf_spin_unlock_bh(&mon_pdev->mon_lock);
771 
772 	if (rx_bufs_used) {
773 		rx_mon_stats->dest_ppdu_done++;
774 		dp_rx_buffers_replenish(soc, mac_id,
775 					dp_rxdma_get_mon_buf_ring(pdev,
776 								  mac_for_pdev),
777 					dp_rx_get_mon_desc_pool(soc, mac_id,
778 								pdev_id),
779 					rx_bufs_used, &head, &tail, false);
780 	}
781 }
782 
783 QDF_STATUS
784 dp_rx_pdev_mon_buf_buffers_alloc(struct dp_pdev *pdev, uint32_t mac_id,
785 				 bool delayed_replenish)
786 {
787 	uint8_t pdev_id = pdev->pdev_id;
788 	struct dp_soc *soc = pdev->soc;
789 	struct dp_srng *mon_buf_ring;
790 	uint32_t num_entries;
791 	struct rx_desc_pool *rx_desc_pool;
792 	QDF_STATUS status = QDF_STATUS_SUCCESS;
793 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
794 
795 	mon_buf_ring = dp_rxdma_get_mon_buf_ring(pdev, mac_id);
796 
797 	num_entries = mon_buf_ring->num_entries;
798 
799 	rx_desc_pool = dp_rx_get_mon_desc_pool(soc, mac_id, pdev_id);
800 
801 	dp_debug("Mon RX Desc Pool[%d] entries=%u", pdev_id, num_entries);
802 
803 	/* Replenish the RXDMA monitor buffer ring with 8 buffers only.
804 	 * delayed_replenish_entries is actually 8, but dp_pdev_rx_buffers_attach()
805 	 * is called with 1 less than the requested count, hence 1 is added to
806 	 * delayed_replenish_entries to ensure 8 entries are attached.
807 	 * Once the monitor VAP is configured, the complete RXDMA monitor
808 	 * buffer ring is replenished.
809 	 */
810 	if (delayed_replenish) {
811 		num_entries = soc_cfg_ctx->delayed_replenish_entries + 1;
812 		status = dp_pdev_rx_buffers_attach(soc, mac_id, mon_buf_ring,
813 						   rx_desc_pool,
814 						   num_entries - 1);
815 	} else {
816 		union dp_rx_desc_list_elem_t *tail = NULL;
817 		union dp_rx_desc_list_elem_t *desc_list = NULL;
818 
819 		status = dp_rx_buffers_replenish(soc, mac_id,
820 						 mon_buf_ring,
821 						 rx_desc_pool,
822 						 num_entries,
823 						 &desc_list,
824 						 &tail, false);
825 	}
826 
827 	return status;
828 }
829 
830 void
831 dp_rx_pdev_mon_buf_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_id)
832 {
833 	uint8_t pdev_id = pdev->pdev_id;
834 	struct dp_soc *soc = pdev->soc;
835 	struct dp_srng *mon_buf_ring;
836 	uint32_t num_entries;
837 	struct rx_desc_pool *rx_desc_pool;
838 	uint32_t rx_desc_pool_size;
839 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
840 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
841 
842 	mon_buf_ring = &soc->rxdma_mon_buf_ring[mac_id];
843 
844 	num_entries = mon_buf_ring->num_entries;
845 
846 	rx_desc_pool = &soc->rx_desc_mon[mac_id];
847 
848 	/* If descriptor pool is already initialized, do not initialize it */
849 	if (rx_desc_pool->freelist)
850 		return;
851 
852 	dp_debug("Mon RX Desc buf Pool[%d] init entries=%u",
853 		 pdev_id, num_entries);
854 
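	/*
	 * The SW descriptor pool is sized as a configurable multiple (the
	 * "rx sw desc weight") of the monitor buffer ring size.
	 */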
855 	rx_desc_pool_size = wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc_cfg_ctx) *
856 		num_entries;
857 
858 	rx_desc_pool->owner = HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id);
859 	rx_desc_pool->buf_size = RX_MONITOR_BUFFER_SIZE;
860 	rx_desc_pool->buf_alignment = RX_MONITOR_BUFFER_ALIGNMENT;
861 	/* Enable frag processing if feature is enabled */
862 	dp_rx_enable_mon_dest_frag(rx_desc_pool, true);
863 
864 	dp_rx_desc_pool_init(soc, mac_id, rx_desc_pool_size, rx_desc_pool);
865 
866 	mon_pdev->mon_last_linkdesc_paddr = 0;
867 
868 	mon_pdev->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
869 
870 	/* Attach full monitor mode resources */
871 	dp_full_mon_attach(pdev);
872 }
873 
874 static void
875 dp_rx_pdev_mon_buf_desc_pool_deinit(struct dp_pdev *pdev, uint32_t mac_id)
876 {
877 	uint8_t pdev_id = pdev->pdev_id;
878 	struct dp_soc *soc = pdev->soc;
879 	struct rx_desc_pool *rx_desc_pool;
880 
881 	rx_desc_pool = &soc->rx_desc_mon[mac_id];
882 
883 	dp_debug("Mon RX Desc buf Pool[%d] deinit", pdev_id);
884 
885 	dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_id);
886 
887 	/* Detach full monitor mode resources */
888 	dp_full_mon_detach(pdev);
889 }
890 
891 static void
892 dp_rx_pdev_mon_buf_desc_pool_free(struct dp_pdev *pdev, uint32_t mac_id)
893 {
894 	uint8_t pdev_id = pdev->pdev_id;
895 	struct dp_soc *soc = pdev->soc;
896 	struct rx_desc_pool *rx_desc_pool;
897 
898 	rx_desc_pool = &soc->rx_desc_mon[mac_id];
899 
900 	dp_debug("Mon RX Buf Desc Pool Free pdev[%d]", pdev_id);
901 
902 	dp_rx_desc_pool_free(soc, rx_desc_pool);
903 }
904 
905 void dp_rx_pdev_mon_buf_buffers_free(struct dp_pdev *pdev, uint32_t mac_id)
906 {
907 	uint8_t pdev_id = pdev->pdev_id;
908 	struct dp_soc *soc = pdev->soc;
909 	struct rx_desc_pool *rx_desc_pool;
910 
911 	rx_desc_pool = &soc->rx_desc_mon[mac_id];
912 
913 	dp_debug("Mon RX Buf buffers Free pdev[%d]", pdev_id);
914 
915 	if (rx_desc_pool->rx_mon_dest_frag_enable)
916 		dp_rx_desc_frag_free(soc, rx_desc_pool);
917 	else
918 		dp_rx_desc_nbuf_free(soc, rx_desc_pool, true);
919 }
920 
921 QDF_STATUS
922 dp_rx_pdev_mon_buf_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_id)
923 {
924 	uint8_t pdev_id = pdev->pdev_id;
925 	struct dp_soc *soc = pdev->soc;
926 	struct dp_srng *mon_buf_ring;
927 	uint32_t num_entries;
928 	struct rx_desc_pool *rx_desc_pool;
929 	uint32_t rx_desc_pool_size;
930 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
931 
932 	mon_buf_ring = &soc->rxdma_mon_buf_ring[mac_id];
933 
934 	num_entries = mon_buf_ring->num_entries;
935 
936 	rx_desc_pool = &soc->rx_desc_mon[mac_id];
937 
938 	dp_debug("Mon RX Desc Pool[%d] entries=%u",
939 		 pdev_id, num_entries);
940 
941 	rx_desc_pool_size = wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc_cfg_ctx) *
942 		num_entries;
943 
944 	if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_SUCCESS)
945 		return QDF_STATUS_SUCCESS;
946 
947 	return dp_rx_desc_pool_alloc(soc, rx_desc_pool_size, rx_desc_pool);
948 }
949 
950 #if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
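/**
 * dp_mon_dest_srng_drop_for_mac() - Reap and drop MPDUs from the monitor
 *				     destination ring of the given MAC, up to
 *				     MON_DROP_REAP_LIMIT entries per call
 * @pdev: DP pdev handle
 * @mac_id: MAC id whose destination ring is to be drained
 *
 * Return: Number of destination ring entries reaped
 */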
951 uint32_t
952 dp_mon_dest_srng_drop_for_mac(struct dp_pdev *pdev, uint32_t mac_id)
953 {
954 	struct dp_soc *soc = pdev->soc;
955 	hal_rxdma_desc_t rxdma_dst_ring_desc;
956 	hal_soc_handle_t hal_soc;
957 	void *mon_dst_srng;
958 	union dp_rx_desc_list_elem_t *head = NULL;
959 	union dp_rx_desc_list_elem_t *tail = NULL;
960 	uint32_t rx_bufs_used = 0;
961 	struct rx_desc_pool *rx_desc_pool;
962 	uint32_t reap_cnt = 0;
963 	uint32_t rx_bufs_dropped;
964 	struct dp_mon_pdev *mon_pdev;
965 	bool is_rxdma_dst_ring_common;
966 
967 	if (qdf_unlikely(!soc || !soc->hal_soc))
968 		return reap_cnt;
969 
970 	mon_dst_srng = dp_rxdma_get_mon_dst_ring(pdev, mac_id);
971 
972 	if (qdf_unlikely(!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)))
973 		return reap_cnt;
974 
975 	hal_soc = soc->hal_soc;
976 	mon_pdev = pdev->monitor_pdev;
977 
978 	qdf_spin_lock_bh(&mon_pdev->mon_lock);
979 
980 	if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_dst_srng))) {
981 		qdf_spin_unlock_bh(&mon_pdev->mon_lock);
982 		return reap_cnt;
983 	}
984 
985 	rx_desc_pool = dp_rx_get_mon_desc_pool(soc, mac_id, pdev->pdev_id);
986 	is_rxdma_dst_ring_common = dp_is_rxdma_dst_ring_common(pdev);
987 
988 	while ((rxdma_dst_ring_desc =
989 		hal_srng_dst_peek(hal_soc, mon_dst_srng)) &&
990 		reap_cnt < MON_DROP_REAP_LIMIT) {
991 		if (is_rxdma_dst_ring_common) {
992 			if (QDF_STATUS_SUCCESS ==
993 			    dp_rx_mon_check_n_drop_mpdu(pdev, mac_id,
994 							rxdma_dst_ring_desc,
995 							&head, &tail,
996 							&rx_bufs_dropped)) {
997 				/* Increment stats */
998 				rx_bufs_used += rx_bufs_dropped;
999 			} else {
1000 				/*
1001 				 * If the mpdu was not dropped, we need to
1002 				 * wait for the entry to be processed, along
1003 				 * with the status ring entry for the other
1004 				 * mac. Hence we bail out here.
1005 				 */
1006 				break;
1007 			}
1008 		} else {
1009 			rx_bufs_used += dp_rx_mon_drop_one_mpdu(pdev, mac_id,
1010 								rxdma_dst_ring_desc,
1011 								&head, &tail);
1012 		}
1013 		reap_cnt++;
1014 		rxdma_dst_ring_desc = hal_srng_dst_get_next(hal_soc,
1015 							    mon_dst_srng);
1016 	}
1017 
1018 	hal_srng_access_end(hal_soc, mon_dst_srng);
1019 
1020 	qdf_spin_unlock_bh(&mon_pdev->mon_lock);
1021 
1022 	if (rx_bufs_used) {
1023 		dp_rx_buffers_replenish(soc, mac_id,
1024 					dp_rxdma_get_mon_buf_ring(pdev, mac_id),
1025 					rx_desc_pool,
1026 					rx_bufs_used, &head, &tail, false);
1027 	}
1028 
1029 	return reap_cnt;
1030 }
1031 #endif
1032 
1033 static void
1034 dp_rx_pdev_mon_dest_desc_pool_free(struct dp_pdev *pdev, int mac_for_pdev)
1035 {
1036 	struct dp_soc *soc = pdev->soc;
1037 
1038 	dp_rx_pdev_mon_buf_desc_pool_free(pdev, mac_for_pdev);
1039 	dp_hw_link_desc_pool_banks_free(soc, mac_for_pdev);
1040 }
1041 
1042 static void
1043 dp_rx_pdev_mon_dest_desc_pool_deinit(struct dp_pdev *pdev, int mac_for_pdev)
1044 {
1045 	struct dp_soc *soc = pdev->soc;
1046 
1047 	if (!soc->wlan_cfg_ctx->rxdma1_enable)
1048 		return;
1049 
1050 	dp_rx_pdev_mon_buf_desc_pool_deinit(pdev, mac_for_pdev);
1051 }
1052 
1053 static void
1054 dp_rx_pdev_mon_dest_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_for_pdev)
1055 {
1056 	struct dp_soc *soc = pdev->soc;
1057 
1058 	if (!soc->wlan_cfg_ctx->rxdma1_enable ||
1059 	    !wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx))
1060 		return;
1061 
1062 	dp_rx_pdev_mon_buf_desc_pool_init(pdev, mac_for_pdev);
1063 	dp_link_desc_ring_replenish(soc, mac_for_pdev);
1064 }
1065 
1066 static void
1067 dp_rx_pdev_mon_dest_buffers_free(struct dp_pdev *pdev, int mac_for_pdev)
1068 {
1069 	struct dp_soc *soc = pdev->soc;
1070 
1071 	if (!soc->wlan_cfg_ctx->rxdma1_enable)
1072 		return;
1073 
1074 	dp_rx_pdev_mon_buf_buffers_free(pdev, mac_for_pdev);
1075 }
1076 
1077 static QDF_STATUS
1078 dp_rx_pdev_mon_dest_buffers_alloc(struct dp_pdev *pdev, int mac_for_pdev)
1079 {
1080 	struct dp_soc *soc = pdev->soc;
1081 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
1082 	bool delayed_replenish;
1083 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1084 
1085 	delayed_replenish = soc_cfg_ctx->delayed_replenish_entries ? 1 : 0;
1086 	if (!soc->wlan_cfg_ctx->rxdma1_enable ||
1087 	    !wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx))
1088 		return status;
1089 
1090 	status = dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev,
1091 						  delayed_replenish);
1092 	if (!QDF_IS_STATUS_SUCCESS(status))
1093 		dp_err("dp_rx_pdev_mon_buf_desc_pool_alloc() failed");
1094 
1095 	return status;
1096 }
1097 
1098 static QDF_STATUS
1099 dp_rx_pdev_mon_dest_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_for_pdev)
1100 {
1101 	struct dp_soc *soc = pdev->soc;
1102 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1103 
1104 	if (!soc->wlan_cfg_ctx->rxdma1_enable ||
1105 	    !wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx))
1106 		return status;
1107 
1108 	/* Allocate sw rx descriptor pool for monitor RxDMA buffer ring */
1109 	status = dp_rx_pdev_mon_buf_desc_pool_alloc(pdev, mac_for_pdev);
1110 	if (!QDF_IS_STATUS_SUCCESS(status)) {
1111 		dp_err("dp_rx_pdev_mon_buf_desc_pool_alloc() failed");
1112 		goto fail;
1113 	}
1114 
1115 	/* Allocate link descriptors for the monitor link descriptor ring */
1116 	status = dp_hw_link_desc_pool_banks_alloc(soc, mac_for_pdev);
1117 	if (!QDF_IS_STATUS_SUCCESS(status)) {
1118 		dp_err("dp_hw_link_desc_pool_banks_alloc() failed");
1119 		goto mon_buf_dealloc;
1120 	}
1121 
1122 	return status;
1123 
1124 mon_buf_dealloc:
1125 	dp_rx_pdev_mon_status_desc_pool_free(pdev, mac_for_pdev);
1126 fail:
1127 	return status;
1128 }
1129 #else
1130 static void
1131 dp_rx_pdev_mon_dest_desc_pool_free(struct dp_pdev *pdev, int mac_for_pdev)
1132 {
1133 }
1134 
1135 static void
1136 dp_rx_pdev_mon_dest_desc_pool_deinit(struct dp_pdev *pdev, int mac_for_pdev)
1137 {
1138 }
1139 
1140 static void
1141 dp_rx_pdev_mon_dest_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_for_pdev)
1142 {
1143 }
1144 
1145 static void
1146 dp_rx_pdev_mon_dest_buffers_free(struct dp_pdev *pdev, int mac_for_pdev)
1147 {
1148 }
1149 
1150 static QDF_STATUS
1151 dp_rx_pdev_mon_dest_buffers_alloc(struct dp_pdev *pdev, int mac_for_pdev)
1152 {
1153 	return QDF_STATUS_SUCCESS;
1154 }
1155 
1156 static QDF_STATUS
1157 dp_rx_pdev_mon_dest_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_for_pdev)
1158 {
1159 	return QDF_STATUS_SUCCESS;
1160 }
1161 
1162 #if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
1163 uint32_t
1164 dp_mon_dest_srng_drop_for_mac(struct dp_pdev *pdev, uint32_t mac_id)
1165 {
1166 	return 0;
1167 }
1168 #endif
1169 
1170 #if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_NON_MON_PMAC)
1171 static QDF_STATUS
1172 dp_rx_mon_check_n_drop_mpdu(struct dp_pdev *pdev, uint32_t mac_id,
1173 			    hal_rxdma_desc_t rxdma_dst_ring_desc,
1174 			    union dp_rx_desc_list_elem_t **head,
1175 			    union dp_rx_desc_list_elem_t **tail,
1176 			    uint32_t *rx_bufs_dropped)
1177 {
1178 	return QDF_STATUS_E_FAILURE;
1179 }
1180 #endif
1181 #endif
1182 
1183 static void
1184 dp_rx_pdev_mon_cmn_desc_pool_free(struct dp_pdev *pdev, int mac_id)
1185 {
1186 	struct dp_soc *soc = pdev->soc;
1187 	uint8_t pdev_id = pdev->pdev_id;
1188 	int mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id);
1189 
1190 	dp_rx_pdev_mon_status_desc_pool_free(pdev, mac_for_pdev);
1191 	dp_rx_pdev_mon_dest_desc_pool_free(pdev, mac_for_pdev);
1192 }
1193 
1194 void dp_rx_pdev_mon_desc_pool_free(struct dp_pdev *pdev)
1195 {
1196 	int mac_id;
1197 
1198 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++)
1199 		dp_rx_pdev_mon_cmn_desc_pool_free(pdev, mac_id);
1200 }
1201 
1202 static void
1203 dp_rx_pdev_mon_cmn_desc_pool_deinit(struct dp_pdev *pdev, int mac_id)
1204 {
1205 	struct dp_soc *soc = pdev->soc;
1206 	uint8_t pdev_id = pdev->pdev_id;
1207 	int mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id);
1208 
1209 	dp_rx_pdev_mon_status_desc_pool_deinit(pdev, mac_for_pdev);
1210 
1211 	dp_rx_pdev_mon_dest_desc_pool_deinit(pdev, mac_for_pdev);
1212 }
1213 
1214 void
1215 dp_rx_pdev_mon_desc_pool_deinit(struct dp_pdev *pdev)
1216 {
1217 	int mac_id;
1218 
1219 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++)
1220 		dp_rx_pdev_mon_cmn_desc_pool_deinit(pdev, mac_id);
1221 	qdf_spinlock_destroy(&pdev->monitor_pdev->mon_lock);
1222 }
1223 
1224 static void
1225 dp_rx_pdev_mon_cmn_desc_pool_init(struct dp_pdev *pdev, int mac_id)
1226 {
1227 	struct dp_soc *soc = pdev->soc;
1228 	uint32_t mac_for_pdev;
1229 
1230 	mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id);
1231 	dp_rx_pdev_mon_status_desc_pool_init(pdev, mac_for_pdev);
1232 
1233 	dp_rx_pdev_mon_dest_desc_pool_init(pdev, mac_for_pdev);
1234 }
1235 
1236 void
1237 dp_rx_pdev_mon_desc_pool_init(struct dp_pdev *pdev)
1238 {
1239 	int mac_id;
1240 
1241 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++)
1242 		dp_rx_pdev_mon_cmn_desc_pool_init(pdev, mac_id);
1243 	qdf_spinlock_create(&pdev->monitor_pdev->mon_lock);
1244 }
1245 
1246 static void
1247 dp_rx_pdev_mon_cmn_buffers_free(struct dp_pdev *pdev, int mac_id)
1248 {
1249 	uint8_t pdev_id = pdev->pdev_id;
1250 	int mac_for_pdev;
1251 
1252 	mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id, pdev_id);
1253 	dp_rx_pdev_mon_status_buffers_free(pdev, mac_for_pdev);
1254 
1255 	dp_rx_pdev_mon_dest_buffers_free(pdev, mac_for_pdev);
1256 }
1257 
1258 void
1259 dp_rx_pdev_mon_buffers_free(struct dp_pdev *pdev)
1260 {
1261 	int mac_id;
1262 
1263 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++)
1264 		dp_rx_pdev_mon_cmn_buffers_free(pdev, mac_id);
1265 	pdev->monitor_pdev->pdev_mon_init = 0;
1266 }
1267 
1268 QDF_STATUS
1269 dp_rx_pdev_mon_buffers_alloc(struct dp_pdev *pdev)
1270 {
1271 	int mac_id;
1272 	int mac_for_pdev;
1273 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1274 	uint8_t pdev_id = pdev->pdev_id;
1275 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = pdev->soc->wlan_cfg_ctx;
1276 
1277 	for (mac_id = 0; mac_id < soc_cfg_ctx->num_rxdma_status_rings_per_pdev;
1278 	     mac_id++) {
1279 		mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id,
1280 							  pdev_id);
1281 		status = dp_rx_pdev_mon_status_buffers_alloc(pdev,
1282 							     mac_for_pdev);
1283 		if (!QDF_IS_STATUS_SUCCESS(status)) {
1284 			dp_err("dp_rx_pdev_mon_status_desc_pool_alloc() failed");
1285 			goto mon_status_buf_fail;
1286 		}
1287 	}
1288 
1289 	for (mac_id = 0; mac_id < soc_cfg_ctx->num_rxdma_dst_rings_per_pdev;
1290 	     mac_id++) {
1291 		mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id,
1292 							  pdev_id);
1293 		status = dp_rx_pdev_mon_dest_buffers_alloc(pdev, mac_for_pdev);
1294 		if (!QDF_IS_STATUS_SUCCESS(status))
1295 			goto mon_stat_buf_dealloc;
1296 	}
1297 
1298 	return status;
1299 
1300 mon_stat_buf_dealloc:
1301 	dp_rx_pdev_mon_status_buffers_free(pdev, mac_for_pdev);
1302 mon_status_buf_fail:
1303 	return status;
1304 }
1305 
1306 static QDF_STATUS
1307 dp_rx_pdev_mon_cmn_desc_pool_alloc(struct dp_pdev *pdev, int mac_id)
1308 {
1309 	struct dp_soc *soc = pdev->soc;
1310 	uint8_t pdev_id = pdev->pdev_id;
1311 	uint32_t mac_for_pdev;
1312 	QDF_STATUS status;
1313 
1314 	mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id);
1315 
1316 	/* Allocate sw rx descriptor pool for monitor status ring */
1317 	status = dp_rx_pdev_mon_status_desc_pool_alloc(pdev, mac_for_pdev);
1318 	if (!QDF_IS_STATUS_SUCCESS(status)) {
1319 		dp_err("dp_rx_pdev_mon_status_desc_pool_alloc() failed");
1320 		goto fail;
1321 	}
1322 
1323 	status = dp_rx_pdev_mon_dest_desc_pool_alloc(pdev, mac_for_pdev);
1324 	if (!QDF_IS_STATUS_SUCCESS(status))
1325 		goto mon_status_dealloc;
1326 
1327 	return status;
1328 
1329 mon_status_dealloc:
1330 	dp_rx_pdev_mon_status_desc_pool_free(pdev, mac_for_pdev);
1331 fail:
1332 	return status;
1333 }
1334 
1335 QDF_STATUS
1336 dp_rx_pdev_mon_desc_pool_alloc(struct dp_pdev *pdev)
1337 {
1338 	QDF_STATUS status;
1339 	int mac_id, count;
1340 
1341 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
1342 		status = dp_rx_pdev_mon_cmn_desc_pool_alloc(pdev, mac_id);
1343 		if (!QDF_IS_STATUS_SUCCESS(status)) {
1344 			dp_rx_mon_dest_err("%pK: %d failed\n",
1345 					   pdev->soc, mac_id);
1346 
1347 			for (count = 0; count < mac_id; count++)
1348 				dp_rx_pdev_mon_cmn_desc_pool_free(pdev, count);
1349 
1350 			return status;
1351 		}
1352 	}
1353 	return status;
1354 }
1355 
1356 #ifdef QCA_WIFI_MONITOR_MODE_NO_MSDU_START_TLV_SUPPORT
1357 static inline void
1358 hal_rx_populate_buf_info(struct dp_soc *soc,
1359 			 struct hal_rx_mon_dest_buf_info *buf_info,
1360 			 void *rx_desc)
1361 {
1362 	hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_desc,
1363 				      (uint8_t *)buf_info,
1364 				      sizeof(*buf_info));
1365 }
1366 
1367 static inline uint8_t
1368 hal_rx_frag_msdu_get_l2_hdr_offset(struct dp_soc *soc,
1369 				   struct hal_rx_mon_dest_buf_info *buf_info,
1370 				   void *rx_desc, bool is_first_frag)
1371 {
1372 	if (is_first_frag)
1373 		return buf_info->l2_hdr_pad;
1374 	else
1375 		return DP_RX_MON_RAW_L2_HDR_PAD_BYTE;
1376 }
1377 #else
1378 static inline void
1379 hal_rx_populate_buf_info(struct dp_soc *soc,
1380 			 struct hal_rx_mon_dest_buf_info *buf_info,
1381 			 void *rx_desc)
1382 {
1383 	if (hal_rx_tlv_decap_format_get(soc->hal_soc, rx_desc) ==
1384 	    HAL_HW_RX_DECAP_FORMAT_RAW)
1385 		buf_info->is_decap_raw = 1;
1386 
1387 	if (hal_rx_tlv_mpdu_len_err_get(soc->hal_soc, rx_desc))
1388 		buf_info->mpdu_len_err = 1;
1389 }
1390 
1391 static inline uint8_t
1392 hal_rx_frag_msdu_get_l2_hdr_offset(struct dp_soc *soc,
1393 				   struct hal_rx_mon_dest_buf_info *buf_info,
1394 				   void *rx_desc, bool is_first_frag)
1395 {
1396 	return hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_desc);
1397 }
1398 #endif
1399 
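/**
 * dp_rx_msdus_set_payload() - Advance the nbuf data pointer past the RX
 *			       packet TLVs and L2 header padding so that it
 *			       points at the MSDU payload
 * @soc: core DP main context
 * @msdu: MSDU nbuf to adjust
 * @l2_hdr_offset: L2 header padding in bytes
 */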
1400 static inline
1401 void dp_rx_msdus_set_payload(struct dp_soc *soc, qdf_nbuf_t msdu,
1402 			     uint8_t l2_hdr_offset)
1403 {
1404 	uint8_t *data;
1405 	uint32_t rx_pkt_offset;
1406 
1407 	data = qdf_nbuf_data(msdu);
1408 	rx_pkt_offset = dp_rx_mon_get_rx_pkt_tlv_size(soc);
1409 	qdf_nbuf_pull_head(msdu, rx_pkt_offset + l2_hdr_offset);
1410 }
1411 
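/**
 * dp_rx_mon_restitch_mpdu_from_msdus() - Restitch one MPDU from the chain of
 *			MSDU nbufs reaped from the monitor destination ring:
 *			either build the raw-frame fraglist, or rebuild the
 *			802.11 header and stitch the decapped MSDUs behind it
 * @soc: core DP main context
 * @mac_id: MAC id which is being currently processed
 * @head_msdu: head of the MSDU nbuf chain
 * @last_msdu: last nbuf in the MSDU chain
 * @rx_status: monitor receive status for this PPDU
 *
 * Return: nbuf carrying the restitched MPDU, or NULL on failure
 */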
1412 static inline qdf_nbuf_t
1413 dp_rx_mon_restitch_mpdu_from_msdus(struct dp_soc *soc,
1414 				   uint32_t mac_id,
1415 				   qdf_nbuf_t head_msdu,
1416 				   qdf_nbuf_t last_msdu,
1417 				   struct cdp_mon_status *rx_status)
1418 {
1419 	qdf_nbuf_t msdu, mpdu_buf, prev_buf, msdu_orig, head_frag_list;
1420 	uint32_t wifi_hdr_len, sec_hdr_len, msdu_llc_len,
1421 		mpdu_buf_len, decap_hdr_pull_bytes, frag_list_sum_len, dir,
1422 		is_amsdu, is_first_frag, amsdu_pad;
1423 	void *rx_desc;
1424 	char *hdr_desc;
1425 	unsigned char *dest;
1426 	struct ieee80211_frame *wh;
1427 	struct ieee80211_qoscntl *qos;
1428 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1429 	struct dp_mon_pdev *mon_pdev;
1430 	struct hal_rx_mon_dest_buf_info buf_info;
1431 	uint8_t l2_hdr_offset;
1432 
1433 	head_frag_list = NULL;
1434 	mpdu_buf = NULL;
1435 
1436 	if (qdf_unlikely(!dp_pdev)) {
1437 		dp_rx_mon_dest_debug("%pK: pdev is null for mac_id = %d",
1438 				     soc, mac_id);
1439 		return NULL;
1440 	}
1441 
1442 	mon_pdev = dp_pdev->monitor_pdev;
1443 
1444 	/* The nbuf has been pulled just beyond the status and points to the
1445 	 * payload
1446 	 */
1447 	if (!head_msdu)
1448 		goto mpdu_stitch_fail;
1449 
1450 	msdu_orig = head_msdu;
1451 
1452 	rx_desc = qdf_nbuf_data(msdu_orig);
1453 	qdf_mem_zero(&buf_info, sizeof(buf_info));
1454 	hal_rx_populate_buf_info(soc, &buf_info, rx_desc);
1455 
1456 	if (buf_info.mpdu_len_err) {
1457 		/* There appears to be an issue with the MPDU length error case */
1458 		/* Needs further investigation to decide whether to drop the packet */
1459 		DP_STATS_INC(dp_pdev, dropped.mon_rx_drop, 1);
1460 		return NULL;
1461 	}
1462 
1463 	rx_desc = qdf_nbuf_data(last_msdu);
1464 
1465 	rx_status->cdp_rs_fcs_err = hal_rx_tlv_mpdu_fcs_err_get(soc->hal_soc,
1466 								rx_desc);
1467 	mon_pdev->ppdu_info.rx_status.rs_fcs_err = rx_status->cdp_rs_fcs_err;
1468 
1469 	/* Fill out the rx_status from the PPDU start and end fields */
1470 	/*   HAL_RX_GET_PPDU_STATUS(soc, mac_id, rx_status); */
1471 
1472 	rx_desc = qdf_nbuf_data(head_msdu);
1473 
1474 	/* Easy case - The MSDU status indicates that this is a non-decapped
1475 	 * packet in RAW mode.
1476 	 */
1477 	if (buf_info.is_decap_raw) {
1478 		/* Note that this path might suffer from headroom unavailability,
1479 		 * but the RX status is usually enough
1480 		 */
1481 
1482 		l2_hdr_offset = hal_rx_frag_msdu_get_l2_hdr_offset(soc,
1483 								   &buf_info,
1484 								   rx_desc,
1485 								   true);
1486 		dp_rx_msdus_set_payload(soc, head_msdu, l2_hdr_offset);
1487 
1488 		dp_rx_mon_dest_debug("%pK: decap format raw head %pK head->next %pK last_msdu %pK last_msdu->next %pK",
1489 				     soc, head_msdu, head_msdu->next,
1490 				     last_msdu, last_msdu->next);
1491 
1492 		mpdu_buf = head_msdu;
1493 
1494 		prev_buf = mpdu_buf;
1495 
1496 		frag_list_sum_len = 0;
1497 		msdu = qdf_nbuf_next(head_msdu);
1498 		is_first_frag = 1;
1499 
1500 		while (msdu) {
1501 			l2_hdr_offset = hal_rx_frag_msdu_get_l2_hdr_offset(
1502 							soc, &buf_info,
1503 							rx_desc, false);
1504 			dp_rx_msdus_set_payload(soc, msdu, l2_hdr_offset);
1505 
1506 			if (is_first_frag) {
1507 				is_first_frag = 0;
1508 				head_frag_list  = msdu;
1509 			}
1510 
1511 			frag_list_sum_len += qdf_nbuf_len(msdu);
1512 
1513 			/* Maintain the linking of the cloned MSDUS */
1514 			qdf_nbuf_set_next_ext(prev_buf, msdu);
1515 
1516 			/* Move to the next */
1517 			prev_buf = msdu;
1518 			msdu = qdf_nbuf_next(msdu);
1519 		}
1520 
1521 		qdf_nbuf_trim_tail(prev_buf, HAL_RX_FCS_LEN);
1522 
1523 		/* If there were more fragments to this RAW frame */
1524 		if (head_frag_list) {
1525 			if (frag_list_sum_len <
1526 				sizeof(struct ieee80211_frame_min_one)) {
1527 				DP_STATS_INC(dp_pdev, dropped.mon_rx_drop, 1);
1528 				return NULL;
1529 			}
1530 			frag_list_sum_len -= HAL_RX_FCS_LEN;
1531 			qdf_nbuf_append_ext_list(mpdu_buf, head_frag_list,
1532 						 frag_list_sum_len);
1533 			qdf_nbuf_set_next(mpdu_buf, NULL);
1534 		}
1535 
1536 		goto mpdu_stitch_done;
1537 	}
1538 
1539 	/* Decap mode:
1540 	 * Calculate the amount of header in decapped packet to knock off based
1541 	 * on the decap type and the corresponding number of raw bytes to copy
1542 	 * status header
1543 	 */
1544 	rx_desc = qdf_nbuf_data(head_msdu);
1545 
1546 	hdr_desc = hal_rx_desc_get_80211_hdr(soc->hal_soc, rx_desc);
1547 
1548 	dp_rx_mon_dest_debug("%pK: decap format not raw", soc);
1549 
1550 	/* Base size */
1551 	wifi_hdr_len = sizeof(struct ieee80211_frame);
1552 	wh = (struct ieee80211_frame *)hdr_desc;
1553 
1554 	dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
1555 
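	/* 4-address (WDS) frame: account for the additional 6-byte address */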
1556 	if (dir == IEEE80211_FC1_DIR_DSTODS)
1557 		wifi_hdr_len += 6;
1558 
1559 	is_amsdu = 0;
1560 	if (wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) {
1561 		qos = (struct ieee80211_qoscntl *)
1562 			(hdr_desc + wifi_hdr_len);
1563 		wifi_hdr_len += 2;
1564 
1565 		is_amsdu = (qos->i_qos[0] & IEEE80211_QOS_AMSDU);
1566 	}
1567 
1568 	/* Calculate security header length based on 'Protected'
1569 	 * and 'EXT_IV' flag
1570 	 */
1571 	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
1572 		char *iv = (char *)wh + wifi_hdr_len;
1573 
1574 		if (iv[3] & KEY_EXTIV)
1575 			sec_hdr_len = 8;
1576 		else
1577 			sec_hdr_len = 4;
1578 	} else {
1579 		sec_hdr_len = 0;
1580 	}
1581 	wifi_hdr_len += sec_hdr_len;
1582 
1583 	/* MSDU related stuff LLC - AMSDU subframe header etc */
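	/*
	 * 8 bytes of LLC/SNAP header; for A-MSDU, the 14-byte subframe header
	 * (DA + SA + length) precedes it.
	 */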
1584 	msdu_llc_len = is_amsdu ? (14 + 8) : 8;
1585 
1586 	mpdu_buf_len = wifi_hdr_len + msdu_llc_len;
1587 
1588 	/* "Decap" header to remove from MSDU buffer */
1589 	decap_hdr_pull_bytes = 14;
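	/*
	 * 14 bytes here is assumed to correspond to the Ethernet-style header
	 * (DA + SA + length/type) that decap placed in front of the MSDU.
	 */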
1590 
1591 	/* Allocate a new nbuf for holding the 802.11 header retrieved from the
1592 	 * status of the now decapped first msdu. Leave enough headroom for
1593 	 * accommodating any radiotap/prism like PHY header
1594 	 */
1595 	mpdu_buf = qdf_nbuf_alloc(soc->osdev,
1596 				  MAX_MONITOR_HEADER + mpdu_buf_len,
1597 				  MAX_MONITOR_HEADER, 4, FALSE);
1598 
1599 	if (!mpdu_buf)
1600 		goto mpdu_stitch_done;
1601 
1602 	/* Copy the MPDU related header and enc headers into the first buffer
1603 	 * - Note that there can be a 2 byte pad between header and enc header
1604 	 */
1605 
1606 	prev_buf = mpdu_buf;
1607 	dest = qdf_nbuf_put_tail(prev_buf, wifi_hdr_len);
1608 	if (!dest)
1609 		goto mpdu_stitch_fail;
1610 
1611 	qdf_mem_copy(dest, hdr_desc, wifi_hdr_len);
1612 	hdr_desc += wifi_hdr_len;
1613 
1614 #if 0
1615 	dest = qdf_nbuf_put_tail(prev_buf, sec_hdr_len);
1616 	adf_os_mem_copy(dest, hdr_desc, sec_hdr_len);
1617 	hdr_desc += sec_hdr_len;
1618 #endif
1619 
1620 	/* The first LLC len is copied into the MPDU buffer */
1621 	frag_list_sum_len = 0;
1622 
1623 	msdu_orig = head_msdu;
1624 	is_first_frag = 1;
1625 	amsdu_pad = 0;
1626 
1627 	while (msdu_orig) {
1628 
1629 		/* TODO: intra AMSDU padding - do we need it ??? */
1630 
1631 		msdu = msdu_orig;
1632 
1633 		if (is_first_frag) {
1634 			head_frag_list  = msdu;
1635 		} else {
1636 			/* Reload the hdr ptr only on non-first MSDUs */
1637 			rx_desc = qdf_nbuf_data(msdu_orig);
1638 			hdr_desc = hal_rx_desc_get_80211_hdr(soc->hal_soc,
1639 							     rx_desc);
1640 		}
1641 
1642 		/* Copy this buffers MSDU related status into the prev buffer */
1643 
1644 		if (is_first_frag)
1645 			is_first_frag = 0;
1646 
1647 		/* Update protocol and flow tag for MSDU */
1648 		dp_rx_mon_update_protocol_flow_tag(soc, dp_pdev,
1649 						   msdu_orig, rx_desc);
1650 
1651 		dest = qdf_nbuf_put_tail(prev_buf,
1652 					 msdu_llc_len + amsdu_pad);
1653 
1654 		if (!dest)
1655 			goto mpdu_stitch_fail;
1656 
1657 		dest += amsdu_pad;
1658 		qdf_mem_copy(dest, hdr_desc, msdu_llc_len);
1659 
1660 		l2_hdr_offset = hal_rx_frag_msdu_get_l2_hdr_offset(soc,
1661 								   &buf_info,
1662 								   rx_desc,
1663 								   true);
1664 		dp_rx_msdus_set_payload(soc, msdu, l2_hdr_offset);
1665 
1666 		/* Push the MSDU buffer beyond the decap header */
1667 		qdf_nbuf_pull_head(msdu, decap_hdr_pull_bytes);
1668 		frag_list_sum_len += msdu_llc_len + qdf_nbuf_len(msdu)
1669 			+ amsdu_pad;
1670 
1671 		/* Set up intra-AMSDU pad to be added to start of next buffer -
1672 		 * AMSDU pad is 4 byte pad on AMSDU subframe
1673 		 */
1674 		amsdu_pad = (msdu_llc_len + qdf_nbuf_len(msdu)) & 0x3;
1675 		amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;
1676 
1677 		/* TODO FIXME How do we handle MSDUs that have a fraglist? Should
1678 		 * probably iterate all the frags, cloning them along the way and
1679 		 * also updating the prev_buf pointer
1680 		 */
1681 
1682 		/* Move to the next */
1683 		prev_buf = msdu;
1684 		msdu_orig = qdf_nbuf_next(msdu_orig);
1685 	}
1686 
1687 #if 0
1688 	/* Add in the trailer section - encryption trailer + FCS */
1689 	qdf_nbuf_put_tail(prev_buf, HAL_RX_FCS_LEN);
1690 	frag_list_sum_len += HAL_RX_FCS_LEN;
1691 #endif
1692 
1693 	frag_list_sum_len -= msdu_llc_len;
1694 
1695 	/* TODO: Convert this to suitable adf routines */
1696 	qdf_nbuf_append_ext_list(mpdu_buf, head_frag_list,
1697 				 frag_list_sum_len);
1698 
1699 	dp_rx_mon_dest_debug("%pK: mpdu_buf %pK mpdu_buf->len %u",
1700 			     soc, mpdu_buf, mpdu_buf->len);
1701 
1702 mpdu_stitch_done:
1703 	/* Check if this buffer contains the PPDU end status for TSF */
1704 	/* Need to revisit this code to see where we can get the tsf timestamp */
1705 #if 0
1706 	/* PPDU end TLV will be retrieved from monitor status ring */
1707 	last_mpdu =
1708 		(*(((u_int32_t *)&rx_desc->attention)) &
1709 		RX_ATTENTION_0_LAST_MPDU_MASK) >>
1710 		RX_ATTENTION_0_LAST_MPDU_LSB;
1711 
1712 	if (last_mpdu)
1713 		rx_status->rs_tstamp.tsf = rx_desc->ppdu_end.tsf_timestamp;
1714 
1715 #endif
1716 	return mpdu_buf;
1717 
1718 mpdu_stitch_fail:
1719 	if ((mpdu_buf) && !buf_info.is_decap_raw) {
1720 		dp_rx_mon_dest_err("%pK: mpdu_stitch_fail mpdu_buf %pK",
1721 				   soc, mpdu_buf);
1722 		/* Free the head buffer */
1723 		qdf_nbuf_free(mpdu_buf);
1724 	}
1725 	return NULL;
1726 }
1727 
1728 #ifdef DP_RX_MON_MEM_FRAG
1729 /**
1730  * dp_rx_mon_fraglist_prepare() - Prepare nbuf fraglist from chained skb
1731  *
1732  * @head_msdu: Parent SKB
1733  * @tail_msdu: Last skb in the chained list
1734  *
1735  * Return: Void
1736  */
1737 void dp_rx_mon_fraglist_prepare(qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu)
1738 {
1739 	qdf_nbuf_t msdu, mpdu_buf, head_frag_list;
1740 	uint32_t frag_list_sum_len;
1741 
1742 	dp_err("[%s][%d] decap format raw head %pK head->next %pK last_msdu %pK last_msdu->next %pK",
1743 	       __func__, __LINE__, head_msdu, head_msdu->next,
1744 	       tail_msdu, tail_msdu->next);
1745 
1746 	/* Single skb accommodating MPDU worth Data */
1747 	if (tail_msdu == head_msdu)
1748 		return;
1749 
1750 	mpdu_buf = head_msdu;
1751 	frag_list_sum_len = 0;
1752 
1753 	msdu = qdf_nbuf_next(head_msdu);
1754 	/* msdu can't be NULL here since this is the multiple-skb case */
1755 
1756 	/* Head frag list to point to second skb */
1757 	head_frag_list  = msdu;
1758 
1759 	while (msdu) {
1760 		frag_list_sum_len += qdf_nbuf_len(msdu);
1761 		msdu = qdf_nbuf_next(msdu);
1762 	}
1763 
1764 	qdf_nbuf_append_ext_list(mpdu_buf, head_frag_list, frag_list_sum_len);
1765 
1766 	/* Make Parent skb next to NULL */
1767 	qdf_nbuf_set_next(mpdu_buf, NULL);
1768 }
1769 
1770 /**
1771  * dp_rx_mon_frag_restitch_mpdu_from_msdus() - Restitch logic to
1772  * dp_rx_mon_frag_restitch_mpdu_from_msdus() - Restitch logic to
1773  *      convert to an 802.3 header and adjust the frag memory to point to
1774  *      the dot3 header and payload in case of a non-raw frame.
1775  * @soc: struct dp_soc *
1776  * @mac_id: MAC id
1777  * @head_msdu: MPDU containing all MSDU as a frag
1778  * @tail_msdu: last skb which accommodate MPDU info
1779  * @rx_status: struct cdp_mon_status *
1780  *
1781  * Return: Adjusted nbuf containing MPDU worth info.
1782  */
1783 static inline qdf_nbuf_t
1784 dp_rx_mon_frag_restitch_mpdu_from_msdus(struct dp_soc *soc,
1785 					uint32_t mac_id,
1786 					qdf_nbuf_t head_msdu,
1787 					qdf_nbuf_t tail_msdu,
1788 					struct cdp_mon_status *rx_status)
1789 {
1790 	uint32_t wifi_hdr_len, sec_hdr_len, msdu_llc_len,
1791 		mpdu_buf_len, decap_hdr_pull_bytes, dir,
1792 		is_amsdu, amsdu_pad, frag_size, tot_msdu_len;
1793 	qdf_frag_t rx_desc, rx_src_desc, rx_dest_desc, frag_addr;
1794 	char *hdr_desc;
1795 	uint8_t num_frags, frags_iter, l2_hdr_offset;
1796 	struct ieee80211_frame *wh;
1797 	struct ieee80211_qoscntl *qos;
1798 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1799 	int16_t frag_page_offset = 0;
1800 	struct hal_rx_mon_dest_buf_info buf_info;
1801 	uint32_t pad_byte_pholder = 0;
1802 	qdf_nbuf_t msdu_curr;
1803 	uint16_t rx_mon_tlv_size = soc->rx_mon_pkt_tlv_size;
1804 	struct dp_mon_pdev *mon_pdev;
1805 
1806 	if (qdf_unlikely(!dp_pdev)) {
1807 		dp_rx_mon_dest_debug("%pK: pdev is null for mac_id = %d",
1808 				     soc, mac_id);
1809 		return NULL;
1810 	}
1811 
1812 	mon_pdev = dp_pdev->monitor_pdev;
1813 	qdf_mem_zero(&buf_info, sizeof(struct hal_rx_mon_dest_buf_info));
1814 
1815 	if (!head_msdu || !tail_msdu)
1816 		goto mpdu_stitch_fail;
1817 
1818 	rx_desc = qdf_nbuf_get_frag_addr(head_msdu, 0) - rx_mon_tlv_size;
1819 
1820 	if (hal_rx_tlv_mpdu_len_err_get(soc->hal_soc, rx_desc)) {
1821 		/* It looks like there is some issue with the MPDU len err */
1822 		/* Need to investigate further whether to drop the packet */
1823 		DP_STATS_INC(dp_pdev, dropped.mon_rx_drop, 1);
1824 		return NULL;
1825 	}
1826 
1827 	/* Look for FCS error */
1828 	num_frags = qdf_nbuf_get_nr_frags(tail_msdu);
1829 	rx_desc = qdf_nbuf_get_frag_addr(tail_msdu, num_frags - 1) -
1830 				rx_mon_tlv_size;
1831 	rx_status->cdp_rs_fcs_err = hal_rx_tlv_mpdu_fcs_err_get(soc->hal_soc,
1832 								rx_desc);
1833 	mon_pdev->ppdu_info.rx_status.rs_fcs_err = rx_status->cdp_rs_fcs_err;
1834 
1835 	rx_desc = qdf_nbuf_get_frag_addr(head_msdu, 0) - rx_mon_tlv_size;
1836 	hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_desc,
1837 				      (uint8_t *)&buf_info,
1838 				      sizeof(buf_info));
1839 
1840 	/* Easy case - The MSDU status indicates that this is a non-decapped
1841 	 * packet in RAW mode.
1842 	 */
1843 	if (buf_info.is_decap_raw == 1) {
1844 		dp_rx_mon_fraglist_prepare(head_msdu, tail_msdu);
1845 		goto mpdu_stitch_done;
1846 	}
1847 
1848 	l2_hdr_offset = DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE;
1849 
1850 	/* Decap mode:
1851 	 * Calculate the amount of header in the decapped packet to knock off,
1852 	 * based on the decap type, and the corresponding number of raw bytes
1853 	 * to copy from the status header
1854 	 */
1855 	hdr_desc = hal_rx_desc_get_80211_hdr(soc->hal_soc, rx_desc);
1856 
1857 	dp_rx_mon_dest_debug("%pK: decap format not raw", soc);
1858 
1859 	/* Base size */
1860 	wifi_hdr_len = sizeof(struct ieee80211_frame);
1861 	wh = (struct ieee80211_frame *)hdr_desc;
1862 
1863 	dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
1864 
1865 	if (dir == IEEE80211_FC1_DIR_DSTODS)
1866 		wifi_hdr_len += 6;
1867 
1868 	is_amsdu = 0;
1869 	if (wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) {
1870 		qos = (struct ieee80211_qoscntl *)
1871 			(hdr_desc + wifi_hdr_len);
1872 		wifi_hdr_len += 2;
1873 
1874 		is_amsdu = (qos->i_qos[0] & IEEE80211_QOS_AMSDU);
1875 	}
1876 
1877 	/* Calculate security header length based on the 'Protected'
1878 	 * and 'EXT_IV' flags
1879 	 */
1880 	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
1881 		char *iv = (char *)wh + wifi_hdr_len;
1882 
1883 		if (iv[3] & KEY_EXTIV)
1884 			sec_hdr_len = 8;
1885 		else
1886 			sec_hdr_len = 4;
1887 	} else {
1888 		sec_hdr_len = 0;
1889 	}
1890 	wifi_hdr_len += sec_hdr_len;
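	/* For reference (based on standard 802.11 security header sizes):
	 * the 4-byte case corresponds to a WEP IV, while the 8-byte case
	 * (EXT_IV set) covers TKIP/CCMP style IV + Extended IV fields.
	 */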
1891 
1892 	/* MSDU-related header bytes: LLC, plus AMSDU subframe header if A-MSDU */
1893 	msdu_llc_len = is_amsdu ? (14 + 8) : 8;
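	/* For reference: 8 bytes is the LLC/SNAP header; the extra 14 bytes in
	 * the A-MSDU case cover the A-MSDU subframe header (DA + SA + length).
	 */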
1894 
1895 	mpdu_buf_len = wifi_hdr_len + msdu_llc_len;
1896 
1897 	/* "Decap" header to remove from MSDU buffer */
1898 	decap_hdr_pull_bytes = 14;
1899 
1900 	amsdu_pad = 0;
1901 	tot_msdu_len = 0;
1902 
1903 	/*
1904 	 * keep first-MSDU handling outside of the loop to avoid repeated
1905 	 * checks
1906 	 */
1907 
1908 	/* Construct src header */
1909 	rx_src_desc = hdr_desc;
1910 
1911 	/*
1912 	 * Update the protocol and flow tags for the MSDU and
1913 	 * update the frag index in the ctx_idx field.
1914 	 * Reset the nbuf's head pointer data before updating.
1915 	 */
1916 	QDF_NBUF_CB_RX_CTX_ID(head_msdu) = 0;
1917 	dp_rx_mon_update_protocol_flow_tag(soc, dp_pdev, head_msdu, rx_desc);
1918 
1919 	/* Construct destination address */
1920 	frag_addr = qdf_nbuf_get_frag_addr(head_msdu, 0);
1921 	frag_size = qdf_nbuf_get_frag_size_by_idx(head_msdu, 0);
1922 	/* We will come here in 2 scenarios:
1923 	 * 1. First MSDU of MPDU with single buffer
1924 	 * 2. First buffer of First MSDU of MPDU with continuation
1925 	 *
1926 	 *  ------------------------------------------------------------
1927 	 * | SINGLE BUFFER (<= RX_MONITOR_BUFFER_SIZE - RX_PKT_TLVS_LEN)|
1928 	 *  ------------------------------------------------------------
1929 	 *
1930 	 *  ------------------------------------------------------------
1931 	 * | First BUFFER with Continuation             | ...           |
1932 	 * | (RX_MONITOR_BUFFER_SIZE - RX_PKT_TLVS_LEN) |               |
1933 	 *  ------------------------------------------------------------
1934 	 */
1935 	pad_byte_pholder =
1936 		(RX_MONITOR_BUFFER_SIZE - soc->rx_pkt_tlv_size) - frag_size;
1937 	/* Construct destination address
1938 	 *  --------------------------------------------------------------
1939 	 * | RX_PKT_TLV | L2_HDR_PAD   |   Decap HDR   |      Payload     |
1940 	 * |            |                              /                  |
1941 	 * |            >Frag address points here     /                   |
1942 	 * |            \                            /                    |
1943 	 * |             \ This bytes needs to      /                     |
1944 	 * |              \  removed to frame pkt  /                      |
1945 	 * |               -----------------------                        |
1946 	 * |                                      |                       |
1947 	 * |                                      |                       |
1948 	 * |   WIFI +LLC HDR will be added here <-|                       |
1949 	 * |        |                             |                       |
1950 	 * |         >Dest addr will point        |                       |
1951 	 * |            somewhere in this area    |                       |
1952 	 *  --------------------------------------------------------------
1953 	 */
1954 	rx_dest_desc =
1955 		(frag_addr + decap_hdr_pull_bytes + l2_hdr_offset) -
1956 					mpdu_buf_len;
1957 	/* Add WIFI and LLC header for 1st MSDU of MPDU */
1958 	qdf_mem_copy(rx_dest_desc, rx_src_desc, mpdu_buf_len);
1959 
1960 	frag_page_offset =
1961 		(decap_hdr_pull_bytes + l2_hdr_offset) - mpdu_buf_len;
1962 
1963 	qdf_nbuf_move_frag_page_offset(head_msdu, 0, frag_page_offset);
1964 
1965 	frag_size = qdf_nbuf_get_frag_size_by_idx(head_msdu, 0);
1966 
1967 	if (buf_info.first_buffer && buf_info.last_buffer) {
1968 		/* MSDU with single buffer */
1969 		amsdu_pad = frag_size & 0x3;
1970 		amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;
1971 		if (amsdu_pad && (amsdu_pad <= pad_byte_pholder)) {
1972 			char *frag_addr_temp;
1973 
1974 			qdf_nbuf_trim_add_frag_size(head_msdu, 0, amsdu_pad,
1975 						    0);
1976 			frag_addr_temp =
1977 				(char *)qdf_nbuf_get_frag_addr(head_msdu, 0);
1978 			frag_addr_temp = (frag_addr_temp +
1979 				qdf_nbuf_get_frag_size_by_idx(head_msdu, 0)) -
1980 					amsdu_pad;
1981 			qdf_mem_zero(frag_addr_temp, amsdu_pad);
1982 			amsdu_pad = 0;
1983 		}
1984 	} else {
1985 		/*
1986 		 * First buffer of a continuation frame, hence
1987 		 * amsdu_padding doesn't need to be added.
1988 		 * Increase tot_msdu_len so that the amsdu_pad byte
1989 		 * will be calculated for the last frame of the MSDU
1990 		 */
1991 		tot_msdu_len = frag_size;
1992 		amsdu_pad = 0;
1993 	}
1994 
1995 	/* Here amsdu_pad will have a non-zero value if the 1st buffer was a
1996 	 * single-buffer MSDU and didn't have a placeholder to absorb the amsdu
1997 	 * padding byte at the end,
1998 	 * so don't initialize it to ZERO here
1999 	 */
2000 	pad_byte_pholder = 0;
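	/*
	 * The loop below walks every skb in the chain and every frag within
	 * each skb. Frag 0 of head_msdu is skipped since the first MSDU's
	 * first buffer was already handled above.
	 */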
2001 	for (msdu_curr = head_msdu; msdu_curr;) {
2002 		/* frags_iter will start from 0 for the second skb onwards */
2003 		if (msdu_curr == head_msdu)
2004 			frags_iter = 1;
2005 		else
2006 			frags_iter = 0;
2007 
2008 		num_frags = qdf_nbuf_get_nr_frags(msdu_curr);
2009 
2010 		for (; frags_iter < num_frags; frags_iter++) {
2011 		/* Construct destination address
2012 		 *  ----------------------------------------------------------
2013 		 * | RX_PKT_TLV | L2_HDR_PAD   |   Decap HDR | Payload | Pad  |
2014 		 * |            | (First buffer)             |         |      |
2015 		 * |            |                            /        /       |
2016 		 * |            >Frag address points here   /        /        |
2017 		 * |            \                          /        /         |
2018 		 * |             \ This bytes needs to    /        /          |
2019 		 * |              \  removed to frame pkt/        /           |
2020 		 * |               ----------------------        /            |
2021 		 * |                                     |     /     Add      |
2022 		 * |                                     |    /   amsdu pad   |
2023 		 * |   LLC HDR will be added here      <-|    |   Byte for    |
2024 		 * |        |                            |    |   last frame  |
2025 		 * |         >Dest addr will point       |    |    if space   |
2026 		 * |            somewhere in this area   |    |    available  |
2027 		 * |  And amsdu_pad will be created if   |    |               |
2028 		 * | dint get added in last buffer       |    |               |
2029 		 * |       (First Buffer)                |    |               |
2030 		 *  ----------------------------------------------------------
2031 		 */
2032 			frag_addr =
2033 				qdf_nbuf_get_frag_addr(msdu_curr, frags_iter);
2034 			rx_desc = frag_addr - rx_mon_tlv_size;
2035 
2036 			/*
2037 			 * Update protocol and flow tag for MSDU
2038 			 * update frag index in ctx_idx field
2039 			 */
2040 			QDF_NBUF_CB_RX_CTX_ID(msdu_curr) = frags_iter;
2041 			dp_rx_mon_update_protocol_flow_tag(soc, dp_pdev,
2042 							   msdu_curr, rx_desc);
2043 
2044 			/* Read buffer info from stored data in tlvs */
2045 			hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_desc,
2046 						      (uint8_t *)&buf_info,
2047 						      sizeof(buf_info));
2048 
2049 			frag_size = qdf_nbuf_get_frag_size_by_idx(msdu_curr,
2050 								  frags_iter);
2051 
2052 			/* If middle buffer, don't add any header */
2053 			if ((!buf_info.first_buffer) &&
2054 			    (!buf_info.last_buffer)) {
2055 				tot_msdu_len += frag_size;
2056 				amsdu_pad = 0;
2057 				pad_byte_pholder = 0;
2058 				continue;
2059 			}
2060 
2061 			/* Calculate if current buffer has placeholder
2062 			 * to accommodate amsdu pad byte
2063 			 */
2064 			pad_byte_pholder =
2065 				(RX_MONITOR_BUFFER_SIZE - soc->rx_pkt_tlv_size)
2066 				- frag_size;
2067 			/*
2068 			 * We will come here only under three conditions:
2069 			 * 1. MSDU with a single buffer
2070 			 * 2. First buffer, in case the MSDU is spread across
2071 			 *    multiple buffers
2072 			 * 3. Last buffer, in case the MSDU is spread across
2073 			 *    multiple buffers
2074 			 *
2075 			 *         First buffer | Last buffer
2076 			 * Case 1:      1       |     1
2077 			 * Case 2:      1       |     0
2078 			 * Case 3:      0       |     1
2079 			 *
2080 			 * Only in the 3rd case is the l2_hdr_padding zero;
2081 			 * in the other cases it is 2 bytes.
2082 			 */
2083 			if (buf_info.first_buffer)
2084 				l2_hdr_offset =
2085 					DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE;
2086 			else
2087 				l2_hdr_offset = DP_RX_MON_RAW_L2_HDR_PAD_BYTE;
2088 
2089 			if (buf_info.first_buffer) {
2090 				/* Src addr from where llc header needs to be copied */
2091 				rx_src_desc =
2092 					hal_rx_desc_get_80211_hdr(soc->hal_soc,
2093 								  rx_desc);
2094 
2095 				/* Size of buffer with llc header */
2096 				frag_size = frag_size -
2097 					(l2_hdr_offset + decap_hdr_pull_bytes);
2098 				frag_size += msdu_llc_len;
2099 
2100 				/* Construct destination address */
2101 				rx_dest_desc = frag_addr +
2102 					decap_hdr_pull_bytes + l2_hdr_offset;
2103 				rx_dest_desc = rx_dest_desc - (msdu_llc_len);
2104 
2105 				qdf_mem_copy(rx_dest_desc, rx_src_desc,
2106 					     msdu_llc_len);
2107 
2108 				/*
2109 				 * Calculate new page offset and create hole
2110 				 * if amsdu_pad required.
2111 				 */
2112 				frag_page_offset = l2_hdr_offset +
2113 						decap_hdr_pull_bytes;
2114 				frag_page_offset = frag_page_offset -
2115 						(msdu_llc_len + amsdu_pad);
2116 
2117 				qdf_nbuf_move_frag_page_offset(msdu_curr,
2118 							       frags_iter,
2119 							       frag_page_offset);
2120 
2121 				tot_msdu_len = frag_size;
2122 				/*
2123 				 * No amsdu padding required for first frame of
2124 				 * continuation buffer
2125 				 */
2126 				if (!buf_info.last_buffer) {
2127 					amsdu_pad = 0;
2128 					continue;
2129 				}
2130 			} else {
2131 				tot_msdu_len += frag_size;
2132 			}
2133 
2134 			/* We reach this point in only two cases:
2135 			 * 1. Single-buffer MSDU
2136 			 * 2. Last buffer of the MSDU, in the multiple-buffer case
2137 			 */
2138 
2139 			/* Check size of buffer if amsdu padding required */
2140 			amsdu_pad = tot_msdu_len & 0x3;
2141 			amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;
2142 
2143 			/* Create placeholder if current buffer can
2144 			 * accommodate padding.
2145 			 */
2146 			if (amsdu_pad && (amsdu_pad <= pad_byte_pholder)) {
2147 				char *frag_addr_temp;
2148 
2149 				qdf_nbuf_trim_add_frag_size(msdu_curr,
2150 							    frags_iter,
2151 							    amsdu_pad, 0);
2152 				frag_addr_temp = (char *)qdf_nbuf_get_frag_addr(msdu_curr,
2153 										frags_iter);
2154 				frag_addr_temp = (frag_addr_temp +
2155 					qdf_nbuf_get_frag_size_by_idx(msdu_curr, frags_iter)) -
2156 					amsdu_pad;
2157 				qdf_mem_zero(frag_addr_temp, amsdu_pad);
2158 				amsdu_pad = 0;
2159 			}
2160 
2161 			/* reset tot_msdu_len */
2162 			tot_msdu_len = 0;
2163 		}
2164 		msdu_curr = qdf_nbuf_next(msdu_curr);
2165 	}
2166 
2167 	dp_rx_mon_fraglist_prepare(head_msdu, tail_msdu);
2168 
2169 	dp_rx_mon_dest_debug("%pK: head_msdu %pK head_msdu->len %u",
2170 			     soc, head_msdu, head_msdu->len);
2171 
2172 mpdu_stitch_done:
2173 	return head_msdu;
2174 
2175 mpdu_stitch_fail:
2176 	dp_rx_mon_dest_err("%pK: mpdu_stitch_fail head_msdu %pK",
2177 			   soc, head_msdu);
2178 	return NULL;
2179 }
2180 #endif
2181 
2182 #ifdef DP_RX_MON_MEM_FRAG
2183 qdf_nbuf_t dp_rx_mon_restitch_mpdu(struct dp_soc *soc, uint32_t mac_id,
2184 				   qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu,
2185 				   struct cdp_mon_status *rs)
2186 {
2187 	if (qdf_nbuf_get_nr_frags(head_msdu))
2188 		return dp_rx_mon_frag_restitch_mpdu_from_msdus(soc, mac_id,
2189 							       head_msdu,
2190 							       tail_msdu, rs);
2191 	else
2192 		return dp_rx_mon_restitch_mpdu_from_msdus(soc, mac_id,
2193 							  head_msdu,
2194 							  tail_msdu, rs);
2195 }
2196 #else
2197 qdf_nbuf_t dp_rx_mon_restitch_mpdu(struct dp_soc *soc, uint32_t mac_id,
2198 				   qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu,
2199 				   struct cdp_mon_status *rs)
2200 {
2201 	return dp_rx_mon_restitch_mpdu_from_msdus(soc, mac_id, head_msdu,
2202 						  tail_msdu, rs);
2203 }
2204 #endif
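/*
 * Minimal usage sketch (illustrative only; the surrounding variable names
 * are assumptions, not taken from any caller in this file):
 *
 *	struct cdp_mon_status rs = {0};
 *	qdf_nbuf_t mpdu;
 *
 *	mpdu = dp_rx_mon_restitch_mpdu(soc, mac_id, head_msdu, tail_msdu, &rs);
 *	if (mpdu)
 *		<deliver the restitched MPDU to the monitor interface>;
 */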
2205 
2206 #ifdef DP_RX_MON_MEM_FRAG
2207 #if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\
2208 	defined(WLAN_SUPPORT_RX_FLOW_TAG)
2209 void dp_rx_mon_update_pf_tag_to_buf_headroom(struct dp_soc *soc,
2210 					     qdf_nbuf_t nbuf)
2211 {
2212 	qdf_nbuf_t ext_list;
2213 
2214 	if (qdf_unlikely(!soc)) {
2215 		dp_err("Soc[%pK] Null. Can't update pftag to nbuf headroom\n",
2216 		       soc);
2217 		qdf_assert_always(0);
2218 	}
2219 
2220 	if (!wlan_cfg_is_rx_mon_protocol_flow_tag_enabled(soc->wlan_cfg_ctx))
2221 		return;
2222 
2223 	if (qdf_unlikely(!nbuf))
2224 		return;
2225 
2226 	/* Return if it didn't come from the mon path */
2227 	if (!qdf_nbuf_get_nr_frags(nbuf))
2228 		return;
2229 
2230 	/* Headroom must be double the PF tag length since the tags are first copied to the head */
2231 	if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < (DP_RX_MON_TOT_PF_TAG_LEN * 2))) {
2232 		dp_err("Nbuf avail Headroom[%d] < 2 * DP_RX_MON_TOT_PF_TAG_LEN[%lu]",
2233 		       qdf_nbuf_headroom(nbuf), DP_RX_MON_TOT_PF_TAG_LEN);
2234 		return;
2235 	}
2236 
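	/*
	 * The push/copy/pull sequence below copies DP_RX_MON_TOT_PF_TAG_LEN
	 * bytes of tag data stored at the nbuf head into the headroom just
	 * above the current data pointer, then restores the data pointer so
	 * the payload itself is left untouched.
	 */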
2237 	qdf_nbuf_push_head(nbuf, DP_RX_MON_TOT_PF_TAG_LEN);
2238 	qdf_mem_copy(qdf_nbuf_data(nbuf), qdf_nbuf_head(nbuf),
2239 		     DP_RX_MON_TOT_PF_TAG_LEN);
2240 	qdf_nbuf_pull_head(nbuf, DP_RX_MON_TOT_PF_TAG_LEN);
2241 
2242 	ext_list = qdf_nbuf_get_ext_list(nbuf);
2243 	while (ext_list) {
2244 		/* Headroom must be double the PF tag length
2245 		 * since the tags are first copied to the head
2246 		 */
2247 		if (qdf_unlikely(qdf_nbuf_headroom(ext_list) < (DP_RX_MON_TOT_PF_TAG_LEN * 2))) {
2248 			dp_err("Fraglist Nbuf avail Headroom[%d] < 2 * DP_RX_MON_TOT_PF_TAG_LEN[%lu]",
2249 			       qdf_nbuf_headroom(ext_list),
2250 			       DP_RX_MON_TOT_PF_TAG_LEN);
2251 			ext_list = qdf_nbuf_queue_next(ext_list);
2252 			continue;
2253 		}
2254 		qdf_nbuf_push_head(ext_list, DP_RX_MON_TOT_PF_TAG_LEN);
2255 		qdf_mem_copy(qdf_nbuf_data(ext_list), qdf_nbuf_head(ext_list),
2256 			     DP_RX_MON_TOT_PF_TAG_LEN);
2257 		qdf_nbuf_pull_head(ext_list, DP_RX_MON_TOT_PF_TAG_LEN);
2258 		ext_list = qdf_nbuf_queue_next(ext_list);
2259 	}
2260 }
2261 #endif
2262 #endif
2263 
2264 #ifdef QCA_MONITOR_PKT_SUPPORT
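/*
 * dp_mon_htt_dest_srng_setup() sends the HTT SRNG setup for the three
 * monitor destination mode rings (RXDMA_MONITOR_BUF, RXDMA_MONITOR_DST and
 * RXDMA_MONITOR_DESC) when rxdma1 is enabled; any failure is returned to
 * the caller immediately.
 */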
2265 QDF_STATUS dp_mon_htt_dest_srng_setup(struct dp_soc *soc,
2266 				      struct dp_pdev *pdev,
2267 				      int mac_id,
2268 				      int mac_for_pdev)
2269 {
2270 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2271 
2272 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
2273 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
2274 					soc->rxdma_mon_buf_ring[mac_id]
2275 					.hal_srng,
2276 					RXDMA_MONITOR_BUF);
2277 
2278 		if (status != QDF_STATUS_SUCCESS) {
2279 			dp_mon_err("Failed to send htt srng setup message for Rxdma mon buf ring");
2280 			return status;
2281 		}
2282 
2283 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
2284 					soc->rxdma_mon_dst_ring[mac_id]
2285 					.hal_srng,
2286 					RXDMA_MONITOR_DST);
2287 
2288 		if (status != QDF_STATUS_SUCCESS) {
2289 			dp_mon_err("Failed to send htt srng setup message for Rxdma mon dst ring");
2290 			return status;
2291 		}
2292 
2293 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
2294 					soc->rxdma_mon_desc_ring[mac_id]
2295 					.hal_srng,
2296 					RXDMA_MONITOR_DESC);
2297 
2298 		if (status != QDF_STATUS_SUCCESS) {
2299 			dp_mon_err("Failed to send htt srng setup message for Rxdma mon desc ring");
2300 			return status;
2301 		}
2302 	}
2303 
2304 	return status;
2305 }
2306 #endif /* QCA_MONITOR_PKT_SUPPORT */
2307 
2308 #ifdef QCA_MONITOR_PKT_SUPPORT
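/*
 * The helpers below manage the per-LMAC monitor destination rings and follow
 * the usual dp_srng lifecycle: dp_mon_dest_rings_alloc() reserves memory,
 * dp_mon_dest_rings_init() programs the SRNGs, and dp_mon_dest_rings_deinit()
 * / dp_mon_dest_rings_free() undo those steps. All of them are no-ops unless
 * rxdma1 is enabled.
 */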
2309 void dp_mon_dest_rings_deinit(struct dp_pdev *pdev, int lmac_id)
2310 {
2311 	struct dp_soc *soc = pdev->soc;
2312 
2313 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
2314 		dp_srng_deinit(soc, &soc->rxdma_mon_buf_ring[lmac_id],
2315 			       RXDMA_MONITOR_BUF, 0);
2316 		dp_srng_deinit(soc, &soc->rxdma_mon_dst_ring[lmac_id],
2317 			       RXDMA_MONITOR_DST, 0);
2318 		dp_srng_deinit(soc, &soc->rxdma_mon_desc_ring[lmac_id],
2319 			       RXDMA_MONITOR_DESC, 0);
2320 	}
2321 }
2322 
2323 void dp_mon_dest_rings_free(struct dp_pdev *pdev, int lmac_id)
2324 {
2325 	struct dp_soc *soc = pdev->soc;
2326 
2327 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
2328 		dp_srng_free(soc, &soc->rxdma_mon_buf_ring[lmac_id]);
2329 		dp_srng_free(soc, &soc->rxdma_mon_dst_ring[lmac_id]);
2330 		dp_srng_free(soc, &soc->rxdma_mon_desc_ring[lmac_id]);
2331 	}
2332 }
2333 
2334 QDF_STATUS dp_mon_dest_rings_init(struct dp_pdev *pdev, int lmac_id)
2335 {
2336 	struct dp_soc *soc = pdev->soc;
2337 
2338 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
2339 		if (dp_srng_init(soc, &soc->rxdma_mon_buf_ring[lmac_id],
2340 				 RXDMA_MONITOR_BUF, 0, lmac_id)) {
2341 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_buf_ring ", soc);
2342 			goto fail1;
2343 		}
2344 
2345 		if (dp_srng_init(soc, &soc->rxdma_mon_dst_ring[lmac_id],
2346 				 RXDMA_MONITOR_DST, 0, lmac_id)) {
2347 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_dst_ring", soc);
2348 			goto fail1;
2349 		}
2350 
2351 		if (dp_srng_init(soc, &soc->rxdma_mon_desc_ring[lmac_id],
2352 				 RXDMA_MONITOR_DESC, 0, lmac_id)) {
2353 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_desc_ring", soc);
2354 			goto fail1;
2355 		}
2356 	}
2357 	return QDF_STATUS_SUCCESS;
2358 
2359 fail1:
2360 	return QDF_STATUS_E_NOMEM;
2361 }
2362 
2363 QDF_STATUS dp_mon_dest_rings_alloc(struct dp_pdev *pdev, int lmac_id)
2364 {
2365 	int entries;
2366 	struct dp_soc *soc = pdev->soc;
2367 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx = pdev->wlan_cfg_ctx;
2368 
2369 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
2370 		entries = wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
2371 		if (dp_srng_alloc(soc, &soc->rxdma_mon_buf_ring[lmac_id],
2372 				  RXDMA_MONITOR_BUF, entries, 0)) {
2373 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_buf_ring ", soc);
2374 			goto fail1;
2375 		}
2376 		entries = wlan_cfg_get_dma_rx_mon_dest_ring_size(pdev_cfg_ctx);
2377 		if (dp_srng_alloc(soc, &soc->rxdma_mon_dst_ring[lmac_id],
2378 				  RXDMA_MONITOR_DST, entries, 0)) {
2379 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_dst_ring", soc);
2380 			goto fail1;
2381 		}
2382 		entries = wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
2383 		if (dp_srng_alloc(soc, &soc->rxdma_mon_desc_ring[lmac_id],
2384 				  RXDMA_MONITOR_DESC, entries, 0)) {
2385 			dp_mon_err("%pK: " RNG_ERR "rxdma_mon_desc_ring", soc);
2386 			goto fail1;
2387 		}
2388 	}
2389 	return QDF_STATUS_SUCCESS;
2390 
2391 fail1:
2392 	return QDF_STATUS_E_NOMEM;
2393 }
2394 #endif
2395