xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/monitor/2.0/dp_rx_mon_2.0.c (revision fb436899e24ed79fc745209e906f95145a787017)
1 /*
2  * Copyright (c) 2021, The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include "qdf_types.h"
19 #include "hal_be_hw_headers.h"
20 #include "dp_types.h"
21 #include "hal_be_rx.h"
22 #include "hal_api.h"
23 #include "qdf_trace.h"
24 #include "hal_be_api_mon.h"
25 #include "dp_internal.h"
26 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
27 #include <qdf_flex_mem.h>
28 #include "qdf_nbuf_frag.h"
29 #include "dp_mon.h"
30 #include <dp_rx_mon.h>
31 #include <dp_mon_2.0.h>
32 #include <dp_rx_mon.h>
33 #include <dp_rx_mon_2.0.h>
34 #include <dp_rx.h>
35 #include <dp_be.h>
36 #include <hal_be_api_mon.h>
37 #ifdef QCA_SUPPORT_LITE_MONITOR
38 #include "dp_lite_mon.h"
39 #endif
40 
/* Mask to keep the lower 16 bits of FSE metadata when forming the flow tag */
#define F_MASK 0xFFFF
/* NOTE(review): not referenced in this portion of the file — confirm usage */
#define TEST_MASK 0xCBF
43 
44 /**
45  * dp_rx_mon_free_mpdu_queue() - Free MPDU queue
46  * @mon_pdev: monitor pdev
47  * @ppdu_info: PPDU info
48  *
49  * Return: Void
50  */
51 
52 static void dp_rx_mon_free_mpdu_queue(struct dp_mon_pdev *mon_pdev,
53 				      struct hal_rx_ppdu_info *ppdu_info)
54 {
55 	uint8_t user;
56 	qdf_nbuf_t mpdu;
57 
58 	for (user = 0; user < HAL_MAX_UL_MU_USERS; user++) {
59 		if (!qdf_nbuf_is_queue_empty(&ppdu_info->mpdu_q[user])) {
60 			while ((mpdu = qdf_nbuf_queue_remove(&ppdu_info->mpdu_q[user])) != NULL)
61 				dp_mon_free_parent_nbuf(mon_pdev, mpdu);
62 		}
63 	}
64 }
65 
66 /**
67  * dp_rx_mon_update_drop_cnt() - Update drop statistics
68  *
69  * @mon_pdev: monitor pdev
70  * @hal_mon_rx_desc: HAL monitor desc
71  *
72  * Return: void
73  */
74 static inline void
75 dp_rx_mon_update_drop_cnt(struct dp_mon_pdev *mon_pdev,
76 			  struct hal_mon_desc *hal_mon_rx_desc)
77 {
78 	mon_pdev->rx_mon_stats.empty_desc_ppdu++;
79 	mon_pdev->rx_mon_stats.ppdu_drop_cnt +=
80 		hal_mon_rx_desc->ppdu_drop_count;
81 	mon_pdev->rx_mon_stats.mpdu_drop_cnt +=
82 		hal_mon_rx_desc->mpdu_drop_count;
83 	if (hal_mon_rx_desc->end_of_ppdu_dropped)
84 		mon_pdev->rx_mon_stats.end_of_ppdu_drop_cnt++;
85 	mon_pdev->rx_mon_stats.tlv_drop_cnt +=
86 		hal_mon_rx_desc->tlv_drop_count;
87 }
88 
89 static
90 void dp_rx_mon_set_zero(qdf_nbuf_t nbuf)
91 {
92 	qdf_mem_zero(qdf_nbuf_head(nbuf), DP_RX_MON_TLV_ROOM);
93 }
94 
95 #ifdef QCA_KMEM_CACHE_SUPPORT
96 /**
97  * dp_rx_mon_get_ppdu_info() - Get PPDU info from freelist
98  *
99  * @mon_pdev: monitor pdev
100  *
101  * Return: ppdu_info
102  */
103 struct hal_rx_ppdu_info*
104 dp_rx_mon_get_ppdu_info(struct dp_mon_pdev *mon_pdev)
105 {
106 	struct dp_mon_pdev_be *mon_pdev_be =
107 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
108 	struct hal_rx_ppdu_info *ppdu_info, *temp_ppdu_info;
109 
110 	qdf_spin_lock_bh(&mon_pdev_be->ppdu_info_lock);
111 	TAILQ_FOREACH_SAFE(ppdu_info,
112 			   &mon_pdev_be->rx_mon_free_queue,
113 			   ppdu_free_list_elem,
114 			   temp_ppdu_info) {
115 		TAILQ_REMOVE(&mon_pdev_be->rx_mon_free_queue,
116 			     ppdu_info, ppdu_free_list_elem);
117 
118 		if (ppdu_info) {
119 			mon_pdev_be->total_free_elem--;
120 			break;
121 		}
122 	}
123 	qdf_spin_unlock_bh(&mon_pdev_be->ppdu_info_lock);
124 
125 	return ppdu_info;
126 }
127 
128 void
129 __dp_rx_mon_free_ppdu_info(struct dp_mon_pdev *mon_pdev,
130 			   struct hal_rx_ppdu_info *ppdu_info)
131 {
132 	struct dp_mon_pdev_be *mon_pdev_be =
133 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
134 
135 	qdf_spin_lock_bh(&mon_pdev_be->ppdu_info_lock);
136 	if (ppdu_info) {
137 		TAILQ_INSERT_TAIL(&mon_pdev_be->rx_mon_free_queue, ppdu_info,
138 				  ppdu_free_list_elem);
139 		mon_pdev_be->total_free_elem++;
140 	}
141 	qdf_spin_unlock_bh(&mon_pdev_be->ppdu_info_lock);
142 }
143 
144 /**
145  * dp_rx_mon_free_ppdu_info() - Free PPDU info
146  * @pdev: DP pdev
147  * @ppdu_info: PPDU info
148  *
149  * Return: Void
150  */
151 void
152 dp_rx_mon_free_ppdu_info(struct dp_pdev *pdev,
153 			 struct hal_rx_ppdu_info *ppdu_info)
154 {
155 	struct dp_mon_pdev *mon_pdev;
156 
157 	mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
158 	dp_rx_mon_free_mpdu_queue(mon_pdev, ppdu_info);
159 	__dp_rx_mon_free_ppdu_info(mon_pdev, ppdu_info);
160 }
161 #endif
162 
163 /**
164  * dp_rx_mon_nbuf_add_rx_frag() -  Add frag to SKB
165  *
166  * @nbuf: SKB to which frag is going to be added
167  * @frag: frag to be added to SKB
168  * @frag_len: frag length
169  * @offset: frag offset
170  * @buf_size: buffer size
171  * @frag_ref: take frag ref
172  *
173  * Return: QDF_STATUS
174  */
175 static inline QDF_STATUS
176 dp_rx_mon_nbuf_add_rx_frag(qdf_nbuf_t nbuf, qdf_frag_t *frag,
177 			   uint16_t frag_len, uint16_t offset,
178 			   uint16_t buf_size, bool frag_ref)
179 {
180 	uint8_t num_frags;
181 
182 	num_frags = qdf_nbuf_get_nr_frags(nbuf);
183 	if (num_frags < QDF_NBUF_MAX_FRAGS) {
184 		qdf_nbuf_add_rx_frag(frag, nbuf,
185 				     offset,
186 				     frag_len,
187 				     buf_size,
188 				     frag_ref);
189 		return QDF_STATUS_SUCCESS;
190 	}
191 	return QDF_STATUS_E_FAILURE;
192 }
193 
194 #if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) &&\
195 	defined(WLAN_SUPPORT_RX_TAG_STATISTICS)
/**
 * dp_mon_rx_update_rx_protocol_tag_stats() - Update mon protocol's
 *					      statistics
 * @pdev: pdev handle
 * @protocol_index: Protocol index for which the stats should be incremented
 *
 * Return: void
 */
static void dp_mon_rx_update_rx_protocol_tag_stats(struct dp_pdev *pdev,
						   uint16_t protocol_index)
{
	/* per-protocol hit counter; caller validates protocol_index range */
	pdev->mon_proto_tag_stats[protocol_index].tag_ctr++;
}
209 
#ifdef QCA_TEST_MON_PF_TAGS_STATS

/**
 * dp_rx_mon_print_tag_buf() - Hex-dump the PF tag TLV area (debug builds)
 * @buf: start of the TLV buffer
 * @room: number of bytes to dump
 *
 * Return: void
 */
static
void dp_rx_mon_print_tag_buf(uint8_t *buf, uint16_t room)
{
	print_hex_dump(KERN_ERR, "TLV BUFFER: ", DUMP_PREFIX_NONE,
		       32, 2, buf, room, false);
}

#else
/* no-op stub when QCA_TEST_MON_PF_TAGS_STATS is disabled */
static
void dp_rx_mon_print_tag_buf(uint8_t *buf, uint16_t room)
{
}
#endif
225 
/**
 * dp_rx_mon_shift_pf_tag_in_headroom() - Wrap the PF tag data already
 * written in the nbuf headroom into a TLV (marker + length + header)
 * @nbuf: MPDU nbuf whose headroom carries the per-MSDU tag area
 * @soc: DP soc handle (asserted non-NULL)
 * @ppdu_info: PPDU info (unused here)
 *
 * The headroom layout is: msdu_count (u16) followed by per-MSDU tag
 * entries. This routine prepends a marker and a total-length field,
 * copies the tag area behind them via dp_mon_rx_add_tlv(), then pulls
 * the head back so the data pointer is unchanged for the caller.
 *
 * Return: void
 */
void
dp_rx_mon_shift_pf_tag_in_headroom(qdf_nbuf_t nbuf, struct dp_soc *soc,
				   struct hal_rx_ppdu_info *ppdu_info)
{
	uint32_t room = 0;
	uint16_t msdu_count = 0;
	uint16_t *dp = NULL;
	uint16_t *hp = NULL;
	uint16_t tlv_data_len, total_tlv_len;
	uint32_t bytes = 0;

	if (qdf_unlikely(!soc)) {
		dp_mon_err("Soc[%pK] Null. Can't update pftag to nbuf headroom",
			   soc);
		qdf_assert_always(0);
	}

	if (!wlan_cfg_is_rx_mon_protocol_flow_tag_enabled(soc->wlan_cfg_ctx))
		return;

	if (qdf_unlikely(!nbuf))
		return;

	/* Headroom must have enough space for the tlv to be added */
	if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < DP_RX_MON_TLV_ROOM)) {
		dp_mon_err("Headroom[%d] < DP_RX_MON_TLV_ROOM[%d]",
			   qdf_nbuf_headroom(nbuf), DP_RX_MON_TLV_ROOM);
		return;
	}

	/* first u16 of the headroom area holds the MSDU count */
	hp = (uint16_t *)qdf_nbuf_head(nbuf);
	msdu_count = *hp;

	if (qdf_unlikely(!msdu_count))
		return;

	dp_mon_debug("msdu_count: %d", msdu_count);

	room = DP_RX_MON_PF_TAG_LEN_PER_FRAG * msdu_count;
	tlv_data_len = DP_RX_MON_TLV_MSDU_CNT + (room);
	total_tlv_len = DP_RX_MON_TLV_HDR_LEN + tlv_data_len;

	//1. store space for MARKER
	dp = (uint16_t *)qdf_nbuf_push_head(nbuf, sizeof(uint16_t));
	if (qdf_likely(dp)) {
		*(uint16_t *)dp = DP_RX_MON_TLV_HDR_MARKER;
		bytes += sizeof(uint16_t);
	}

	//2. store space for total size
	dp = (uint16_t *)qdf_nbuf_push_head(nbuf, sizeof(uint16_t));
	if (qdf_likely(dp)) {
		*(uint16_t *)dp = total_tlv_len;
		bytes += sizeof(uint16_t);
	}

	//create TLV
	bytes += dp_mon_rx_add_tlv(DP_RX_MON_TLV_PF_ID, tlv_data_len, hp, nbuf);

	dp_rx_mon_print_tag_buf(qdf_nbuf_data(nbuf), total_tlv_len);

	/* restore the original data pointer; only the headroom changed */
	qdf_nbuf_pull_head(nbuf, bytes);

}
290 
291 void
292 dp_rx_mon_pf_tag_to_buf_headroom_2_0(void *nbuf,
293 				     struct hal_rx_ppdu_info *ppdu_info,
294 				     struct dp_pdev *pdev, struct dp_soc *soc)
295 {
296 	uint8_t *nbuf_head = NULL;
297 	uint8_t user_id;
298 	struct hal_rx_mon_msdu_info *msdu_info;
299 	uint16_t flow_id;
300 	uint16_t cce_metadata;
301 	uint16_t protocol_tag = 0;
302 	uint32_t flow_tag;
303 	uint8_t invalid_cce = 0, invalid_fse = 0;
304 
305 	if (qdf_unlikely(!soc)) {
306 		dp_mon_err("Soc[%pK] Null. Can't update pftag to nbuf headroom",
307 			   soc);
308 		qdf_assert_always(0);
309 	}
310 
311 	if (!wlan_cfg_is_rx_mon_protocol_flow_tag_enabled(soc->wlan_cfg_ctx))
312 		return;
313 
314 	if (qdf_unlikely(!nbuf))
315 		return;
316 
317 	/* Headroom must be have enough space for tlv to be added*/
318 	if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < DP_RX_MON_TLV_ROOM)) {
319 		dp_mon_err("Headroom[%d] < DP_RX_MON_TLV_ROOM[%d]",
320 			   qdf_nbuf_headroom(nbuf), DP_RX_MON_TLV_ROOM);
321 		return;
322 	}
323 
324 	user_id = ppdu_info->user_id;
325 	if (qdf_unlikely(user_id > HAL_MAX_UL_MU_USERS)) {
326 		dp_mon_debug("Invalid user_id user_id: %d pdev: %pK", user_id, pdev);
327 		return;
328 	}
329 
330 	msdu_info = &ppdu_info->msdu[user_id];
331 	flow_id = ppdu_info->rx_msdu_info[user_id].flow_idx;
332 	cce_metadata = ppdu_info->rx_msdu_info[user_id].cce_metadata -
333 		       RX_PROTOCOL_TAG_START_OFFSET;
334 
335 	flow_tag = ppdu_info->rx_msdu_info[user_id].fse_metadata & F_MASK;
336 
337 	if (qdf_unlikely((cce_metadata > RX_PROTOCOL_TAG_MAX - 1) ||
338 			 (cce_metadata > 0 && cce_metadata < 4))) {
339 		dp_mon_debug("Invalid user_id cce_metadata: %d pdev: %pK", cce_metadata, pdev);
340 		invalid_cce = 1;
341 		protocol_tag = cce_metadata;
342 	} else {
343 		protocol_tag = pdev->rx_proto_tag_map[cce_metadata].tag;
344 		dp_mon_rx_update_rx_protocol_tag_stats(pdev, cce_metadata);
345 	}
346 
347 	if (flow_tag > 0) {
348 		dp_mon_rx_update_rx_flow_tag_stats(pdev, flow_id);
349 	} else {
350 		dp_mon_debug("Invalid flow_tag: %d pdev: %pK ", flow_tag, pdev);
351 		invalid_fse = 1;
352 	}
353 
354 	if (invalid_cce && invalid_fse)
355 		return;
356 
357 	if (msdu_info->msdu_index >= DP_RX_MON_MAX_MSDU) {
358 		dp_mon_err("msdu_index causes overflow in headroom");
359 		return;
360 	}
361 
362 	dp_mon_debug("protocol_tag: %d, cce_metadata: %d, flow_tag: %d",
363 		     protocol_tag, cce_metadata, flow_tag);
364 
365 	dp_mon_debug("msdu_index: %d", msdu_info->msdu_index);
366 
367 
368 	nbuf_head = qdf_nbuf_head(nbuf);
369 
370 	*((uint16_t *)nbuf_head) = msdu_info->msdu_index + 1;
371 	nbuf_head += DP_RX_MON_TLV_MSDU_CNT;
372 
373 	nbuf_head += ((msdu_info->msdu_index) * DP_RX_MON_PF_TAG_SIZE);
374 	if (!invalid_cce)
375 		*((uint16_t *)nbuf_head) = protocol_tag;
376 	nbuf_head += sizeof(uint16_t);
377 	if (!invalid_fse)
378 		*((uint16_t *)nbuf_head) = flow_tag;
379 }
380 
#else
/* Stub implementations used when protocol/flow tagging support
 * (WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG && WLAN_SUPPORT_RX_TAG_STATISTICS)
 * is compiled out.
 */
static inline void
dp_mon_rx_update_rx_protocol_tag_stats(struct dp_pdev *pdev,
				       uint16_t protocol_index)
{
}

static inline
void dp_rx_mon_shift_pf_tag_in_headroom(qdf_nbuf_t nbuf, struct dp_soc *soc,
					struct hal_rx_ppdu_info *ppdu_info)
{
}

static inline
void dp_rx_mon_pf_tag_to_buf_headroom_2_0(void *nbuf,
					  struct hal_rx_ppdu_info *ppdu_info,
					  struct dp_pdev *pdev,
					  struct dp_soc *soc)
{
}

#endif
403 
404 #ifdef MONITOR_TLV_RECORDING_ENABLE
405 /**
406  * dp_mon_record_index_update() - update the indexes of dp_mon_tlv_logger
407  *                                 to store next tlv
408  *
409  * @mon_pdev_be: pointer to dp_mon_pdev_be
410  *
411  * Return
412  */
413 void
414 dp_mon_record_index_update(struct dp_mon_pdev_be *mon_pdev_be) {
415 	struct dp_mon_tlv_logger *tlv_log = NULL;
416 	struct dp_mon_tlv_info *tlv_info = NULL;
417 
418 	if (!mon_pdev_be || !(mon_pdev_be->rx_tlv_log))
419 		return;
420 
421 	tlv_log = mon_pdev_be->rx_tlv_log;
422 	if (!tlv_log->tlv_logging_enable || !(tlv_log->buff))
423 		return;
424 
425 	tlv_info = (struct dp_mon_tlv_info *)tlv_log->buff;
426 
427 	(tlv_log->curr_ppdu_pos + 1 == MAX_NUM_PPDU_RECORD) ?
428 		tlv_log->curr_ppdu_pos = 0 :
429 			tlv_log->curr_ppdu_pos++;
430 
431 	tlv_log->wrap_flag = 0;
432 	tlv_log->ppdu_start_idx = tlv_log->curr_ppdu_pos *
433 		MAX_TLVS_PER_PPDU;
434 	tlv_log->mpdu_idx = tlv_log->ppdu_start_idx +
435 		MAX_PPDU_START_TLV_NUM;
436 	tlv_log->ppdu_end_idx = tlv_log->mpdu_idx + MAX_MPDU_TLV_NUM;
437 	tlv_log->max_ppdu_start_idx = tlv_log->ppdu_start_idx +
438 		MAX_PPDU_START_TLV_NUM - 1;
439 	tlv_log->max_mpdu_idx = tlv_log->mpdu_idx +
440 		MAX_MPDU_TLV_NUM - 1;
441 	tlv_log->max_ppdu_end_idx = tlv_log->ppdu_end_idx +
442 		MAX_PPDU_END_TLV_NUM - 1;
443 }
444 
/**
 * dp_mon_record_tlv() - Store the contents of the tlv in buffer
 *
 * @mon_pdev_be: pointer to dp_mon_pdev_be
 * @ppdu_info: struct hal_rx_ppdu_info
 *
 * Record the TLV currently parsed in @ppdu_info into the per-PPDU
 * logging buffer, in the region matching its category (PPDU-start,
 * MPDU, or PPDU-end). The MPDU region wraps when full; the other two
 * saturate at their last index.
 *
 * Return: void
 */
void
dp_mon_record_tlv(struct dp_mon_pdev_be *mon_pdev_be,
		  struct hal_rx_ppdu_info *ppdu_info) {
	struct dp_mon_tlv_logger *tlv_log = NULL;
	struct dp_mon_tlv_info *tlv_info = NULL;
	uint32_t tlv_tag;
	uint16_t *ppdu_start_idx = NULL;
	uint16_t *mpdu_idx = NULL;
	uint16_t *ppdu_end_idx = NULL;

	if (!mon_pdev_be || !(mon_pdev_be->rx_tlv_log))
		return;

	tlv_log = mon_pdev_be->rx_tlv_log;
	if (!tlv_log->tlv_logging_enable || !(tlv_log->buff))
		return;

	tlv_info = (struct dp_mon_tlv_info *)tlv_log->buff;
	ppdu_start_idx = &tlv_log->ppdu_start_idx;
	mpdu_idx = &tlv_log->mpdu_idx;
	ppdu_end_idx = &tlv_log->ppdu_end_idx;

	tlv_tag = ppdu_info->rx_tlv_info.tlv_tag;
	if (ppdu_info->rx_tlv_info.tlv_category == CATEGORY_PPDU_START) {
		/* PPDU-start region: capture identifying fields per tag */
		tlv_info[*ppdu_start_idx].tlv_tag = tlv_tag;
		switch (tlv_tag) {
		case WIFIRX_PPDU_START_E:
			tlv_info[*ppdu_start_idx].
				data.ppdu_start.ppdu_id =
					ppdu_info->com_info.ppdu_id;
			break;
		case WIFIRX_PPDU_START_USER_INFO_E:
			tlv_info[*ppdu_start_idx].
				data.ppdu_start_user_info.user_id =
					ppdu_info->user_id;
			tlv_info[*ppdu_start_idx].
				data.ppdu_start_user_info.rate_mcs =
					ppdu_info->rx_status.mcs;
			tlv_info[*ppdu_start_idx].
				data.ppdu_start_user_info.nss =
					ppdu_info->rx_status.nss;
			tlv_info[*ppdu_start_idx].
				data.ppdu_start_user_info.reception_type =
					ppdu_info->rx_status.reception_type;
			tlv_info[*ppdu_start_idx].
				data.ppdu_start_user_info.sgi =
					ppdu_info->rx_status.sgi;
			break;
		}
		/* saturate: do not run past this category's window */
		if (*ppdu_start_idx < tlv_log->max_ppdu_start_idx)
			(*ppdu_start_idx)++;
	} else if (ppdu_info->rx_tlv_info.tlv_category == CATEGORY_MPDU) {
		/* MPDU region: per-MPDU/MSDU tags; this region wraps */
		tlv_info[*mpdu_idx].tlv_tag = tlv_tag;
		switch (tlv_tag) {
		case WIFIRX_MPDU_START_E:
			tlv_info[*mpdu_idx].
				data.mpdu_start.user_id =
					ppdu_info->user_id;
			tlv_info[*mpdu_idx].
				data.mpdu_start.wrap_flag =
					tlv_log->wrap_flag;
			break;
		case WIFIRX_MPDU_END_E:
			tlv_info[*mpdu_idx].
				data.mpdu_end.user_id =
					ppdu_info->user_id;
			tlv_info[*mpdu_idx].
				data.mpdu_end.fcs_err =
					ppdu_info->fcs_err;
			tlv_info[*mpdu_idx].
				data.mpdu_end.wrap_flag =
					tlv_log->wrap_flag;
			break;
		case WIFIRX_HEADER_E:
			tlv_info[*mpdu_idx].
				data.header.wrap_flag =
					tlv_log->wrap_flag;
			break;
		case WIFIRX_MSDU_END_E:
			tlv_info[*mpdu_idx].
				data.msdu_end.user_id =
					ppdu_info->user_id;
			tlv_info[*mpdu_idx].
				data.msdu_end.wrap_flag =
					tlv_log->wrap_flag;
			break;
		case WIFIMON_BUFFER_ADDR_E:
			tlv_info[*mpdu_idx].
				data.mon_buffer_addr.dma_length =
					ppdu_info->packet_info.dma_length;
			tlv_info[*mpdu_idx].
				data.mon_buffer_addr.truncation =
					ppdu_info->packet_info.truncated;
			tlv_info[*mpdu_idx].
				data.mon_buffer_addr.continuation =
					ppdu_info->packet_info.msdu_continuation;
			tlv_info[*mpdu_idx].
				data.mon_buffer_addr.wrap_flag =
					tlv_log->wrap_flag;
			break;
		}
		if (*mpdu_idx < tlv_log->max_mpdu_idx) {
			(*mpdu_idx)++;
		} else {
			/* wrap back to the region start and toggle the
			 * wrap marker so readers can spot overwrites
			 */
			*mpdu_idx = *mpdu_idx - MAX_MPDU_TLV_NUM + 1;
			tlv_log->wrap_flag ^= 1;
		}
	} else if (ppdu_info->rx_tlv_info.tlv_category == CATEGORY_PPDU_END) {
		/* PPDU-end region: only the tag itself is recorded today */
		tlv_info[*ppdu_end_idx].tlv_tag = tlv_tag;
		switch (tlv_tag) {
		case WIFIRX_USER_PPDU_END_E:
			break;
		case WIFIRX_PPDU_END_E:
			break;
		case WIFIPHYRX_RSSI_LEGACY_E:
			break;
		case WIFIPHYRX_L_SIG_B_E:
			break;
		case WIFIPHYRX_COMMON_USER_INFO_E:
			break;
		case WIFIPHYRX_DATA_DONE_E:
			break;
		case WIFIPHYRX_PKT_END_PART1_E:
			break;
		case WIFIPHYRX_PKT_END_E:
			break;
		case WIFIRXPCU_PPDU_END_INFO_E:
			break;
		case WIFIRX_PPDU_END_USER_STATS_E:
			break;
		case WIFIRX_PPDU_END_STATUS_DONE_E:
			break;
		}
		if (*ppdu_end_idx < tlv_log->max_ppdu_end_idx)
			(*ppdu_end_idx)++;
	}
}
590 
591 /**
592  * dp_mon_record_clear_buffer() - Clear the buffer to record next PPDU
593  *
594  * @mon_pdev_be: pointer to dp_mon_pdev_be
595  *
596  * Return
597  */
598 void
599 dp_mon_record_clear_buffer(struct dp_mon_pdev_be *mon_pdev_be) {
600 	struct dp_mon_tlv_logger *tlv_log = NULL;
601 	struct dp_mon_tlv_info *tlv_info = NULL;
602 
603 	if (!mon_pdev_be || !(mon_pdev_be->rx_tlv_log))
604 		return;
605 
606 	tlv_log = mon_pdev_be->rx_tlv_log;
607 	if (!tlv_log->tlv_logging_enable || !(tlv_log->buff))
608 		return;
609 
610 	tlv_info = (struct dp_mon_tlv_info *)tlv_log->buff;
611 	qdf_mem_zero(&tlv_info[tlv_log->ppdu_start_idx],
612 		     MAX_TLVS_PER_PPDU * sizeof(struct dp_mon_tlv_info));
613 }
614 
615 #else
616 
/* Stub implementations used when MONITOR_TLV_RECORDING_ENABLE is not set */

void
dp_mon_record_index_update(struct dp_mon_pdev_be *mon_pdev_be) {
}

void
dp_mon_record_tlv(struct dp_mon_pdev_be *mon_pdev_be,
		  struct hal_rx_ppdu_info *ppdu_info) {
}

void
dp_mon_record_clear_buffer(struct dp_mon_pdev_be *mon_pdev_be) {
}
629 
630 #endif
631 
632 /**
633  * dp_mon_free_parent_nbuf() - Free parent SKB
634  *
635  * @mon_pdev: monitor pdev
636  * @nbuf: SKB to be freed
637  *
638  * Return: void
639  */
640 void dp_mon_free_parent_nbuf(struct dp_mon_pdev *mon_pdev,
641 			     qdf_nbuf_t nbuf)
642 {
643 	mon_pdev->rx_mon_stats.parent_buf_free++;
644 	qdf_nbuf_free(nbuf);
645 }
646 
647 void dp_rx_mon_drain_wq(struct dp_pdev *pdev)
648 {
649 	struct dp_mon_pdev *mon_pdev;
650 	struct hal_rx_ppdu_info *ppdu_info = NULL;
651 	struct hal_rx_ppdu_info *temp_ppdu_info = NULL;
652 	struct dp_mon_pdev_be *mon_pdev_be;
653 
654 	if (qdf_unlikely(!pdev)) {
655 		dp_mon_debug("Pdev is NULL");
656 		return;
657 	}
658 
659 	mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
660 	if (qdf_unlikely(!mon_pdev)) {
661 		dp_mon_debug("monitor pdev is NULL");
662 		return;
663 	}
664 
665 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
666 
667 	qdf_spin_lock_bh(&mon_pdev_be->rx_mon_wq_lock);
668 	TAILQ_FOREACH_SAFE(ppdu_info,
669 			   &mon_pdev_be->rx_mon_queue,
670 			   ppdu_list_elem,
671 			   temp_ppdu_info) {
672 		mon_pdev_be->rx_mon_queue_depth--;
673 		TAILQ_REMOVE(&mon_pdev_be->rx_mon_queue,
674 			     ppdu_info, ppdu_list_elem);
675 
676 		dp_rx_mon_free_ppdu_info(pdev, ppdu_info);
677 	}
678 	qdf_spin_unlock_bh(&mon_pdev_be->rx_mon_wq_lock);
679 }
680 
681 /**
682  * dp_rx_mon_deliver_mpdu() - Deliver MPDU to osif layer
683  *
684  * @mon_pdev: monitor pdev
685  * @mpdu: MPDU nbuf
686  * @rx_status: monitor status
687  *
688  * Return: QDF_STATUS
689  */
690 static QDF_STATUS
691 dp_rx_mon_deliver_mpdu(struct dp_mon_pdev *mon_pdev,
692 		       qdf_nbuf_t mpdu,
693 		       struct mon_rx_status *rx_status)
694 {
695 	qdf_nbuf_t nbuf;
696 
697 	if (mon_pdev->mvdev && mon_pdev->mvdev->monitor_vdev->osif_rx_mon) {
698 		mon_pdev->rx_mon_stats.mpdus_buf_to_stack++;
699 		nbuf = qdf_nbuf_get_ext_list(mpdu);
700 
701 		while (nbuf) {
702 			mon_pdev->rx_mon_stats.mpdus_buf_to_stack++;
703 			nbuf = nbuf->next;
704 		}
705 		mon_pdev->mvdev->monitor_vdev->osif_rx_mon(mon_pdev->mvdev->osif_vdev,
706 							   mpdu,
707 							   rx_status);
708 	} else {
709 		return QDF_STATUS_E_FAILURE;
710 	}
711 
712 	return QDF_STATUS_SUCCESS;
713 }
714 
/**
 * dp_rx_mon_process_ppdu_info() - Process PPDU info
 * @pdev: DP pdev
 * @ppdu_info: PPDU info
 *
 * For every user of the PPDU, dequeue its MPDUs and either route them
 * through lite-monitor processing or restitch them (full monitor),
 * attach the radiotap header and PF tags, and hand them to the osif
 * layer. Every dequeued MPDU is consumed on all paths: delivered, or
 * freed via dp_mon_free_parent_nbuf().
 *
 * Return: Void
 */
static void
dp_rx_mon_process_ppdu_info(struct dp_pdev *pdev,
			    struct hal_rx_ppdu_info *ppdu_info)
{
	struct dp_mon_pdev *mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
	uint8_t user;
	qdf_nbuf_t mpdu;

	if (!ppdu_info)
		return;

	for (user = 0; user < ppdu_info->com_info.num_users; user++) {
		uint16_t mpdu_count;
		uint16_t mpdu_idx;
		struct hal_rx_mon_mpdu_info *mpdu_meta;
		QDF_STATUS status;

		/* defensive bound; mpdu_q/rx_user_status are sized
		 * HAL_MAX_UL_MU_USERS
		 */
		if (user >= HAL_MAX_UL_MU_USERS) {
			dp_mon_err("num user exceeds max limit");
			return;
		}

		mpdu_count  = ppdu_info->mpdu_count[user];
		ppdu_info->rx_status.rx_user_status =
					&ppdu_info->rx_user_status[user];
		for (mpdu_idx = 0; mpdu_idx < mpdu_count; mpdu_idx++) {
			mpdu = qdf_nbuf_queue_remove(&ppdu_info->mpdu_q[user]);

			if (!mpdu)
				continue;

			/* MPDU metadata lives at the start of the nbuf data */
			mpdu_meta = (struct hal_rx_mon_mpdu_info *)qdf_nbuf_data(mpdu);

			if (dp_lite_mon_is_rx_enabled(mon_pdev)) {
				/* lite monitor owns delivery; free on error */
				status = dp_lite_mon_rx_mpdu_process(pdev, ppdu_info,
								     mpdu, mpdu_idx, user);
				if (status != QDF_STATUS_SUCCESS) {
					dp_mon_free_parent_nbuf(mon_pdev, mpdu);
					continue;
				}
			} else {
				if (mpdu_meta->full_pkt) {
					if (qdf_unlikely(mpdu_meta->truncated)) {
						dp_mon_free_parent_nbuf(mon_pdev, mpdu);
						continue;
					}

					status = dp_rx_mon_handle_full_mon(pdev,
									   ppdu_info, mpdu);
					if (status != QDF_STATUS_SUCCESS) {
						dp_mon_free_parent_nbuf(mon_pdev, mpdu);
						continue;
					}
				} else {
					/* partial packets are not delivered */
					dp_mon_free_parent_nbuf(mon_pdev, mpdu);
					continue;
				}

				/* reset mpdu metadata and apply radiotap header over MPDU */
				qdf_mem_zero(mpdu_meta, sizeof(struct hal_rx_mon_mpdu_info));
				if (!qdf_nbuf_update_radiotap(&ppdu_info->rx_status,
							      mpdu,
							      qdf_nbuf_headroom(mpdu))) {
					dp_mon_err("failed to update radiotap pdev: %pK",
						   pdev);
				}

				dp_rx_mon_shift_pf_tag_in_headroom(mpdu,
								   pdev->soc,
								   ppdu_info);

				dp_rx_mon_process_dest_pktlog(pdev->soc,
							      pdev->pdev_id,
							      mpdu);
				/* Deliver MPDU to osif layer */
				status = dp_rx_mon_deliver_mpdu(mon_pdev,
								mpdu,
								&ppdu_info->rx_status);
				if (status != QDF_STATUS_SUCCESS)
					dp_mon_free_parent_nbuf(mon_pdev, mpdu);
			}
		}
	}

	/* drop anything left queued (e.g. early return paths above) */
	dp_rx_mon_free_mpdu_queue(mon_pdev, ppdu_info);
}
808 
809 /**
810  * dp_rx_mon_process_ppdu()-  Deferred monitor processing
811  * This workqueue API handles:
812  * a. Full monitor
813  * b. Lite monitor
814  *
815  * @context: Opaque work context
816  *
817  * Return: none
818  */
819 void dp_rx_mon_process_ppdu(void *context)
820 {
821 	struct dp_pdev *pdev = (struct dp_pdev *)context;
822 	struct dp_mon_pdev *mon_pdev;
823 	struct hal_rx_ppdu_info *ppdu_info = NULL;
824 	struct hal_rx_ppdu_info *temp_ppdu_info = NULL;
825 	struct dp_mon_pdev_be *mon_pdev_be;
826 
827 	if (qdf_unlikely(!pdev)) {
828 		dp_mon_debug("Pdev is NULL");
829 		return;
830 	}
831 
832 	mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
833 	if (qdf_unlikely(!mon_pdev)) {
834 		dp_mon_debug("monitor pdev is NULL");
835 		return;
836 	}
837 
838 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
839 
840 	qdf_spin_lock_bh(&mon_pdev_be->rx_mon_wq_lock);
841 	TAILQ_FOREACH_SAFE(ppdu_info,
842 			   &mon_pdev_be->rx_mon_queue,
843 			   ppdu_list_elem, temp_ppdu_info) {
844 		TAILQ_REMOVE(&mon_pdev_be->rx_mon_queue,
845 			     ppdu_info, ppdu_list_elem);
846 
847 		mon_pdev_be->rx_mon_queue_depth--;
848 		dp_rx_mon_process_ppdu_info(pdev, ppdu_info);
849 		__dp_rx_mon_free_ppdu_info(mon_pdev, ppdu_info);
850 	}
851 	qdf_spin_unlock_bh(&mon_pdev_be->rx_mon_wq_lock);
852 }
853 
854 /**
855  * dp_rx_mon_add_ppdu_info_to_wq() - Add PPDU info to workqueue
856  *
857  * @pdev: monitor pdev
858  * @ppdu_info: ppdu info to be added to workqueue
859  *
860  * Return: SUCCESS or FAILIRE
861  */
862 
863 static QDF_STATUS
864 dp_rx_mon_add_ppdu_info_to_wq(struct dp_pdev *pdev,
865 			      struct hal_rx_ppdu_info *ppdu_info)
866 {
867 	struct dp_mon_pdev *mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
868 	struct dp_mon_pdev_be *mon_pdev_be =
869 		dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
870 
871 	/* Full monitor or lite monitor mode is not enabled, return */
872 	if (!mon_pdev->monitor_configured &&
873 	    !dp_lite_mon_is_rx_enabled(mon_pdev))
874 		return QDF_STATUS_E_FAILURE;
875 
876 	if (qdf_likely(ppdu_info)) {
877 		if (mon_pdev_be->rx_mon_queue_depth < DP_RX_MON_WQ_THRESHOLD) {
878 			qdf_spin_lock_bh(&mon_pdev_be->rx_mon_wq_lock);
879 			TAILQ_INSERT_TAIL(&mon_pdev_be->rx_mon_queue,
880 					  ppdu_info, ppdu_list_elem);
881 			mon_pdev_be->rx_mon_queue_depth++;
882 			mon_pdev->rx_mon_stats.total_ppdu_info_enq++;
883 		} else {
884 			mon_pdev->rx_mon_stats.total_ppdu_info_drop++;
885 			dp_rx_mon_free_ppdu_info(pdev, ppdu_info);
886 		}
887 		qdf_spin_unlock_bh(&mon_pdev_be->rx_mon_wq_lock);
888 
889 		if (mon_pdev_be->rx_mon_queue_depth > DP_MON_QUEUE_DEPTH_MAX) {
890 			qdf_queue_work(0, mon_pdev_be->rx_mon_workqueue,
891 				       &mon_pdev_be->rx_mon_work);
892 		}
893 	}
894 	return QDF_STATUS_SUCCESS;
895 }
896 
897 QDF_STATUS
898 dp_rx_mon_handle_full_mon(struct dp_pdev *pdev,
899 			  struct hal_rx_ppdu_info *ppdu_info,
900 			  qdf_nbuf_t mpdu)
901 {
902 	uint32_t wifi_hdr_len, sec_hdr_len, msdu_llc_len,
903 		 mpdu_buf_len, decap_hdr_pull_bytes, dir,
904 		 is_amsdu, amsdu_pad, frag_size, tot_msdu_len;
905 	struct hal_rx_mon_mpdu_info *mpdu_meta;
906 	struct hal_rx_mon_msdu_info *msdu_meta;
907 	char *hdr_desc;
908 	uint8_t num_frags, frag_iter, l2_hdr_offset;
909 	struct ieee80211_frame *wh;
910 	struct ieee80211_qoscntl *qos;
911 	uint32_t hdr_frag_size, frag_page_offset, pad_byte_pholder;
912 	qdf_nbuf_t head_msdu, msdu_cur;
913 	void *frag_addr;
914 	bool prev_msdu_end_received = false;
915 	bool is_nbuf_head = true;
916 
917 	/***************************************************************************
918 	 *********************** Non-raw packet ************************************
919 	 ---------------------------------------------------------------------------
920 	 |      | frag-0   | frag-1    | frag - 2 | frag - 3  | frag - 4 | frag - 5  |
921 	 | skb  | rx_hdr-1 | rx_msdu-1 | rx_hdr-2 | rx_msdu-2 | rx_hdr-3 | rx-msdu-3 |
922 	 ---------------------------------------------------------------------------
923 	 **************************************************************************/
924 
925 	if (!mpdu) {
926 		dp_mon_debug("nbuf is NULL, return");
927 		return QDF_STATUS_E_FAILURE;
928 	}
929 
930 	head_msdu = mpdu;
931 
932 	mpdu_meta = (struct hal_rx_mon_mpdu_info *)qdf_nbuf_data(mpdu);
933 
934 	if (mpdu_meta->decap_type == HAL_HW_RX_DECAP_FORMAT_RAW) {
935 		qdf_nbuf_trim_add_frag_size(mpdu,
936 					    qdf_nbuf_get_nr_frags(mpdu) - 1,
937 					    -HAL_RX_FCS_LEN, 0);
938 		return QDF_STATUS_SUCCESS;
939 	}
940 
941 	num_frags = qdf_nbuf_get_nr_frags(mpdu);
942 	if (qdf_unlikely(num_frags < DP_MON_MIN_FRAGS_FOR_RESTITCH)) {
943 		dp_mon_debug("not enough frags(%d) for restitch", num_frags);
944 		return QDF_STATUS_E_FAILURE;
945 	}
946 
947 	l2_hdr_offset = DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE;
948 
949 	/* hdr_desc points to 80211 hdr */
950 	hdr_desc = qdf_nbuf_get_frag_addr(mpdu, 0);
951 
952 	/* Calculate Base header size */
953 	wifi_hdr_len = sizeof(struct ieee80211_frame);
954 	wh = (struct ieee80211_frame *)hdr_desc;
955 
956 	dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
957 
958 	if (dir == IEEE80211_FC1_DIR_DSTODS)
959 		wifi_hdr_len += 6;
960 
961 	is_amsdu = 0;
962 	if (wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) {
963 		qos = (struct ieee80211_qoscntl *)
964 			(hdr_desc + wifi_hdr_len);
965 		wifi_hdr_len += 2;
966 
967 		is_amsdu = (qos->i_qos[0] & IEEE80211_QOS_AMSDU);
968 	}
969 
970 	/*Calculate security header length based on 'Protected'
971 	 * and 'EXT_IV' flag
972 	 */
973 	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
974 		char *iv = (char *)wh + wifi_hdr_len;
975 
976 		if (iv[3] & KEY_EXTIV)
977 			sec_hdr_len = 8;
978 		else
979 			sec_hdr_len = 4;
980 	} else {
981 		sec_hdr_len = 0;
982 	}
983 	wifi_hdr_len += sec_hdr_len;
984 
985 	/* MSDU related stuff LLC - AMSDU subframe header etc */
986 	msdu_llc_len = is_amsdu ? (DP_RX_MON_DECAP_HDR_SIZE +
987 				   DP_RX_MON_LLC_SIZE +
988 				   DP_RX_MON_SNAP_SIZE) :
989 				   (DP_RX_MON_LLC_SIZE + DP_RX_MON_SNAP_SIZE);
990 
991 	mpdu_buf_len = wifi_hdr_len + msdu_llc_len;
992 
993 	/* "Decap" header to remove from MSDU buffer */
994 	decap_hdr_pull_bytes = DP_RX_MON_DECAP_HDR_SIZE;
995 
996 	amsdu_pad = 0;
997 	tot_msdu_len = 0;
998 	tot_msdu_len = 0;
999 
1000 	/*
1001 	 * Update protocol and flow tag for MSDU
1002 	 * update frag index in ctx_idx field.
1003 	 * Reset head pointer data of nbuf before updating.
1004 	 */
1005 	QDF_NBUF_CB_RX_CTX_ID(mpdu) = 0;
1006 
1007 	/* Construct destination address */
1008 	hdr_frag_size = qdf_nbuf_get_frag_size_by_idx(mpdu, 0);
1009 
1010 	/* Adjust page frag offset to point to 802.11 header */
1011 	if (hdr_frag_size > mpdu_buf_len)
1012 		qdf_nbuf_trim_add_frag_size(head_msdu, 0, -(hdr_frag_size - mpdu_buf_len), 0);
1013 
1014 	msdu_meta = (struct hal_rx_mon_msdu_info *)(((void *)qdf_nbuf_get_frag_addr(mpdu, 1)) - DP_RX_MON_PACKET_OFFSET);
1015 
1016 	frag_size = qdf_nbuf_get_frag_size_by_idx(head_msdu, 1);
1017 	pad_byte_pholder =
1018 		RX_MONITOR_BUFFER_SIZE - (frag_size + DP_RX_MON_PACKET_OFFSET);
1019 
1020 	/* Adjust page frag offset to appropriate after decap header */
1021 	frag_page_offset =
1022 		decap_hdr_pull_bytes + l2_hdr_offset;
1023 	qdf_nbuf_move_frag_page_offset(head_msdu, 1, frag_page_offset);
1024 	frag_size = frag_size - frag_page_offset;
1025 
1026 	if (msdu_meta->first_buffer && msdu_meta->last_buffer) {
1027 		/* MSDU with single buffer */
1028 		amsdu_pad = frag_size & 0x3;
1029 		amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;
1030 		if (amsdu_pad && (amsdu_pad <= pad_byte_pholder)) {
1031 			char *frag_addr_temp;
1032 
1033 			qdf_nbuf_trim_add_frag_size(mpdu, 1, amsdu_pad, 0);
1034 			frag_addr_temp =
1035 				(char *)qdf_nbuf_get_frag_addr(mpdu, 1);
1036 			frag_addr_temp = (frag_addr_temp +
1037 					  qdf_nbuf_get_frag_size_by_idx(mpdu, 1)) -
1038 				amsdu_pad;
1039 			qdf_mem_zero(frag_addr_temp, amsdu_pad);
1040 			amsdu_pad = 0;
1041 		}
1042 	} else {
1043 		tot_msdu_len = frag_size;
1044 		amsdu_pad = 0;
1045 	}
1046 
1047 	pad_byte_pholder = 0;
1048 	for (msdu_cur = mpdu; msdu_cur;) {
1049 		/* frag_iter will start from 0 for second skb onwards */
1050 		if (msdu_cur == mpdu)
1051 			frag_iter = 2;
1052 		else
1053 			frag_iter = 0;
1054 
1055 		num_frags = qdf_nbuf_get_nr_frags(msdu_cur);
1056 
1057 		for (; frag_iter < num_frags; frag_iter++) {
1058 			/* Construct destination address
1059 			 *  ----------------------------------------------------------
1060 			 * |            | L2_HDR_PAD   |   Decap HDR | Payload | Pad  |
1061 			 * |            | (First buffer)             |         |      |
1062 			 * |            |                            /        /       |
1063 			 * |            >Frag address points here   /        /        |
1064 			 * |            \                          /        /         |
1065 			 * |             \ This bytes needs to    /        /          |
1066 			 * |              \  removed to frame pkt/        /           |
1067 			 * |               ----------------------        /            |
1068 			 * |                                     |     /     Add      |
1069 			 * |                                     |    /   amsdu pad   |
1070 			 * |   LLC HDR will be added here      <-|    |   Byte for    |
1071 			 * |        |                            |    |   last frame  |
1072 			 * |         >Dest addr will point       |    |    if space   |
1073 			 * |            somewhere in this area   |    |    available  |
1074 			 * |  And amsdu_pad will be created if   |    |               |
1075 			 * | dint get added in last buffer       |    |               |
1076 			 * |       (First Buffer)                |    |               |
1077 			 *  ----------------------------------------------------------
1078 			 */
1079 			/* If previous msdu end has received, modify next frag's offset to point to LLC */
1080 			if (prev_msdu_end_received) {
1081 				hdr_frag_size = qdf_nbuf_get_frag_size_by_idx(msdu_cur, frag_iter);
1082 				/* Adjust page frag offset to point to llc/snap header */
1083 				if (hdr_frag_size > msdu_llc_len)
1084 					qdf_nbuf_trim_add_frag_size(msdu_cur, frag_iter, -(hdr_frag_size - msdu_llc_len), 0);
1085 				prev_msdu_end_received = false;
1086 				continue;
1087 			}
1088 
1089 			frag_addr =
1090 				qdf_nbuf_get_frag_addr(msdu_cur, frag_iter) -
1091 						       DP_RX_MON_PACKET_OFFSET;
1092 			msdu_meta = (struct hal_rx_mon_msdu_info *)frag_addr;
1093 
1094 			/*
1095 			 * Update protocol and flow tag for MSDU
1096 			 * update frag index in ctx_idx field
1097 			 */
1098 			QDF_NBUF_CB_RX_CTX_ID(msdu_cur) = frag_iter;
1099 
1100 			frag_size = qdf_nbuf_get_frag_size_by_idx(msdu_cur,
1101 					frag_iter);
1102 
1103 			/* If Middle buffer, dont add any header */
1104 			if ((!msdu_meta->first_buffer) &&
1105 					(!msdu_meta->last_buffer)) {
1106 				tot_msdu_len += frag_size;
1107 				amsdu_pad = 0;
1108 				pad_byte_pholder = 0;
1109 				continue;
1110 			}
1111 
1112 			/* Calculate if current buffer has placeholder
1113 			 * to accommodate amsdu pad byte
1114 			 */
1115 			pad_byte_pholder =
1116 				RX_MONITOR_BUFFER_SIZE - (frag_size + DP_RX_MON_PACKET_OFFSET);
1117 			/*
1118 			 * We will come here only only three condition:
1119 			 * 1. Msdu with single Buffer
1120 			 * 2. First buffer in case MSDU is spread in multiple
1121 			 *    buffer
1122 			 * 3. Last buffer in case MSDU is spread in multiple
1123 			 *    buffer
1124 			 *
1125 			 *         First buffER | Last buffer
1126 			 * Case 1:      1       |     1
1127 			 * Case 2:      1       |     0
1128 			 * Case 3:      0       |     1
1129 			 *
1130 			 * In 3rd case only l2_hdr_padding byte will be Zero and
1131 			 * in other case, It will be 2 Bytes.
1132 			 */
1133 			if (msdu_meta->first_buffer)
1134 				l2_hdr_offset =
1135 					DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE;
1136 			else
1137 				l2_hdr_offset = DP_RX_MON_RAW_L2_HDR_PAD_BYTE;
1138 
1139 			if (msdu_meta->first_buffer) {
1140 				/* Adjust page frag offset to point to 802.11 header */
1141 				hdr_frag_size = qdf_nbuf_get_frag_size_by_idx(msdu_cur, frag_iter-1);
1142 				if (hdr_frag_size > (msdu_llc_len + amsdu_pad))
1143 					qdf_nbuf_trim_add_frag_size(msdu_cur, frag_iter - 1, -(hdr_frag_size - (msdu_llc_len + amsdu_pad)), 0);
1144 
1145 				/* Adjust page frag offset to appropriate after decap header */
1146 				frag_page_offset =
1147 					(decap_hdr_pull_bytes + l2_hdr_offset);
1148 				if (frag_size > (decap_hdr_pull_bytes + l2_hdr_offset)) {
1149 					qdf_nbuf_move_frag_page_offset(msdu_cur, frag_iter, frag_page_offset);
1150 					frag_size = frag_size - (l2_hdr_offset + decap_hdr_pull_bytes);
1151 				}
1152 
1153 
1154 				/*
1155 				 * Calculate new page offset and create hole
1156 				 * if amsdu_pad required.
1157 				 */
1158 				tot_msdu_len = frag_size;
1159 				/*
1160 				 * No amsdu padding required for first frame of
1161 				 * continuation buffer
1162 				 */
1163 				if (!msdu_meta->last_buffer) {
1164 					amsdu_pad = 0;
1165 					continue;
1166 				}
1167 			} else {
1168 				tot_msdu_len += frag_size;
1169 			}
1170 
1171 			/* Will reach to this place in only two case:
1172 			 * 1. Single buffer MSDU
1173 			 * 2. Last buffer of MSDU in case of multiple buf MSDU
1174 			 */
1175 
1176 			/* This flag is used to identify msdu boundary */
1177 			prev_msdu_end_received = true;
1178 			/* Check size of buffer if amsdu padding required */
1179 			amsdu_pad = tot_msdu_len & 0x3;
1180 			amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;
1181 
1182 			/* Create placeholder if current buffer can
1183 			 * accommodate padding.
1184 			 */
1185 			if (amsdu_pad && (amsdu_pad <= pad_byte_pholder)) {
1186 				char *frag_addr_temp;
1187 
1188 				qdf_nbuf_trim_add_frag_size(msdu_cur,
1189 						frag_iter,
1190 						amsdu_pad, 0);
1191 				frag_addr_temp = (char *)qdf_nbuf_get_frag_addr(msdu_cur,
1192 						frag_iter);
1193 				frag_addr_temp = (frag_addr_temp +
1194 						qdf_nbuf_get_frag_size_by_idx(msdu_cur, frag_iter)) -
1195 					amsdu_pad;
1196 				qdf_mem_zero(frag_addr_temp, amsdu_pad);
1197 				amsdu_pad = 0;
1198 			}
1199 
1200 			/* reset tot_msdu_len */
1201 			tot_msdu_len = 0;
1202 		}
1203 		if (is_nbuf_head) {
1204 			msdu_cur = qdf_nbuf_get_ext_list(msdu_cur);
1205 			is_nbuf_head = false;
1206 		} else {
1207 			msdu_cur = qdf_nbuf_queue_next(msdu_cur);
1208 		}
1209 	}
1210 
1211 	return QDF_STATUS_SUCCESS;
1212 }
1213 
1214 static inline int
1215 dp_rx_mon_flush_packet_tlv(struct dp_pdev *pdev, void *buf, uint16_t end_offset,
1216 			   union dp_mon_desc_list_elem_t **desc_list,
1217 			   union dp_mon_desc_list_elem_t **tail)
1218 {
1219 	struct dp_soc *soc = pdev->soc;
1220 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1221 	struct dp_mon_pdev_be *mon_pdev_be =
1222 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1223 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1224 	uint16_t work_done = 0;
1225 	qdf_frag_t addr;
1226 	uint8_t *rx_tlv;
1227 	uint8_t *rx_tlv_start;
1228 	uint16_t tlv_status = HAL_TLV_STATUS_BUF_DONE;
1229 	struct hal_rx_ppdu_info *ppdu_info;
1230 
1231 	if (!buf)
1232 		return work_done;
1233 
1234 	ppdu_info = &mon_pdev->ppdu_info;
1235 	if (!ppdu_info) {
1236 		dp_mon_debug("ppdu_info malloc failed pdev: %pK", pdev);
1237 		return work_done;
1238 	}
1239 	qdf_mem_zero(ppdu_info, sizeof(struct hal_rx_ppdu_info));
1240 	rx_tlv = buf;
1241 	rx_tlv_start = buf;
1242 
1243 	do {
1244 		tlv_status = hal_rx_status_get_tlv_info(rx_tlv,
1245 							ppdu_info,
1246 							pdev->soc->hal_soc,
1247 							buf);
1248 
1249 		if (tlv_status == HAL_TLV_STATUS_MON_BUF_ADDR) {
1250 			struct dp_mon_desc *mon_desc = (struct dp_mon_desc *)(uintptr_t)ppdu_info->packet_info.sw_cookie;
1251 
1252 			qdf_assert_always(mon_desc);
1253 
1254 			/* WAR: sometimes duplicate pkt desc are received
1255 			 * from HW, this check gracefully handles
1256 			 * such cases.
1257 			 */
1258 			if ((mon_desc == mon_pdev_be->prev_rxmon_pkt_desc) &&
1259 			    (mon_desc->cookie ==
1260 			     mon_pdev_be->prev_rxmon_pkt_cookie)) {
1261 				dp_mon_err("duplicate pkt desc found mon_pdev: %pK mon_desc: %pK cookie: %d",
1262 					   mon_pdev, mon_desc,
1263 					   mon_desc->cookie);
1264 				mon_pdev->rx_mon_stats.dup_mon_buf_cnt++;
1265 				goto end;
1266 			}
1267 			mon_pdev_be->prev_rxmon_pkt_desc = mon_desc;
1268 			mon_pdev_be->prev_rxmon_pkt_cookie = mon_desc->cookie;
1269 
1270 			addr = mon_desc->buf_addr;
1271 
1272 			if (!mon_desc->unmapped) {
1273 				qdf_mem_unmap_page(soc->osdev,
1274 						   (qdf_dma_addr_t)mon_desc->paddr,
1275 						   DP_MON_DATA_BUFFER_SIZE,
1276 						   QDF_DMA_FROM_DEVICE);
1277 				mon_desc->unmapped = 1;
1278 			}
1279 			dp_mon_add_to_free_desc_list(desc_list, tail, mon_desc);
1280 			work_done++;
1281 
1282 			if (addr) {
1283 				qdf_frag_free(addr);
1284 				DP_STATS_INC(mon_soc, frag_free, 1);
1285 			}
1286 		}
1287 
1288 end:
1289 		rx_tlv = hal_rx_status_get_next_tlv(rx_tlv, 1);
1290 
1291 		if ((rx_tlv - rx_tlv_start) >= (end_offset + 1))
1292 			break;
1293 
1294 	} while ((tlv_status == HAL_TLV_STATUS_PPDU_NOT_DONE) ||
1295 		 (tlv_status == HAL_TLV_STATUS_HEADER) ||
1296 		 (tlv_status == HAL_TLV_STATUS_MPDU_END) ||
1297 		 (tlv_status == HAL_TLV_STATUS_MSDU_END) ||
1298 		 (tlv_status == HAL_TLV_STATUS_MON_BUF_ADDR) ||
1299 		 (tlv_status == HAL_TLV_STATUS_MPDU_START));
1300 
1301 	return work_done;
1302 }
1303 
1304 /**
1305  * dp_rx_mon_flush_status_buf_queue() - Flush status buffer queue
1306  *
1307  * @pdev: DP pdev handle
1308  *
1309  *Return: void
1310  */
1311 static inline void
1312 dp_rx_mon_flush_status_buf_queue(struct dp_pdev *pdev)
1313 {
1314 	struct dp_soc *soc = pdev->soc;
1315 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1316 	struct dp_mon_pdev_be *mon_pdev_be =
1317 		dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1318 	union dp_mon_desc_list_elem_t *desc_list = NULL;
1319 	union dp_mon_desc_list_elem_t *tail = NULL;
1320 	struct dp_mon_desc *mon_desc;
1321 	uint16_t idx;
1322 	void *buf;
1323 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1324 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1325 	struct dp_mon_desc_pool *rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
1326 	uint16_t work_done = 0;
1327 	uint16_t status_buf_count;
1328 	uint16_t end_offset = 0;
1329 
1330 	if (!mon_pdev_be->desc_count) {
1331 		dp_mon_info("no of status buffer count is zero: %pK", pdev);
1332 		return;
1333 	}
1334 
1335 	status_buf_count = mon_pdev_be->desc_count;
1336 	for (idx = 0; idx < status_buf_count; idx++) {
1337 		mon_desc = mon_pdev_be->status[idx];
1338 		if (!mon_desc) {
1339 			qdf_assert_always(0);
1340 			return;
1341 		}
1342 
1343 		buf = mon_desc->buf_addr;
1344 		end_offset = mon_desc->end_offset;
1345 
1346 		dp_mon_add_to_free_desc_list(&desc_list, &tail, mon_desc);
1347 		work_done++;
1348 
1349 		work_done += dp_rx_mon_flush_packet_tlv(pdev, buf, end_offset,
1350 							&desc_list, &tail);
1351 
1352 		/* set status buffer pointer to NULL */
1353 		mon_pdev_be->status[idx] = NULL;
1354 		mon_pdev_be->desc_count--;
1355 
1356 		qdf_frag_free(buf);
1357 		DP_STATS_INC(mon_soc, frag_free, 1);
1358 	}
1359 	mon_pdev_be->prev_rxmon_pkt_desc = NULL;
1360 	mon_pdev_be->prev_rxmon_pkt_cookie = 0;
1361 
1362 	if (work_done) {
1363 		mon_pdev->rx_mon_stats.mon_rx_bufs_replenished_dest +=
1364 			work_done;
1365 		if (desc_list)
1366 			dp_mon_add_desc_list_to_free_list(soc,
1367 							  &desc_list, &tail,
1368 							  rx_mon_desc_pool);
1369 	}
1370 }
1371 
1372 /**
1373  * dp_rx_mon_handle_flush_n_trucated_ppdu() - Handle flush and truncated ppdu
1374  *
1375  * @soc: DP soc handle
1376  * @pdev: pdev handle
1377  * @mon_desc: mon sw desc
1378  */
1379 static inline void
1380 dp_rx_mon_handle_flush_n_trucated_ppdu(struct dp_soc *soc,
1381 				       struct dp_pdev *pdev,
1382 				       struct dp_mon_desc *mon_desc)
1383 {
1384 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1385 	struct dp_mon_pdev_be *mon_pdev_be =
1386 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1387 	union dp_mon_desc_list_elem_t *desc_list = NULL;
1388 	union dp_mon_desc_list_elem_t *tail = NULL;
1389 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1390 	struct dp_mon_soc_be *mon_soc_be =
1391 			dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1392 	struct dp_mon_desc_pool *rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
1393 	uint16_t work_done;
1394 	void *buf;
1395 	uint16_t end_offset = 0;
1396 
1397 	/* Flush status buffers in queue */
1398 	dp_rx_mon_flush_status_buf_queue(pdev);
1399 	buf = mon_desc->buf_addr;
1400 	end_offset = mon_desc->end_offset;
1401 	dp_mon_add_to_free_desc_list(&desc_list, &tail, mon_desc);
1402 	work_done = 1;
1403 	work_done += dp_rx_mon_flush_packet_tlv(pdev, buf, end_offset,
1404 						&desc_list, &tail);
1405 	if (buf) {
1406 		qdf_frag_free(buf);
1407 		DP_STATS_INC(mon_soc, frag_free, 1);
1408 	}
1409 
1410 	mon_pdev_be->prev_rxmon_desc = NULL;
1411 	mon_pdev_be->prev_rxmon_cookie = 0;
1412 
1413 	if (desc_list)
1414 		dp_mon_add_desc_list_to_free_list(soc, &desc_list, &tail,
1415 						  rx_mon_desc_pool);
1416 }
1417 
1418 void dp_rx_mon_append_nbuf(qdf_nbuf_t nbuf, qdf_nbuf_t tmp_nbuf)
1419 {
1420 	qdf_nbuf_t last_nbuf;
1421 
1422 	/*
1423 	 * If nbuf does not have fraglist, then append tmp_nbuf as fraglist,
1424 	 * else append tmp_nbuf as next of last_nbuf present in nbuf fraglist.
1425 	 */
1426 	if (!qdf_nbuf_has_fraglist(nbuf))
1427 		qdf_nbuf_append_ext_list(nbuf, tmp_nbuf,
1428 					 qdf_nbuf_len(tmp_nbuf));
1429 	else {
1430 		last_nbuf = qdf_nbuf_get_last_frag_list_nbuf(nbuf);
1431 
1432 		if (qdf_likely(last_nbuf))
1433 			qdf_nbuf_set_next(last_nbuf, tmp_nbuf);
1434 	}
1435 }
1436 
/**
 * dp_rx_mon_process_tlv_status() - Act on one parsed status TLV and update
 * the per-user MPDU nbuf queue under construction
 * @pdev: DP pdev handle
 * @ppdu_info: HAL PPDU info populated by the TLV parser
 * @status_frag: status buffer fragment currently being parsed
 * @tlv_status: TLV type returned by hal_rx_status_get_tlv_info()
 * @desc_list: local free-descriptor list head; reaped pkt descs appended
 * @tail: local free-descriptor list tail
 *
 * Return: number of packet buffer descriptors reaped while handling this TLV
 */
static
uint8_t dp_rx_mon_process_tlv_status(struct dp_pdev *pdev,
				     struct hal_rx_ppdu_info *ppdu_info,
				     void *status_frag,
				     uint16_t tlv_status,
				     union dp_mon_desc_list_elem_t **desc_list,
				     union dp_mon_desc_list_elem_t **tail)
{
	struct dp_soc *soc  = pdev->soc;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	qdf_nbuf_t nbuf, tmp_nbuf;
	qdf_frag_t addr;
	uint8_t user_id = ppdu_info->user_id;
	/* NOTE(review): mpdu_count[user_id] is read before the user_id
	 * bounds check below; potential OOB read when user_id >=
	 * HAL_MAX_UL_MU_USERS — confirm array dimension.
	 */
	uint8_t mpdu_idx = ppdu_info->mpdu_count[user_id];
	uint16_t num_frags;
	uint8_t num_buf_reaped = 0;
	bool rx_hdr_valid = true;
	QDF_STATUS status;

	/* Nothing to build if neither full monitor nor lite monitor rx
	 * is enabled on this pdev.
	 */
	if (!mon_pdev->monitor_configured &&
	    !dp_lite_mon_is_rx_enabled(mon_pdev)) {
		return num_buf_reaped;
	}

	/* If user id or rx header len is invalid drop this
	 * mpdu. However we have to honor buffer address TLV
	 * for this mpdu to free any associated packet buffer
	 */
	if (qdf_unlikely(user_id >= HAL_MAX_UL_MU_USERS ||
			 ppdu_info->hdr_len > DP_RX_MON_MAX_RX_HEADER_LEN))
		rx_hdr_valid = false;

	switch (tlv_status) {
	case HAL_TLV_STATUS_HEADER: {
		if (qdf_unlikely(!rx_hdr_valid)) {
			dp_mon_debug("rx hdr invalid userid: %d, len: %d ",
				     user_id, ppdu_info->hdr_len);
			mon_pdev->rx_mon_stats.rx_hdr_invalid_cnt++;
			return num_buf_reaped;
		}

		/* If this is first RX_HEADER for MPDU, allocate skb
		 * else add frag to already allocated skb
		 */

		if (!ppdu_info->mpdu_info[user_id].mpdu_start_received) {

			nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
					      DP_RX_MON_TLV_ROOM +
					      DP_RX_MON_MAX_RADIO_TAP_HDR,
					      DP_RX_MON_TLV_ROOM +
					      DP_RX_MON_MAX_RADIO_TAP_HDR,
					      4, FALSE);

			/* Set *head_msdu->next as NULL as all msdus are
			 * mapped via nr frags
			 */
			if (qdf_unlikely(!nbuf)) {
				dp_mon_debug("malloc failed pdev: %pK ", pdev);
				return num_buf_reaped;
			}

			mon_pdev->rx_mon_stats.parent_buf_alloc++;

			dp_rx_mon_set_zero(nbuf);

			qdf_nbuf_set_next(nbuf, NULL);

			qdf_nbuf_queue_add(&ppdu_info->mpdu_q[user_id], nbuf);

			/* Attach the rx header portion of the status frag
			 * as the first frag of the new parent nbuf.
			 */
			status = dp_rx_mon_nbuf_add_rx_frag(nbuf, status_frag,
							    ppdu_info->hdr_len - DP_RX_MON_RX_HDR_OFFSET,
							    ppdu_info->data - (unsigned char *)status_frag + 4,
							    DP_MON_DATA_BUFFER_SIZE, true);
			if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
				dp_mon_err("num_frags exceeding MAX frags");
				qdf_assert_always(0);
			}
			ppdu_info->mpdu_info[ppdu_info->user_id].mpdu_start_received = true;
			ppdu_info->mpdu_info[user_id].first_rx_hdr_rcvd = true;
			/* initialize decap type to invalid, this will be set to appropriate
			 * value once the mpdu start tlv is received
			 */
			ppdu_info->mpdu_info[user_id].decap_type = DP_MON_DECAP_FORMAT_INVALID;
		} else {
			if (ppdu_info->mpdu_info[user_id].decap_type ==
					HAL_HW_RX_DECAP_FORMAT_RAW) {
				return num_buf_reaped;
			}

			/* Lite monitor above-msdu levels do not need
			 * per-msdu rx headers.
			 */
			if (dp_lite_mon_is_rx_enabled(mon_pdev) &&
			    !dp_lite_mon_is_level_msdu(mon_pdev))
				break;

			nbuf = qdf_nbuf_queue_last(&ppdu_info->mpdu_q[user_id]);
			if (qdf_unlikely(!nbuf)) {
				dp_mon_debug("nbuf is NULL");
				return num_buf_reaped;
			}

			/* Find an nbuf in the chain with a free frag slot,
			 * else extend the chain with a fresh nbuf.
			 */
			tmp_nbuf = qdf_get_nbuf_valid_frag(nbuf);

			if (!tmp_nbuf) {
				tmp_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
							  DP_RX_MON_MAX_MONITOR_HEADER,
							  DP_RX_MON_MAX_MONITOR_HEADER,
							  4, FALSE);
				if (qdf_unlikely(!tmp_nbuf)) {
					dp_mon_err("nbuf is NULL");
					qdf_assert_always(0);
				}
				mon_pdev->rx_mon_stats.parent_buf_alloc++;
				dp_rx_mon_append_nbuf(nbuf, tmp_nbuf);
			}
			dp_rx_mon_nbuf_add_rx_frag(tmp_nbuf, status_frag,
						   ppdu_info->hdr_len - DP_RX_MON_RX_HDR_OFFSET,
						   ppdu_info->data - (unsigned char *)status_frag + 4,
						   DP_MON_DATA_BUFFER_SIZE,
						   true);
		}
		ppdu_info->rx_hdr_rcvd[user_id] = true;
	}
	break;
	case HAL_TLV_STATUS_MON_BUF_ADDR:
	{
		struct hal_rx_mon_msdu_info *buf_info;
		struct hal_mon_packet_info *packet_info = &ppdu_info->packet_info;
		struct dp_mon_desc *mon_desc = (struct dp_mon_desc *)(uintptr_t)ppdu_info->packet_info.sw_cookie;
		struct hal_rx_mon_mpdu_info *mpdu_info;
		uint16_t frag_idx = 0;

		qdf_assert_always(mon_desc);

		if (mon_desc->magic != DP_MON_DESC_MAGIC)
			qdf_assert_always(0);

		/* WAR: sometimes duplicate pkt desc are received
		 * from HW this check gracefully handles
		 * such cases.
		 */
		if ((mon_desc == mon_pdev_be->prev_rxmon_pkt_desc) &&
		    (mon_desc->cookie ==
		     mon_pdev_be->prev_rxmon_pkt_cookie)) {
			dp_mon_err("duplicate pkt desc found mon_pdev: %pK mon_desc: %pK cookie: %d",
				   mon_pdev, mon_desc,
				   mon_desc->cookie);
			mon_pdev->rx_mon_stats.dup_mon_buf_cnt++;
			return num_buf_reaped;
		}
		mon_pdev_be->prev_rxmon_pkt_desc = mon_desc;
		mon_pdev_be->prev_rxmon_pkt_cookie = mon_desc->cookie;

		addr = mon_desc->buf_addr;
		qdf_assert_always(addr);

		/* Unmap before handing the frag to the nbuf / freeing it */
		if (!mon_desc->unmapped) {
			qdf_mem_unmap_page(soc->osdev,
					   (qdf_dma_addr_t)mon_desc->paddr,
				   DP_MON_DATA_BUFFER_SIZE,
					   QDF_DMA_FROM_DEVICE);
			mon_desc->unmapped = 1;
		}
		dp_mon_add_to_free_desc_list(desc_list, tail, mon_desc);
		num_buf_reaped++;

		mon_pdev->rx_mon_stats.pkt_buf_count++;

		/* if rx hdr is not valid free pkt buffer and return */
		if (qdf_unlikely(!rx_hdr_valid)) {
			DP_STATS_INC(mon_soc, frag_free, 1);
			qdf_frag_free(addr);
			return num_buf_reaped;
		}

		if (qdf_unlikely(!ppdu_info->rx_hdr_rcvd[user_id])) {

			/* WAR: RX_HDR is not received for this MPDU, drop this frame */
			mon_pdev->rx_mon_stats.rx_hdr_not_received++;
			DP_STATS_INC(mon_soc, frag_free, 1);
			qdf_frag_free(addr);
			return num_buf_reaped;
		}

		if (packet_info->dma_length >
		    (DP_MON_DATA_BUFFER_SIZE - DP_RX_MON_PACKET_OFFSET)) {
			/* WAR: Invalid DMA length is received for this MPDU */
			mon_pdev->rx_mon_stats.invalid_dma_length++;
			DP_STATS_INC(mon_soc, frag_free, 1);
			qdf_frag_free(addr);
			return num_buf_reaped;
		}

		nbuf = qdf_nbuf_queue_last(&ppdu_info->mpdu_q[user_id]);
		if (qdf_unlikely(!nbuf)) {
			dp_mon_debug("nbuf is NULL");
			DP_STATS_INC(mon_soc, frag_free, 1);
			DP_STATS_INC(mon_soc, empty_queue, 1);
			qdf_frag_free(addr);
			return num_buf_reaped;
		}

		mpdu_info = &ppdu_info->mpdu_info[user_id];
		if (mpdu_info->decap_type == DP_MON_DECAP_FORMAT_INVALID) {
			/* decap type is invalid, drop the frame */
			mon_pdev->rx_mon_stats.mpdu_decap_type_invalid++;
			DP_STATS_INC(mon_soc, frag_free, 1);
			mon_pdev->rx_mon_stats.parent_buf_free++;
			qdf_frag_free(addr);
			qdf_nbuf_queue_remove_last(&ppdu_info->mpdu_q[user_id]);
			qdf_nbuf_free(nbuf);
			/* if invalid decap type handling is disabled, assert */
			if (soc->wlan_cfg_ctx->is_handle_invalid_decap_type_disabled) {
				dp_mon_err("Decap type invalid");
				qdf_assert_always(0);
			}
			ppdu_info->rx_hdr_rcvd[user_id] = false;
			return num_buf_reaped;
		}

		tmp_nbuf = qdf_get_nbuf_valid_frag(nbuf);

		if (!tmp_nbuf) {
			tmp_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
						  DP_RX_MON_MAX_MONITOR_HEADER,
						  DP_RX_MON_MAX_MONITOR_HEADER,
						  4, FALSE);
			if (qdf_unlikely(!tmp_nbuf)) {
				dp_mon_err("nbuf is NULL");
				DP_STATS_INC(mon_soc, frag_free, 1);
				mon_pdev->rx_mon_stats.parent_buf_free++;
				qdf_frag_free(addr);
				/* remove this nbuf from queue */
				qdf_nbuf_queue_remove_last(&ppdu_info->mpdu_q[user_id]);
				qdf_nbuf_free(nbuf);
				return num_buf_reaped;
			}
			mon_pdev->rx_mon_stats.parent_buf_alloc++;
			dp_rx_mon_append_nbuf(nbuf, tmp_nbuf);
		}
		mpdu_info->full_pkt = true;

		if (mpdu_info->decap_type == HAL_HW_RX_DECAP_FORMAT_RAW) {
			/* RAW decap: first pkt buffer replaces the rx hdr
			 * frag at index 0; later buffers go to the chain.
			 */
			if (mpdu_info->first_rx_hdr_rcvd) {
				qdf_nbuf_remove_frag(nbuf, frag_idx, DP_MON_DATA_BUFFER_SIZE);
				dp_rx_mon_nbuf_add_rx_frag(nbuf, addr,
							   packet_info->dma_length,
							   DP_RX_MON_PACKET_OFFSET,
							   DP_MON_DATA_BUFFER_SIZE,
							   false);
				DP_STATS_INC(mon_soc, frag_free, 1);
				mpdu_info->first_rx_hdr_rcvd = false;
			} else {
				dp_rx_mon_nbuf_add_rx_frag(tmp_nbuf, addr,
							   packet_info->dma_length,
							   DP_RX_MON_PACKET_OFFSET,
							   DP_MON_DATA_BUFFER_SIZE,
							   false);
				DP_STATS_INC(mon_soc, frag_free, 1);
			}
		} else {
			dp_rx_mon_nbuf_add_rx_frag(tmp_nbuf, addr,
						   packet_info->dma_length,
						   DP_RX_MON_PACKET_OFFSET,
						   DP_MON_DATA_BUFFER_SIZE,
						   false);
				DP_STATS_INC(mon_soc, frag_free, 1);
			/* Packet-buffer headroom doubles as per-msdu
			 * metadata storage for restitching later.
			 */
			buf_info = addr;

			if (!ppdu_info->msdu[user_id].first_buffer) {
				buf_info->first_buffer = true;
				ppdu_info->msdu[user_id].first_buffer = true;
			} else {
				buf_info->first_buffer = false;
			}

			if (packet_info->msdu_continuation)
				buf_info->last_buffer = false;
			else
				buf_info->last_buffer = true;

			buf_info->frag_len = packet_info->dma_length;
		}
		if (qdf_unlikely(packet_info->truncated))
			mpdu_info->truncated = true;
	}
	break;
	case HAL_TLV_STATUS_MSDU_END:
	{
		struct hal_rx_mon_msdu_info *msdu_info;
		struct hal_rx_mon_msdu_info *last_buf_info;

		if (qdf_unlikely(!rx_hdr_valid))
			break;

		msdu_info = &ppdu_info->msdu[user_id];
		/* update msdu metadata at last buffer of msdu in MPDU */
		if (qdf_unlikely(!ppdu_info->rx_hdr_rcvd[user_id])) {
			/* reset msdu info for next msdu for same user */
			qdf_mem_zero(msdu_info, sizeof(*msdu_info));
			dp_mon_debug(" <%d> nbuf is NULL, return user: %d mpdu_idx: %d",
				     __LINE__, user_id, mpdu_idx);
			break;
		}
		nbuf = qdf_nbuf_queue_last(&ppdu_info->mpdu_q[user_id]);
		if (qdf_unlikely(!nbuf)) {
			dp_mon_debug("nbuf is NULL");
			break;
		}
		num_frags = qdf_nbuf_get_nr_frags(nbuf);
		if (ppdu_info->mpdu_info[user_id].decap_type ==
				HAL_HW_RX_DECAP_FORMAT_RAW) {
			break;
		}
		/* This points to last buffer of MSDU . update metadata here */
		addr = qdf_nbuf_get_frag_addr(nbuf, num_frags - 1) -
					      DP_RX_MON_PACKET_OFFSET;
		last_buf_info = addr;

		last_buf_info->first_msdu = msdu_info->first_msdu;
		last_buf_info->last_msdu = msdu_info->last_msdu;
		last_buf_info->decap_type = msdu_info->decap_type;
		last_buf_info->msdu_index = msdu_info->msdu_index;
		last_buf_info->user_rssi = msdu_info->user_rssi;
		last_buf_info->reception_type = msdu_info->reception_type;
		last_buf_info->msdu_len = msdu_info->msdu_len;

		/* If flow classification is enabled,
		 * update protocol and flow tag to buf headroom
		 */
		dp_rx_mon_pf_tag_to_buf_headroom_2_0(nbuf, ppdu_info, pdev,
						     soc);

		/* reset msdu info for next msdu for same user */
		qdf_mem_zero(msdu_info, sizeof(*msdu_info));
	}
	break;
	case HAL_TLV_STATUS_MPDU_START:
	{
		struct hal_rx_mon_mpdu_info *mpdu_info, *mpdu_meta;

		if (qdf_unlikely(!rx_hdr_valid))
			break;

		if (qdf_unlikely(!ppdu_info->rx_hdr_rcvd[user_id])) {
			dp_mon_debug(" <%d> nbuf is NULL, return user: %d mpdu_idx: %d", __LINE__, user_id, mpdu_idx);
			break;
		}
		nbuf = qdf_nbuf_queue_last(&ppdu_info->mpdu_q[user_id]);
		if (qdf_unlikely(!nbuf)) {
			dp_mon_debug("nbuf is NULL");
			break;
		}
		/* mpdu metadata lives at the start of the parent nbuf data */
		mpdu_meta = (struct hal_rx_mon_mpdu_info *)qdf_nbuf_data(nbuf);
		mpdu_info = &ppdu_info->mpdu_info[user_id];
		mpdu_meta->decap_type = mpdu_info->decap_type;
		ppdu_info->mpdu_info[ppdu_info->user_id].mpdu_start_received = true;
	break;
	}
	case HAL_TLV_STATUS_MPDU_END:
	{
		struct hal_rx_mon_mpdu_info *mpdu_info, *mpdu_meta;

		if (qdf_unlikely(!rx_hdr_valid))
			break;

		mpdu_info = &ppdu_info->mpdu_info[user_id];
		if (qdf_unlikely(!ppdu_info->rx_hdr_rcvd[user_id])) {
			/* reset mpdu info for next mpdu for same user */
			qdf_mem_zero(mpdu_info, sizeof(*mpdu_info));
			dp_mon_debug(" <%d> nbuf is NULL, return user: %d mpdu_idx: %d",
				     __LINE__, user_id, mpdu_idx);
			break;
		}
		nbuf = qdf_nbuf_queue_last(&ppdu_info->mpdu_q[user_id]);
		if (qdf_unlikely(!nbuf)) {
			dp_mon_debug("nbuf is NULL");
			break;
		}
		/* Copy end-of-mpdu error flags into the mpdu metadata */
		mpdu_meta = (struct hal_rx_mon_mpdu_info *)qdf_nbuf_data(nbuf);
		mpdu_meta->mpdu_length_err = mpdu_info->mpdu_length_err;
		mpdu_meta->fcs_err = mpdu_info->fcs_err;
		ppdu_info->rx_status.rs_fcs_err = mpdu_info->fcs_err;
		mpdu_meta->overflow_err = mpdu_info->overflow_err;
		mpdu_meta->decrypt_err = mpdu_info->decrypt_err;
		mpdu_meta->full_pkt = mpdu_info->full_pkt;
		mpdu_meta->truncated = mpdu_info->truncated;

		/* reset mpdu info for next mpdu for same user */
		qdf_mem_zero(mpdu_info, sizeof(*mpdu_info));
		ppdu_info->mpdu_info[ppdu_info->user_id].mpdu_start_received = false;
		ppdu_info->mpdu_count[user_id]++;
		ppdu_info->rx_hdr_rcvd[user_id] = false;
	}
	break;
	case HAL_TLV_STATUS_MON_DROP:
	{
		/* Fold HW-reported drop counters into pdev stats */
		mon_pdev->rx_mon_stats.ppdu_drop_cnt +=
			ppdu_info->drop_cnt.ppdu_drop_cnt;
		mon_pdev->rx_mon_stats.mpdu_drop_cnt +=
			ppdu_info->drop_cnt.mpdu_drop_cnt;
		mon_pdev->rx_mon_stats.end_of_ppdu_drop_cnt +=
			ppdu_info->drop_cnt.end_of_ppdu_drop_cnt;
		mon_pdev->rx_mon_stats.tlv_drop_cnt +=
			ppdu_info->drop_cnt.tlv_drop_cnt;
	}
	break;
	}
	return num_buf_reaped;
}
1849 
1850 /**
1851  * dp_rx_mon_process_status_tlv() - Handle mon status process TLV
1852  *
1853  * @pdev: DP pdev handle
1854  *
1855  * Return
1856  */
1857 static inline struct hal_rx_ppdu_info *
1858 dp_rx_mon_process_status_tlv(struct dp_pdev *pdev)
1859 {
1860 	struct dp_soc *soc = pdev->soc;
1861 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1862 	struct dp_mon_pdev_be *mon_pdev_be =
1863 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1864 	union dp_mon_desc_list_elem_t *desc_list = NULL;
1865 	union dp_mon_desc_list_elem_t *tail = NULL;
1866 	struct dp_mon_desc *mon_desc;
1867 	uint8_t user;
1868 	uint16_t idx;
1869 	void *buf;
1870 	struct hal_rx_ppdu_info *ppdu_info;
1871 	uint8_t *rx_tlv;
1872 	uint8_t *rx_tlv_start;
1873 	uint16_t end_offset = 0;
1874 	uint16_t tlv_status = HAL_TLV_STATUS_BUF_DONE;
1875 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1876 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1877 	struct dp_mon_desc_pool *rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
1878 	uint16_t work_done = 0;
1879 	uint16_t status_buf_count;
1880 
1881 	if (!mon_pdev_be->desc_count) {
1882 		dp_mon_err("no of status buffer count is zero: %pK", pdev);
1883 		return NULL;
1884 	}
1885 
1886 	ppdu_info = dp_rx_mon_get_ppdu_info(mon_pdev);
1887 
1888 	if (!ppdu_info) {
1889 		dp_mon_debug("ppdu_info malloc failed pdev: %pK", pdev);
1890 		dp_rx_mon_flush_status_buf_queue(pdev);
1891 		return NULL;
1892 	}
1893 
1894 	qdf_mem_zero(ppdu_info, sizeof(struct hal_rx_ppdu_info));
1895 	mon_pdev->rx_mon_stats.total_ppdu_info_alloc++;
1896 
1897 	for (user = 0; user < HAL_MAX_UL_MU_USERS; user++)
1898 		qdf_nbuf_queue_init(&ppdu_info->mpdu_q[user]);
1899 
1900 	status_buf_count = mon_pdev_be->desc_count;
1901 	for (idx = 0; idx < status_buf_count; idx++) {
1902 		mon_desc = mon_pdev_be->status[idx];
1903 		if (!mon_desc) {
1904 			qdf_assert_always(0);
1905 			return NULL;
1906 		}
1907 
1908 		buf = mon_desc->buf_addr;
1909 		end_offset = mon_desc->end_offset;
1910 
1911 		dp_mon_add_to_free_desc_list(&desc_list, &tail, mon_desc);
1912 		work_done++;
1913 
1914 		rx_tlv = buf;
1915 		rx_tlv_start = buf;
1916 
1917 		dp_mon_record_clear_buffer(mon_pdev_be);
1918 
1919 		do {
1920 			tlv_status = hal_rx_status_get_tlv_info(rx_tlv,
1921 								ppdu_info,
1922 								pdev->soc->hal_soc,
1923 								buf);
1924 			dp_mon_record_tlv(mon_pdev_be, ppdu_info);
1925 			work_done += dp_rx_mon_process_tlv_status(pdev,
1926 								  ppdu_info,
1927 								  buf,
1928 								  tlv_status,
1929 								  &desc_list,
1930 								  &tail);
1931 			rx_tlv = hal_rx_status_get_next_tlv(rx_tlv, 1);
1932 
1933 			/* HW provides end_offset (how many bytes HW DMA'ed)
1934 			 * as part of descriptor, use this as delimiter for
1935 			 * status buffer
1936 			 */
1937 			if ((rx_tlv - rx_tlv_start) >= (end_offset + 1))
1938 				break;
1939 
1940 		} while ((tlv_status == HAL_TLV_STATUS_PPDU_NOT_DONE) ||
1941 			 (tlv_status == HAL_TLV_STATUS_HEADER) ||
1942 			 (tlv_status == HAL_TLV_STATUS_MPDU_END) ||
1943 			 (tlv_status == HAL_TLV_STATUS_MSDU_END) ||
1944 			 (tlv_status == HAL_TLV_STATUS_MON_BUF_ADDR) ||
1945 			 (tlv_status == HAL_TLV_STATUS_MPDU_START));
1946 
1947 		/* set status buffer pointer to NULL */
1948 		mon_pdev_be->status[idx] = NULL;
1949 		mon_pdev_be->desc_count--;
1950 
1951 		qdf_frag_free(buf);
1952 		DP_STATS_INC(mon_soc, frag_free, 1);
1953 		mon_pdev->rx_mon_stats.status_buf_count++;
1954 		dp_mon_record_index_update(mon_pdev_be);
1955 	}
1956 	mon_pdev_be->prev_rxmon_desc = NULL;
1957 	mon_pdev_be->prev_rxmon_cookie = 0;
1958 	mon_pdev_be->prev_rxmon_pkt_desc = NULL;
1959 	mon_pdev_be->prev_rxmon_pkt_cookie = 0;
1960 
1961 	dp_mon_rx_stats_update_rssi_dbm_params(mon_pdev, ppdu_info);
1962 	if (work_done) {
1963 		mon_pdev->rx_mon_stats.mon_rx_bufs_replenished_dest +=
1964 			work_done;
1965 		if (desc_list)
1966 			dp_mon_add_desc_list_to_free_list(soc,
1967 							  &desc_list, &tail,
1968 							  rx_mon_desc_pool);
1969 	}
1970 
1971 	ppdu_info->rx_status.tsft = ppdu_info->rx_status.tsft +
1972 				    pdev->timestamp.mlo_offset_lo_us +
1973 				    ((uint64_t)pdev->timestamp.mlo_offset_hi_us
1974 				    << 32);
1975 
1976 	return ppdu_info;
1977 }
1978 
1979 #ifdef WLAN_FEATURE_11BE_MLO
1980 #define DP_PEER_ID_MASK 0x3FFF
1981 /**
1982  * dp_rx_mon_update_peer_id() - Update sw_peer_id with link peer_id
1983  *
1984  * @pdev: DP pdev handle
1985  * @ppdu_info: HAL PPDU Info buffer
1986  *
1987  * Return: none
1988  */
1989 static inline
1990 void dp_rx_mon_update_peer_id(struct dp_pdev *pdev,
1991 			      struct hal_rx_ppdu_info *ppdu_info)
1992 {
1993 	uint32_t i;
1994 	uint16_t peer_id;
1995 	struct dp_soc *soc = pdev->soc;
1996 	uint32_t num_users = ppdu_info->com_info.num_users;
1997 
1998 	for (i = 0; i < num_users; i++) {
1999 		peer_id = ppdu_info->rx_user_status[i].sw_peer_id;
2000 		if (peer_id == HTT_INVALID_PEER)
2001 			continue;
2002 		/*
2003 		+---------------------------------------------------------------------+
2004 		| 15 | 14 | 13 | 12 | 11 | 10 | 9 | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
2005 		+---------------------------------------------------------------------+
2006 		| CHIP ID | ML |                     PEER ID                          |
2007 		+---------------------------------------------------------------------+
2008 		*/
2009 		peer_id &= DP_PEER_ID_MASK;
2010 		peer_id = dp_get_link_peer_id_by_lmac_id(soc, peer_id,
2011 							 pdev->lmac_id);
2012 		ppdu_info->rx_user_status[i].sw_peer_id = peer_id;
2013 	}
2014 }
#else
/* No-op stub when WLAN_FEATURE_11BE_MLO is disabled. */
static inline
void dp_rx_mon_update_peer_id(struct dp_pdev *pdev,
			      struct hal_rx_ppdu_info *ppdu_info)
{
}
#endif
2022 
2023 /*
2024  * HAL_RX_PKT_TYPE_11A     0 -> CDP_PKT_TYPE_OFDM
2025  * HAL_RX_PKT_TYPE_11B     1 -> CDP_PKT_TYPE_CCK
2026  * HAL_RX_PKT_TYPE_11N     2 -> CDP_PKT_TYPE_HT
2027  * HAL_RX_PKT_TYPE_11AC    3 -> CDP_PKT_TYPE_VHT
2028  * HAL_RX_PKT_TYPE_11AX    4 -> CDP_PKT_TYPE_HE
2029  * HAL_RX_PKT_TYPE_11BE    6 -> CDP_PKT_TYPE_EHT
2030  */
2031 
/* Indexed by the HAL packet-type value (see the mapping comment above). */
static uint32_t const cdp_preamble_type_map[] = {
	CDP_PKT_TYPE_OFDM,	/* HAL_RX_PKT_TYPE_11A  (0) */
	CDP_PKT_TYPE_CCK,	/* HAL_RX_PKT_TYPE_11B  (1) */
	CDP_PKT_TYPE_HT,	/* HAL_RX_PKT_TYPE_11N  (2) */
	CDP_PKT_TYPE_VHT,	/* HAL_RX_PKT_TYPE_11AC (3) */
	CDP_PKT_TYPE_HE,	/* HAL_RX_PKT_TYPE_11AX (4) */
	CDP_PKT_TYPE_NO_SUP,	/* 5: no HAL type mapped per the table above */
#ifdef WLAN_FEATURE_11BE
	CDP_PKT_TYPE_EHT,	/* HAL_RX_PKT_TYPE_11BE (6) */
#endif
	CDP_PKT_TYPE_MAX,	/* trailing sentinel entry */
};
2044 
2045 /*
2046  * HAL_RX_RECEPTION_TYPE_SU       -> CDP_RX_TYPE_SU
2047  * HAL_RX_RECEPTION_TYPE_MU_MIMO  -> CDP_RX_TYPE_MU_MIMO
2048  * HAL_RX_RECEPTION_TYPE_OFDMA    -> CDP_RX_TYPE_MU_OFDMA
2049  * HAL_RX_RECEPTION_TYPE_MU_OFDMA -> CDP_RX_TYPE_MU_OFDMA_MIMO
2050  */
/* Indexed by HAL_RX_RECEPTION_TYPE_* (see the mapping comment above). */
static uint32_t const cdp_reception_type_map[] = {
	CDP_RX_TYPE_SU,
	CDP_RX_TYPE_MU_MIMO,
	CDP_RX_TYPE_MU_OFDMA,
	CDP_RX_TYPE_MU_OFDMA_MIMO,
};
2057 
/* Indexed by the HAL MU DL/UL flag (0 = downlink, 1 = uplink). */
static uint32_t const cdp_mu_dl_up_map[] = {
	CDP_MU_TYPE_DL,
	CDP_MU_TYPE_UL,
};
2062 
2063 static inline void
2064 dp_rx_mu_stats_update(
2065 	struct hal_rx_ppdu_info *ppdu_info,
2066 	struct cdp_pdev_mon_stats *rx_mon_sts,
2067 	uint32_t preamble_type,
2068 	uint32_t  recept_type,
2069 	uint32_t  mu_dl_ul,
2070 	uint32_t i
2071 )
2072 {
2073 	struct mon_rx_user_status *rx_user_status;
2074 
2075 	rx_user_status =  &ppdu_info->rx_user_status[i];
2076 	rx_mon_sts->mpdu_cnt_fcs_ok[preamble_type][recept_type][mu_dl_ul][i]
2077 			+= rx_user_status->mpdu_cnt_fcs_ok;
2078 	rx_mon_sts->mpdu_cnt_fcs_err[preamble_type][recept_type][mu_dl_ul][i]
2079 			+= rx_user_status->mpdu_cnt_fcs_err;
2080 }
2081 
/* Bump the EHT PPDU counter for this (ppdu type/comp mode, UL/DL) pair. */
static inline void
dp_rx_he_ppdu_stats_update(
	struct cdp_pdev_mon_stats *stats,
	struct hal_rx_u_sig_info *u_sig
)
{
	/* Both indices are range-checked by the caller (dp_rx_he_ppdu_stats) */
	stats->ppdu_eht_type_mode[u_sig->ppdu_type_comp_mode][u_sig->ul_dl]++;
}
2090 
2091 static inline void
2092 dp_rx_he_ppdu_stats(struct dp_pdev *pdev, struct hal_rx_ppdu_info *ppdu_info)
2093 {
2094 	struct dp_mon_pdev *mon_pdev;
2095 	struct cdp_pdev_mon_stats *rx_mon_stats;
2096 
2097 	mon_pdev = pdev->monitor_pdev;
2098 	rx_mon_stats = &mon_pdev->rx_mon_stats;
2099 
2100 	if (ppdu_info->u_sig_info.ppdu_type_comp_mode < CDP_EHT_TYPE_MODE_MAX &&
2101 	    ppdu_info->u_sig_info.ul_dl < CDP_MU_TYPE_MAX)
2102 		dp_rx_he_ppdu_stats_update(
2103 			rx_mon_stats,
2104 			&ppdu_info->u_sig_info);
2105 		else
2106 			qdf_assert(0);
2107 }
2108 
2109 static inline void
2110 dp_rx_mu_stats(struct dp_pdev *pdev, struct hal_rx_ppdu_info *ppdu_info)
2111 {
2112 	struct dp_mon_pdev *mon_pdev;
2113 	struct cdp_pdev_mon_stats *rx_mon_stats;
2114 	struct mon_rx_status *rx_status;
2115 	uint32_t preamble_type, reception_type, mu_dl_ul, num_users, i;
2116 
2117 	mon_pdev = pdev->monitor_pdev;
2118 	rx_mon_stats = &mon_pdev->rx_mon_stats;
2119 	rx_status = &ppdu_info->rx_status;
2120 
2121 	num_users = ppdu_info->com_info.num_users;
2122 
2123 	if (rx_status->preamble_type < CDP_PKT_TYPE_MAX)
2124 		preamble_type = cdp_preamble_type_map[rx_status->preamble_type];
2125 	else
2126 		preamble_type = CDP_PKT_TYPE_NO_SUP;
2127 
2128 	reception_type = cdp_reception_type_map[rx_status->reception_type];
2129 	mu_dl_ul = cdp_mu_dl_up_map[rx_status->mu_dl_ul];
2130 
2131 	for (i = 0; i < num_users; i++) {
2132 		if (i >= CDP_MU_SNIF_USER_MAX)
2133 			return;
2134 
2135 		dp_rx_mu_stats_update(ppdu_info, rx_mon_stats, preamble_type,
2136 				      reception_type, mu_dl_ul, i);
2137 	}
2138 
2139 	if (rx_status->eht_flags)
2140 		dp_rx_he_ppdu_stats(pdev, ppdu_info);
2141 }
2142 
2143 static inline uint32_t
2144 dp_rx_mon_srng_process_2_0(struct dp_soc *soc, struct dp_intr *int_ctx,
2145 			   uint32_t mac_id, uint32_t quota)
2146 {
2147 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2148 	struct dp_mon_pdev *mon_pdev;
2149 	struct dp_mon_pdev_be *mon_pdev_be;
2150 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
2151 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
2152 	struct dp_mon_desc_pool *rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
2153 	hal_soc_handle_t hal_soc = soc->hal_soc;
2154 	void *rx_mon_dst_ring_desc;
2155 	void *mon_dst_srng;
2156 	uint32_t work_done = 0;
2157 	struct hal_rx_ppdu_info *ppdu_info = NULL;
2158 	QDF_STATUS status;
2159 	if (!pdev) {
2160 		dp_mon_err("%pK: pdev is null for mac_id = %d", soc, mac_id);
2161 		return work_done;
2162 	}
2163 
2164 	mon_pdev = pdev->monitor_pdev;
2165 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
2166 	mon_dst_srng = soc->rxdma_mon_dst_ring[mac_id].hal_srng;
2167 
2168 	if (!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)) {
2169 		dp_mon_err("%pK: : HAL Monitor Destination Ring Init Failed -- %pK",
2170 			   soc, mon_dst_srng);
2171 		return work_done;
2172 	}
2173 
2174 	hal_soc = soc->hal_soc;
2175 
2176 	qdf_assert((hal_soc && pdev));
2177 
2178 	qdf_spin_lock_bh(&mon_pdev->mon_lock);
2179 
2180 	if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, mon_dst_srng))) {
2181 		dp_mon_err("%s %d : HAL Mon Dest Ring access Failed -- %pK",
2182 			   __func__, __LINE__, mon_dst_srng);
2183 		qdf_spin_unlock_bh(&mon_pdev->mon_lock);
2184 		return work_done;
2185 	}
2186 
2187 	while (qdf_likely((rx_mon_dst_ring_desc =
2188 			  (void *)hal_srng_dst_peek(hal_soc, mon_dst_srng))
2189 				&& quota--)) {
2190 		struct hal_mon_desc hal_mon_rx_desc = {0};
2191 		struct dp_mon_desc *mon_desc;
2192 		hal_be_get_mon_dest_status(soc->hal_soc,
2193 					   rx_mon_dst_ring_desc,
2194 					   &hal_mon_rx_desc);
2195 		/* If it's empty descriptor, skip processing
2196 		 * and process next hW desc
2197 		 */
2198 		if (hal_mon_rx_desc.empty_descriptor == 1) {
2199 			dp_mon_debug("empty descriptor found mon_pdev: %pK",
2200 				     mon_pdev);
2201 			rx_mon_dst_ring_desc =
2202 				hal_srng_dst_get_next(hal_soc, mon_dst_srng);
2203 			dp_rx_mon_update_drop_cnt(mon_pdev, &hal_mon_rx_desc);
2204 			continue;
2205 		}
2206 		mon_desc = (struct dp_mon_desc *)(uintptr_t)(hal_mon_rx_desc.buf_addr);
2207 		qdf_assert_always(mon_desc);
2208 
2209 		if ((mon_desc == mon_pdev_be->prev_rxmon_desc) &&
2210 		    (mon_desc->cookie == mon_pdev_be->prev_rxmon_cookie)) {
2211 			dp_mon_err("duplicate descritout found mon_pdev: %pK mon_desc: %pK cookie: %d",
2212 				   mon_pdev, mon_desc, mon_desc->cookie);
2213 			mon_pdev->rx_mon_stats.dup_mon_buf_cnt++;
2214 			hal_srng_dst_get_next(hal_soc, mon_dst_srng);
2215 			continue;
2216 		}
2217 		mon_pdev_be->prev_rxmon_desc = mon_desc;
2218 		mon_pdev_be->prev_rxmon_cookie = mon_desc->cookie;
2219 
2220 		if (!mon_desc->unmapped) {
2221 			qdf_mem_unmap_page(soc->osdev, mon_desc->paddr,
2222 					   rx_mon_desc_pool->buf_size,
2223 					   QDF_DMA_FROM_DEVICE);
2224 			mon_desc->unmapped = 1;
2225 		}
2226 		mon_desc->end_offset = hal_mon_rx_desc.end_offset;
2227 
2228 		/* Flush and truncated status buffers content
2229 		 * need to discarded
2230 		 */
2231 		if (hal_mon_rx_desc.end_reason == HAL_MON_FLUSH_DETECTED ||
2232 		    hal_mon_rx_desc.end_reason == HAL_MON_PPDU_TRUNCATED) {
2233 			dp_mon_debug("end_resaon: %d mon_pdev: %pK",
2234 				     hal_mon_rx_desc.end_reason, mon_pdev);
2235 			mon_pdev->rx_mon_stats.status_ppdu_drop++;
2236 			dp_rx_mon_handle_flush_n_trucated_ppdu(soc,
2237 							       pdev,
2238 							       mon_desc);
2239 			rx_mon_dst_ring_desc = hal_srng_dst_get_next(hal_soc,
2240 							mon_dst_srng);
2241 			continue;
2242 		}
2243 		if (mon_pdev_be->desc_count >= DP_MON_MAX_STATUS_BUF)
2244 			qdf_assert_always(0);
2245 
2246 		mon_pdev_be->status[mon_pdev_be->desc_count++] = mon_desc;
2247 
2248 		rx_mon_dst_ring_desc = hal_srng_dst_get_next(hal_soc, mon_dst_srng);
2249 
2250 		dp_rx_process_pktlog_be(soc, pdev, ppdu_info,
2251 					mon_desc->buf_addr,
2252 					hal_mon_rx_desc.end_offset);
2253 
2254 		if (hal_mon_rx_desc.end_reason == HAL_MON_STATUS_BUFFER_FULL)
2255 			continue;
2256 
2257 		mon_pdev->rx_mon_stats.status_ppdu_done++;
2258 
2259 		ppdu_info = dp_rx_mon_process_status_tlv(pdev);
2260 
2261 		if (ppdu_info) {
2262 			mon_pdev->rx_mon_stats.start_user_info_cnt +=
2263 				ppdu_info->start_user_info_cnt;
2264 			ppdu_info->start_user_info_cnt = 0;
2265 
2266 			mon_pdev->rx_mon_stats.end_user_stats_cnt +=
2267 				ppdu_info->end_user_stats_cnt;
2268 			ppdu_info->end_user_stats_cnt = 0;
2269 
2270 			dp_rx_mon_update_peer_id(pdev, ppdu_info);
2271 			dp_rx_mu_stats(pdev, ppdu_info);
2272 		}
2273 
2274 		/* Call enhanced stats update API */
2275 		if (mon_pdev->enhanced_stats_en && ppdu_info)
2276 			dp_rx_handle_ppdu_stats(soc, pdev, ppdu_info);
2277 		else if (dp_cfr_rcc_mode_status(pdev) && ppdu_info)
2278 			dp_rx_handle_cfr(soc, pdev, ppdu_info);
2279 
2280 		dp_rx_mon_update_user_ctrl_frame_stats(pdev, ppdu_info);
2281 
2282 		status = dp_rx_mon_add_ppdu_info_to_wq(pdev, ppdu_info);
2283 		if (status != QDF_STATUS_SUCCESS) {
2284 			if (ppdu_info)
2285 				dp_rx_mon_free_ppdu_info(pdev, ppdu_info);
2286 		}
2287 
2288 		work_done++;
2289 
2290 		/* desc_count should be zero  after PPDU status processing */
2291 		if (mon_pdev_be->desc_count > 0)
2292 			qdf_assert_always(0);
2293 
2294 		mon_pdev_be->desc_count = 0;
2295 	}
2296 	dp_rx_srng_access_end(int_ctx, soc, mon_dst_srng);
2297 
2298 	qdf_spin_unlock_bh(&mon_pdev->mon_lock);
2299 	dp_mon_info("mac_id: %d, work_done:%d", mac_id, work_done);
2300 	return work_done;
2301 }
2302 
2303 uint32_t
2304 dp_rx_mon_process_2_0(struct dp_soc *soc, struct dp_intr *int_ctx,
2305 		      uint32_t mac_id, uint32_t quota)
2306 {
2307 	uint32_t work_done;
2308 
2309 	work_done = dp_rx_mon_srng_process_2_0(soc, int_ctx, mac_id, quota);
2310 
2311 	return work_done;
2312 }
2313 
2314 #ifdef QCA_KMEM_CACHE_SUPPORT
2315 QDF_STATUS dp_rx_mon_ppdu_info_cache_create(struct dp_pdev *pdev)
2316 {
2317 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
2318 	struct dp_mon_pdev_be *mon_pdev_be =
2319 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
2320 	uint16_t obj;
2321 	struct hal_rx_ppdu_info *ppdu_info = NULL;
2322 
2323 	mon_pdev_be->ppdu_info_cache =
2324 		qdf_kmem_cache_create("rx_mon_ppdu_info_cache",
2325 				      sizeof(struct hal_rx_ppdu_info));
2326 
2327 	if (!mon_pdev_be->ppdu_info_cache) {
2328 		dp_mon_err("cache creation failed pdev :%px", pdev);
2329 		return QDF_STATUS_E_NOMEM;
2330 	}
2331 
2332 	TAILQ_INIT(&mon_pdev_be->rx_mon_free_queue);
2333 	for (obj = 0; obj < DP_RX_MON_WQ_THRESHOLD; obj++) {
2334 		ppdu_info =  (struct hal_rx_ppdu_info *)qdf_kmem_cache_alloc(mon_pdev_be->ppdu_info_cache);
2335 
2336 		if (ppdu_info) {
2337 			TAILQ_INSERT_TAIL(&mon_pdev_be->rx_mon_free_queue,
2338 					  ppdu_info,
2339 					  ppdu_free_list_elem);
2340 			mon_pdev_be->total_free_elem++;
2341 		}
2342 	}
2343 	qdf_spinlock_create(&mon_pdev_be->ppdu_info_lock);
2344 
2345 	return QDF_STATUS_SUCCESS;
2346 }
2347 
2348 void dp_rx_mon_ppdu_info_cache_destroy(struct dp_pdev *pdev)
2349 {
2350 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
2351 	struct dp_mon_pdev_be *mon_pdev_be =
2352 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
2353 	struct hal_rx_ppdu_info *ppdu_info = NULL, *temp_ppdu_info = NULL;
2354 
2355 	qdf_spin_lock(&mon_pdev_be->ppdu_info_lock);
2356 	TAILQ_FOREACH_SAFE(ppdu_info,
2357 			   &mon_pdev_be->rx_mon_free_queue,
2358 			   ppdu_free_list_elem,
2359 			   temp_ppdu_info) {
2360 		TAILQ_REMOVE(&mon_pdev_be->rx_mon_free_queue,
2361 			     ppdu_info, ppdu_free_list_elem);
2362 		if (ppdu_info) {
2363 			mon_pdev_be->total_free_elem--;
2364 			qdf_kmem_cache_free(mon_pdev_be->ppdu_info_cache,
2365 					    ppdu_info);
2366 		}
2367 	}
2368 	qdf_spin_unlock(&mon_pdev_be->ppdu_info_lock);
2369 	dp_mon_debug(" total free element: %d", mon_pdev_be->total_free_elem);
2370 	qdf_kmem_cache_destroy(mon_pdev_be->ppdu_info_cache);
2371 }
2372 #endif
2373 
2374 /**
2375  * dp_mon_pdev_ext_init_2_0() - Init pdev ext param
2376  *
2377  * @pdev: DP pdev handle
2378  *
2379  * Return:  QDF_STATUS_SUCCESS: Success
2380  *          QDF_STATUS_E_FAILURE: failure
2381  */
2382 QDF_STATUS dp_mon_pdev_ext_init_2_0(struct dp_pdev *pdev)
2383 {
2384 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
2385 	struct dp_mon_pdev_be *mon_pdev_be =
2386 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
2387 
2388 	qdf_create_work(0, &mon_pdev_be->rx_mon_work,
2389 			dp_rx_mon_process_ppdu, pdev);
2390 	mon_pdev_be->rx_mon_workqueue =
2391 		qdf_alloc_unbound_workqueue("rx_mon_work_queue");
2392 
2393 	if (!mon_pdev_be->rx_mon_workqueue) {
2394 		dp_mon_err("failed to create rxmon wq mon_pdev: %pK", mon_pdev);
2395 		goto fail;
2396 	}
2397 	TAILQ_INIT(&mon_pdev_be->rx_mon_queue);
2398 
2399 	qdf_spinlock_create(&mon_pdev_be->rx_mon_wq_lock);
2400 
2401 	return QDF_STATUS_SUCCESS;
2402 
2403 fail:
2404 	return QDF_STATUS_E_FAILURE;
2405 }
2406 
2407 /**
2408  * dp_mon_pdev_ext_deinit_2_0() - denit pdev ext param
2409  *
2410  * @pdev: DP pdev handle
2411  *
2412  * Return: QDF_STATUS_SUCCESS
2413  */
QDF_STATUS dp_mon_pdev_ext_deinit_2_0(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);

	/* Nothing to tear down if the workqueue was never created */
	if (!mon_pdev_be->rx_mon_workqueue)
		return QDF_STATUS_E_FAILURE;

	qdf_err(" total free element: %d", mon_pdev_be->total_free_elem);
	/* Flush/destroy the workqueue, then quiesce the work item itself */
	qdf_flush_workqueue(0, mon_pdev_be->rx_mon_workqueue);
	qdf_destroy_workqueue(0, mon_pdev_be->rx_mon_workqueue);
	qdf_flush_work(&mon_pdev_be->rx_mon_work);
	qdf_disable_work(&mon_pdev_be->rx_mon_work);
	/* Drop any ppdu_info still queued for deferred processing */
	dp_rx_mon_drain_wq(pdev);
	mon_pdev_be->rx_mon_workqueue = NULL;
	qdf_spinlock_destroy(&mon_pdev_be->rx_mon_wq_lock);

	return QDF_STATUS_SUCCESS;
}
2434 
2435 #ifdef QCA_ENHANCED_STATS_SUPPORT
/* Copy the per-user MPDU retry count from monitor status to the cdp user. */
void
dp_rx_mon_populate_ppdu_usr_info_2_0(struct mon_rx_user_status *rx_user_status,
				     struct cdp_rx_stats_ppdu_user *ppdu_user)
{
	ppdu_user->mpdu_retries = rx_user_status->retry_mpdu;
}
2442 
2443 #ifdef WLAN_FEATURE_11BE
/**
 * dp_rx_mon_stats_update_2_0() - update per-peer rx stats for one PPDU user
 * @mon_peer: monitor peer the stats are accounted against
 * @ppdu: cdp rx PPDU indication (common fields)
 * @ppdu_user: per-user portion of the PPDU indication
 */
void dp_rx_mon_stats_update_2_0(struct dp_mon_peer *mon_peer,
				struct cdp_rx_indication_ppdu *ppdu,
				struct cdp_rx_stats_ppdu_user *ppdu_user)
{
	uint8_t mcs, preamble, ppdu_type, punc_mode, res_mcs;
	uint32_t num_msdu;

	preamble = ppdu->u.preamble;
	ppdu_type = ppdu->u.ppdu_type;
	num_msdu = ppdu_user->num_msdu;
	punc_mode = ppdu->punc_bw;

	/* SU: MCS comes from the common part; MU: per-user MCS */
	if (ppdu_type == HAL_RX_TYPE_SU)
		mcs = ppdu->u.mcs;
	else
		mcs = ppdu_user->mcs;

	DP_STATS_INC(mon_peer, rx.mpdu_retry_cnt, ppdu_user->mpdu_retries);
	DP_STATS_INC(mon_peer, rx.punc_bw[punc_mode], num_msdu);

	if (preamble == DOT11_BE) {
		/* NOTE(review): the range test uses MAX_MCS_11BE but the
		 * out-of-range bucket is MAX_MCS - 1 — confirm this
		 * asymmetry is intentional
		 */
		res_mcs = (mcs < MAX_MCS_11BE) ? mcs : (MAX_MCS - 1);

		DP_STATS_INC(mon_peer,
			     rx.pkt_type[preamble].mcs_count[res_mcs], num_msdu);
		DP_STATS_INCC(mon_peer,
			      rx.su_be_ppdu_cnt.mcs_count[res_mcs], 1,
			      (ppdu_type == HAL_RX_TYPE_SU));
		DP_STATS_INCC(mon_peer,
			      rx.mu_be_ppdu_cnt[TXRX_TYPE_MU_OFDMA].mcs_count[res_mcs],
			      1, (ppdu_type == HAL_RX_TYPE_MU_OFDMA));
		DP_STATS_INCC(mon_peer,
			      rx.mu_be_ppdu_cnt[TXRX_TYPE_MU_MIMO].mcs_count[res_mcs],
			      1, (ppdu_type == HAL_RX_TYPE_MU_MIMO));
	}
}
2480 
2481 void
2482 dp_rx_mon_populate_ppdu_info_2_0(struct hal_rx_ppdu_info *hal_ppdu_info,
2483 				 struct cdp_rx_indication_ppdu *ppdu)
2484 {
2485 	uint16_t puncture_pattern;
2486 	enum cdp_punctured_modes punc_mode;
2487 
2488 	/* Align bw value as per host data structures */
2489 	if (hal_ppdu_info->rx_status.bw == HAL_FULL_RX_BW_320)
2490 		ppdu->u.bw = CMN_BW_320MHZ;
2491 	else
2492 		ppdu->u.bw = hal_ppdu_info->rx_status.bw;
2493 	if (hal_ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11BE) {
2494 		/* Align preamble value as per host data structures */
2495 		ppdu->u.preamble = DOT11_BE;
2496 		ppdu->u.stbc = hal_ppdu_info->rx_status.is_stbc;
2497 		ppdu->u.dcm = hal_ppdu_info->rx_status.dcm;
2498 	} else {
2499 		ppdu->u.preamble = hal_ppdu_info->rx_status.preamble_type;
2500 	}
2501 
2502 	puncture_pattern = hal_ppdu_info->rx_status.punctured_pattern;
2503 	punc_mode = dp_mon_get_puncture_type(puncture_pattern,
2504 					     ppdu->u.bw);
2505 	ppdu->punc_bw = punc_mode;
2506 }
2507 #else
/* Non-11BE build: only the MPDU retry count is accounted per peer. */
void dp_rx_mon_stats_update_2_0(struct dp_mon_peer *mon_peer,
				struct cdp_rx_indication_ppdu *ppdu,
				struct cdp_rx_stats_ppdu_user *ppdu_user)
{
	DP_STATS_INC(mon_peer, rx.mpdu_retry_cnt, ppdu_user->mpdu_retries);
}
2514 
/* Non-11BE build: preamble puncturing does not apply, report NO_PUNCTURE. */
void
dp_rx_mon_populate_ppdu_info_2_0(struct hal_rx_ppdu_info *hal_ppdu_info,
				 struct cdp_rx_indication_ppdu *ppdu)
{
	ppdu->punc_bw = NO_PUNCTURE;
}
2521 #endif
/**
 * dp_mon_rx_print_advanced_stats_2_0() - dump RxMON 2.0 debug counters
 * @soc: DP soc handle
 * @pdev: DP pdev whose monitor counters are printed
 */
void dp_mon_rx_print_advanced_stats_2_0(struct dp_soc *soc,
					struct dp_pdev *pdev)
{
	struct cdp_pdev_mon_stats *rx_mon_stats;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc;
	struct dp_mon_pdev_be *mon_pdev_be =
				dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);

	rx_mon_stats = &mon_pdev->rx_mon_stats;

	DP_PRINT_STATS("total_ppdu_info_alloc = %d",
		       rx_mon_stats->total_ppdu_info_alloc);
	DP_PRINT_STATS("total_ppdu_info_free = %d",
		       rx_mon_stats->total_ppdu_info_free);
	DP_PRINT_STATS("total_ppdu_info_enq = %d",
		       rx_mon_stats->total_ppdu_info_enq);
	DP_PRINT_STATS("total_ppdu_info_drop = %d",
		       rx_mon_stats->total_ppdu_info_drop);
	DP_PRINT_STATS("rx_hdr_not_received = %d",
		       rx_mon_stats->rx_hdr_not_received);
	DP_PRINT_STATS("parent_buf_alloc = %d",
		       rx_mon_stats->parent_buf_alloc);
	DP_PRINT_STATS("parent_buf_free = %d",
		       rx_mon_stats->parent_buf_free);
	DP_PRINT_STATS("mpdus_buf_to_stack = %d",
		       rx_mon_stats->mpdus_buf_to_stack);
	DP_PRINT_STATS("frag_alloc = %d",
		       mon_soc->stats.frag_alloc);
	DP_PRINT_STATS("total frag_free = %d",
		       mon_soc->stats.frag_free);
	DP_PRINT_STATS("frag_free due to empty queue= %d",
		       mon_soc->stats.empty_queue);
	DP_PRINT_STATS("status_buf_count = %d",
		       rx_mon_stats->status_buf_count);
	DP_PRINT_STATS("pkt_buf_count = %d",
		       rx_mon_stats->pkt_buf_count);
	DP_PRINT_STATS("rx_mon_queue_depth= %d",
		       mon_pdev_be->rx_mon_queue_depth);
	DP_PRINT_STATS("empty_desc= %d",
		       mon_pdev->rx_mon_stats.empty_desc_ppdu);
	DP_PRINT_STATS("mpdu_dropped_due_invalid_decap= %d",
		       mon_pdev->rx_mon_stats.mpdu_decap_type_invalid);
	DP_PRINT_STATS("total_free_elem= %d",
		       mon_pdev_be->total_free_elem);
	DP_PRINT_STATS("ppdu_drop_cnt= %d",
		       mon_pdev->rx_mon_stats.ppdu_drop_cnt);
	DP_PRINT_STATS("mpdu_drop_cnt= %d",
		       mon_pdev->rx_mon_stats.mpdu_drop_cnt);
	DP_PRINT_STATS("end_of_ppdu_drop_cnt= %d",
		       mon_pdev->rx_mon_stats.end_of_ppdu_drop_cnt);
	DP_PRINT_STATS("tlv_drop_cnt= %d",
		       mon_pdev->rx_mon_stats.tlv_drop_cnt);
	DP_PRINT_STATS("rx_hdr_invalid_cnt = %d",
		       rx_mon_stats->rx_hdr_invalid_cnt);
	DP_PRINT_STATS("invalid_dma_length Received = %d",
		       rx_mon_stats->invalid_dma_length);
}
2580 #endif
2581 
2582 #ifdef WLAN_PKT_CAPTURE_RX_2_0
2583 void
2584 dp_rx_mon_buf_desc_pool_deinit(struct dp_soc *soc)
2585 {
2586 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
2587 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
2588 
2589 	/* Drain page frag cachce before pool deinit */
2590 	qdf_frag_cache_drain(&mon_soc_be->rx_desc_mon.pf_cache);
2591 	dp_mon_desc_pool_deinit(&mon_soc_be->rx_desc_mon);
2592 }
2593 
2594 QDF_STATUS
2595 dp_rx_mon_buf_desc_pool_init(struct dp_soc *soc)
2596 {
2597 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
2598 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
2599 	uint32_t num_entries;
2600 
2601 	num_entries =
2602 		wlan_cfg_get_dp_soc_rx_mon_buf_ring_size(soc->wlan_cfg_ctx);
2603 	return dp_mon_desc_pool_init(&mon_soc_be->rx_desc_mon, num_entries);
2604 }
2605 
2606 void dp_rx_mon_buf_desc_pool_free(struct dp_soc *soc)
2607 {
2608 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
2609 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
2610 
2611 	if (mon_soc)
2612 		dp_mon_desc_pool_free(soc, &mon_soc_be->rx_desc_mon,
2613 				      DP_MON_RX_DESC_POOL_TYPE);
2614 }
2615 
/* Free the rx mon desc pool and the per-lmac monitor buffer ring memory. */
void dp_rx_mon_soc_detach_2_0(struct dp_soc *soc, int lmac_id)
{
	dp_rx_mon_buf_desc_pool_free(soc);
	dp_srng_free(soc, &soc->rxdma_mon_buf_ring[lmac_id]);
}
2621 
/* Teardown counterpart of dp_rx_mon_soc_init_2_0(): release buffers,
 * deinit the desc pool, then deinit the monitor buffer ring.
 */
void dp_rx_mon_soc_deinit_2_0(struct dp_soc *soc, uint32_t lmac_id)
{
	dp_rx_mon_buffers_free(soc);
	dp_rx_mon_buf_desc_pool_deinit(soc);
	dp_srng_deinit(soc, &soc->rxdma_mon_buf_ring[lmac_id],
			RXDMA_MONITOR_BUF, 0);
}
2629 
2630 QDF_STATUS
2631 dp_rx_mon_buf_desc_pool_alloc(struct dp_soc *soc)
2632 {
2633 	struct dp_srng *mon_buf_ring;
2634 	struct dp_mon_desc_pool *rx_mon_desc_pool;
2635 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
2636 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
2637 	int entries;
2638 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2639 
2640 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2641 
2642 	entries = wlan_cfg_get_dp_soc_rx_mon_buf_ring_size(soc_cfg_ctx);
2643 	mon_buf_ring = &soc->rxdma_mon_buf_ring[0];
2644 
2645 	rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
2646 
2647 	qdf_print("%s:%d rx mon buf desc pool entries: %d", __func__, __LINE__, entries);
2648 	return dp_mon_desc_pool_alloc(soc, DP_MON_RX_DESC_POOL_TYPE,
2649 				      entries, rx_mon_desc_pool);
2650 }
2651 
2652 void
2653 dp_rx_mon_buffers_free(struct dp_soc *soc)
2654 {
2655 	struct dp_mon_desc_pool *rx_mon_desc_pool;
2656 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
2657 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
2658 
2659 	rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
2660 
2661 	dp_mon_pool_frag_unmap_and_free(soc, rx_mon_desc_pool);
2662 }
2663 
2664 QDF_STATUS
2665 dp_rx_mon_buffers_alloc(struct dp_soc *soc, uint32_t size)
2666 {
2667 	struct dp_srng *mon_buf_ring;
2668 	struct dp_mon_desc_pool *rx_mon_desc_pool;
2669 	union dp_mon_desc_list_elem_t *desc_list = NULL;
2670 	union dp_mon_desc_list_elem_t *tail = NULL;
2671 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
2672 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
2673 
2674 	mon_buf_ring = &soc->rxdma_mon_buf_ring[0];
2675 
2676 	rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
2677 
2678 	return dp_mon_buffers_replenish(soc, mon_buf_ring,
2679 					rx_mon_desc_pool,
2680 					size,
2681 					&desc_list, &tail, NULL);
2682 }
2683 
/**
 * dp_rx_mon_soc_init_2_0() - init mon buf ring, desc pool and buffers
 * @soc: DP soc handle
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 *
 * NOTE(review): on a partial failure the already-initialized ring/pool
 * are not unwound here — presumably the caller deinitializes on error;
 * verify against the attach/detach path.
 */
QDF_STATUS dp_rx_mon_soc_init_2_0(struct dp_soc *soc)
{
	if (dp_srng_init(soc, &soc->rxdma_mon_buf_ring[0],
			 RXDMA_MONITOR_BUF, 0, 0)) {
		dp_mon_err("%pK: " RNG_ERR "rx_mon_buf_ring", soc);
		goto fail;
	}

	if (dp_rx_mon_buf_desc_pool_init(soc)) {
		dp_mon_err("%pK: " RNG_ERR "rx mon desc pool init", soc);
		goto fail;
	}

	/* monitor buffers for src */
	if (dp_rx_mon_buffers_alloc(soc, DP_MON_RING_FILL_LEVEL_DEFAULT)) {
		dp_mon_err("%pK: Rx mon buffers allocation failed", soc);
		goto fail;
	}

	return QDF_STATUS_SUCCESS;
fail:
	return QDF_STATUS_E_FAILURE;
}
2707 
/**
 * dp_rx_mon_pdev_htt_srng_setup_2_0() - program the mon dest ring via HTT
 * @soc: DP soc handle
 * @pdev: DP pdev handle (unused here; kept for the ops signature)
 * @mac_id: index into rxdma_mon_dst_ring
 * @mac_for_pdev: target mac id passed in the HTT setup message
 *
 * Return: status of htt_srng_setup()
 */
QDF_STATUS dp_rx_mon_pdev_htt_srng_setup_2_0(struct dp_soc *soc,
					    struct dp_pdev *pdev,
					    int mac_id,
					    int mac_for_pdev)
{
	return htt_srng_setup(soc->htt_handle, mac_for_pdev,
			      soc->rxdma_mon_dst_ring[mac_id].hal_srng,
			      RXDMA_MONITOR_DST);
}
2717 
/**
 * dp_rx_mon_soc_htt_srng_setup_2_0() - program the mon buf ring via HTT
 * @soc: DP soc handle
 * @mac_id: unused; ring index 0 is always programmed
 *
 * Return: status of htt_srng_setup()
 */
QDF_STATUS dp_rx_mon_soc_htt_srng_setup_2_0(struct dp_soc *soc,
					    int mac_id)
{
	/* Low threshold set to MON_BUF_MIN_ENTRIES * 4 — presumably to get
	 * a refill indication well before the ring starves; confirm
	 */
	hal_set_low_threshold(soc->rxdma_mon_buf_ring[0].hal_srng,
			      MON_BUF_MIN_ENTRIES << 2);
	return htt_srng_setup(soc->htt_handle, 0,
			soc->rxdma_mon_buf_ring[0].hal_srng,
			RXDMA_MONITOR_BUF);
}
2727 
2728 QDF_STATUS dp_rx_mon_pdev_rings_alloc_2_0(struct dp_pdev *pdev, int lmac_id)
2729 {
2730 	struct dp_soc *soc = pdev->soc;
2731 	int entries;
2732 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
2733 
2734 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
2735 	entries = wlan_cfg_get_dma_rx_mon_dest_ring_size(pdev_cfg_ctx);
2736 
2737 	return dp_srng_alloc(soc, &soc->rxdma_mon_dst_ring[lmac_id],
2738 				  RXDMA_MONITOR_DST, entries, 0);
2739 }
2740 
/* Free the monitor destination ring memory for this lmac. */
void dp_rx_mon_pdev_rings_free_2_0(struct dp_pdev *pdev, int lmac_id)
{
	struct dp_soc *soc = pdev->soc;

	dp_srng_free(soc, &soc->rxdma_mon_dst_ring[lmac_id]);
}
2747 
/* Initialize the monitor destination ring for this pdev/lmac. */
QDF_STATUS dp_rx_mon_pdev_rings_init_2_0(struct dp_pdev *pdev, int lmac_id)
{
	struct dp_soc *soc = pdev->soc;

	return dp_srng_init(soc, &soc->rxdma_mon_dst_ring[lmac_id],
				 RXDMA_MONITOR_DST, pdev->pdev_id, lmac_id);
}
2755 
/* Deinitialize the monitor destination ring for this pdev/lmac. */
void dp_rx_mon_pdev_rings_deinit_2_0(struct dp_pdev *pdev, int lmac_id)
{
	struct dp_soc *soc = pdev->soc;

	dp_srng_deinit(soc, &soc->rxdma_mon_dst_ring[lmac_id],
		       RXDMA_MONITOR_DST, pdev->pdev_id);
}
2763 
2764 QDF_STATUS dp_rx_mon_soc_attach_2_0(struct dp_soc *soc, int lmac_id)
2765 {
2766 	int entries;
2767 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
2768 
2769 	entries = wlan_cfg_get_dp_soc_rx_mon_buf_ring_size(soc_cfg_ctx);
2770 	qdf_print("%s:%d rx mon buf entries: %d", __func__, __LINE__, entries);
2771 
2772 	return dp_srng_alloc(soc, &soc->rxdma_mon_buf_ring[lmac_id],
2773 			  RXDMA_MONITOR_BUF, entries, 0);
2774 }
2775 
2776 #endif /* WLAN_PKT_CAPTURE_RX_2_0 */
2777