/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "qdf_types.h"
#include "hal_be_hw_headers.h"
#include "dp_types.h"
#include "hal_be_rx.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "hal_be_api_mon.h"
#include "dp_internal.h"
#include "qdf_mem.h"   /* qdf_mem_malloc,free */
#include <qdf_flex_mem.h>
#include "qdf_nbuf_frag.h"
#include "dp_mon.h"
#include <dp_rx_mon.h>
#include <dp_mon_2.0.h>
#include <dp_rx_mon_2.0.h>
#include <dp_rx.h>
#include <dp_be.h>
#ifdef QCA_SUPPORT_LITE_MONITOR
#include "dp_lite_mon.h"
#endif

#define F_MASK 0xFFFF
#define TEST_MASK 0xCBF
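
/*
 * F_MASK keeps the low 16 bits of the FSE metadata; it is used below to
 * derive the flow tag written into the buffer headroom. The meaning of
 * the remaining metadata bits (and of TEST_MASK) is not defined in this
 * file.
 */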

#ifdef MONITOR_TLV_RECORDING_ENABLE
/**
 * dp_mon_record_index_update() - update the indices of dp_mon_tlv_logger
 *                                to store the next TLV
 *
 * @mon_pdev_be: pointer to dp_mon_pdev_be
 *
 * Return: void
 */
void
dp_mon_record_index_update(struct dp_mon_pdev_be *mon_pdev_be)
{
	struct dp_mon_tlv_logger *tlv_log = NULL;

	if (!mon_pdev_be || !(mon_pdev_be->rx_tlv_log))
		return;

	tlv_log = mon_pdev_be->rx_tlv_log;
	if (!tlv_log->tlv_logging_enable || !(tlv_log->buff))
		return;

	if (tlv_log->curr_ppdu_pos + 1 == MAX_NUM_PPDU_RECORD)
		tlv_log->curr_ppdu_pos = 0;
	else
		tlv_log->curr_ppdu_pos++;

	tlv_log->wrap_flag = 0;
	tlv_log->ppdu_start_idx = tlv_log->curr_ppdu_pos *
		MAX_TLVS_PER_PPDU;
	tlv_log->mpdu_idx = tlv_log->ppdu_start_idx +
		MAX_PPDU_START_TLV_NUM;
	tlv_log->ppdu_end_idx = tlv_log->mpdu_idx + MAX_MPDU_TLV_NUM;
	tlv_log->max_ppdu_start_idx = tlv_log->ppdu_start_idx +
		MAX_PPDU_START_TLV_NUM - 1;
	tlv_log->max_mpdu_idx = tlv_log->mpdu_idx +
		MAX_MPDU_TLV_NUM - 1;
	tlv_log->max_ppdu_end_idx = tlv_log->ppdu_end_idx +
		MAX_PPDU_END_TLV_NUM - 1;
}
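
/*
 * Illustrative layout of one PPDU slot in the TLV log, derived from the
 * index arithmetic above (the names are the same macros used there):
 *
 *   base = curr_ppdu_pos * MAX_TLVS_PER_PPDU
 *
 *   [base .. base + MAX_PPDU_START_TLV_NUM - 1]   PPDU start TLVs
 *   [next .. next + MAX_MPDU_TLV_NUM - 1]         MPDU TLVs (circular;
 *                                                 wrap_flag toggles on wrap)
 *   [next .. next + MAX_PPDU_END_TLV_NUM - 1]     PPDU end TLVs
 */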

/**
 * dp_mon_record_tlv() - Store the contents of the TLV in the buffer
 *
 * @mon_pdev_be: pointer to dp_mon_pdev_be
 * @ppdu_info: struct hal_rx_ppdu_info
 *
 * Return: void
 */
void
dp_mon_record_tlv(struct dp_mon_pdev_be *mon_pdev_be,
		  struct hal_rx_ppdu_info *ppdu_info)
{
	struct dp_mon_tlv_logger *tlv_log = NULL;
	struct dp_mon_tlv_info *tlv_info = NULL;
	uint32_t tlv_tag;
	uint16_t *ppdu_start_idx = NULL;
	uint16_t *mpdu_idx = NULL;
	uint16_t *ppdu_end_idx = NULL;

	if (!mon_pdev_be || !(mon_pdev_be->rx_tlv_log))
		return;

	tlv_log = mon_pdev_be->rx_tlv_log;
	if (!tlv_log->tlv_logging_enable || !(tlv_log->buff))
		return;

	tlv_info = (struct dp_mon_tlv_info *)tlv_log->buff;
	ppdu_start_idx = &tlv_log->ppdu_start_idx;
	mpdu_idx = &tlv_log->mpdu_idx;
	ppdu_end_idx = &tlv_log->ppdu_end_idx;

	tlv_tag = ppdu_info->rx_tlv_info.tlv_tag;
	if (ppdu_info->rx_tlv_info.tlv_category == CATEGORY_PPDU_START) {
		tlv_info[*ppdu_start_idx].tlv_tag = tlv_tag;
		switch (tlv_tag) {
		case WIFIRX_PPDU_START_E:
			tlv_info[*ppdu_start_idx].
				data.ppdu_start.ppdu_id =
					ppdu_info->com_info.ppdu_id;
			break;
		case WIFIRX_PPDU_START_USER_INFO_E:
			tlv_info[*ppdu_start_idx].
				data.ppdu_start_user_info.user_id =
					ppdu_info->user_id;
			tlv_info[*ppdu_start_idx].
				data.ppdu_start_user_info.rate_mcs =
					ppdu_info->rx_status.mcs;
			tlv_info[*ppdu_start_idx].
				data.ppdu_start_user_info.nss =
					ppdu_info->rx_status.nss;
			tlv_info[*ppdu_start_idx].
				data.ppdu_start_user_info.reception_type =
					ppdu_info->rx_status.reception_type;
			tlv_info[*ppdu_start_idx].
				data.ppdu_start_user_info.sgi =
					ppdu_info->rx_status.sgi;
			break;
		}
		if (*ppdu_start_idx < tlv_log->max_ppdu_start_idx)
			(*ppdu_start_idx)++;
	} else if (ppdu_info->rx_tlv_info.tlv_category == CATEGORY_MPDU) {
		tlv_info[*mpdu_idx].tlv_tag = tlv_tag;
		switch (tlv_tag) {
		case WIFIRX_MPDU_START_E:
			tlv_info[*mpdu_idx].
				data.mpdu_start.user_id =
					ppdu_info->user_id;
			tlv_info[*mpdu_idx].
				data.mpdu_start.wrap_flag =
					tlv_log->wrap_flag;
			break;
		case WIFIRX_MPDU_END_E:
			tlv_info[*mpdu_idx].
				data.mpdu_end.user_id =
					ppdu_info->user_id;
			tlv_info[*mpdu_idx].
				data.mpdu_end.fcs_err =
					ppdu_info->fcs_err;
			tlv_info[*mpdu_idx].
				data.mpdu_end.wrap_flag =
					tlv_log->wrap_flag;
			break;
		case WIFIRX_HEADER_E:
			tlv_info[*mpdu_idx].
				data.header.wrap_flag =
					tlv_log->wrap_flag;
			break;
		case WIFIRX_MSDU_END_E:
			tlv_info[*mpdu_idx].
				data.msdu_end.user_id =
					ppdu_info->user_id;
			tlv_info[*mpdu_idx].
				data.msdu_end.wrap_flag =
					tlv_log->wrap_flag;
			break;
		case WIFIMON_BUFFER_ADDR_E:
			tlv_info[*mpdu_idx].
				data.mon_buffer_addr.dma_length =
					ppdu_info->packet_info.dma_length;
			tlv_info[*mpdu_idx].
				data.mon_buffer_addr.truncation =
					ppdu_info->packet_info.truncated;
			tlv_info[*mpdu_idx].
				data.mon_buffer_addr.continuation =
					ppdu_info->packet_info.msdu_continuation;
			tlv_info[*mpdu_idx].
				data.mon_buffer_addr.wrap_flag =
					tlv_log->wrap_flag;
			break;
		}
		if (*mpdu_idx < tlv_log->max_mpdu_idx) {
			(*mpdu_idx)++;
		} else {
			*mpdu_idx = *mpdu_idx - MAX_MPDU_TLV_NUM + 1;
			tlv_log->wrap_flag ^= 1;
		}
	} else if (ppdu_info->rx_tlv_info.tlv_category == CATEGORY_PPDU_END) {
		tlv_info[*ppdu_end_idx].tlv_tag = tlv_tag;
		switch (tlv_tag) {
		case WIFIRX_USER_PPDU_END_E:
			break;
		case WIFIRX_PPDU_END_E:
			break;
		case WIFIPHYRX_RSSI_LEGACY_E:
			break;
		case WIFIPHYRX_L_SIG_B_E:
			break;
		case WIFIPHYRX_COMMON_USER_INFO_E:
			break;
		case WIFIPHYRX_DATA_DONE_E:
			break;
		case WIFIPHYRX_PKT_END_PART1_E:
			break;
		case WIFIPHYRX_PKT_END_E:
			break;
		case WIFIRXPCU_PPDU_END_INFO_E:
			break;
		case WIFIRX_PPDU_END_USER_STATS_E:
			break;
		case WIFIRX_PPDU_END_STATUS_DONE_E:
			break;
		}
		if (*ppdu_end_idx < tlv_log->max_ppdu_end_idx)
			(*ppdu_end_idx)++;
	}
}

/**
 * dp_mon_record_clear_buffer() - Clear the buffer to record the next PPDU
 *
 * @mon_pdev_be: pointer to dp_mon_pdev_be
 *
 * Return: void
 */
void
dp_mon_record_clear_buffer(struct dp_mon_pdev_be *mon_pdev_be)
{
	struct dp_mon_tlv_logger *tlv_log = NULL;
	struct dp_mon_tlv_info *tlv_info = NULL;

	if (!mon_pdev_be || !(mon_pdev_be->rx_tlv_log))
		return;

	tlv_log = mon_pdev_be->rx_tlv_log;
	if (!tlv_log->tlv_logging_enable || !(tlv_log->buff))
		return;

	tlv_info = (struct dp_mon_tlv_info *)tlv_log->buff;
	qdf_mem_zero(&tlv_info[tlv_log->ppdu_start_idx],
		     MAX_TLVS_PER_PPDU * sizeof(struct dp_mon_tlv_info));
}
#else
void
dp_mon_record_index_update(struct dp_mon_pdev_be *mon_pdev_be)
{
}

void
dp_mon_record_tlv(struct dp_mon_pdev_be *mon_pdev_be,
		  struct hal_rx_ppdu_info *ppdu_info)
{
}

void
dp_mon_record_clear_buffer(struct dp_mon_pdev_be *mon_pdev_be)
{
}

#endif

/**
 * dp_rx_mon_update_drop_cnt() - Update drop statistics
 *
 * @mon_pdev: monitor pdev
 * @hal_mon_rx_desc: HAL monitor desc
 *
 * Return: void
 */
static inline void
dp_rx_mon_update_drop_cnt(struct dp_mon_pdev *mon_pdev,
			  struct hal_mon_desc *hal_mon_rx_desc)
{
	mon_pdev->rx_mon_stats.empty_desc_ppdu++;
	mon_pdev->rx_mon_stats.ppdu_drop_cnt +=
		hal_mon_rx_desc->ppdu_drop_count;
	mon_pdev->rx_mon_stats.mpdu_drop_cnt +=
		hal_mon_rx_desc->mpdu_drop_count;
	if (hal_mon_rx_desc->end_of_ppdu_dropped)
		mon_pdev->rx_mon_stats.end_of_ppdu_drop_cnt++;
	mon_pdev->rx_mon_stats.tlv_drop_cnt +=
		hal_mon_rx_desc->tlv_drop_count;
}

#ifdef QCA_MONITOR_2_0_PKT_SUPPORT
#if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) &&\
	defined(WLAN_SUPPORT_RX_TAG_STATISTICS)
/**
 * dp_mon_rx_update_rx_protocol_tag_stats() - Update monitor protocol
 *					      statistics
 * @pdev: pdev handle
 * @protocol_index: Protocol index for which the stats should be incremented
 *
 * Return: void
 */
static void dp_mon_rx_update_rx_protocol_tag_stats(struct dp_pdev *pdev,
						   uint16_t protocol_index)
{
	pdev->mon_proto_tag_stats[protocol_index].tag_ctr++;
}

#ifdef QCA_TEST_MON_PF_TAGS_STATS

static
void dp_rx_mon_print_tag_buf(uint8_t *buf, uint16_t room)
{
	print_hex_dump(KERN_ERR, "TLV BUFFER: ", DUMP_PREFIX_NONE,
		       32, 2, buf, room, false);
}

#else
static
void dp_rx_mon_print_tag_buf(uint8_t *buf, uint16_t room)
{
}
#endif

void
dp_rx_mon_shift_pf_tag_in_headroom(qdf_nbuf_t nbuf, struct dp_soc *soc,
				   struct hal_rx_ppdu_info *ppdu_info)
{
	uint32_t room = 0;
	uint16_t msdu_count = 0;
	uint16_t *dp = NULL;
	uint16_t *hp = NULL;
	uint16_t tlv_data_len, total_tlv_len;
	uint32_t bytes = 0;

	if (qdf_unlikely(!soc)) {
		dp_mon_err("Soc[%pK] Null. Can't update pftag to nbuf headroom",
			   soc);
		return;
	}

	if (!wlan_cfg_is_rx_mon_protocol_flow_tag_enabled(soc->wlan_cfg_ctx))
		return;

	if (qdf_unlikely(!nbuf))
		return;

	/* Headroom must have enough space for the TLV to be added */
	if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < DP_RX_MON_TLV_ROOM)) {
		dp_mon_err("Headroom[%d] < DP_RX_MON_TLV_ROOM[%d]",
			   qdf_nbuf_headroom(nbuf), DP_RX_MON_TLV_ROOM);
		return;
	}

	hp = (uint16_t *)qdf_nbuf_head(nbuf);
	msdu_count = *hp;

	if (qdf_unlikely(!msdu_count))
		return;

	dp_mon_debug("msdu_count: %d", msdu_count);

	room = DP_RX_MON_PF_TAG_LEN_PER_FRAG * msdu_count;
	tlv_data_len = DP_RX_MON_TLV_MSDU_CNT + room;
	total_tlv_len = DP_RX_MON_TLV_HDR_LEN + tlv_data_len;

	/* 1. store space for the MARKER */
	dp = (uint16_t *)qdf_nbuf_push_head(nbuf, sizeof(uint16_t));
	if (qdf_likely(dp)) {
		*dp = DP_RX_MON_TLV_HDR_MARKER;
		bytes += sizeof(uint16_t);
	}

	/* 2. store space for the total size */
	dp = (uint16_t *)qdf_nbuf_push_head(nbuf, sizeof(uint16_t));
	if (qdf_likely(dp)) {
		*dp = total_tlv_len;
		bytes += sizeof(uint16_t);
	}

	/* create the TLV */
	bytes += dp_mon_rx_add_tlv(DP_RX_MON_TLV_PF_ID, tlv_data_len, hp, nbuf);

	dp_rx_mon_print_tag_buf(qdf_nbuf_data(nbuf), total_tlv_len);

	qdf_nbuf_pull_head(nbuf, bytes);
}
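
/*
 * Sketch of the headroom layout produced above (field widths in bytes are
 * taken from the code; the macro values themselves are platform-defined):
 *
 *  | HDR_MARKER (2) | total_tlv_len (2) | TLV header | msdu_count (2) |
 *  |  per-MSDU tag data, DP_RX_MON_PF_TAG_LEN_PER_FRAG * msdu_count   |
 */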

void
dp_rx_mon_pf_tag_to_buf_headroom_2_0(void *nbuf,
				     struct hal_rx_ppdu_info *ppdu_info,
				     struct dp_pdev *pdev, struct dp_soc *soc)
{
	uint8_t *nbuf_head = NULL;
	uint8_t user_id;
	struct hal_rx_mon_msdu_info *msdu_info;
	uint16_t flow_id;
	uint16_t cce_metadata;
	uint16_t protocol_tag = 0;
	uint32_t flow_tag;
	uint8_t invalid_cce = 0, invalid_fse = 0;

	if (qdf_unlikely(!soc)) {
		dp_mon_err("Soc[%pK] Null. Can't update pftag to nbuf headroom",
			   soc);
		return;
	}

	if (!wlan_cfg_is_rx_mon_protocol_flow_tag_enabled(soc->wlan_cfg_ctx))
		return;

	if (qdf_unlikely(!nbuf))
		return;

	/* Headroom must have enough space for the TLV to be added */
	if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < DP_RX_MON_TLV_ROOM)) {
		dp_mon_err("Headroom[%d] < DP_RX_MON_TLV_ROOM[%d]",
			   qdf_nbuf_headroom(nbuf), DP_RX_MON_TLV_ROOM);
		return;
	}

	user_id = ppdu_info->user_id;
	if (qdf_unlikely(user_id >= HAL_MAX_UL_MU_USERS)) {
		dp_mon_debug("Invalid user_id: %d pdev: %pK", user_id, pdev);
		return;
	}

	msdu_info = &ppdu_info->msdu[user_id];
	flow_id = ppdu_info->rx_msdu_info[user_id].flow_idx;
	cce_metadata = ppdu_info->rx_msdu_info[user_id].cce_metadata -
		       RX_PROTOCOL_TAG_START_OFFSET;

	flow_tag = ppdu_info->rx_msdu_info[user_id].fse_metadata & F_MASK;

	if (qdf_unlikely((cce_metadata > RX_PROTOCOL_TAG_MAX - 1) ||
			 (cce_metadata > 0 && cce_metadata < 4))) {
		dp_mon_debug("Invalid cce_metadata: %d pdev: %pK", cce_metadata, pdev);
		invalid_cce = 1;
		protocol_tag = cce_metadata;
	} else {
		protocol_tag = pdev->rx_proto_tag_map[cce_metadata].tag;
		dp_mon_rx_update_rx_protocol_tag_stats(pdev, cce_metadata);
	}

	if (flow_tag > 0) {
		dp_mon_rx_update_rx_flow_tag_stats(pdev, flow_id);
	} else {
		dp_mon_debug("Invalid flow_tag: %d pdev: %pK", flow_tag, pdev);
		invalid_fse = 1;
	}

	if (invalid_cce && invalid_fse)
		return;

	if (msdu_info->msdu_index >= DP_RX_MON_MAX_MSDU) {
		dp_mon_err("msdu_index causes overflow in headroom");
		return;
	}

	dp_mon_debug("protocol_tag: %d, cce_metadata: %d, flow_tag: %d",
		     protocol_tag, cce_metadata, flow_tag);

	dp_mon_debug("msdu_index: %d", msdu_info->msdu_index);

	nbuf_head = qdf_nbuf_head(nbuf);

	*((uint16_t *)nbuf_head) = msdu_info->msdu_index + 1;
	nbuf_head += DP_RX_MON_TLV_MSDU_CNT;

	nbuf_head += ((msdu_info->msdu_index) * DP_RX_MON_PF_TAG_SIZE);
	if (!invalid_cce)
		*((uint16_t *)nbuf_head) = protocol_tag;
	nbuf_head += sizeof(uint16_t);
	if (!invalid_fse)
		*((uint16_t *)nbuf_head) = flow_tag;
}
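
/*
 * Illustrative addressing of the per-MSDU tag slots written above: the
 * first two bytes of the headroom hold the MSDU count, and each MSDU owns
 * a DP_RX_MON_PF_TAG_SIZE slot at
 *   qdf_nbuf_head(nbuf) + DP_RX_MON_TLV_MSDU_CNT
 *                       + msdu_index * DP_RX_MON_PF_TAG_SIZE,
 * holding the 16-bit protocol tag followed by the 16-bit flow tag.
 */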

#else
static inline void
dp_mon_rx_update_rx_protocol_tag_stats(struct dp_pdev *pdev,
				       uint16_t protocol_index)
{
}

static inline
void dp_rx_mon_shift_pf_tag_in_headroom(qdf_nbuf_t nbuf, struct dp_soc *soc,
					struct hal_rx_ppdu_info *ppdu_info)
{
}

static inline
void dp_rx_mon_pf_tag_to_buf_headroom_2_0(void *nbuf,
					  struct hal_rx_ppdu_info *ppdu_info,
					  struct dp_pdev *pdev,
					  struct dp_soc *soc)
{
}

#endif

#ifdef QCA_KMEM_CACHE_SUPPORT
QDF_STATUS dp_rx_mon_ppdu_info_cache_create(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	uint16_t obj;
	struct hal_rx_ppdu_info *ppdu_info = NULL;

	mon_pdev_be->ppdu_info_cache =
		qdf_kmem_cache_create("rx_mon_ppdu_info_cache",
				      sizeof(struct hal_rx_ppdu_info));

	if (!mon_pdev_be->ppdu_info_cache) {
		dp_mon_err("cache creation failed pdev: %pK", pdev);
		return QDF_STATUS_E_NOMEM;
	}

	TAILQ_INIT(&mon_pdev_be->rx_mon_free_queue);
	for (obj = 0; obj < DP_RX_MON_WQ_THRESHOLD; obj++) {
		ppdu_info = (struct hal_rx_ppdu_info *)qdf_kmem_cache_alloc(mon_pdev_be->ppdu_info_cache);

		if (ppdu_info) {
			qdf_mem_zero(ppdu_info, sizeof(struct hal_rx_ppdu_info));
			TAILQ_INSERT_TAIL(&mon_pdev_be->rx_mon_free_queue,
					  ppdu_info,
					  ppdu_free_list_elem);
			mon_pdev_be->total_free_elem++;
		}
	}
	qdf_spinlock_create(&mon_pdev_be->ppdu_info_lock);

	return QDF_STATUS_SUCCESS;
}

void dp_rx_mon_ppdu_info_cache_destroy(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	struct hal_rx_ppdu_info *ppdu_info = NULL, *temp_ppdu_info = NULL;

	qdf_spin_lock(&mon_pdev_be->ppdu_info_lock);
	TAILQ_FOREACH_SAFE(ppdu_info,
			   &mon_pdev_be->rx_mon_free_queue,
			   ppdu_free_list_elem,
			   temp_ppdu_info) {
		TAILQ_REMOVE(&mon_pdev_be->rx_mon_free_queue,
			     ppdu_info, ppdu_free_list_elem);
		if (ppdu_info) {
			mon_pdev_be->total_free_elem--;
			qdf_kmem_cache_free(mon_pdev_be->ppdu_info_cache,
					    ppdu_info);
		}
	}
	qdf_spin_unlock(&mon_pdev_be->ppdu_info_lock);
	dp_mon_debug("total free element: %d", mon_pdev_be->total_free_elem);
	qdf_kmem_cache_destroy(mon_pdev_be->ppdu_info_cache);
}
#endif

static QDF_STATUS dp_rx_mon_init_wq_sm(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);

	qdf_create_work(0, &mon_pdev_be->rx_mon_work,
			dp_rx_mon_process_ppdu, pdev);
	mon_pdev_be->rx_mon_workqueue =
		qdf_alloc_unbound_workqueue("rx_mon_work_queue");

	if (!mon_pdev_be->rx_mon_workqueue) {
		dp_mon_err("failed to create rxmon wq mon_pdev: %pK", mon_pdev);
		goto fail;
	}
	TAILQ_INIT(&mon_pdev_be->rx_mon_queue);

	qdf_spinlock_create(&mon_pdev_be->rx_mon_wq_lock);
	return QDF_STATUS_SUCCESS;

fail:
	return QDF_STATUS_E_FAILURE;
}

static QDF_STATUS dp_rx_mon_deinit_wq_sm(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);

	if (!mon_pdev_be->rx_mon_workqueue)
		return QDF_STATUS_E_FAILURE;

	qdf_err("total free element: %d", mon_pdev_be->total_free_elem);
	qdf_flush_workqueue(0, mon_pdev_be->rx_mon_workqueue);
	qdf_destroy_workqueue(0, mon_pdev_be->rx_mon_workqueue);
	qdf_flush_work(&mon_pdev_be->rx_mon_work);
	qdf_disable_work(&mon_pdev_be->rx_mon_work);
	dp_rx_mon_drain_wq(pdev);
	mon_pdev_be->rx_mon_workqueue = NULL;
	qdf_spinlock_destroy(&mon_pdev_be->rx_mon_wq_lock);

	return QDF_STATUS_SUCCESS;
}

static
void dp_rx_mon_set_zero(qdf_nbuf_t nbuf)
{
	qdf_mem_zero(qdf_nbuf_head(nbuf), DP_RX_MON_TLV_ROOM);
}

/**
 * dp_rx_mon_nbuf_add_rx_frag() - Add frag to SKB
 *
 * @nbuf: SKB to which frag is going to be added
 * @frag: frag to be added to SKB
 * @frag_len: frag length
 * @offset: frag offset
 * @buf_size: buffer size
 * @frag_ref: take frag ref
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_rx_mon_nbuf_add_rx_frag(qdf_nbuf_t nbuf, qdf_frag_t *frag,
			   uint16_t frag_len, uint16_t offset,
			   uint16_t buf_size, bool frag_ref)
{
	uint8_t num_frags;

	num_frags = qdf_nbuf_get_nr_frags(nbuf);
	if (num_frags < QDF_NBUF_MAX_FRAGS) {
		qdf_nbuf_add_rx_frag(frag, nbuf,
				     offset,
				     frag_len,
				     buf_size,
				     frag_ref);
		return QDF_STATUS_SUCCESS;
	}
	return QDF_STATUS_E_FAILURE;
}

/**
 * dp_rx_mon_handle_mpdu_end() - Process MPDU_END TLV
 *
 * @ppdu_info: PPDU info
 *
 * Return: void
 */
static inline void
dp_rx_mon_handle_mpdu_end(struct hal_rx_ppdu_info *ppdu_info)
{
	struct hal_rx_mon_mpdu_info *mpdu_info, *mpdu_meta;
	qdf_nbuf_t nbuf;
	uint8_t user_id = ppdu_info->user_id;
	uint8_t mpdu_idx = ppdu_info->mpdu_count[user_id];

	mpdu_info = &ppdu_info->mpdu_info[user_id];
	if (qdf_unlikely(!ppdu_info->rx_hdr_rcvd[user_id])) {
		/* reset mpdu info for next mpdu for same user */
		qdf_mem_zero(mpdu_info, sizeof(*mpdu_info));
		dp_mon_debug(" <%d> nbuf is NULL, return user: %d mpdu_idx: %d",
			     __LINE__, user_id, mpdu_idx);
		return;
	}
	nbuf = qdf_nbuf_queue_last(&ppdu_info->mpdu_q[user_id]);
	if (qdf_unlikely(!nbuf)) {
		dp_mon_debug("nbuf is NULL");
		return;
	}
	mpdu_meta = (struct hal_rx_mon_mpdu_info *)qdf_nbuf_data(nbuf);
	mpdu_meta->mpdu_length_err = mpdu_info->mpdu_length_err;
	mpdu_meta->fcs_err = mpdu_info->fcs_err;
	mpdu_meta->overflow_err = mpdu_info->overflow_err;
	mpdu_meta->decrypt_err = mpdu_info->decrypt_err;
	mpdu_meta->full_pkt = mpdu_info->full_pkt;
	mpdu_meta->truncated = mpdu_info->truncated;

	/* reset mpdu info for next mpdu for same user */
	qdf_mem_zero(mpdu_info, sizeof(*mpdu_info));
	ppdu_info->mpdu_info[user_id].mpdu_start_received = false;
	ppdu_info->mpdu_count[user_id]++;
}

/**
 * dp_rx_mon_handle_mpdu_start() - Process MPDU_START TLV
 *
 * @ppdu_info: PPDU info
 *
 * Return: void
 */
static inline void
dp_rx_mon_handle_mpdu_start(struct hal_rx_ppdu_info *ppdu_info)
{
	struct hal_rx_mon_mpdu_info *mpdu_info, *mpdu_meta;
	qdf_nbuf_t nbuf;
	uint8_t user_id = ppdu_info->user_id;
	uint8_t mpdu_idx = ppdu_info->mpdu_count[user_id];

	if (qdf_unlikely(!ppdu_info->rx_hdr_rcvd[user_id])) {
		dp_mon_debug(" <%d> nbuf is NULL, return user: %d mpdu_idx: %d",
			     __LINE__, user_id, mpdu_idx);
		return;
	}
	nbuf = qdf_nbuf_queue_last(&ppdu_info->mpdu_q[user_id]);
	if (qdf_unlikely(!nbuf)) {
		dp_mon_debug("nbuf is NULL");
		return;
	}
	mpdu_meta = (struct hal_rx_mon_mpdu_info *)qdf_nbuf_data(nbuf);
	mpdu_info = &ppdu_info->mpdu_info[user_id];
	mpdu_meta->decap_type = mpdu_info->decap_type;
	ppdu_info->mpdu_info[user_id].mpdu_start_received = true;
}

/**
 * dp_rx_mon_handle_msdu_end() - Process MSDU_END TLV
 *
 * @pdev: DP Pdev
 * @ppdu_info: PPDU info
 *
 * Return: void
 */
static inline void
dp_rx_mon_handle_msdu_end(struct dp_pdev *pdev,
			  struct hal_rx_ppdu_info *ppdu_info)
{
	qdf_nbuf_t nbuf;
	qdf_frag_t addr;
	uint16_t num_frags;
	struct hal_rx_mon_msdu_info *msdu_info;
	struct hal_rx_mon_msdu_info *last_buf_info;
	uint8_t user_id = ppdu_info->user_id;
	uint8_t mpdu_idx = ppdu_info->mpdu_count[user_id];

	msdu_info = &ppdu_info->msdu[user_id];
	/* update msdu metadata at last buffer of msdu in MPDU */
	if (qdf_unlikely(!ppdu_info->rx_hdr_rcvd[user_id])) {
		/* reset msdu info for next msdu for same user */
		qdf_mem_zero(msdu_info, sizeof(*msdu_info));
		dp_mon_debug(" <%d> nbuf is NULL, return user: %d mpdu_idx: %d",
			     __LINE__, user_id, mpdu_idx);
		return;
	}
	nbuf = qdf_nbuf_queue_last(&ppdu_info->mpdu_q[user_id]);
	if (qdf_unlikely(!nbuf)) {
		dp_mon_debug("nbuf is NULL");
		return;
	}
	num_frags = qdf_nbuf_get_nr_frags(nbuf);
	if (ppdu_info->mpdu_info[user_id].decap_type ==
			HAL_HW_RX_DECAP_FORMAT_RAW) {
		return;
	}
	/* This points to the last buffer of the MSDU; update metadata here */
	addr = qdf_nbuf_get_frag_addr(nbuf, num_frags - 1) -
		DP_RX_MON_PACKET_OFFSET;
	last_buf_info = addr;

	last_buf_info->first_msdu = msdu_info->first_msdu;
	last_buf_info->last_msdu = msdu_info->last_msdu;
	last_buf_info->decap_type = msdu_info->decap_type;
	last_buf_info->msdu_index = msdu_info->msdu_index;
	last_buf_info->user_rssi = msdu_info->user_rssi;
	last_buf_info->reception_type = msdu_info->reception_type;
	last_buf_info->msdu_len = msdu_info->msdu_len;

	/* If flow classification is enabled,
	 * update protocol and flow tag to buf headroom
	 */
	dp_rx_mon_pf_tag_to_buf_headroom_2_0(nbuf, ppdu_info, pdev, pdev->soc);

	/* reset msdu info for next msdu for same user */
	qdf_mem_zero(msdu_info, sizeof(*msdu_info));
}

/**
 * dp_rx_mon_handle_mon_buf_addr() - Process MON BUF ADDR TLV
 *
 * @pdev: DP Pdev
 * @ppdu_info: PPDU info
 * @desc_list: desc list head
 * @tail: desc list tail
 *
 * Return: number of buffers reaped
 */
static inline uint16_t
dp_rx_mon_handle_mon_buf_addr(struct dp_pdev *pdev,
			      struct hal_rx_ppdu_info *ppdu_info,
			      union dp_mon_desc_list_elem_t **desc_list,
			      union dp_mon_desc_list_elem_t **tail)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	struct hal_rx_mon_msdu_info *buf_info;
	struct hal_mon_packet_info *packet_info = &ppdu_info->packet_info;
	struct dp_mon_desc *mon_desc;
	unsigned long long desc = ppdu_info->packet_info.sw_cookie;
	struct hal_rx_mon_mpdu_info *mpdu_info;
	qdf_nbuf_t nbuf, tmp_nbuf;
	qdf_frag_t addr;
	uint16_t frag_idx = 0;
	uint16_t num_buf_reaped = 0;
	uint8_t user_id = ppdu_info->user_id;
	bool rx_hdr_valid = true;
	uint32_t cookie_2;

	if (!mon_pdev->monitor_configured &&
	    !dp_lite_mon_is_rx_enabled(mon_pdev)) {
		return num_buf_reaped;
	}

	if (qdf_unlikely(user_id >= HAL_MAX_UL_MU_USERS ||
			 ppdu_info->hdr_len > DP_RX_MON_MAX_RX_HEADER_LEN))
		rx_hdr_valid = false;

	cookie_2 = DP_MON_GET_COOKIE(desc);
	mon_desc = DP_MON_GET_DESC(desc);
	qdf_assert_always(mon_desc);

	if (mon_desc->cookie_2 != cookie_2) {
		mon_pdev->rx_mon_stats.dup_mon_sw_desc++;
		qdf_err("duplicate cookie found mon_desc:%pK", mon_desc);
		qdf_assert_always(0);
	}

	if (mon_desc->magic != DP_MON_DESC_MAGIC)
		qdf_assert_always(0);

	/* WAR: sometimes duplicate pkt desc are received
	 * from HW; this check gracefully handles
	 * such cases.
	 */
	if ((mon_desc == mon_pdev_be->prev_rxmon_pkt_desc) &&
	    (mon_desc->cookie == mon_pdev_be->prev_rxmon_pkt_cookie)) {
		dp_mon_err("duplicate pkt desc found mon_pdev: %pK mon_desc: %pK cookie: %d",
			   mon_pdev, mon_desc,
			   mon_desc->cookie);
		mon_pdev->rx_mon_stats.dup_mon_buf_cnt++;
		return num_buf_reaped;
	}
	mon_pdev_be->prev_rxmon_pkt_desc = mon_desc;
	mon_pdev_be->prev_rxmon_pkt_cookie = mon_desc->cookie;

	addr = mon_desc->buf_addr;
	qdf_assert_always(addr);

	if (!mon_desc->unmapped) {
		qdf_mem_unmap_page(soc->osdev,
				   (qdf_dma_addr_t)mon_desc->paddr,
				   DP_MON_DATA_BUFFER_SIZE,
				   QDF_DMA_FROM_DEVICE);
		mon_desc->unmapped = 1;
	}
	dp_mon_add_to_free_desc_list(desc_list, tail, mon_desc);
	num_buf_reaped++;

	mon_pdev->rx_mon_stats.pkt_buf_count++;

	/* if rx hdr is not valid, free pkt buffer and return */
	if (qdf_unlikely(!rx_hdr_valid)) {
		DP_STATS_INC(mon_soc, frag_free, 1);
		qdf_frag_free(addr);
		return num_buf_reaped;
	}

	if (qdf_unlikely(!ppdu_info->rx_hdr_rcvd[user_id])) {
		/* WAR: RX_HDR is not received for this MPDU, drop this frame */
		mon_pdev->rx_mon_stats.rx_hdr_not_received++;
		DP_STATS_INC(mon_soc, frag_free, 1);
		qdf_frag_free(addr);
		return num_buf_reaped;
	}

	if (packet_info->dma_length >
			(DP_MON_DATA_BUFFER_SIZE - DP_RX_MON_PACKET_OFFSET)) {
		/* WAR: Invalid DMA length is received for this MPDU */
		mon_pdev->rx_mon_stats.invalid_dma_length++;
		DP_STATS_INC(mon_soc, frag_free, 1);
		qdf_frag_free(addr);
		return num_buf_reaped;
	}

	nbuf = qdf_nbuf_queue_last(&ppdu_info->mpdu_q[user_id]);
	if (qdf_unlikely(!nbuf)) {
		dp_mon_debug("nbuf is NULL");
		DP_STATS_INC(mon_soc, frag_free, 1);
		DP_STATS_INC(mon_soc, empty_queue, 1);
		qdf_frag_free(addr);
		return num_buf_reaped;
	}

	mpdu_info = &ppdu_info->mpdu_info[user_id];
	if (mpdu_info->decap_type == DP_MON_DECAP_FORMAT_INVALID) {
		/* decap type is invalid, drop the frame */
		mon_pdev->rx_mon_stats.mpdu_decap_type_invalid++;
		DP_STATS_INC(mon_soc, frag_free, 1);
		mon_pdev->rx_mon_stats.parent_buf_free++;
		qdf_frag_free(addr);
		qdf_nbuf_queue_remove_last(&ppdu_info->mpdu_q[user_id]);
		qdf_nbuf_free(nbuf);
		/* if invalid decap type handling is disabled, assert */
		if (soc->wlan_cfg_ctx->is_handle_invalid_decap_type_disabled) {
			dp_mon_err("Decap type invalid");
			qdf_assert_always(0);
		}
		ppdu_info->rx_hdr_rcvd[user_id] = false;
		return num_buf_reaped;
	}

	tmp_nbuf = qdf_get_nbuf_valid_frag(nbuf);

	if (!tmp_nbuf) {
		tmp_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
					  DP_RX_MON_MAX_MONITOR_HEADER,
					  DP_RX_MON_MAX_MONITOR_HEADER,
					  4, FALSE);
		if (qdf_unlikely(!tmp_nbuf)) {
			dp_mon_err("nbuf is NULL");
			DP_STATS_INC(mon_soc, frag_free, 1);
			mon_pdev->rx_mon_stats.parent_buf_free++;
			qdf_frag_free(addr);
			/* remove this nbuf from queue */
			qdf_nbuf_queue_remove_last(&ppdu_info->mpdu_q[user_id]);
			qdf_nbuf_free(nbuf);
			return num_buf_reaped;
		}
		mon_pdev->rx_mon_stats.parent_buf_alloc++;
		dp_rx_mon_append_nbuf(nbuf, tmp_nbuf);
	}
	mpdu_info->full_pkt = true;

	if (mpdu_info->decap_type == HAL_HW_RX_DECAP_FORMAT_RAW) {
		if (mpdu_info->first_rx_hdr_rcvd) {
			qdf_nbuf_remove_frag(nbuf, frag_idx, DP_MON_DATA_BUFFER_SIZE);
			dp_rx_mon_nbuf_add_rx_frag(nbuf, addr,
						   packet_info->dma_length,
						   DP_RX_MON_PACKET_OFFSET,
						   DP_MON_DATA_BUFFER_SIZE,
						   false);
			DP_STATS_INC(mon_soc, frag_free, 1);
			mpdu_info->first_rx_hdr_rcvd = false;
		} else {
			dp_rx_mon_nbuf_add_rx_frag(tmp_nbuf, addr,
						   packet_info->dma_length,
						   DP_RX_MON_PACKET_OFFSET,
						   DP_MON_DATA_BUFFER_SIZE,
						   false);
			DP_STATS_INC(mon_soc, frag_free, 1);
		}
	} else {
		dp_rx_mon_nbuf_add_rx_frag(tmp_nbuf, addr,
					   packet_info->dma_length,
					   DP_RX_MON_PACKET_OFFSET,
					   DP_MON_DATA_BUFFER_SIZE,
					   false);
		DP_STATS_INC(mon_soc, frag_free, 1);
		buf_info = addr;

		if (!ppdu_info->msdu[user_id].first_buffer) {
			buf_info->first_buffer = true;
			ppdu_info->msdu[user_id].first_buffer = true;
		} else {
			buf_info->first_buffer = false;
		}

		if (packet_info->msdu_continuation)
			buf_info->last_buffer = false;
		else
			buf_info->last_buffer = true;

		buf_info->frag_len = packet_info->dma_length;
	}
	if (qdf_unlikely(packet_info->truncated))
		mpdu_info->truncated = true;

	return num_buf_reaped;
}
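
/*
 * Note on the sw_cookie handling above: the 64-bit sw_cookie packs both
 * the monitor descriptor pointer and a small validation cookie; they are
 * unpacked with DP_MON_GET_DESC()/DP_MON_GET_COOKIE() and cross-checked
 * against mon_desc->cookie_2 to catch stale or duplicate descriptors that
 * the hardware occasionally replays (see the WAR comment above).
 */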

/**
 * dp_rx_mon_handle_rx_hdr() - Process RX_HDR TLV
 *
 * @pdev: DP pdev
 * @ppdu_info: PPDU info
 * @status_frag: Status frag
 *
 * Return: void
 */
static inline void
dp_rx_mon_handle_rx_hdr(struct dp_pdev *pdev,
			struct hal_rx_ppdu_info *ppdu_info,
			void *status_frag)
{
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	qdf_nbuf_t nbuf, tmp_nbuf;
	uint8_t user_id = ppdu_info->user_id;
	QDF_STATUS status;

	/* If this is the first RX_HEADER for the MPDU, allocate an skb;
	 * else add the frag to the already allocated skb
	 */
	if (!ppdu_info->mpdu_info[user_id].mpdu_start_received) {
		nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
				      DP_RX_MON_TLV_ROOM +
				      DP_RX_MON_MAX_RADIO_TAP_HDR,
				      DP_RX_MON_TLV_ROOM +
				      DP_RX_MON_MAX_RADIO_TAP_HDR,
				      4, FALSE);

		/* Set *head_msdu->next as NULL as all msdus are
		 * mapped via nr frags
		 */
		if (qdf_unlikely(!nbuf)) {
			dp_mon_debug("malloc failed pdev: %pK", pdev);
			return;
		}

		mon_pdev->rx_mon_stats.parent_buf_alloc++;

		dp_rx_mon_set_zero(nbuf);

		qdf_nbuf_set_next(nbuf, NULL);

		qdf_nbuf_queue_add(&ppdu_info->mpdu_q[user_id], nbuf);

		status = dp_rx_mon_nbuf_add_rx_frag(nbuf, status_frag,
						    ppdu_info->hdr_len - DP_RX_MON_RX_HDR_OFFSET,
						    ppdu_info->data - (unsigned char *)status_frag + 4,
						    DP_MON_DATA_BUFFER_SIZE, true);
		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
			dp_mon_err("num_frags exceeding MAX frags");
			return;
		}
		ppdu_info->mpdu_info[user_id].mpdu_start_received = true;
		ppdu_info->mpdu_info[user_id].first_rx_hdr_rcvd = true;
		/* initialize decap type to invalid; this will be set to the
		 * appropriate value once the mpdu start tlv is received
		 */
		ppdu_info->mpdu_info[user_id].decap_type = DP_MON_DECAP_FORMAT_INVALID;
	} else {
		if (ppdu_info->mpdu_info[user_id].decap_type ==
				HAL_HW_RX_DECAP_FORMAT_RAW) {
			return;
		}

		if (dp_lite_mon_is_rx_enabled(mon_pdev) &&
				!dp_lite_mon_is_level_msdu(mon_pdev))
			return;

		nbuf = qdf_nbuf_queue_last(&ppdu_info->mpdu_q[user_id]);
		if (qdf_unlikely(!nbuf)) {
			dp_mon_debug("nbuf is NULL");
			return;
		}

		tmp_nbuf = qdf_get_nbuf_valid_frag(nbuf);

		if (!tmp_nbuf) {
			tmp_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
						  DP_RX_MON_MAX_MONITOR_HEADER,
						  DP_RX_MON_MAX_MONITOR_HEADER,
						  4, FALSE);
			if (qdf_unlikely(!tmp_nbuf)) {
				dp_mon_err("nbuf is NULL");
				qdf_assert_always(0);
			}
			mon_pdev->rx_mon_stats.parent_buf_alloc++;
			dp_rx_mon_append_nbuf(nbuf, tmp_nbuf);
		}
		dp_rx_mon_nbuf_add_rx_frag(tmp_nbuf, status_frag,
					   ppdu_info->hdr_len - DP_RX_MON_RX_HDR_OFFSET,
					   ppdu_info->data - (unsigned char *)status_frag + 4,
					   DP_MON_DATA_BUFFER_SIZE,
					   true);
	}
	ppdu_info->rx_hdr_rcvd[user_id] = true;
}

/**
 * dp_rx_mon_free_mpdu_queue() - Free MPDU queue
 * @mon_pdev: monitor pdev
 * @ppdu_info: PPDU info
 *
 * Return: void
 */
static void dp_rx_mon_free_mpdu_queue(struct dp_mon_pdev *mon_pdev,
				      struct hal_rx_ppdu_info *ppdu_info)
{
	uint8_t user;
	qdf_nbuf_t mpdu;

	for (user = 0; user < HAL_MAX_UL_MU_USERS; user++) {
		if (!qdf_nbuf_is_queue_empty(&ppdu_info->mpdu_q[user])) {
			while ((mpdu = qdf_nbuf_queue_remove(&ppdu_info->mpdu_q[user])) != NULL)
				dp_mon_free_parent_nbuf(mon_pdev, mpdu);
		}
	}
}

#ifdef QCA_KMEM_CACHE_SUPPORT
/**
 * dp_rx_mon_get_ppdu_info() - Get PPDU info from freelist
 *
 * @mon_pdev: monitor pdev
 *
 * Return: ppdu_info
 */
struct hal_rx_ppdu_info *
dp_rx_mon_get_ppdu_info(struct dp_mon_pdev *mon_pdev)
{
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
	struct hal_rx_ppdu_info *ppdu_info, *temp_ppdu_info;

	qdf_spin_lock_bh(&mon_pdev_be->ppdu_info_lock);
	TAILQ_FOREACH_SAFE(ppdu_info,
			   &mon_pdev_be->rx_mon_free_queue,
			   ppdu_free_list_elem,
			   temp_ppdu_info) {
		TAILQ_REMOVE(&mon_pdev_be->rx_mon_free_queue,
			     ppdu_info, ppdu_free_list_elem);

		if (ppdu_info) {
			mon_pdev_be->total_free_elem--;
			break;
		}
	}
	qdf_spin_unlock_bh(&mon_pdev_be->ppdu_info_lock);

	return ppdu_info;
}

void
__dp_rx_mon_free_ppdu_info(struct dp_mon_pdev *mon_pdev,
			   struct hal_rx_ppdu_info *ppdu_info)
{
	struct dp_mon_pdev_be *mon_pdev_be =
			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);

	qdf_spin_lock_bh(&mon_pdev_be->ppdu_info_lock);
	if (ppdu_info) {
		qdf_mem_zero(ppdu_info, sizeof(struct hal_rx_ppdu_info));
		TAILQ_INSERT_TAIL(&mon_pdev_be->rx_mon_free_queue, ppdu_info,
				  ppdu_free_list_elem);
		mon_pdev_be->total_free_elem++;
	}
	qdf_spin_unlock_bh(&mon_pdev_be->ppdu_info_lock);
}

/**
 * dp_rx_mon_free_ppdu_info() - Free PPDU info
 * @pdev: DP pdev
 * @ppdu_info: PPDU info
 *
 * Return: void
 */
void
dp_rx_mon_free_ppdu_info(struct dp_pdev *pdev,
			 struct hal_rx_ppdu_info *ppdu_info)
{
	struct dp_mon_pdev *mon_pdev;

	mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
	dp_rx_mon_free_mpdu_queue(mon_pdev, ppdu_info);
	__dp_rx_mon_free_ppdu_info(mon_pdev, ppdu_info);
}
#endif

/**
 * dp_mon_free_parent_nbuf() - Free parent SKB
 *
 * @mon_pdev: monitor pdev
 * @nbuf: SKB to be freed
 *
 * Return: void
 */
void dp_mon_free_parent_nbuf(struct dp_mon_pdev *mon_pdev,
			     qdf_nbuf_t nbuf)
{
	mon_pdev->rx_mon_stats.parent_buf_free++;
	qdf_nbuf_free(nbuf);
}

void dp_rx_mon_drain_wq(struct dp_pdev *pdev)
{
	struct dp_mon_pdev *mon_pdev;
	struct hal_rx_ppdu_info *ppdu_info = NULL;
	struct hal_rx_ppdu_info *temp_ppdu_info = NULL;
	struct dp_mon_pdev_be *mon_pdev_be;

	if (qdf_unlikely(!pdev)) {
		dp_mon_debug("Pdev is NULL");
		return;
	}

	mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev)) {
		dp_mon_debug("monitor pdev is NULL");
		return;
	}

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);

	qdf_spin_lock_bh(&mon_pdev_be->rx_mon_wq_lock);
	TAILQ_FOREACH_SAFE(ppdu_info,
			   &mon_pdev_be->rx_mon_queue,
			   ppdu_list_elem,
			   temp_ppdu_info) {
		mon_pdev_be->rx_mon_queue_depth--;
		TAILQ_REMOVE(&mon_pdev_be->rx_mon_queue,
			     ppdu_info, ppdu_list_elem);

		dp_rx_mon_free_ppdu_info(pdev, ppdu_info);
	}
	qdf_spin_unlock_bh(&mon_pdev_be->rx_mon_wq_lock);
}

/**
 * dp_rx_mon_deliver_mpdu() - Deliver MPDU to the osif layer
 *
 * @mon_pdev: monitor pdev
 * @mpdu: MPDU nbuf
 * @rx_status: monitor status
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_rx_mon_deliver_mpdu(struct dp_mon_pdev *mon_pdev,
		       qdf_nbuf_t mpdu,
		       struct mon_rx_status *rx_status)
{
	qdf_nbuf_t nbuf;

	if (mon_pdev->mvdev && mon_pdev->mvdev->monitor_vdev->osif_rx_mon) {
		mon_pdev->rx_mon_stats.mpdus_buf_to_stack++;
		nbuf = qdf_nbuf_get_ext_list(mpdu);

		while (nbuf) {
			mon_pdev->rx_mon_stats.mpdus_buf_to_stack++;
			nbuf = nbuf->next;
		}
		mon_pdev->mvdev->monitor_vdev->osif_rx_mon(mon_pdev->mvdev->osif_vdev,
							   mpdu,
							   rx_status);
	} else {
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_mon_process_ppdu_info() - Process PPDU info
 * @pdev: DP pdev
 * @ppdu_info: PPDU info
 *
 * Return: void
 */
static void
dp_rx_mon_process_ppdu_info(struct dp_pdev *pdev,
			    struct hal_rx_ppdu_info *ppdu_info)
{
	struct dp_mon_pdev *mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
	uint8_t user;
	qdf_nbuf_t mpdu;

	if (!ppdu_info)
		return;

	for (user = 0; user < ppdu_info->com_info.num_users; user++) {
		uint16_t mpdu_count;
		uint16_t mpdu_idx;
		struct hal_rx_mon_mpdu_info *mpdu_meta;
		QDF_STATUS status;

		if (user >= HAL_MAX_UL_MU_USERS) {
			dp_mon_err("num user exceeds max limit");
			return;
		}

		mpdu_count = ppdu_info->mpdu_count[user];
		ppdu_info->rx_status.rx_user_status =
					&ppdu_info->rx_user_status[user];
		for (mpdu_idx = 0; mpdu_idx < mpdu_count; mpdu_idx++) {
			mpdu = qdf_nbuf_queue_remove(&ppdu_info->mpdu_q[user]);

			if (!mpdu)
				continue;

			mpdu_meta = (struct hal_rx_mon_mpdu_info *)qdf_nbuf_data(mpdu);

			ppdu_info->rx_status.rs_fcs_err = mpdu_meta->fcs_err;
			if (dp_lite_mon_is_rx_enabled(mon_pdev)) {
				status = dp_lite_mon_rx_mpdu_process(pdev, ppdu_info,
								     mpdu, mpdu_idx, user);
				if (status != QDF_STATUS_SUCCESS) {
					dp_mon_free_parent_nbuf(mon_pdev, mpdu);
					continue;
				}
			} else {
				if (mpdu_meta->full_pkt) {
					if (qdf_unlikely(mpdu_meta->truncated)) {
						dp_mon_free_parent_nbuf(mon_pdev, mpdu);
						continue;
					}

					status = dp_rx_mon_handle_full_mon(pdev,
									   ppdu_info, mpdu);
					if (status != QDF_STATUS_SUCCESS) {
						dp_mon_free_parent_nbuf(mon_pdev, mpdu);
						continue;
					}
				} else {
					dp_mon_free_parent_nbuf(mon_pdev, mpdu);
					continue;
				}

				/* reset mpdu metadata and apply radiotap header over MPDU */
				qdf_mem_zero(mpdu_meta, sizeof(struct hal_rx_mon_mpdu_info));
				if (!qdf_nbuf_update_radiotap(&ppdu_info->rx_status,
							      mpdu,
							      qdf_nbuf_headroom(mpdu))) {
					dp_mon_err("failed to update radiotap pdev: %pK",
						   pdev);
				}

				dp_rx_mon_shift_pf_tag_in_headroom(mpdu,
								   pdev->soc,
								   ppdu_info);

				dp_rx_mon_process_dest_pktlog(pdev->soc,
							      pdev->pdev_id,
							      mpdu);
				/* Deliver MPDU to the osif layer */
				status = dp_rx_mon_deliver_mpdu(mon_pdev,
								mpdu,
								&ppdu_info->rx_status);
				if (status != QDF_STATUS_SUCCESS)
					dp_mon_free_parent_nbuf(mon_pdev, mpdu);
			}
			ppdu_info->rx_status.rs_fcs_err = false;
		}
	}

	dp_rx_mon_free_mpdu_queue(mon_pdev, ppdu_info);
}

/**
 * dp_rx_mon_process_ppdu() - Deferred monitor processing
 * This workqueue API handles:
 * a. Full monitor
 * b. Lite monitor
 *
 * @context: Opaque work context
 *
 * Return: none
 */
void dp_rx_mon_process_ppdu(void *context)
{
	struct dp_pdev *pdev = (struct dp_pdev *)context;
	struct dp_mon_pdev *mon_pdev;
	struct hal_rx_ppdu_info *ppdu_info = NULL;
	struct hal_rx_ppdu_info *temp_ppdu_info = NULL;
	struct dp_mon_pdev_be *mon_pdev_be;

	if (qdf_unlikely(!pdev)) {
		dp_mon_debug("Pdev is NULL");
		return;
	}

	mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
	if (qdf_unlikely(!mon_pdev)) {
		dp_mon_debug("monitor pdev is NULL");
		return;
	}

	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);

	qdf_spin_lock_bh(&mon_pdev_be->rx_mon_wq_lock);
	TAILQ_FOREACH_SAFE(ppdu_info,
			   &mon_pdev_be->rx_mon_queue,
			   ppdu_list_elem, temp_ppdu_info) {
		TAILQ_REMOVE(&mon_pdev_be->rx_mon_queue,
			     ppdu_info, ppdu_list_elem);

		mon_pdev_be->rx_mon_queue_depth--;
		dp_rx_mon_process_ppdu_info(pdev, ppdu_info);
		__dp_rx_mon_free_ppdu_info(mon_pdev, ppdu_info);
	}
	qdf_spin_unlock_bh(&mon_pdev_be->rx_mon_wq_lock);
}

/**
 * dp_rx_mon_add_ppdu_info_to_wq() - Add PPDU info to workqueue
 *
 * @pdev: monitor pdev
 * @ppdu_info: ppdu info to be added to workqueue
 *
 * Return: SUCCESS or FAILURE
 */
static QDF_STATUS
dp_rx_mon_add_ppdu_info_to_wq(struct dp_pdev *pdev,
			      struct hal_rx_ppdu_info *ppdu_info)
{
	struct dp_mon_pdev *mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
	struct dp_mon_pdev_be *mon_pdev_be =
		dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);

	/* Full monitor or lite monitor mode is not enabled, return */
	if (!mon_pdev->monitor_configured &&
	    !dp_lite_mon_is_rx_enabled(mon_pdev))
		return QDF_STATUS_E_FAILURE;

	if (qdf_likely(ppdu_info)) {
		qdf_spin_lock_bh(&mon_pdev_be->rx_mon_wq_lock);
		if (mon_pdev_be->rx_mon_queue_depth < DP_RX_MON_WQ_THRESHOLD) {
			TAILQ_INSERT_TAIL(&mon_pdev_be->rx_mon_queue,
					  ppdu_info, ppdu_list_elem);
			mon_pdev_be->rx_mon_queue_depth++;
			mon_pdev->rx_mon_stats.total_ppdu_info_enq++;
		} else {
			mon_pdev->rx_mon_stats.total_ppdu_info_drop++;
			dp_rx_mon_free_ppdu_info(pdev, ppdu_info);
		}
		qdf_spin_unlock_bh(&mon_pdev_be->rx_mon_wq_lock);

		if (mon_pdev_be->rx_mon_queue_depth > DP_MON_QUEUE_DEPTH_MAX) {
			qdf_queue_work(0, mon_pdev_be->rx_mon_workqueue,
				       &mon_pdev_be->rx_mon_work);
		}
	}
	return QDF_STATUS_SUCCESS;
}
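
/*
 * Queueing behaviour, as implemented above: enqueue is refused (and the
 * PPDU info freed back to the cache) once rx_mon_queue_depth reaches
 * DP_RX_MON_WQ_THRESHOLD, and the deferred worker is only kicked once the
 * depth crosses DP_MON_QUEUE_DEPTH_MAX, so PPDUs are processed in batches
 * rather than one work item per PPDU.
 */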
1450 
1451 QDF_STATUS
1452 dp_rx_mon_handle_full_mon(struct dp_pdev *pdev,
1453 			  struct hal_rx_ppdu_info *ppdu_info,
1454 			  qdf_nbuf_t mpdu)
1455 {
1456 	uint32_t wifi_hdr_len, sec_hdr_len, msdu_llc_len,
1457 		 mpdu_buf_len, decap_hdr_pull_bytes, dir,
1458 		 is_amsdu, amsdu_pad, frag_size, tot_msdu_len;
1459 	struct hal_rx_mon_mpdu_info *mpdu_meta;
1460 	struct hal_rx_mon_msdu_info *msdu_meta;
1461 	char *hdr_desc;
1462 	uint8_t num_frags, frag_iter, l2_hdr_offset;
1463 	struct ieee80211_frame *wh;
1464 	struct ieee80211_qoscntl *qos;
1465 	uint32_t hdr_frag_size, frag_page_offset, pad_byte_pholder;
1466 	qdf_nbuf_t head_msdu, msdu_cur;
1467 	void *frag_addr;
1468 	bool prev_msdu_end_received = false;
1469 	bool is_nbuf_head = true;
1470 
1471 	/***************************************************************************
1472 	 *********************** Non-raw packet ************************************
1473 	 ---------------------------------------------------------------------------
1474 	 |      | frag-0   | frag-1    | frag - 2 | frag - 3  | frag - 4 | frag - 5  |
1475 	 | skb  | rx_hdr-1 | rx_msdu-1 | rx_hdr-2 | rx_msdu-2 | rx_hdr-3 | rx-msdu-3 |
1476 	 ---------------------------------------------------------------------------
1477 	 **************************************************************************/
1478 
1479 	if (!mpdu) {
1480 		dp_mon_debug("nbuf is NULL, return");
1481 		return QDF_STATUS_E_FAILURE;
1482 	}
1483 
1484 	head_msdu = mpdu;
1485 
1486 	mpdu_meta = (struct hal_rx_mon_mpdu_info *)qdf_nbuf_data(mpdu);
1487 
1488 	if (mpdu_meta->decap_type == HAL_HW_RX_DECAP_FORMAT_RAW) {
1489 		if (qdf_unlikely(ppdu_info->rx_status.rs_fcs_err)) {
1490 			hdr_desc = qdf_nbuf_get_frag_addr(mpdu, 0);
1491 			wh = (struct ieee80211_frame *)hdr_desc;
1492 			if ((wh->i_fc[0] & QDF_IEEE80211_FC0_VERSION_MASK) !=
1493 			    QDF_IEEE80211_FC0_VERSION_0) {
1494 				DP_STATS_INC(pdev, dropped.mon_ver_err, 1);
1495 				return QDF_STATUS_E_FAILURE;
1496 			}
1497 		}
1498 		qdf_nbuf_trim_add_frag_size(mpdu,
1499 					    qdf_nbuf_get_nr_frags(mpdu) - 1,
1500 					    -HAL_RX_FCS_LEN, 0);
1501 		return QDF_STATUS_SUCCESS;
1502 	}
1503 
1504 	num_frags = qdf_nbuf_get_nr_frags(mpdu);
1505 	if (qdf_unlikely(num_frags < DP_MON_MIN_FRAGS_FOR_RESTITCH)) {
1506 		dp_mon_debug("not enough frags(%d) for restitch", num_frags);
1507 		return QDF_STATUS_E_FAILURE;
1508 	}
1509 
1510 	l2_hdr_offset = DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE;
1511 
1512 	/* hdr_desc points to 80211 hdr */
1513 	hdr_desc = qdf_nbuf_get_frag_addr(mpdu, 0);
1514 
1515 	/* Calculate Base header size */
1516 	wifi_hdr_len = sizeof(struct ieee80211_frame);
1517 	wh = (struct ieee80211_frame *)hdr_desc;
1518 
1519 	dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
1520 
1521 	if (dir == IEEE80211_FC1_DIR_DSTODS)
1522 		wifi_hdr_len += 6;
1523 
1524 	is_amsdu = 0;
1525 	if (wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) {
1526 		qos = (struct ieee80211_qoscntl *)
1527 			(hdr_desc + wifi_hdr_len);
1528 		wifi_hdr_len += 2;
1529 
1530 		is_amsdu = (qos->i_qos[0] & IEEE80211_QOS_AMSDU);
1531 	}
1532 
1533 	/*Calculate security header length based on 'Protected'
1534 	 * and 'EXT_IV' flag
1535 	 */
1536 	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
1537 		char *iv = (char *)wh + wifi_hdr_len;
1538 
1539 		if (iv[3] & KEY_EXTIV)
1540 			sec_hdr_len = 8;
1541 		else
1542 			sec_hdr_len = 4;
1543 	} else {
1544 		sec_hdr_len = 0;
1545 	}
1546 	wifi_hdr_len += sec_hdr_len;
1547 
1548 	/* MSDU related stuff LLC - AMSDU subframe header etc */
1549 	msdu_llc_len = is_amsdu ? (DP_RX_MON_DECAP_HDR_SIZE +
1550 				   DP_RX_MON_LLC_SIZE +
1551 				   DP_RX_MON_SNAP_SIZE) :
1552 				   (DP_RX_MON_LLC_SIZE + DP_RX_MON_SNAP_SIZE);
1553 
1554 	mpdu_buf_len = wifi_hdr_len + msdu_llc_len;
1555 
1556 	/* "Decap" header to remove from MSDU buffer */
1557 	decap_hdr_pull_bytes = DP_RX_MON_DECAP_HDR_SIZE;
1558 
1559 	amsdu_pad = 0;
1560 	tot_msdu_len = 0;
1561 	tot_msdu_len = 0;
1562 
1563 	/*
1564 	 * Update protocol and flow tag for MSDU
1565 	 * update frag index in ctx_idx field.
1566 	 * Reset head pointer data of nbuf before updating.
1567 	 */
1568 	QDF_NBUF_CB_RX_CTX_ID(mpdu) = 0;
1569 
1570 	/* Construct destination address */
1571 	hdr_frag_size = qdf_nbuf_get_frag_size_by_idx(mpdu, 0);
1572 
1573 	/* Adjust page frag offset to point to 802.11 header */
1574 	if (hdr_frag_size > mpdu_buf_len)
1575 		qdf_nbuf_trim_add_frag_size(head_msdu, 0, -(hdr_frag_size - mpdu_buf_len), 0);
1576 
1577 	msdu_meta = (struct hal_rx_mon_msdu_info *)(((void *)qdf_nbuf_get_frag_addr(mpdu, 1)) - DP_RX_MON_PACKET_OFFSET);
1578 
1579 	frag_size = qdf_nbuf_get_frag_size_by_idx(head_msdu, 1);
1580 	pad_byte_pholder =
1581 		RX_MONITOR_BUFFER_SIZE - (frag_size + DP_RX_MON_PACKET_OFFSET);
1582 
1583 	/* Adjust page frag offset to appropriate after decap header */
1584 	frag_page_offset =
1585 		decap_hdr_pull_bytes + l2_hdr_offset;
1586 	qdf_nbuf_move_frag_page_offset(head_msdu, 1, frag_page_offset);
1587 	frag_size = frag_size - frag_page_offset;
1588 
1589 	if (msdu_meta->first_buffer && msdu_meta->last_buffer) {
1590 		/* MSDU with single buffer */
1591 		amsdu_pad = frag_size & 0x3;
1592 		amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;
1593 		if (amsdu_pad && (amsdu_pad <= pad_byte_pholder)) {
1594 			char *frag_addr_temp;
1595 
1596 			qdf_nbuf_trim_add_frag_size(mpdu, 1, amsdu_pad, 0);
1597 			frag_addr_temp =
1598 				(char *)qdf_nbuf_get_frag_addr(mpdu, 1);
1599 			frag_addr_temp = (frag_addr_temp +
1600 					  qdf_nbuf_get_frag_size_by_idx(mpdu, 1)) -
1601 				amsdu_pad;
1602 			qdf_mem_zero(frag_addr_temp, amsdu_pad);
1603 			amsdu_pad = 0;
1604 		}
1605 	} else {
1606 		tot_msdu_len = frag_size;
1607 		amsdu_pad = 0;
1608 	}
1609 
1610 	pad_byte_pholder = 0;
1611 	for (msdu_cur = mpdu; msdu_cur;) {
1612 		/* frag_iter will start from 0 for second skb onwards */
1613 		if (msdu_cur == mpdu)
1614 			frag_iter = 2;
1615 		else
1616 			frag_iter = 0;
1617 
1618 		num_frags = qdf_nbuf_get_nr_frags(msdu_cur);
1619 
1620 		for (; frag_iter < num_frags; frag_iter++) {
1621 			/* Construct destination address
1622 			 *  ----------------------------------------------------------
1623 			 * |            | L2_HDR_PAD   |   Decap HDR | Payload | Pad  |
1624 			 * |            | (First buffer)             |         |      |
1625 			 * |            |                            /        /       |
1626 			 * |            >Frag address points here   /        /        |
1627 			 * |            \                          /        /         |
1628 			 * |             \ This bytes needs to    /        /          |
1629 			 * |              \  removed to frame pkt/        /           |
1630 			 * |               ----------------------        /            |
1631 			 * |                                     |     /     Add      |
1632 			 * |                                     |    /   amsdu pad   |
1633 			 * |   LLC HDR will be added here      <-|    |   Byte for    |
1634 			 * |        |                            |    |   last frame  |
1635 			 * |         >Dest addr will point       |    |    if space   |
1636 			 * |            somewhere in this area   |    |    available  |
1637 			 * |  And amsdu_pad will be created if   |    |               |
1638 			 * | dint get added in last buffer       |    |               |
1639 			 * |       (First Buffer)                |    |               |
1640 			 *  ----------------------------------------------------------
1641 			 */
1642 			/* If previous msdu end has received, modify next frag's offset to point to LLC */
1643 			if (prev_msdu_end_received) {
1644 				hdr_frag_size = qdf_nbuf_get_frag_size_by_idx(msdu_cur, frag_iter);
1645 				/* Adjust page frag offset to point to llc/snap header */
1646 				if (hdr_frag_size > msdu_llc_len)
1647 					qdf_nbuf_trim_add_frag_size(msdu_cur, frag_iter, -(hdr_frag_size - msdu_llc_len), 0);
1648 				prev_msdu_end_received = false;
1649 				continue;
1650 			}
1651 
1652 			frag_addr =
1653 				qdf_nbuf_get_frag_addr(msdu_cur, frag_iter) -
1654 						       DP_RX_MON_PACKET_OFFSET;
1655 			msdu_meta = (struct hal_rx_mon_msdu_info *)frag_addr;
1656 
1657 			/*
1658 			 * Update protocol and flow tag for MSDU
1659 			 * update frag index in ctx_idx field
1660 			 */
1661 			QDF_NBUF_CB_RX_CTX_ID(msdu_cur) = frag_iter;
1662 
1663 			frag_size = qdf_nbuf_get_frag_size_by_idx(msdu_cur,
1664 					frag_iter);
1665 
1666 			/* If Middle buffer, dont add any header */
1667 			if ((!msdu_meta->first_buffer) &&
1668 					(!msdu_meta->last_buffer)) {
1669 				tot_msdu_len += frag_size;
1670 				amsdu_pad = 0;
1671 				pad_byte_pholder = 0;
1672 				continue;
1673 			}
1674 
1675 			/* Calculate if current buffer has placeholder
1676 			 * to accommodate amsdu pad byte
1677 			 */
1678 			pad_byte_pholder =
1679 				RX_MONITOR_BUFFER_SIZE - (frag_size + DP_RX_MON_PACKET_OFFSET);
1680 			/*
1681 			 * We will come here only only three condition:
1682 			 * 1. Msdu with single Buffer
1683 			 * 2. First buffer in case MSDU is spread in multiple
1684 			 *    buffer
1685 			 * 3. Last buffer in case MSDU is spread in multiple
1686 			 *    buffer
1687 			 *
1688 			 *         First buffER | Last buffer
1689 			 * Case 1:      1       |     1
1690 			 * Case 2:      1       |     0
1691 			 * Case 3:      0       |     1
1692 			 *
1693 			 * In 3rd case only l2_hdr_padding byte will be Zero and
1694 			 * in other case, It will be 2 Bytes.
1695 			 */
1696 			if (msdu_meta->first_buffer)
1697 				l2_hdr_offset =
1698 					DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE;
1699 			else
1700 				l2_hdr_offset = DP_RX_MON_RAW_L2_HDR_PAD_BYTE;
1701 
1702 			if (msdu_meta->first_buffer) {
1703 				/* Adjust page frag offset to point to 802.11 header */
1704 				hdr_frag_size = qdf_nbuf_get_frag_size_by_idx(msdu_cur, frag_iter-1);
1705 				if (hdr_frag_size > (msdu_llc_len + amsdu_pad))
1706 					qdf_nbuf_trim_add_frag_size(msdu_cur, frag_iter - 1, -(hdr_frag_size - (msdu_llc_len + amsdu_pad)), 0);
1707 
1708 				/* Adjust page frag offset to appropriate after decap header */
1709 				frag_page_offset =
1710 					(decap_hdr_pull_bytes + l2_hdr_offset);
1711 				if (frag_size > (decap_hdr_pull_bytes + l2_hdr_offset)) {
1712 					qdf_nbuf_move_frag_page_offset(msdu_cur, frag_iter, frag_page_offset);
1713 					frag_size = frag_size - (l2_hdr_offset + decap_hdr_pull_bytes);
1714 				}
1715 
1716 
1717 				/*
1718 				 * Calculate new page offset and create hole
1719 				 * if amsdu_pad required.
1720 				 */
1721 				tot_msdu_len = frag_size;
1722 				/*
1723 				 * No amsdu padding required for first frame of
1724 				 * continuation buffer
1725 				 */
1726 				if (!msdu_meta->last_buffer) {
1727 					amsdu_pad = 0;
1728 					continue;
1729 				}
1730 			} else {
1731 				tot_msdu_len += frag_size;
1732 			}
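			/*
			 * Example (sizes hypothetical, not taken from this
			 * driver): with decap_hdr_pull_bytes = 14 for an
			 * 802.3 decap header and l2_hdr_offset = 2, the
			 * first buffer's frag offset advances by 16 bytes
			 * past the decap header and L2 pad, and frag_size
			 * shrinks by the same 16 bytes.
			 */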
1733 
1734 			/* We will reach this place in only two cases:
1735 			 * 1. Single buffer MSDU
1736 			 * 2. Last buffer of MSDU in case of multi-buffer MSDU
1737 			 */
1738 
1739 			/* This flag is used to identify msdu boundary */
1740 			prev_msdu_end_received = true;
1741 			/* Check size of buffer if amsdu padding required */
1742 			amsdu_pad = tot_msdu_len & 0x3;
1743 			amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;
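			/*
			 * Worked example (length hypothetical): for
			 * tot_msdu_len = 1510, 1510 & 0x3 = 2, so
			 * amsdu_pad = 4 - 2 = 2 bytes are needed to align
			 * the next A-MSDU subframe to a 4-byte boundary;
			 * a length already a multiple of 4 yields
			 * amsdu_pad = 0.
			 */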
1744 
1745 			/* Create placeholder if current buffer can
1746 			 * accommodate padding.
1747 			 */
1748 			if (amsdu_pad && (amsdu_pad <= pad_byte_pholder)) {
1749 				char *frag_addr_temp;
1750 
1751 				qdf_nbuf_trim_add_frag_size(msdu_cur,
1752 						frag_iter,
1753 						amsdu_pad, 0);
1754 				frag_addr_temp = (char *)qdf_nbuf_get_frag_addr(msdu_cur,
1755 						frag_iter);
1756 				frag_addr_temp = (frag_addr_temp +
1757 						qdf_nbuf_get_frag_size_by_idx(msdu_cur, frag_iter)) -
1758 					amsdu_pad;
1759 				qdf_mem_zero(frag_addr_temp, amsdu_pad);
1760 				amsdu_pad = 0;
1761 			}
1762 
1763 			/* reset tot_msdu_len */
1764 			tot_msdu_len = 0;
1765 		}
1766 		if (is_nbuf_head) {
1767 			msdu_cur = qdf_nbuf_get_ext_list(msdu_cur);
1768 			is_nbuf_head = false;
1769 		} else {
1770 			msdu_cur = qdf_nbuf_queue_next(msdu_cur);
1771 		}
1772 	}
1773 
1774 	return QDF_STATUS_SUCCESS;
1775 }
1776 
1777 static inline int
1778 dp_rx_mon_flush_packet_tlv(struct dp_pdev *pdev, void *buf, uint16_t end_offset,
1779 			   union dp_mon_desc_list_elem_t **desc_list,
1780 			   union dp_mon_desc_list_elem_t **tail)
1781 {
1782 	struct dp_soc *soc = pdev->soc;
1783 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1784 	struct dp_mon_pdev_be *mon_pdev_be =
1785 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1786 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1787 	uint16_t work_done = 0;
1788 	qdf_frag_t addr;
1789 	uint8_t *rx_tlv;
1790 	uint8_t *rx_tlv_start;
1791 	uint16_t tlv_status = HAL_TLV_STATUS_BUF_DONE;
1792 	struct hal_rx_ppdu_info *ppdu_info;
1793 	uint32_t cookie_2;
1794 
1795 	if (!buf)
1796 		return work_done;
1797 
1798 	ppdu_info = &mon_pdev->ppdu_info;
1799 	if (!ppdu_info) {
1800 		dp_mon_debug("ppdu_info is NULL pdev: %pK", pdev);
1801 		return work_done;
1802 	}
1803 	qdf_mem_zero(ppdu_info, sizeof(struct hal_rx_ppdu_info));
1804 	rx_tlv = buf;
1805 	rx_tlv_start = buf;
1806 
1807 	do {
1808 		tlv_status = hal_rx_status_get_tlv_info(rx_tlv,
1809 							ppdu_info,
1810 							pdev->soc->hal_soc,
1811 							buf);
1812 
1813 		if (tlv_status == HAL_TLV_STATUS_MON_BUF_ADDR) {
1814 			struct dp_mon_desc *mon_desc;
1815 			unsigned long long desc = ppdu_info->packet_info.sw_cookie;
1816 
1817 			cookie_2 = DP_MON_GET_COOKIE(desc);
1818 			mon_desc = DP_MON_GET_DESC(desc);
1819 
1820 			qdf_assert_always(mon_desc);
1821 
1822 			if (mon_desc->cookie_2 != cookie_2) {
1823 				mon_pdev->rx_mon_stats.dup_mon_sw_desc++;
1824 				qdf_err("duplicate cookie found mon_desc:%pK", mon_desc);
1825 				qdf_assert_always(0);
1826 			}
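			/*
			 * Reading aid (bit split assumed; see the
			 * DP_MON_GET_DESC()/DP_MON_GET_COOKIE() macros for
			 * the authoritative layout): the 64-bit sw_cookie
			 * packs both the dp_mon_desc pointer and a small
			 * generation cookie. Comparing the unpacked cookie
			 * against mon_desc->cookie_2 rejects stale
			 * descriptors whose memory was recycled after HW
			 * latched the address.
			 */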
1827 
1828 			/* WAR: sometimes duplicate pkt desc are received
1829 			 * from HW, this check gracefully handles
1830 			 * such cases.
1831 			 */
1832 			if ((mon_desc == mon_pdev_be->prev_rxmon_pkt_desc) &&
1833 			    (mon_desc->cookie ==
1834 			     mon_pdev_be->prev_rxmon_pkt_cookie)) {
1835 				dp_mon_err("duplicate pkt desc found mon_pdev: %pK mon_desc: %pK cookie: %d",
1836 					   mon_pdev, mon_desc,
1837 					   mon_desc->cookie);
1838 				mon_pdev->rx_mon_stats.dup_mon_buf_cnt++;
1839 				goto end;
1840 			}
1841 			mon_pdev_be->prev_rxmon_pkt_desc = mon_desc;
1842 			mon_pdev_be->prev_rxmon_pkt_cookie = mon_desc->cookie;
1843 
1844 			addr = mon_desc->buf_addr;
1845 
1846 			if (!mon_desc->unmapped) {
1847 				qdf_mem_unmap_page(soc->osdev,
1848 						   (qdf_dma_addr_t)mon_desc->paddr,
1849 						   DP_MON_DATA_BUFFER_SIZE,
1850 						   QDF_DMA_FROM_DEVICE);
1851 				mon_desc->unmapped = 1;
1852 			}
1853 			dp_mon_add_to_free_desc_list(desc_list, tail, mon_desc);
1854 			work_done++;
1855 
1856 			if (addr) {
1857 				qdf_frag_free(addr);
1858 				DP_STATS_INC(mon_soc, frag_free, 1);
1859 			}
1860 		}
1861 
1862 end:
1863 		rx_tlv = hal_rx_status_get_next_tlv(rx_tlv, 1);
1864 
1865 		if ((rx_tlv - rx_tlv_start) >= (end_offset + 1))
1866 			break;
1867 
1868 	} while ((tlv_status == HAL_TLV_STATUS_PPDU_NOT_DONE) ||
1869 		 (tlv_status == HAL_TLV_STATUS_HEADER) ||
1870 		 (tlv_status == HAL_TLV_STATUS_MPDU_END) ||
1871 		 (tlv_status == HAL_TLV_STATUS_MSDU_END) ||
1872 		 (tlv_status == HAL_TLV_STATUS_MON_BUF_ADDR) ||
1873 		 (tlv_status == HAL_TLV_STATUS_MPDU_START));
1874 
1875 	return work_done;
1876 }
1877 
1878 #endif
1879 
1880 /**
1881  * dp_rx_mon_flush_status_buf_queue() - Flush status buffer queue
1882  *
1883  * @pdev: DP pdev handle
1884  *
1885  * Return: void
1886  */
1887 static inline void
1888 dp_rx_mon_flush_status_buf_queue(struct dp_pdev *pdev)
1889 {
1890 	struct dp_soc *soc = pdev->soc;
1891 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1892 	struct dp_mon_pdev_be *mon_pdev_be =
1893 		dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1894 	union dp_mon_desc_list_elem_t *desc_list = NULL;
1895 	union dp_mon_desc_list_elem_t *tail = NULL;
1896 	struct dp_mon_desc *mon_desc;
1897 	uint16_t idx;
1898 	void *buf;
1899 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1900 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1901 	struct dp_mon_desc_pool *rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
1902 	uint16_t work_done = 0;
1903 	uint16_t status_buf_count;
1904 	uint16_t end_offset = 0;
1905 
1906 	if (!mon_pdev_be->desc_count) {
1907 		dp_mon_info("number of status buffers is zero: %pK", pdev);
1908 		return;
1909 	}
1910 
1911 	status_buf_count = mon_pdev_be->desc_count;
1912 	for (idx = 0; idx < status_buf_count; idx++) {
1913 		mon_desc = mon_pdev_be->status[idx];
1914 		if (!mon_desc) {
1915 			qdf_assert_always(0);
1916 			return;
1917 		}
1918 
1919 		buf = mon_desc->buf_addr;
1920 		end_offset = mon_desc->end_offset;
1921 
1922 		dp_mon_add_to_free_desc_list(&desc_list, &tail, mon_desc);
1923 		work_done++;
1924 
1925 		work_done += dp_rx_mon_flush_packet_tlv(pdev, buf, end_offset,
1926 							&desc_list, &tail);
1927 
1928 		/* set status buffer pointer to NULL */
1929 		mon_pdev_be->status[idx] = NULL;
1930 		mon_pdev_be->desc_count--;
1931 
1932 		qdf_frag_free(buf);
1933 		DP_STATS_INC(mon_soc, frag_free, 1);
1934 	}
1935 
1936 	if (work_done) {
1937 		mon_pdev->rx_mon_stats.mon_rx_bufs_replenished_dest +=
1938 			work_done;
1939 		if (desc_list)
1940 			dp_mon_add_desc_list_to_free_list(soc,
1941 							  &desc_list, &tail,
1942 							  rx_mon_desc_pool);
1943 	}
1944 }
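/*
 * Note on the pattern above (a reading aid, not driver source): reaped
 * descriptors are first chained into a local list via
 * dp_mon_add_to_free_desc_list(&desc_list, &tail, ...) and handed back
 * to the rx_desc_mon pool in a single batch at the end, so the pool is
 * touched once per flush rather than once per descriptor.
 */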
1945 
1946 /**
1947  * dp_rx_mon_handle_flush_n_truncated_ppdu() - Handle flush and truncated ppdu
1948  *
1949  * @soc: DP soc handle
1950  * @pdev: pdev handle
1951  * @mon_desc: mon sw desc
1952  */
1953 static inline void
1954 dp_rx_mon_handle_flush_n_truncated_ppdu(struct dp_soc *soc,
1955 				       struct dp_pdev *pdev,
1956 				       struct dp_mon_desc *mon_desc)
1957 {
1958 	union dp_mon_desc_list_elem_t *desc_list = NULL;
1959 	union dp_mon_desc_list_elem_t *tail = NULL;
1960 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1961 	struct dp_mon_soc_be *mon_soc_be =
1962 			dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1963 	struct dp_mon_desc_pool *rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
1964 	uint16_t work_done;
1965 	void *buf;
1966 	uint16_t end_offset = 0;
1967 
1968 	/* Flush status buffers in queue */
1969 	dp_rx_mon_flush_status_buf_queue(pdev);
1970 	buf = mon_desc->buf_addr;
1971 	end_offset = mon_desc->end_offset;
1972 	dp_mon_add_to_free_desc_list(&desc_list, &tail, mon_desc);
1973 	work_done = 1;
1974 	work_done += dp_rx_mon_flush_packet_tlv(pdev, buf, end_offset,
1975 						&desc_list, &tail);
1976 	if (buf) {
1977 		qdf_frag_free(buf);
1978 		DP_STATS_INC(mon_soc, frag_free, 1);
1979 	}
1980 
1981 	if (desc_list)
1982 		dp_mon_add_desc_list_to_free_list(soc, &desc_list, &tail,
1983 						  rx_mon_desc_pool);
1984 }
1985 
1986 void dp_rx_mon_append_nbuf(qdf_nbuf_t nbuf, qdf_nbuf_t tmp_nbuf)
1987 {
1988 	qdf_nbuf_t last_nbuf;
1989 
1990 	/*
1991 	 * If nbuf does not have fraglist, then append tmp_nbuf as fraglist,
1992 	 * else append tmp_nbuf as next of last_nbuf present in nbuf fraglist.
1993 	 */
1994 	if (!qdf_nbuf_has_fraglist(nbuf))
1995 		qdf_nbuf_append_ext_list(nbuf, tmp_nbuf,
1996 					 qdf_nbuf_len(tmp_nbuf));
1997 	else {
1998 		last_nbuf = qdf_nbuf_get_last_frag_list_nbuf(nbuf);
1999 
2000 		if (qdf_likely(last_nbuf))
2001 			qdf_nbuf_set_next(last_nbuf, tmp_nbuf);
2002 	}
2003 }
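/*
 * Illustration (a reading aid, not driver source): after two appends
 * the chain looks like
 *   nbuf --ext_list--> tmp_nbuf1 --next--> tmp_nbuf2
 * i.e. the first append attaches tmp_nbuf as the head of nbuf's
 * fraglist, and subsequent appends link at the current tail found via
 * qdf_nbuf_get_last_frag_list_nbuf().
 */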
2004 
2005 static
2006 uint8_t dp_rx_mon_process_tlv_status(struct dp_pdev *pdev,
2007 				     struct hal_rx_ppdu_info *ppdu_info,
2008 				     void *status_frag,
2009 				     uint16_t tlv_status,
2010 				     union dp_mon_desc_list_elem_t **desc_list,
2011 				     union dp_mon_desc_list_elem_t **tail)
2012 {
2013 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
2014 	uint8_t user_id = ppdu_info->user_id;
2015 	uint8_t num_buf_reaped = 0;
2016 	bool rx_hdr_valid = true;
2017 
2018 	if (!mon_pdev->monitor_configured &&
2019 	    !dp_lite_mon_is_rx_enabled(mon_pdev)) {
2020 		return num_buf_reaped;
2021 	}
2022 
2023 	/* If the user id or rx header length is invalid, drop this
2024 	 * mpdu. However, we still have to honor the buffer address TLV
2025 	 * for this mpdu to free any associated packet buffer.
2026 	 */
2027 	if (qdf_unlikely(user_id >= HAL_MAX_UL_MU_USERS ||
2028 			 ppdu_info->hdr_len > DP_RX_MON_MAX_RX_HEADER_LEN))
2029 		rx_hdr_valid = false;
2030 
2031 	switch (tlv_status) {
2032 	case HAL_TLV_STATUS_HEADER: {
2033 		if (qdf_unlikely(!rx_hdr_valid)) {
2034 			dp_mon_debug("rx hdr invalid userid: %d, len: %d ",
2035 				     user_id, ppdu_info->hdr_len);
2036 			mon_pdev->rx_mon_stats.rx_hdr_invalid_cnt++;
2037 			return num_buf_reaped;
2038 		}
2039 		dp_rx_mon_handle_rx_hdr(pdev, ppdu_info, status_frag);
2040 	}
2041 	break;
2042 	case HAL_TLV_STATUS_MON_BUF_ADDR:
2043 	{
2044 		num_buf_reaped = dp_rx_mon_handle_mon_buf_addr(pdev, ppdu_info, desc_list, tail);
2045 	}
2046 	break;
2047 	case HAL_TLV_STATUS_MSDU_END:
2048 	{
2049 		if (qdf_unlikely(!rx_hdr_valid))
2050 			break;
2051 
2052 		dp_rx_mon_handle_msdu_end(pdev, ppdu_info);
2053 	}
2054 	break;
2055 	case HAL_TLV_STATUS_MPDU_START:
2056 	{
2057 		if (qdf_unlikely(!rx_hdr_valid))
2058 			break;
2059 
2060 		dp_rx_mon_handle_mpdu_start(ppdu_info);
2061 	}
2062 	break;
2063 	case HAL_TLV_STATUS_MPDU_END:
2064 	{
2065 		if (qdf_unlikely(!rx_hdr_valid))
2066 			break;
2067 
2068 		dp_rx_mon_handle_mpdu_end(ppdu_info);
2069 	}
2070 	break;
2071 	case HAL_TLV_STATUS_MON_DROP:
2072 	{
2073 		mon_pdev->rx_mon_stats.ppdu_drop_cnt +=
2074 			ppdu_info->drop_cnt.ppdu_drop_cnt;
2075 		mon_pdev->rx_mon_stats.mpdu_drop_cnt +=
2076 			ppdu_info->drop_cnt.mpdu_drop_cnt;
2077 		mon_pdev->rx_mon_stats.end_of_ppdu_drop_cnt +=
2078 			ppdu_info->drop_cnt.end_of_ppdu_drop_cnt;
2079 		mon_pdev->rx_mon_stats.tlv_drop_cnt +=
2080 			ppdu_info->drop_cnt.tlv_drop_cnt;
2081 	}
2082 	break;
2083 	}
2084 	return num_buf_reaped;
2085 }
2086 
2087 /**
2088  * dp_rx_mon_process_status_tlv() - Handle mon status process TLV
2089  *
2090  * @pdev: DP pdev handle
2091  *
2092  * Return: ppdu_info pointer on success, NULL on failure
2093  */
2094 static inline struct hal_rx_ppdu_info *
2095 dp_rx_mon_process_status_tlv(struct dp_pdev *pdev)
2096 {
2097 	struct dp_soc *soc = pdev->soc;
2098 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
2099 	struct dp_mon_pdev_be *mon_pdev_be =
2100 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
2101 	union dp_mon_desc_list_elem_t *desc_list = NULL;
2102 	union dp_mon_desc_list_elem_t *tail = NULL;
2103 	struct dp_mon_desc *mon_desc;
2104 	uint16_t idx;
2105 	void *buf;
2106 	struct hal_rx_ppdu_info *ppdu_info;
2107 	uint8_t *rx_tlv;
2108 	uint8_t *rx_tlv_start;
2109 	uint16_t end_offset = 0;
2110 	uint16_t tlv_status = HAL_TLV_STATUS_BUF_DONE;
2111 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
2112 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
2113 	struct dp_mon_desc_pool *rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
2114 	uint16_t work_done = 0;
2115 	uint16_t status_buf_count;
2116 
2117 	if (!mon_pdev_be->desc_count) {
2118 		dp_mon_err("number of status buffers is zero: %pK", pdev);
2119 		return NULL;
2120 	}
2121 
2122 	ppdu_info = dp_rx_mon_get_ppdu_info(mon_pdev);
2123 
2124 	if (!ppdu_info) {
2125 		dp_mon_debug("ppdu_info malloc failed pdev: %pK", pdev);
2126 		dp_rx_mon_flush_status_buf_queue(pdev);
2127 		return NULL;
2128 	}
2129 
2130 	mon_pdev->rx_mon_stats.total_ppdu_info_alloc++;
2131 
2132 	status_buf_count = mon_pdev_be->desc_count;
2133 	for (idx = 0; idx < status_buf_count; idx++) {
2134 		mon_desc = mon_pdev_be->status[idx];
2135 		if (!mon_desc) {
2136 			return NULL;
2137 		}
2138 
2139 		buf = mon_desc->buf_addr;
2140 		end_offset = mon_desc->end_offset;
2141 
2142 		dp_mon_add_to_free_desc_list(&desc_list, &tail, mon_desc);
2143 		work_done++;
2144 
2145 		rx_tlv = buf;
2146 		rx_tlv_start = buf;
2147 
2148 		dp_mon_record_clear_buffer(mon_pdev_be);
2149 
2150 		do {
2151 			tlv_status = hal_rx_status_get_tlv_info(rx_tlv,
2152 								ppdu_info,
2153 								pdev->soc->hal_soc,
2154 								buf);
2155 			dp_mon_record_tlv(mon_pdev_be, ppdu_info);
2156 			work_done += dp_rx_mon_process_tlv_status(pdev,
2157 								  ppdu_info,
2158 								  buf,
2159 								  tlv_status,
2160 								  &desc_list,
2161 								  &tail);
2162 			rx_tlv = hal_rx_status_get_next_tlv(rx_tlv, 1);
2163 
2164 			/* HW provides end_offset (how many bytes HW DMA'ed)
2165 			 * as part of descriptor, use this as delimiter for
2166 			 * status buffer
2167 			 */
2168 			if ((rx_tlv - rx_tlv_start) >= (end_offset + 1))
2169 				break;
2170 
2171 		} while ((tlv_status == HAL_TLV_STATUS_PPDU_NOT_DONE) ||
2172 			 (tlv_status == HAL_TLV_STATUS_HEADER) ||
2173 			 (tlv_status == HAL_TLV_STATUS_MPDU_END) ||
2174 			 (tlv_status == HAL_TLV_STATUS_MSDU_END) ||
2175 			 (tlv_status == HAL_TLV_STATUS_MON_BUF_ADDR) ||
2176 			 (tlv_status == HAL_TLV_STATUS_MPDU_START));
2177 
2178 		/* set status buffer pointer to NULL */
2179 		mon_pdev_be->status[idx] = NULL;
2180 		mon_pdev_be->desc_count--;
2181 
2182 		qdf_frag_free(buf);
2183 		DP_STATS_INC(mon_soc, frag_free, 1);
2184 		mon_pdev->rx_mon_stats.status_buf_count++;
2185 		dp_mon_record_index_update(mon_pdev_be);
2186 	}
2187 
2188 	dp_mon_rx_stats_update_rssi_dbm_params(mon_pdev, ppdu_info);
2189 	if (work_done) {
2190 		mon_pdev->rx_mon_stats.mon_rx_bufs_replenished_dest +=
2191 			work_done;
2192 		if (desc_list)
2193 			dp_mon_add_desc_list_to_free_list(soc,
2194 							  &desc_list, &tail,
2195 							  rx_mon_desc_pool);
2196 	}
2197 
2198 	ppdu_info->rx_status.tsft = ppdu_info->rx_status.tsft +
2199 				    pdev->timestamp.mlo_offset_lo_us +
2200 				    ((uint64_t)pdev->timestamp.mlo_offset_hi_us
2201 				    << 32);
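	/*
	 * Worked example (offsets hypothetical): with
	 * mlo_offset_hi_us = 0x1 and mlo_offset_lo_us = 0x10, the
	 * adjustment is (0x1ULL << 32) + 0x10 = 0x100000010 us added
	 * on top of the TSF reported in the PPDU.
	 */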
2202 
2203 	return ppdu_info;
2204 }
2205 
2206 /**
2207  * dp_mon_pdev_flush_desc() - Flush status and packet desc during deinit
2208  *
2209  * @pdev: DP pdev handle
2210  *
2211  * Return: QDF_STATUS
2212  */
2213 static QDF_STATUS dp_mon_pdev_flush_desc(struct dp_pdev *pdev)
2214 {
2215 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
2216 	struct dp_mon_pdev_be *mon_pdev_be;
2217 
2218 	if (qdf_unlikely(!mon_pdev)) {
2219 		dp_mon_debug("monitor pdev is NULL");
2220 		return QDF_STATUS_E_FAILURE;
2221 	}
2222 
2223 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
2224 
2225 	qdf_spin_lock_bh(&mon_pdev->mon_lock);
2226 
2227 	if (mon_pdev_be->desc_count) {
2228 		mon_pdev->rx_mon_stats.pending_desc_count +=
2229 						mon_pdev_be->desc_count;
2230 		dp_rx_mon_flush_status_buf_queue(pdev);
2231 	}
2232 
2233 	qdf_spin_unlock_bh(&mon_pdev->mon_lock);
2234 
2235 	return QDF_STATUS_SUCCESS;
2236 }
2237 
2238 #ifdef WLAN_FEATURE_11BE_MLO
2239 #define DP_PEER_ID_MASK 0x3FFF
2240 /**
2241  * dp_rx_mon_update_peer_id() - Update sw_peer_id with link peer_id
2242  *
2243  * @pdev: DP pdev handle
2244  * @ppdu_info: HAL PPDU Info buffer
2245  *
2246  * Return: none
2247  */
2248 static inline
2249 void dp_rx_mon_update_peer_id(struct dp_pdev *pdev,
2250 			      struct hal_rx_ppdu_info *ppdu_info)
2251 {
2252 	uint32_t i;
2253 	uint16_t peer_id;
2254 	struct dp_soc *soc = pdev->soc;
2255 	uint32_t num_users = ppdu_info->com_info.num_users;
2256 
2257 	for (i = 0; i < num_users; i++) {
2258 		peer_id = ppdu_info->rx_user_status[i].sw_peer_id;
2259 		if (peer_id == HTT_INVALID_PEER)
2260 			continue;
2261 		/*
2262 		 * +---------------------------------------------------------------------+
2263 		 * | 15 | 14 | 13 | 12 | 11 | 10 | 9 | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
2264 		 * +---------------------------------------------------------------------+
2265 		 * | CHIP ID | ML |                     PEER ID                          |
2266 		 * +---------------------------------------------------------------------+
2267 		 */
2268 		peer_id &= DP_PEER_ID_MASK;
2269 		peer_id = dp_get_link_peer_id_by_lmac_id(soc, peer_id,
2270 							 pdev->lmac_id);
2271 		ppdu_info->rx_user_status[i].sw_peer_id = peer_id;
2272 	}
2273 }
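/*
 * Worked example (id hypothetical): sw_peer_id = 0x8005 carries
 * CHIP ID = 2 in bits 15:14; masking with DP_PEER_ID_MASK (0x3FFF)
 * leaves 0x0005, which dp_get_link_peer_id_by_lmac_id() then
 * translates to the link peer id for this pdev's lmac.
 */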
2274 #else
2275 static inline
2276 void dp_rx_mon_update_peer_id(struct dp_pdev *pdev,
2277 			      struct hal_rx_ppdu_info *ppdu_info)
2278 {
2279 }
2280 #endif
2281 
2282 /*
2283  * HAL_RX_PKT_TYPE_11A     0 -> CDP_PKT_TYPE_OFDM
2284  * HAL_RX_PKT_TYPE_11B     1 -> CDP_PKT_TYPE_CCK
2285  * HAL_RX_PKT_TYPE_11N     2 -> CDP_PKT_TYPE_HT
2286  * HAL_RX_PKT_TYPE_11AC    3 -> CDP_PKT_TYPE_VHT
2287  * HAL_RX_PKT_TYPE_11AX    4 -> CDP_PKT_TYPE_HE
2288  * HAL_RX_PKT_TYPE_11BE    6 -> CDP_PKT_TYPE_EHT
2289  */
2290 
2291 static uint32_t const cdp_preamble_type_map[] = {
2292 	CDP_PKT_TYPE_OFDM,
2293 	CDP_PKT_TYPE_CCK,
2294 	CDP_PKT_TYPE_HT,
2295 	CDP_PKT_TYPE_VHT,
2296 	CDP_PKT_TYPE_HE,
2297 	CDP_PKT_TYPE_NO_SUP,
2298 #ifdef WLAN_FEATURE_11BE
2299 	CDP_PKT_TYPE_EHT,
2300 #endif
2301 	CDP_PKT_TYPE_MAX,
2302 };
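/*
 * Lookup example: cdp_preamble_type_map[HAL_RX_PKT_TYPE_11AX] (index 4)
 * yields CDP_PKT_TYPE_HE. Index 5 is a hole in the HAL enumeration (per
 * the mapping comment above, where 11BE is 6), so it is filled with
 * CDP_PKT_TYPE_NO_SUP to keep CDP_PKT_TYPE_EHT aligned at index 6.
 */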
2303 
2304 /*
2305  * HAL_RX_RECEPTION_TYPE_SU       -> CDP_RX_TYPE_SU
2306  * HAL_RX_RECEPTION_TYPE_MU_MIMO  -> CDP_RX_TYPE_MU_MIMO
2307  * HAL_RX_RECEPTION_TYPE_OFDMA    -> CDP_RX_TYPE_MU_OFDMA
2308  * HAL_RX_RECEPTION_TYPE_MU_OFDMA -> CDP_RX_TYPE_MU_OFDMA_MIMO
2309  */
2310 static uint32_t const cdp_reception_type_map[] = {
2311 	CDP_RX_TYPE_SU,
2312 	CDP_RX_TYPE_MU_MIMO,
2313 	CDP_RX_TYPE_MU_OFDMA,
2314 	CDP_RX_TYPE_MU_OFDMA_MIMO,
2315 };
2316 
2317 static uint32_t const cdp_mu_dl_up_map[] = {
2318 	CDP_MU_TYPE_DL,
2319 	CDP_MU_TYPE_UL,
2320 };
2321 
2322 static inline void
2323 dp_rx_mu_stats_update(
2324 	struct hal_rx_ppdu_info *ppdu_info,
2325 	struct cdp_pdev_mon_stats *rx_mon_sts,
2326 	uint32_t preamble_type,
2327 	uint32_t  recept_type,
2328 	uint32_t  mu_dl_ul,
2329 	uint32_t i
2330 )
2331 {
2332 	struct mon_rx_user_status *rx_user_status;
2333 
2334 	rx_user_status =  &ppdu_info->rx_user_status[i];
2335 	rx_mon_sts->mpdu_cnt_fcs_ok[preamble_type][recept_type][mu_dl_ul][i]
2336 			+= rx_user_status->mpdu_cnt_fcs_ok;
2337 	rx_mon_sts->mpdu_cnt_fcs_err[preamble_type][recept_type][mu_dl_ul][i]
2338 			+= rx_user_status->mpdu_cnt_fcs_err;
2339 }
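/*
 * Indexing illustration (a reading aid): an FCS-passing MPDU received
 * in an HE downlink MU-MIMO PPDU for user 0 is accumulated in
 * mpdu_cnt_fcs_ok[CDP_PKT_TYPE_HE][CDP_RX_TYPE_MU_MIMO][CDP_MU_TYPE_DL][0].
 */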
2340 
2341 static inline void
2342 dp_rx_he_ppdu_stats_update(
2343 	struct cdp_pdev_mon_stats *stats,
2344 	struct hal_rx_u_sig_info *u_sig
2345 )
2346 {
2347 	stats->ppdu_eht_type_mode[u_sig->ppdu_type_comp_mode][u_sig->ul_dl]++;
2348 }
2349 
2350 static inline void
2351 dp_rx_he_ppdu_stats(struct dp_pdev *pdev, struct hal_rx_ppdu_info *ppdu_info)
2352 {
2353 	struct dp_mon_pdev *mon_pdev;
2354 	struct cdp_pdev_mon_stats *rx_mon_stats;
2355 
2356 	mon_pdev = pdev->monitor_pdev;
2357 	rx_mon_stats = &mon_pdev->rx_mon_stats;
2358 
2359 	if (ppdu_info->u_sig_info.ppdu_type_comp_mode < CDP_EHT_TYPE_MODE_MAX &&
2360 	    ppdu_info->u_sig_info.ul_dl < CDP_MU_TYPE_MAX)
2361 		dp_rx_he_ppdu_stats_update(
2362 			rx_mon_stats,
2363 			&ppdu_info->u_sig_info);
2364 	else
2365 		qdf_assert(0);
2366 }
2367 
2368 static inline void
2369 dp_rx_mu_stats(struct dp_pdev *pdev, struct hal_rx_ppdu_info *ppdu_info)
2370 {
2371 	struct dp_mon_pdev *mon_pdev;
2372 	struct cdp_pdev_mon_stats *rx_mon_stats;
2373 	struct mon_rx_status *rx_status;
2374 	uint32_t preamble_type, reception_type, mu_dl_ul, num_users, i;
2375 
2376 	mon_pdev = pdev->monitor_pdev;
2377 	rx_mon_stats = &mon_pdev->rx_mon_stats;
2378 	rx_status = &ppdu_info->rx_status;
2379 
2380 	num_users = ppdu_info->com_info.num_users;
2381 
2382 	if (rx_status->preamble_type < CDP_PKT_TYPE_MAX)
2383 		preamble_type = cdp_preamble_type_map[rx_status->preamble_type];
2384 	else
2385 		preamble_type = CDP_PKT_TYPE_NO_SUP;
2386 
2387 	reception_type = cdp_reception_type_map[rx_status->reception_type];
2388 	mu_dl_ul = cdp_mu_dl_up_map[rx_status->mu_dl_ul];
2389 
2390 	for (i = 0; i < num_users; i++) {
2391 		if (i >= CDP_MU_SNIF_USER_MAX)
2392 			return;
2393 
2394 		dp_rx_mu_stats_update(ppdu_info, rx_mon_stats, preamble_type,
2395 				      reception_type, mu_dl_ul, i);
2396 	}
2397 
2398 	if (rx_status->eht_flags)
2399 		dp_rx_he_ppdu_stats(pdev, ppdu_info);
2400 }
2401 
2402 static inline uint32_t
2403 dp_rx_mon_srng_process_2_0(struct dp_soc *soc, struct dp_intr *int_ctx,
2404 			   uint32_t mac_id, uint32_t quota)
2405 {
2406 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
2407 	struct dp_mon_pdev *mon_pdev;
2408 	struct dp_mon_pdev_be *mon_pdev_be;
2409 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
2410 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
2411 	struct dp_mon_desc_pool *rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
2412 	hal_soc_handle_t hal_soc = soc->hal_soc;
2413 	void *rx_mon_dst_ring_desc;
2414 	void *mon_dst_srng;
2415 	uint32_t work_done = 0;
2416 	struct hal_rx_ppdu_info *ppdu_info = NULL;
2417 	QDF_STATUS status;
2418 	uint32_t cookie_2;
2419 	if (!pdev || !hal_soc) {
2420 		dp_mon_err("%pK: pdev or hal_soc is null, mac_id = %d",
2421 			   soc, mac_id);
2422 		return work_done;
2423 	}
2424 
2425 	mon_pdev = pdev->monitor_pdev;
2426 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
2427 	mon_dst_srng = soc->rxdma_mon_dst_ring[mac_id].hal_srng;
2428 
2429 	if (!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)) {
2430 		dp_mon_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK",
2431 			   soc, mon_dst_srng);
2432 		return work_done;
2433 	}
2434 
2435 	qdf_spin_lock_bh(&mon_pdev->mon_lock);
2436 
2437 	if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, mon_dst_srng))) {
2438 		dp_mon_err("%s %d : HAL Mon Dest Ring access Failed -- %pK",
2439 			   __func__, __LINE__, mon_dst_srng);
2440 		qdf_spin_unlock_bh(&mon_pdev->mon_lock);
2441 		return work_done;
2442 	}
2443 
2444 	while (qdf_likely((rx_mon_dst_ring_desc =
2445 			  (void *)hal_srng_dst_peek(hal_soc, mon_dst_srng))
2446 				&& quota--)) {
2447 		struct hal_mon_desc hal_mon_rx_desc = {0};
2448 		struct dp_mon_desc *mon_desc;
2449 		unsigned long long desc;
2450 		hal_be_get_mon_dest_status(soc->hal_soc,
2451 					   rx_mon_dst_ring_desc,
2452 					   &hal_mon_rx_desc);
2453 		/* If it's an empty descriptor, skip processing
2454 		 * and move on to the next HW desc
2455 		 */
2456 		if (hal_mon_rx_desc.empty_descriptor == 1) {
2457 			dp_mon_debug("empty descriptor found mon_pdev: %pK",
2458 				     mon_pdev);
2459 			rx_mon_dst_ring_desc =
2460 				hal_srng_dst_get_next(hal_soc, mon_dst_srng);
2461 			dp_rx_mon_update_drop_cnt(mon_pdev, &hal_mon_rx_desc);
2462 			continue;
2463 		}
2464 		desc = hal_mon_rx_desc.buf_addr;
2465 		cookie_2 = DP_MON_GET_COOKIE(desc);
2466 		mon_desc = DP_MON_GET_DESC(desc);
2467 
2468 		qdf_assert_always(mon_desc);
2469 
2470 		if (mon_desc->cookie_2 != cookie_2) {
2471 			mon_pdev->rx_mon_stats.dup_mon_sw_desc++;
2472 			qdf_err("duplicate cookie found mon_desc:%pK", mon_desc);
2473 			qdf_assert_always(0);
2474 		}
2475 
2476 		if ((mon_desc == mon_pdev_be->prev_rxmon_desc) &&
2477 		    (mon_desc->cookie == mon_pdev_be->prev_rxmon_cookie)) {
2478 			dp_mon_err("duplicate descriptor found mon_pdev: %pK mon_desc: %pK cookie: %d",
2479 				   mon_pdev, mon_desc, mon_desc->cookie);
2480 			mon_pdev->rx_mon_stats.dup_mon_buf_cnt++;
2481 			hal_srng_dst_get_next(hal_soc, mon_dst_srng);
2482 			continue;
2483 		}
2484 		mon_pdev_be->prev_rxmon_desc = mon_desc;
2485 		mon_pdev_be->prev_rxmon_cookie = mon_desc->cookie;
2486 
2487 		if (!mon_desc->unmapped) {
2488 			qdf_mem_unmap_page(soc->osdev, mon_desc->paddr,
2489 					   rx_mon_desc_pool->buf_size,
2490 					   QDF_DMA_FROM_DEVICE);
2491 			mon_desc->unmapped = 1;
2492 		}
2493 		mon_desc->end_offset = hal_mon_rx_desc.end_offset;
2494 
2495 		/* Content of flushed and truncated status buffers
2496 		 * needs to be discarded
2497 		 */
2498 		if (hal_mon_rx_desc.end_reason == HAL_MON_FLUSH_DETECTED ||
2499 		    hal_mon_rx_desc.end_reason == HAL_MON_PPDU_TRUNCATED) {
2500 			dp_mon_debug("end_reason: %d mon_pdev: %pK",
2501 				     hal_mon_rx_desc.end_reason, mon_pdev);
2502 			mon_pdev->rx_mon_stats.status_ppdu_drop++;
2503 			dp_rx_mon_handle_flush_n_truncated_ppdu(soc,
2504 							       pdev,
2505 							       mon_desc);
2506 			rx_mon_dst_ring_desc = hal_srng_dst_get_next(hal_soc,
2507 							mon_dst_srng);
2508 			continue;
2509 		}
2510 		if (mon_pdev_be->desc_count >= DP_MON_MAX_STATUS_BUF)
2511 			qdf_assert_always(0);
2512 
2513 		mon_pdev_be->status[mon_pdev_be->desc_count++] = mon_desc;
2514 
2515 		rx_mon_dst_ring_desc = hal_srng_dst_get_next(hal_soc, mon_dst_srng);
2516 
2517 		dp_rx_process_pktlog_be(soc, pdev, ppdu_info,
2518 					mon_desc->buf_addr,
2519 					hal_mon_rx_desc.end_offset);
2520 
2521 		if (hal_mon_rx_desc.end_reason == HAL_MON_STATUS_BUFFER_FULL)
2522 			continue;
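		/*
		 * Sketch of the accumulation scheme: one PPDU's status
		 * TLVs may span several status buffers. Every buffer
		 * ending with HAL_MON_STATUS_BUFFER_FULL is only parked
		 * in mon_pdev_be->status[] above; once a buffer arrives
		 * with any other end reason, the whole parked set is
		 * parsed as a single PPDU below.
		 */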
2523 
2524 		mon_pdev->rx_mon_stats.status_ppdu_done++;
2525 
2526 		ppdu_info = dp_rx_mon_process_status_tlv(pdev);
2527 
2528 		if (ppdu_info) {
2529 			mon_pdev->rx_mon_stats.start_user_info_cnt +=
2530 				ppdu_info->start_user_info_cnt;
2531 			ppdu_info->start_user_info_cnt = 0;
2532 
2533 			mon_pdev->rx_mon_stats.end_user_stats_cnt +=
2534 				ppdu_info->end_user_stats_cnt;
2535 			ppdu_info->end_user_stats_cnt = 0;
2536 
2537 			dp_rx_mon_update_peer_id(pdev, ppdu_info);
2538 			dp_rx_mu_stats(pdev, ppdu_info);
2539 		}
2540 
2541 		/* Call enhanced stats update API */
2542 		if (mon_pdev->enhanced_stats_en && ppdu_info)
2543 			dp_rx_handle_ppdu_stats(soc, pdev, ppdu_info);
2544 		else if (dp_cfr_rcc_mode_status(pdev) && ppdu_info)
2545 			dp_rx_handle_cfr(soc, pdev, ppdu_info);
2546 
2547 		dp_rx_mon_update_user_ctrl_frame_stats(pdev, ppdu_info);
2548 
2549 		status = dp_rx_mon_add_ppdu_info_to_wq(pdev, ppdu_info);
2550 		if (status != QDF_STATUS_SUCCESS) {
2551 			if (ppdu_info)
2552 				dp_rx_mon_free_ppdu_info(pdev, ppdu_info);
2553 		}
2554 
2555 		work_done++;
2556 
2557 		/* desc_count should be zero after PPDU status processing */
2558 		if (mon_pdev_be->desc_count > 0)
2559 			qdf_assert_always(0);
2560 
2561 		mon_pdev_be->desc_count = 0;
2562 	}
2563 	dp_rx_srng_access_end(int_ctx, soc, mon_dst_srng);
2564 
2565 	qdf_spin_unlock_bh(&mon_pdev->mon_lock);
2566 	dp_mon_info("mac_id: %d, work_done:%d", mac_id, work_done);
2567 	return work_done;
2568 }
2569 
2570 uint32_t
2571 dp_rx_mon_process_2_0(struct dp_soc *soc, struct dp_intr *int_ctx,
2572 		      uint32_t mac_id, uint32_t quota)
2573 {
2574 	uint32_t work_done;
2575 
2576 	work_done = dp_rx_mon_srng_process_2_0(soc, int_ctx, mac_id, quota);
2577 
2578 	return work_done;
2579 }
2580 
2581 /**
2582  * dp_mon_pdev_ext_init_2_0() - Init pdev ext param
2583  *
2584  * @pdev: DP pdev handle
2585  *
2586  * Return:  QDF_STATUS_SUCCESS: Success
2587  *          QDF_STATUS_E_FAILURE: failure
2588  */
2589 QDF_STATUS dp_mon_pdev_ext_init_2_0(struct dp_pdev *pdev)
2590 {
2591 	return dp_rx_mon_init_wq_sm(pdev);
2592 }
2593 
2594 /**
2595  * dp_mon_pdev_ext_deinit_2_0() - deinit pdev ext param
2596  *
2597  * @pdev: DP pdev handle
2598  *
2599  * Return: QDF_STATUS_SUCCESS
2600  */
2601 QDF_STATUS dp_mon_pdev_ext_deinit_2_0(struct dp_pdev *pdev)
2602 {
2603 	dp_mon_pdev_flush_desc(pdev);
2604 
2605 	return dp_rx_mon_deinit_wq_sm(pdev);
2606 }
2607 
2608 #ifdef QCA_ENHANCED_STATS_SUPPORT
2609 void
2610 dp_rx_mon_populate_ppdu_usr_info_2_0(struct mon_rx_user_status *rx_user_status,
2611 				     struct cdp_rx_stats_ppdu_user *ppdu_user)
2612 {
2613 	ppdu_user->mpdu_retries = rx_user_status->retry_mpdu;
2614 }
2615 
2616 #ifdef WLAN_FEATURE_11BE
2617 void dp_rx_mon_stats_update_2_0(struct dp_mon_peer *mon_peer,
2618 				struct cdp_rx_indication_ppdu *ppdu,
2619 				struct cdp_rx_stats_ppdu_user *ppdu_user)
2620 {
2621 	uint8_t mcs, preamble, ppdu_type, punc_mode, res_mcs;
2622 	uint32_t num_msdu;
2623 
2624 	preamble = ppdu->u.preamble;
2625 	ppdu_type = ppdu->u.ppdu_type;
2626 	num_msdu = ppdu_user->num_msdu;
2627 	punc_mode = ppdu->punc_bw;
2628 
2629 	if (ppdu_type == HAL_RX_TYPE_SU)
2630 		mcs = ppdu->u.mcs;
2631 	else
2632 		mcs = ppdu_user->mcs;
2633 
2634 	DP_STATS_INC(mon_peer, rx.mpdu_retry_cnt, ppdu_user->mpdu_retries);
2635 	DP_STATS_INC(mon_peer, rx.punc_bw[punc_mode], num_msdu);
2636 
2637 	if (preamble == DOT11_BE) {
2638 		res_mcs = (mcs < MAX_MCS_11BE) ? mcs : (MAX_MCS - 1);
2639 
2640 		DP_STATS_INC(mon_peer,
2641 			     rx.pkt_type[preamble].mcs_count[res_mcs], num_msdu);
2642 		DP_STATS_INCC(mon_peer,
2643 			      rx.su_be_ppdu_cnt.mcs_count[res_mcs], 1,
2644 			      (ppdu_type == HAL_RX_TYPE_SU));
2645 		DP_STATS_INCC(mon_peer,
2646 			      rx.mu_be_ppdu_cnt[TXRX_TYPE_MU_OFDMA].mcs_count[res_mcs],
2647 			      1, (ppdu_type == HAL_RX_TYPE_MU_OFDMA));
2648 		DP_STATS_INCC(mon_peer,
2649 			      rx.mu_be_ppdu_cnt[TXRX_TYPE_MU_MIMO].mcs_count[res_mcs],
2650 			      1, (ppdu_type == HAL_RX_TYPE_MU_MIMO));
2651 	}
2652 }
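/*
 * Clamping illustration (bound value hypothetical): if MAX_MCS_11BE
 * were 16, an in-range mcs such as 13 is counted in its own bucket,
 * while an out-of-range mcs such as 31 is folded into the last
 * bucket, mcs_count[MAX_MCS - 1].
 */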
2653 
2654 void
2655 dp_rx_mon_populate_ppdu_info_2_0(struct hal_rx_ppdu_info *hal_ppdu_info,
2656 				 struct cdp_rx_indication_ppdu *ppdu)
2657 {
2658 	uint16_t puncture_pattern;
2659 	enum cdp_punctured_modes punc_mode;
2660 
2661 	/* Align bw value as per host data structures */
2662 	if (hal_ppdu_info->rx_status.bw == HAL_FULL_RX_BW_320)
2663 		ppdu->u.bw = CMN_BW_320MHZ;
2664 	else
2665 		ppdu->u.bw = hal_ppdu_info->rx_status.bw;
2666 	if (hal_ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11BE) {
2667 		/* Align preamble value as per host data structures */
2668 		ppdu->u.preamble = DOT11_BE;
2669 		ppdu->u.stbc = hal_ppdu_info->rx_status.is_stbc;
2670 		ppdu->u.dcm = hal_ppdu_info->rx_status.dcm;
2671 	} else {
2672 		ppdu->u.preamble = hal_ppdu_info->rx_status.preamble_type;
2673 	}
2674 
2675 	puncture_pattern = hal_ppdu_info->rx_status.punctured_pattern;
2676 	punc_mode = dp_mon_get_puncture_type(puncture_pattern,
2677 					     ppdu->u.bw);
2678 	ppdu->punc_bw = punc_mode;
2679 }
2680 #else
2681 void dp_rx_mon_stats_update_2_0(struct dp_mon_peer *mon_peer,
2682 				struct cdp_rx_indication_ppdu *ppdu,
2683 				struct cdp_rx_stats_ppdu_user *ppdu_user)
2684 {
2685 	DP_STATS_INC(mon_peer, rx.mpdu_retry_cnt, ppdu_user->mpdu_retries);
2686 }
2687 
2688 void
2689 dp_rx_mon_populate_ppdu_info_2_0(struct hal_rx_ppdu_info *hal_ppdu_info,
2690 				 struct cdp_rx_indication_ppdu *ppdu)
2691 {
2692 	ppdu->punc_bw = NO_PUNCTURE;
2693 }
2694 #endif
2695 void dp_mon_rx_print_advanced_stats_2_0(struct dp_soc *soc,
2696 					struct dp_pdev *pdev)
2697 {
2698 	struct cdp_pdev_mon_stats *rx_mon_stats;
2699 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
2700 	struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc;
2701 	struct dp_mon_pdev_be *mon_pdev_be =
2702 				dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
2703 
2704 	rx_mon_stats = &mon_pdev->rx_mon_stats;
2705 
2706 	DP_PRINT_STATS("total_ppdu_info_alloc = %d",
2707 		       rx_mon_stats->total_ppdu_info_alloc);
2708 	DP_PRINT_STATS("total_ppdu_info_free = %d",
2709 		       rx_mon_stats->total_ppdu_info_free);
2710 	DP_PRINT_STATS("total_ppdu_info_enq = %d",
2711 		       rx_mon_stats->total_ppdu_info_enq);
2712 	DP_PRINT_STATS("total_ppdu_info_drop = %d",
2713 		       rx_mon_stats->total_ppdu_info_drop);
2714 	DP_PRINT_STATS("rx_hdr_not_received = %d",
2715 		       rx_mon_stats->rx_hdr_not_received);
2716 	DP_PRINT_STATS("parent_buf_alloc = %d",
2717 		       rx_mon_stats->parent_buf_alloc);
2718 	DP_PRINT_STATS("parent_buf_free = %d",
2719 		       rx_mon_stats->parent_buf_free);
2720 	DP_PRINT_STATS("mpdus_buf_to_stack = %d",
2721 		       rx_mon_stats->mpdus_buf_to_stack);
2722 	DP_PRINT_STATS("frag_alloc = %d",
2723 		       mon_soc->stats.frag_alloc);
2724 	DP_PRINT_STATS("total frag_free = %d",
2725 		       mon_soc->stats.frag_free);
2726 	DP_PRINT_STATS("frag_free due to empty queue = %d",
2727 		       mon_soc->stats.empty_queue);
2728 	DP_PRINT_STATS("status_buf_count = %d",
2729 		       rx_mon_stats->status_buf_count);
2730 	DP_PRINT_STATS("pkt_buf_count = %d",
2731 		       rx_mon_stats->pkt_buf_count);
2732 	DP_PRINT_STATS("rx_mon_queue_depth = %d",
2733 		       mon_pdev_be->rx_mon_queue_depth);
2734 	DP_PRINT_STATS("empty_desc = %d",
2735 		       mon_pdev->rx_mon_stats.empty_desc_ppdu);
2736 	DP_PRINT_STATS("mpdu_dropped_due_invalid_decap = %d",
2737 		       mon_pdev->rx_mon_stats.mpdu_decap_type_invalid);
2738 	DP_PRINT_STATS("total_free_elem = %d",
2739 		       mon_pdev_be->total_free_elem);
2740 	DP_PRINT_STATS("ppdu_drop_cnt = %d",
2741 		       mon_pdev->rx_mon_stats.ppdu_drop_cnt);
2742 	DP_PRINT_STATS("mpdu_drop_cnt = %d",
2743 		       mon_pdev->rx_mon_stats.mpdu_drop_cnt);
2744 	DP_PRINT_STATS("end_of_ppdu_drop_cnt = %d",
2745 		       mon_pdev->rx_mon_stats.end_of_ppdu_drop_cnt);
2746 	DP_PRINT_STATS("tlv_drop_cnt = %d",
2747 		       mon_pdev->rx_mon_stats.tlv_drop_cnt);
2748 	DP_PRINT_STATS("rx_hdr_invalid_cnt = %d",
2749 		       rx_mon_stats->rx_hdr_invalid_cnt);
2750 	DP_PRINT_STATS("invalid_dma_length received = %d",
2751 		       rx_mon_stats->invalid_dma_length);
2752 	DP_PRINT_STATS("pending_desc_count = %d",
2753 		       mon_pdev->rx_mon_stats.pending_desc_count);
2754 }
2755 #endif
2756 
2757 #ifdef WLAN_PKT_CAPTURE_RX_2_0
2758 void
2759 dp_rx_mon_buf_desc_pool_deinit(struct dp_soc *soc)
2760 {
2761 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
2762 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
2763 
2764 	/* Drain page frag cache before pool deinit */
2765 	qdf_frag_cache_drain(&mon_soc_be->rx_desc_mon.pf_cache);
2766 	dp_mon_desc_pool_deinit(&mon_soc_be->rx_desc_mon);
2767 }
2768 
2769 QDF_STATUS
2770 dp_rx_mon_buf_desc_pool_init(struct dp_soc *soc)
2771 {
2772 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
2773 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
2774 	uint32_t num_entries;
2775 
2776 	num_entries =
2777 		wlan_cfg_get_dp_soc_rx_mon_buf_ring_size(soc->wlan_cfg_ctx);
2778 	return dp_mon_desc_pool_init(&mon_soc_be->rx_desc_mon, num_entries);
2779 }
2780 
2781 void dp_rx_mon_buf_desc_pool_free(struct dp_soc *soc)
2782 {
2783 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
2784 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
2785 
2786 	if (mon_soc)
2787 		dp_mon_desc_pool_free(soc, &mon_soc_be->rx_desc_mon,
2788 				      DP_MON_RX_DESC_POOL_TYPE);
2789 }
2790 
2791 void dp_rx_mon_soc_detach_2_0(struct dp_soc *soc, int lmac_id)
2792 {
2793 	dp_rx_mon_buf_desc_pool_free(soc);
2794 	dp_srng_free(soc, &soc->rxdma_mon_buf_ring[lmac_id]);
2795 }
2796 
2797 void dp_rx_mon_soc_deinit_2_0(struct dp_soc *soc, uint32_t lmac_id)
2798 {
2799 	dp_rx_mon_buffers_free(soc);
2800 	dp_rx_mon_buf_desc_pool_deinit(soc);
2801 	dp_srng_deinit(soc, &soc->rxdma_mon_buf_ring[lmac_id],
2802 			RXDMA_MONITOR_BUF, 0);
2803 }
2804 
2805 QDF_STATUS
2806 dp_rx_mon_buf_desc_pool_alloc(struct dp_soc *soc)
2807 {
2808 	struct dp_srng *mon_buf_ring;
2809 	struct dp_mon_desc_pool *rx_mon_desc_pool;
2810 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
2811 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
2812 	int entries;
2813 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2814 
2815 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2816 
2817 	entries = wlan_cfg_get_dp_soc_rx_mon_buf_ring_size(soc_cfg_ctx);
2818 	mon_buf_ring = &soc->rxdma_mon_buf_ring[0];
2819 
2820 	rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
2821 
2822 	qdf_print("%s:%d rx mon buf desc pool entries: %d", __func__, __LINE__, entries);
2823 	return dp_mon_desc_pool_alloc(soc, DP_MON_RX_DESC_POOL_TYPE,
2824 				      entries, rx_mon_desc_pool);
2825 }
2826 
2827 void
2828 dp_rx_mon_buffers_free(struct dp_soc *soc)
2829 {
2830 	struct dp_mon_desc_pool *rx_mon_desc_pool;
2831 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
2832 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
2833 
2834 	rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
2835 
2836 	dp_mon_pool_frag_unmap_and_free(soc, rx_mon_desc_pool);
2837 }
2838 
2839 QDF_STATUS
2840 dp_rx_mon_buffers_alloc(struct dp_soc *soc, uint32_t size)
2841 {
2842 	struct dp_srng *mon_buf_ring;
2843 	struct dp_mon_desc_pool *rx_mon_desc_pool;
2844 	union dp_mon_desc_list_elem_t *desc_list = NULL;
2845 	union dp_mon_desc_list_elem_t *tail = NULL;
2846 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
2847 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
2848 
2849 	mon_buf_ring = &soc->rxdma_mon_buf_ring[0];
2850 
2851 	rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
2852 
2853 	return dp_mon_buffers_replenish(soc, mon_buf_ring,
2854 					rx_mon_desc_pool,
2855 					size,
2856 					&desc_list, &tail, NULL);
2857 }
2858 
2859 QDF_STATUS dp_rx_mon_soc_init_2_0(struct dp_soc *soc)
2860 {
2861 	if (dp_srng_init(soc, &soc->rxdma_mon_buf_ring[0],
2862 			 RXDMA_MONITOR_BUF, 0, 0)) {
2863 		dp_mon_err("%pK: " RNG_ERR "rx_mon_buf_ring", soc);
2864 		goto fail;
2865 	}
2866 
2867 	if (dp_rx_mon_buf_desc_pool_init(soc)) {
2868 		dp_mon_err("%pK: " RNG_ERR "rx mon desc pool init", soc);
2869 		goto fail;
2870 	}
2871 
2872 	/* monitor buffers for src */
2873 	if (dp_rx_mon_buffers_alloc(soc, DP_MON_RING_FILL_LEVEL_DEFAULT)) {
2874 		dp_mon_err("%pK: Rx mon buffers allocation failed", soc);
2875 		goto fail;
2876 	}
2877 
2878 	return QDF_STATUS_SUCCESS;
2879 fail:
2880 	return QDF_STATUS_E_FAILURE;
2881 }
2882 
2883 QDF_STATUS dp_rx_mon_pdev_htt_srng_setup_2_0(struct dp_soc *soc,
2884 					    struct dp_pdev *pdev,
2885 					    int mac_id,
2886 					    int mac_for_pdev)
2887 {
2888 	return htt_srng_setup(soc->htt_handle, mac_for_pdev,
2889 			      soc->rxdma_mon_dst_ring[mac_id].hal_srng,
2890 			      RXDMA_MONITOR_DST);
2891 }
2892 
2893 QDF_STATUS dp_rx_mon_soc_htt_srng_setup_2_0(struct dp_soc *soc,
2894 					    int mac_id)
2895 {
2896 	hal_set_low_threshold(soc->rxdma_mon_buf_ring[0].hal_srng,
2897 			      MON_BUF_MIN_ENTRIES << 2);
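	/*
	 * Illustration (value hypothetical): the left shift by 2
	 * multiplies by 4, so if MON_BUF_MIN_ENTRIES were 64 the low
	 * threshold would be armed at 256 remaining ring entries.
	 */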
2898 	return htt_srng_setup(soc->htt_handle, 0,
2899 			soc->rxdma_mon_buf_ring[0].hal_srng,
2900 			RXDMA_MONITOR_BUF);
2901 }
2902 
2903 QDF_STATUS dp_rx_mon_pdev_rings_alloc_2_0(struct dp_pdev *pdev, int lmac_id)
2904 {
2905 	struct dp_soc *soc = pdev->soc;
2906 	int entries;
2907 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
2908 
2909 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
2910 	entries = wlan_cfg_get_dma_rx_mon_dest_ring_size(pdev_cfg_ctx);
2911 
2912 	return dp_srng_alloc(soc, &soc->rxdma_mon_dst_ring[lmac_id],
2913 				  RXDMA_MONITOR_DST, entries, 0);
2914 }
2915 
2916 void dp_rx_mon_pdev_rings_free_2_0(struct dp_pdev *pdev, int lmac_id)
2917 {
2918 	struct dp_soc *soc = pdev->soc;
2919 
2920 	dp_srng_free(soc, &soc->rxdma_mon_dst_ring[lmac_id]);
2921 }
2922 
2923 QDF_STATUS dp_rx_mon_pdev_rings_init_2_0(struct dp_pdev *pdev, int lmac_id)
2924 {
2925 	struct dp_soc *soc = pdev->soc;
2926 
2927 	return dp_srng_init(soc, &soc->rxdma_mon_dst_ring[lmac_id],
2928 				 RXDMA_MONITOR_DST, pdev->pdev_id, lmac_id);
2929 }
2930 
2931 void dp_rx_mon_pdev_rings_deinit_2_0(struct dp_pdev *pdev, int lmac_id)
2932 {
2933 	struct dp_soc *soc = pdev->soc;
2934 
2935 	dp_srng_deinit(soc, &soc->rxdma_mon_dst_ring[lmac_id],
2936 		       RXDMA_MONITOR_DST, pdev->pdev_id);
2937 }
2938 
2939 QDF_STATUS dp_rx_mon_soc_attach_2_0(struct dp_soc *soc, int lmac_id)
2940 {
2941 	int entries;
2942 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
2943 
2944 	entries = wlan_cfg_get_dp_soc_rx_mon_buf_ring_size(soc_cfg_ctx);
2945 	qdf_print("%s:%d rx mon buf entries: %d", __func__, __LINE__, entries);
2946 
2947 	return dp_srng_alloc(soc, &soc->rxdma_mon_buf_ring[lmac_id],
2948 			  RXDMA_MONITOR_BUF, entries, 0);
2949 }
2950 
2951 #endif /* WLAN_PKT_CAPTURE_RX_2_0 */
2952