xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/monitor/2.0/dp_rx_mon_2.0.c (revision 8b3dca18206e1a0461492f082fa6e270b092c035)
1 /*
2  * Copyright (c) 2021, The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include "hal_be_hw_headers.h"
19 #include "dp_types.h"
20 #include "hal_be_rx.h"
21 #include "hal_api.h"
22 #include "qdf_trace.h"
23 #include "hal_be_api_mon.h"
24 #include "dp_internal.h"
25 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
26 #include <qdf_flex_mem.h>
27 #include "qdf_nbuf_frag.h"
28 #include "dp_mon.h"
29 #include <dp_rx_mon.h>
30 #include <dp_mon_2.0.h>
31 #include <dp_rx_mon.h>
32 #include <dp_rx_mon_2.0.h>
33 #include <dp_rx.h>
34 #include <dp_be.h>
35 #include <hal_be_api_mon.h>
36 #ifdef QCA_SUPPORT_LITE_MONITOR
37 #include "dp_lite_mon.h"
38 #endif
39 
40 #define F_MASK 0xFFFF
41 #define TEST_MASK 0xCBF
42 
43 #if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\
44 	    defined(WLAN_SUPPORT_RX_FLOW_TAG)
45 
46 #ifdef QCA_TEST_MON_PF_TAGS_STATS
47 
/**
 * dp_rx_mon_print_tag_buf() - Hex-dump the PF tag TLV buffer
 * @buf: start of the TLV buffer to dump
 * @room: number of bytes to dump
 *
 * Debug build only (QCA_TEST_MON_PF_TAGS_STATS): dumps @room bytes,
 * 32 per line grouped as 2-byte words, with no address prefix.
 *
 * Return: void
 */
static
void dp_rx_mon_print_tag_buf(uint8_t *buf, uint16_t room)
{
	print_hex_dump(KERN_ERR, "TLV BUFFER: ", DUMP_PREFIX_NONE,
		       32, 2, buf, room, false);
}
54 
55 #else
/* No-op stub when QCA_TEST_MON_PF_TAGS_STATS is not defined */
static
void dp_rx_mon_print_tag_buf(uint8_t *buf, uint16_t room)
{
}
60 
61 #endif
62 
/**
 * dp_rx_mon_set_zero() - Zero the TLV scratch area in the nbuf headroom
 * @nbuf: SKB whose headroom holds the monitor TLV area
 *
 * Clears DP_RX_MON_TLV_ROOM bytes starting at qdf_nbuf_head(nbuf).
 *
 * Return: void
 */
static
void dp_rx_mon_set_zero(qdf_nbuf_t nbuf)
{
	qdf_mem_zero(qdf_nbuf_head(nbuf), DP_RX_MON_TLV_ROOM);
}
68 
69 /**
70  * dp_rx_mon_get_ppdu_info() - Get PPDU info from freelist
71  *
72  * @mon_pdev: monitor pdev
73  *
74  * Return: ppdu_info
75  */
76 static inline struct hal_rx_ppdu_info*
77 dp_rx_mon_get_ppdu_info(struct dp_mon_pdev *mon_pdev)
78 {
79 	struct dp_mon_pdev_be *mon_pdev_be =
80 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
81 	struct hal_rx_ppdu_info *ppdu_info, *temp_ppdu_info;
82 
83 	qdf_spin_lock_bh(&mon_pdev_be->ppdu_info_lock);
84 	TAILQ_FOREACH_SAFE(ppdu_info,
85 			   &mon_pdev_be->rx_mon_free_queue,
86 			   ppdu_list_elem,
87 			   temp_ppdu_info) {
88 		TAILQ_REMOVE(&mon_pdev_be->rx_mon_free_queue,
89 			     ppdu_info, ppdu_free_list_elem);
90 
91 		if (ppdu_info) {
92 			mon_pdev_be->total_free_elem--;
93 			break;
94 		}
95 	}
96 	qdf_spin_unlock_bh(&mon_pdev_be->ppdu_info_lock);
97 
98 	return ppdu_info;
99 }
100 
101 static inline void
102 __dp_rx_mon_free_ppdu_info(struct dp_mon_pdev *mon_pdev,
103 			   struct hal_rx_ppdu_info *ppdu_info)
104 {
105 	struct dp_mon_pdev_be *mon_pdev_be =
106 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
107 
108 	qdf_spin_lock_bh(&mon_pdev_be->ppdu_info_lock);
109 	if (ppdu_info) {
110 		TAILQ_INSERT_TAIL(&mon_pdev_be->rx_mon_free_queue, ppdu_info,
111 				  ppdu_free_list_elem);
112 		mon_pdev_be->total_free_elem++;
113 	}
114 	qdf_spin_unlock_bh(&mon_pdev_be->ppdu_info_lock);
115 }
116 
117 /**
118  * dp_rx_mon_nbuf_add_rx_frag () -  Add frag to SKB
119  *
120  * @nbuf: SKB to which frag is going to be added
121  * @frag: frag to be added to SKB
122  * @frag_len: frag length
123  * @offset: frag offset
124  * @buf_size: buffer size
125  * @frag_ref: take frag ref
126  *
127  * Return: QDF_STATUS
128  */
129 static inline QDF_STATUS
130 dp_rx_mon_nbuf_add_rx_frag(qdf_nbuf_t nbuf, qdf_frag_t *frag,
131 			   uint16_t frag_len, uint16_t offset,
132 			   uint16_t buf_size, bool frag_ref)
133 {
134 	uint8_t num_frags;
135 
136 	num_frags = qdf_nbuf_get_nr_frags(nbuf);
137 	if (num_frags < QDF_NBUF_MAX_FRAGS) {
138 		qdf_nbuf_add_rx_frag(frag, nbuf,
139 				     offset,
140 				     frag_len,
141 				     buf_size,
142 				     frag_ref);
143 		return QDF_STATUS_SUCCESS;
144 	}
145 	return QDF_STATUS_E_FAILURE;
146 }
147 
148 /**
149  * dp_mon_free_parent_nbuf() - Free parent SKB
150  *
151  * @mon_pdev: monitor pdev
152  * @nbuf: SKB to be freed
153  *
154  * @Return: void
155  */
156 void
157 dp_mon_free_parent_nbuf(struct dp_mon_pdev *mon_pdev,
158 			qdf_nbuf_t nbuf)
159 {
160 	mon_pdev->rx_mon_stats.parent_buf_free++;
161 	qdf_nbuf_free(nbuf);
162 }
163 
/**
 * dp_rx_mon_shift_pf_tag_in_headroom() - Frame the per-MSDU PF tag data in
 * the nbuf headroom as a TLV (marker + length + tag payload)
 * @nbuf: MPDU buffer whose headroom holds the tag records
 * @soc: DP soc handle
 * @ppdu_info: PPDU info (currently unused here)
 *
 * Return: void
 */
void
dp_rx_mon_shift_pf_tag_in_headroom(qdf_nbuf_t nbuf, struct dp_soc *soc,
				   struct hal_rx_ppdu_info *ppdu_info)
{
	uint32_t room = 0;
	uint16_t msdu_count = 0;
	uint16_t *dp = NULL;
	uint16_t *hp = NULL;
	uint16_t tlv_data_len, total_tlv_len;
	uint32_t bytes = 0;

	if (qdf_unlikely(!soc)) {
		dp_mon_err("Soc[%pK] Null. Can't update pftag to nbuf headroom",
			   soc);
		qdf_assert_always(0);
	}

	if (!wlan_cfg_is_rx_mon_protocol_flow_tag_enabled(soc->wlan_cfg_ctx))
		return;

	if (qdf_unlikely(!nbuf))
		return;

	/* Headroom must have enough space for the TLV to be added */
	if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < DP_RX_MON_TLV_ROOM)) {
		dp_mon_err("Headroom[%d] < DP_RX_MON_TLV_ROOM[%d]",
			   qdf_nbuf_headroom(nbuf), DP_RX_MON_TLV_ROOM);
		return;
	}

	/* First uint16_t at the buffer head is the MSDU count written by
	 * dp_rx_mon_pf_tag_to_buf_headroom_2_0()
	 */
	hp = (uint16_t *)qdf_nbuf_head(nbuf);
	msdu_count = *hp;

	if (qdf_unlikely(!msdu_count))
		return;

	dp_mon_debug("msdu_count: %d", msdu_count);

	/* TLV payload: MSDU count field plus one tag record per MSDU */
	room = DP_RX_MON_PF_TAG_LEN_PER_FRAG * msdu_count;
	tlv_data_len = DP_RX_MON_TLV_MSDU_CNT + (room);
	total_tlv_len = DP_RX_MON_TLV_HDR_LEN + tlv_data_len;

	//1. store space for MARKER
	dp = (uint16_t *)qdf_nbuf_push_head(nbuf, sizeof(uint16_t));
	if (qdf_likely(dp)) {
		*(uint16_t *)dp = DP_RX_MON_TLV_HDR_MARKER;
		bytes += sizeof(uint16_t);
	}

	//2. store space for total size
	dp = (uint16_t *)qdf_nbuf_push_head(nbuf, sizeof(uint16_t));
	if (qdf_likely(dp)) {
		*(uint16_t *)dp = total_tlv_len;
		bytes += sizeof(uint16_t);
	}

	//create TLV
	bytes += dp_mon_rx_add_tlv(DP_RX_MON_TLV_PF_ID, tlv_data_len, hp, nbuf);

	dp_rx_mon_print_tag_buf(qdf_nbuf_data(nbuf), total_tlv_len);

	/* Undo the pushes: the TLV remains in headroom, data ptr restored */
	qdf_nbuf_pull_head(nbuf, bytes);

}
228 
229 void
230 dp_rx_mon_pf_tag_to_buf_headroom_2_0(void *nbuf,
231 				     struct hal_rx_ppdu_info *ppdu_info,
232 				     struct dp_pdev *pdev, struct dp_soc *soc)
233 {
234 	uint8_t *nbuf_head = NULL;
235 	uint8_t user_id;
236 	struct hal_rx_mon_msdu_info *msdu_info;
237 	uint16_t flow_id;
238 	uint16_t cce_metadata;
239 	uint16_t protocol_tag = 0;
240 	uint32_t flow_tag;
241 	uint8_t invalid_cce = 0, invalid_fse = 0;
242 
243 	if (qdf_unlikely(!soc)) {
244 		dp_mon_err("Soc[%pK] Null. Can't update pftag to nbuf headroom",
245 			   soc);
246 		qdf_assert_always(0);
247 	}
248 
249 	if (!wlan_cfg_is_rx_mon_protocol_flow_tag_enabled(soc->wlan_cfg_ctx))
250 		return;
251 
252 	if (qdf_unlikely(!nbuf))
253 		return;
254 
255 	/* Headroom must be have enough space for tlv to be added*/
256 	if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < DP_RX_MON_TLV_ROOM)) {
257 		dp_mon_err("Headroom[%d] < DP_RX_MON_TLV_ROOM[%d]",
258 			   qdf_nbuf_headroom(nbuf), DP_RX_MON_TLV_ROOM);
259 		return;
260 	}
261 
262 	user_id = ppdu_info->user_id;
263 	if (qdf_unlikely(user_id > HAL_MAX_UL_MU_USERS)) {
264 		dp_mon_debug("Invalid user_id user_id: %d pdev: %pK", user_id, pdev);
265 		return;
266 	}
267 
268 	msdu_info = &ppdu_info->msdu[user_id];
269 	flow_id = ppdu_info->rx_msdu_info[user_id].flow_idx;
270 	cce_metadata = ppdu_info->rx_msdu_info[user_id].cce_metadata -
271 		       RX_PROTOCOL_TAG_START_OFFSET;
272 
273 	flow_tag = ppdu_info->rx_msdu_info[user_id].fse_metadata & F_MASK;
274 
275 	if (qdf_unlikely((cce_metadata > RX_PROTOCOL_TAG_MAX - 1) ||
276 			 (cce_metadata > 0 && cce_metadata < 4))) {
277 		dp_mon_debug("Invalid user_id cce_metadata: %d pdev: %pK", cce_metadata, pdev);
278 		invalid_cce = 1;
279 		protocol_tag = cce_metadata;
280 	} else {
281 		protocol_tag = pdev->rx_proto_tag_map[cce_metadata].tag;
282 		dp_mon_rx_update_rx_protocol_tag_stats(pdev, cce_metadata);
283 	}
284 
285 	if (flow_tag > 0) {
286 		dp_mon_rx_update_rx_flow_tag_stats(pdev, flow_id);
287 	} else {
288 		dp_mon_debug("Invalid flow_tag: %d pdev: %pK ", flow_tag, pdev);
289 		invalid_fse = 1;
290 	}
291 
292 	if (invalid_cce && invalid_fse)
293 		return;
294 
295 	if (msdu_info->msdu_index >= DP_RX_MON_MAX_MSDU) {
296 		dp_mon_err("msdu_index causes overflow in headroom");
297 		return;
298 	}
299 
300 	dp_mon_debug("protocol_tag: %d, cce_metadata: %d, flow_tag: %d",
301 		     protocol_tag, cce_metadata, flow_tag);
302 
303 	dp_mon_debug("msdu_index: %d", msdu_info->msdu_index);
304 
305 
306 	nbuf_head = qdf_nbuf_head(nbuf);
307 
308 	*((uint16_t *)nbuf_head) = msdu_info->msdu_index + 1;
309 	nbuf_head += DP_RX_MON_TLV_MSDU_CNT;
310 
311 	nbuf_head += ((msdu_info->msdu_index) * DP_RX_MON_PF_TAG_SIZE);
312 	if (!invalid_cce)
313 		*((uint16_t *)nbuf_head) = protocol_tag;
314 	nbuf_head += sizeof(uint16_t);
315 	if (!invalid_fse)
316 		*((uint16_t *)nbuf_head) = flow_tag;
317 }
318 
319 #else
320 
/* No-op stub when protocol/flow tagging support is compiled out */
static
void dp_rx_mon_set_zero(qdf_nbuf_t nbuf)
{
}
325 
/* No-op stub when protocol/flow tagging support is compiled out */
static
void dp_rx_mon_shift_pf_tag_in_headroom(qdf_nbuf_t nbuf, struct dp_soc *soc,
					struct hal_rx_ppdu_info *ppdu_info)
{
}
331 
/* No-op stub when protocol/flow tagging support is compiled out */
static
void dp_rx_mon_pf_tag_to_buf_headroom_2_0(void *nbuf,
					  struct hal_rx_ppdu_info *ppdu_info,
					  struct dp_pdev *pdev,
					  struct dp_soc *soc)
{
}
339 
340 #endif
341 
342 /**
343  * dp_rx_mon_free_ppdu_info () - Free PPDU info
344  * @pdev: DP pdev
345  * @ppdu_info: PPDU info
346  *
347  * Return: Void
348  */
349 static void
350 dp_rx_mon_free_ppdu_info(struct dp_pdev *pdev,
351 			 struct hal_rx_ppdu_info *ppdu_info)
352 {
353 	uint8_t user;
354 	struct dp_mon_pdev *mon_pdev;
355 
356 	mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
357 	for (user = 0; user < ppdu_info->com_info.num_users; user++) {
358 		uint16_t mpdu_count  = ppdu_info->mpdu_count[user];
359 		uint16_t mpdu_idx;
360 		qdf_nbuf_t mpdu;
361 
362 		for (mpdu_idx = 0; mpdu_idx < mpdu_count; mpdu_idx++) {
363 			mpdu = qdf_nbuf_queue_remove(&ppdu_info->mpdu_q[user]);
364 
365 			if (!mpdu)
366 				continue;
367 			dp_mon_free_parent_nbuf(mon_pdev, mpdu);
368 		}
369 	}
370 	__dp_rx_mon_free_ppdu_info(mon_pdev, ppdu_info);
371 }
372 
373 void dp_rx_mon_drain_wq(struct dp_pdev *pdev)
374 {
375 	struct dp_mon_pdev *mon_pdev;
376 	struct hal_rx_ppdu_info *ppdu_info = NULL;
377 	struct hal_rx_ppdu_info *temp_ppdu_info = NULL;
378 	struct dp_mon_pdev_be *mon_pdev_be;
379 
380 	if (qdf_unlikely(!pdev)) {
381 		dp_mon_debug("Pdev is NULL");
382 		return;
383 	}
384 
385 	mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
386 	if (qdf_unlikely(!mon_pdev)) {
387 		dp_mon_debug("monitor pdev is NULL");
388 		return;
389 	}
390 
391 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
392 
393 	qdf_spin_lock_bh(&mon_pdev_be->rx_mon_wq_lock);
394 	TAILQ_FOREACH_SAFE(ppdu_info,
395 			   &mon_pdev_be->rx_mon_queue,
396 			   ppdu_list_elem,
397 			   temp_ppdu_info) {
398 		mon_pdev_be->rx_mon_queue_depth--;
399 		TAILQ_REMOVE(&mon_pdev_be->rx_mon_queue,
400 			     ppdu_info, ppdu_list_elem);
401 
402 		__dp_rx_mon_free_ppdu_info(mon_pdev, ppdu_info);
403 	}
404 	qdf_spin_unlock_bh(&mon_pdev_be->rx_mon_wq_lock);
405 }
406 
407 /**
408  * dp_rx_mon_deliver_mpdu() - Deliver MPDU to osif layer
409  *
410  * @mon_pdev: monitor pdev
411  * @mpdu: MPDU nbuf
412  * @status: monitor status
413  *
414  * Return: QDF_STATUS
415  */
416 static QDF_STATUS
417 dp_rx_mon_deliver_mpdu(struct dp_mon_pdev *mon_pdev,
418 		       qdf_nbuf_t mpdu,
419 		       struct mon_rx_status *rx_status)
420 {
421 	qdf_nbuf_t nbuf;
422 
423 	if (mon_pdev->mvdev && mon_pdev->mvdev->monitor_vdev->osif_rx_mon) {
424 		mon_pdev->rx_mon_stats.mpdus_buf_to_stack++;
425 		nbuf = qdf_nbuf_get_ext_list(mpdu);
426 
427 		while (nbuf) {
428 			mon_pdev->rx_mon_stats.mpdus_buf_to_stack++;
429 			nbuf = nbuf->next;
430 		}
431 		mon_pdev->mvdev->monitor_vdev->osif_rx_mon(mon_pdev->mvdev->osif_vdev,
432 							   mpdu,
433 							   rx_status);
434 	} else {
435 		return QDF_STATUS_E_FAILURE;
436 	}
437 
438 	return QDF_STATUS_SUCCESS;
439 }
440 
441 /**
442  * dp_rx_mon_process_ppdu_info () - Process PPDU info
443  * @pdev: DP pdev
444  * @ppdu_info: PPDU info
445  *
446  * Return: Void
447  */
448 static void
449 dp_rx_mon_process_ppdu_info(struct dp_pdev *pdev,
450 			    struct hal_rx_ppdu_info *ppdu_info)
451 {
452 	struct dp_mon_pdev *mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
453 	uint8_t user;
454 
455 	if (!ppdu_info)
456 		return;
457 
458 	mon_pdev->ppdu_info.rx_status.chan_noise_floor = pdev->chan_noise_floor;
459 
460 	for (user = 0; user < ppdu_info->com_info.num_users; user++) {
461 		uint16_t mpdu_count  = ppdu_info->mpdu_count[user];
462 		uint16_t mpdu_idx;
463 		qdf_nbuf_t mpdu;
464 		struct hal_rx_mon_mpdu_info *mpdu_meta;
465 		QDF_STATUS status;
466 
467 		for (mpdu_idx = 0; mpdu_idx < mpdu_count; mpdu_idx++) {
468 			mpdu = qdf_nbuf_queue_remove(&ppdu_info->mpdu_q[user]);
469 
470 			if (!mpdu)
471 				continue;
472 
473 			mpdu_meta = (struct hal_rx_mon_mpdu_info *)qdf_nbuf_data(mpdu);
474 
475 			if (dp_lite_mon_is_rx_enabled(mon_pdev)) {
476 				status = dp_lite_mon_rx_mpdu_process(pdev, ppdu_info,
477 								     mpdu, mpdu_idx, user);
478 				if (status != QDF_STATUS_SUCCESS) {
479 					dp_mon_free_parent_nbuf(mon_pdev, mpdu);
480 					continue;
481 				}
482 			} else {
483 				if (mpdu_meta->full_pkt) {
484 					if (qdf_unlikely(mpdu_meta->truncated)) {
485 						dp_mon_free_parent_nbuf(mon_pdev, mpdu);
486 						continue;
487 					}
488 
489 					status = dp_rx_mon_handle_full_mon(pdev,
490 									   ppdu_info, mpdu);
491 					if (status != QDF_STATUS_SUCCESS) {
492 						dp_mon_free_parent_nbuf(mon_pdev, mpdu);
493 						continue;
494 					}
495 				} else {
496 					dp_mon_free_parent_nbuf(mon_pdev, mpdu);
497 					continue;
498 				}
499 
500 				/* reset mpdu metadata and apply radiotap header over MPDU */
501 				qdf_mem_zero(mpdu_meta, sizeof(struct hal_rx_mon_mpdu_info));
502 				if (!qdf_nbuf_update_radiotap(&ppdu_info->rx_status,
503 							      mpdu,
504 							      qdf_nbuf_headroom(mpdu))) {
505 					dp_mon_err("failed to update radiotap pdev: %pK",
506 						   pdev);
507 				}
508 
509 				dp_rx_mon_shift_pf_tag_in_headroom(mpdu,
510 								   pdev->soc,
511 								   ppdu_info);
512 
513 				/* Deliver MPDU to osif layer */
514 				status = dp_rx_mon_deliver_mpdu(mon_pdev,
515 								mpdu,
516 								&ppdu_info->rx_status);
517 				if (status != QDF_STATUS_SUCCESS)
518 					dp_mon_free_parent_nbuf(mon_pdev, mpdu);
519 			}
520 		}
521 	}
522 }
523 
524 /**
525  * dp_rx_mon_process_ppdu ()-  Deferred monitor processing
526  * This workqueue API handles:
527  * a. Full monitor
528  * b. Lite monitor
529  *
530  * @context: Opaque work context
531  *
532  * Return: none
533  */
534 void dp_rx_mon_process_ppdu(void *context)
535 {
536 	struct dp_pdev *pdev = (struct dp_pdev *)context;
537 	struct dp_mon_pdev *mon_pdev;
538 	struct hal_rx_ppdu_info *ppdu_info = NULL;
539 	struct hal_rx_ppdu_info *temp_ppdu_info = NULL;
540 	struct dp_mon_pdev_be *mon_pdev_be;
541 
542 	if (qdf_unlikely(!pdev)) {
543 		dp_mon_debug("Pdev is NULL");
544 		return;
545 	}
546 
547 	mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
548 	if (qdf_unlikely(!mon_pdev)) {
549 		dp_mon_debug("monitor pdev is NULL");
550 		return;
551 	}
552 
553 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
554 
555 	qdf_spin_lock_bh(&mon_pdev_be->rx_mon_wq_lock);
556 	TAILQ_FOREACH_SAFE(ppdu_info,
557 			   &mon_pdev_be->rx_mon_queue,
558 			   ppdu_list_elem, temp_ppdu_info) {
559 		TAILQ_REMOVE(&mon_pdev_be->rx_mon_queue,
560 			     ppdu_info, ppdu_list_elem);
561 
562 		mon_pdev_be->rx_mon_queue_depth--;
563 		dp_rx_mon_process_ppdu_info(pdev, ppdu_info);
564 		__dp_rx_mon_free_ppdu_info(mon_pdev, ppdu_info);
565 	}
566 	qdf_spin_unlock_bh(&mon_pdev_be->rx_mon_wq_lock);
567 }
568 
569 /**
570  * dp_rx_mon_add_ppdu_info_to_wq () - Add PPDU info to workqueue
571  *
572  * @mon_pdev: monitor pdev
573  * @ppdu_info: ppdu info to be added to workqueue
574  *
575  * Return: SUCCESS or FAILIRE
576  */
577 
578 static QDF_STATUS
579 dp_rx_mon_add_ppdu_info_to_wq(struct dp_pdev *pdev,
580 			      struct hal_rx_ppdu_info *ppdu_info)
581 {
582 	struct dp_mon_pdev *mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
583 	struct dp_mon_pdev_be *mon_pdev_be =
584 		dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
585 
586 	/* Full monitor or lite monitor mode is not enabled, return */
587 	if (!mon_pdev->monitor_configured &&
588 	    !dp_lite_mon_is_rx_enabled(mon_pdev))
589 		return QDF_STATUS_E_FAILURE;
590 
591 	if (qdf_likely(ppdu_info)) {
592 		if (mon_pdev_be->rx_mon_queue_depth < DP_RX_MON_WQ_THRESHOLD) {
593 			qdf_spin_lock_bh(&mon_pdev_be->rx_mon_wq_lock);
594 			TAILQ_INSERT_TAIL(&mon_pdev_be->rx_mon_queue,
595 					  ppdu_info, ppdu_list_elem);
596 			mon_pdev_be->rx_mon_queue_depth++;
597 			mon_pdev->rx_mon_stats.total_ppdu_info_enq++;
598 		} else {
599 			mon_pdev->rx_mon_stats.total_ppdu_info_drop++;
600 			dp_rx_mon_free_ppdu_info(pdev, ppdu_info);
601 		}
602 		qdf_spin_unlock_bh(&mon_pdev_be->rx_mon_wq_lock);
603 
604 		if (mon_pdev_be->rx_mon_queue_depth > DP_MON_QUEUE_DEPTH_MAX) {
605 			qdf_queue_work(0, mon_pdev_be->rx_mon_workqueue,
606 				       &mon_pdev_be->rx_mon_work);
607 		}
608 	}
609 	return QDF_STATUS_SUCCESS;
610 }
611 
/**
 * dp_rx_mon_handle_full_mon() - Restitch a monitor MPDU from its
 * rx_hdr/rx_msdu fragment pairs into a deliverable 802.11 frame
 * @pdev: DP pdev
 * @ppdu_info: PPDU info for this MPDU
 * @mpdu: parent nbuf carrying the fragments (frag layout in diagram below)
 *
 * Adjusts frag offsets/sizes in place: header frags are trimmed down to
 * the 802.11 + LLC/SNAP headers, payload frags have the decap header
 * removed, and 4-byte A-MSDU padding is zero-filled where room allows.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 */
QDF_STATUS
dp_rx_mon_handle_full_mon(struct dp_pdev *pdev,
			  struct hal_rx_ppdu_info *ppdu_info,
			  qdf_nbuf_t mpdu)
{
	uint32_t wifi_hdr_len, sec_hdr_len, msdu_llc_len,
		 mpdu_buf_len, decap_hdr_pull_bytes, dir,
		 is_amsdu, amsdu_pad, frag_size, tot_msdu_len;
	struct hal_rx_mon_mpdu_info *mpdu_meta;
	struct hal_rx_mon_msdu_info *msdu_meta;
	char *hdr_desc;
	uint8_t num_frags, frag_iter, l2_hdr_offset;
	struct ieee80211_frame *wh;
	struct ieee80211_qoscntl *qos;
	void *hdr_frag_addr;
	uint32_t hdr_frag_size, frag_page_offset, pad_byte_pholder,
		 msdu_len;
	qdf_nbuf_t head_msdu, msdu_cur;
	void *frag_addr;
	bool prev_msdu_end_received = false;
	bool is_nbuf_head = true;

	/***************************************************************************
	 *********************** Non-raw packet ************************************
	 ---------------------------------------------------------------------------
	 |      | frag-0   | frag-1    | frag - 2 | frag - 3  | frag - 4 | frag - 5  |
	 | skb  | rx_hdr-1 | rx_msdu-1 | rx_hdr-2 | rx_msdu-2 | rx_hdr-3 | rx-msdu-3 |
	 ---------------------------------------------------------------------------
	 **************************************************************************/

	if (!mpdu) {
		dp_mon_debug("nbuf is NULL, return");
		return QDF_STATUS_E_FAILURE;
	}

	head_msdu = mpdu;

	mpdu_meta = (struct hal_rx_mon_mpdu_info *)qdf_nbuf_data(mpdu);

	/* Raw frames need no restitch; just strip the FCS from the last frag */
	if (mpdu_meta->decap_type == HAL_HW_RX_DECAP_FORMAT_RAW) {
		qdf_nbuf_trim_add_frag_size(mpdu,
					    qdf_nbuf_get_nr_frags(mpdu) - 1,
					    -HAL_RX_FCS_LEN, 0);
		return QDF_STATUS_SUCCESS;
	}

	num_frags = qdf_nbuf_get_nr_frags(mpdu);
	if (qdf_unlikely(num_frags < DP_MON_MIN_FRAGS_FOR_RESTITCH)) {
		dp_mon_debug("not enough frags(%d) for restitch", num_frags);
		return QDF_STATUS_E_FAILURE;
	}

	l2_hdr_offset = DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE;

	/* hdr_desc points to 80211 hdr */
	hdr_desc = qdf_nbuf_get_frag_addr(mpdu, 0);

	/* Calculate Base header size */
	wifi_hdr_len = sizeof(struct ieee80211_frame);
	wh = (struct ieee80211_frame *)hdr_desc;

	dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;

	/* 4-address (WDS) frames carry an extra 6-byte addr4 */
	if (dir == IEEE80211_FC1_DIR_DSTODS)
		wifi_hdr_len += 6;

	is_amsdu = 0;
	if (wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) {
		qos = (struct ieee80211_qoscntl *)
			(hdr_desc + wifi_hdr_len);
		wifi_hdr_len += 2;

		is_amsdu = (qos->i_qos[0] & IEEE80211_QOS_AMSDU);
	}

	/*Calculate security header length based on 'Protected'
	 * and 'EXT_IV' flag
	 */
	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
		char *iv = (char *)wh + wifi_hdr_len;

		if (iv[3] & KEY_EXTIV)
			sec_hdr_len = 8;
		else
			sec_hdr_len = 4;
	} else {
		sec_hdr_len = 0;
	}
	wifi_hdr_len += sec_hdr_len;

	/* MSDU related stuff LLC - AMSDU subframe header etc */
	msdu_llc_len = is_amsdu ? (DP_RX_MON_DECAP_HDR_SIZE +
				   DP_RX_MON_LLC_SIZE +
				   DP_RX_MON_SNAP_SIZE) :
				   (DP_RX_MON_LLC_SIZE + DP_RX_MON_SNAP_SIZE);

	mpdu_buf_len = wifi_hdr_len + msdu_llc_len;

	/* "Decap" header to remove from MSDU buffer */
	decap_hdr_pull_bytes = DP_RX_MON_DECAP_HDR_SIZE;

	amsdu_pad = 0;
	tot_msdu_len = 0;
	/* NOTE(review): duplicated assignment below, harmless */
	tot_msdu_len = 0;

	/*
	 * Update protocol and flow tag for MSDU
	 * update frag index in ctx_idx field.
	 * Reset head pointer data of nbuf before updating.
	 */
	QDF_NBUF_CB_RX_CTX_ID(mpdu) = 0;

	/* Construct destination address */
	hdr_frag_addr = qdf_nbuf_get_frag_addr(mpdu, 0);
	hdr_frag_size = qdf_nbuf_get_frag_size_by_idx(mpdu, 0);

	/* Adjust page frag offset to point to 802.11 header */
	qdf_nbuf_trim_add_frag_size(head_msdu, 0, -(hdr_frag_size - mpdu_buf_len), 0);

	/* Per-MSDU metadata sits just before the packet data in frag 1 */
	msdu_meta = (struct hal_rx_mon_msdu_info *)(((void *)qdf_nbuf_get_frag_addr(mpdu, 1)) - (DP_RX_MON_PACKET_OFFSET + DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE));

	msdu_len = msdu_meta->msdu_len;

	/* Adjust page frag offset to appropriate after decap header */
	frag_page_offset =
		decap_hdr_pull_bytes;
	qdf_nbuf_move_frag_page_offset(head_msdu, 1, frag_page_offset);

	frag_size = qdf_nbuf_get_frag_size_by_idx(head_msdu, 1);
	pad_byte_pholder =
		RX_MONITOR_BUFFER_SIZE - (frag_size + DP_RX_MON_PACKET_OFFSET + DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE);

	if (msdu_meta->first_buffer && msdu_meta->last_buffer) {
		/* MSDU with single bufffer */
		amsdu_pad = frag_size & 0x3;
		amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;
		if (amsdu_pad && (amsdu_pad <= pad_byte_pholder)) {
			char *frag_addr_temp;

			/* Extend the frag by the pad and zero-fill it */
			qdf_nbuf_trim_add_frag_size(mpdu, 1, amsdu_pad, 0);
			frag_addr_temp =
				(char *)qdf_nbuf_get_frag_addr(mpdu, 1);
			frag_addr_temp = (frag_addr_temp +
					  qdf_nbuf_get_frag_size_by_idx(mpdu, 1)) -
				amsdu_pad;
			qdf_mem_zero(frag_addr_temp, amsdu_pad);
			amsdu_pad = 0;
		}
	} else {
		/* MSDU continues into later buffers; accumulate its length */
		tot_msdu_len = frag_size;
		amsdu_pad = 0;
	}

	pad_byte_pholder = 0;
	for (msdu_cur = mpdu; msdu_cur;) {
		/* frag_iter will start from 0 for second skb onwards */
		if (msdu_cur == mpdu)
			frag_iter = 2;
		else
			frag_iter = 0;

		num_frags = qdf_nbuf_get_nr_frags(msdu_cur);

		for (; frag_iter < num_frags; frag_iter++) {
			/* Construct destination address
			 *  ----------------------------------------------------------
			 * |            | L2_HDR_PAD   |   Decap HDR | Payload | Pad  |
			 * |            | (First buffer)             |         |      |
			 * |            |                            /        /       |
			 * |            >Frag address points here   /        /        |
			 * |            \                          /        /         |
			 * |             \ This bytes needs to    /        /          |
			 * |              \  removed to frame pkt/        /           |
			 * |               ----------------------        /            |
			 * |                                     |     /     Add      |
			 * |                                     |    /   amsdu pad   |
			 * |   LLC HDR will be added here      <-|    |   Byte for    |
			 * |        |                            |    |   last frame  |
			 * |         >Dest addr will point       |    |    if space   |
			 * |            somewhere in this area   |    |    available  |
			 * |  And amsdu_pad will be created if   |    |               |
			 * | dint get added in last buffer       |    |               |
			 * |       (First Buffer)                |    |               |
			 *  ----------------------------------------------------------
			 */
			/* If previous msdu end has received, modify next frag's offset to point to LLC */
			if (prev_msdu_end_received) {
				hdr_frag_size = qdf_nbuf_get_frag_size_by_idx(msdu_cur, frag_iter);
				/* Adjust page frag offset to point to llc/snap header */
				qdf_nbuf_trim_add_frag_size(msdu_cur, frag_iter, -(hdr_frag_size - msdu_llc_len), 0);
				prev_msdu_end_received = false;
				continue;
			}

			/* Per-MSDU metadata precedes the packet data in the frag */
			frag_addr =
				qdf_nbuf_get_frag_addr(msdu_cur, frag_iter) -
						       (DP_RX_MON_PACKET_OFFSET +
						       DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE);
			msdu_meta = (struct hal_rx_mon_msdu_info *)frag_addr;

			/*
			 * Update protocol and flow tag for MSDU
			 * update frag index in ctx_idx field
			 */
			QDF_NBUF_CB_RX_CTX_ID(msdu_cur) = frag_iter;

			frag_size = qdf_nbuf_get_frag_size_by_idx(msdu_cur,
					frag_iter);

			/* If Middle buffer, dont add any header */
			if ((!msdu_meta->first_buffer) &&
					(!msdu_meta->last_buffer)) {
				tot_msdu_len += frag_size;
				amsdu_pad = 0;
				pad_byte_pholder = 0;
				continue;
			}

			/* Calculate if current buffer has placeholder
			 * to accommodate amsdu pad byte
			 */
			pad_byte_pholder =
				RX_MONITOR_BUFFER_SIZE - (frag_size + (DP_RX_MON_PACKET_OFFSET +
							  DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE));
			/*
			 * We will come here only only three condition:
			 * 1. Msdu with single Buffer
			 * 2. First buffer in case MSDU is spread in multiple
			 *    buffer
			 * 3. Last buffer in case MSDU is spread in multiple
			 *    buffer
			 *
			 *         First buffER | Last buffer
			 * Case 1:      1       |     1
			 * Case 2:      1       |     0
			 * Case 3:      0       |     1
			 *
			 * In 3rd case only l2_hdr_padding byte will be Zero and
			 * in other case, It will be 2 Bytes.
			 */
			if (msdu_meta->first_buffer)
				l2_hdr_offset =
					DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE;
			else
				l2_hdr_offset = DP_RX_MON_RAW_L2_HDR_PAD_BYTE;

			if (msdu_meta->first_buffer) {
				/* Adjust page frag offset to point to 802.11 header */
				hdr_frag_size = qdf_nbuf_get_frag_size_by_idx(msdu_cur, frag_iter-1);
				qdf_nbuf_trim_add_frag_size(msdu_cur, frag_iter - 1, -(hdr_frag_size - (msdu_llc_len + amsdu_pad)), 0);

				/* Adjust page frag offset to appropriate after decap header */
				frag_page_offset =
					(decap_hdr_pull_bytes + l2_hdr_offset);
				if (frag_size > (decap_hdr_pull_bytes + l2_hdr_offset)) {
					qdf_nbuf_move_frag_page_offset(msdu_cur, frag_iter, frag_page_offset);
					frag_size = frag_size - (l2_hdr_offset + decap_hdr_pull_bytes);
				}


				/*
				 * Calculate new page offset and create hole
				 * if amsdu_pad required.
				 */
				tot_msdu_len = frag_size;
				/*
				 * No amsdu padding required for first frame of
				 * continuation buffer
				 */
				if (!msdu_meta->last_buffer) {
					amsdu_pad = 0;
					continue;
				}
			} else {
				tot_msdu_len += frag_size;
			}

			/* Will reach to this place in only two case:
			 * 1. Single buffer MSDU
			 * 2. Last buffer of MSDU in case of multiple buf MSDU
			 */

			/* This flag is used to identify msdu boundry */
			prev_msdu_end_received = true;
			/* Check size of buffer if amsdu padding required */
			amsdu_pad = tot_msdu_len & 0x3;
			amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;

			/* Create placeholder if current bufer can
			 * accommodate padding.
			 */
			if (amsdu_pad && (amsdu_pad <= pad_byte_pholder)) {
				char *frag_addr_temp;

				qdf_nbuf_trim_add_frag_size(msdu_cur,
						frag_iter,
						amsdu_pad, 0);
				frag_addr_temp = (char *)qdf_nbuf_get_frag_addr(msdu_cur,
						frag_iter);
				frag_addr_temp = (frag_addr_temp +
						qdf_nbuf_get_frag_size_by_idx(msdu_cur, frag_iter)) -
					amsdu_pad;
				qdf_mem_zero(frag_addr_temp, amsdu_pad);
				amsdu_pad = 0;
			}

			/* reset tot_msdu_len */
			tot_msdu_len = 0;
		}
		/* First nbuf's continuation lives in the ext list; after
		 * that, follow the queue next pointers
		 */
		if (is_nbuf_head) {
			msdu_cur = qdf_nbuf_get_ext_list(msdu_cur);
			is_nbuf_head = false;
		} else {
			msdu_cur = qdf_nbuf_queue_next(msdu_cur);
		}
	}

	return QDF_STATUS_SUCCESS;
}
931 
932 /**
933  * dp_rx_mon_flush_status_buf_queue () - Flush status buffer queue
934  *
935  * @pdev: DP pdev handle
936  *
937  *Return: void
938  */
939 static inline void
940 dp_rx_mon_flush_status_buf_queue(struct dp_pdev *pdev)
941 {
942 	struct dp_soc *soc = pdev->soc;
943 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
944 	struct dp_mon_pdev_be *mon_pdev_be =
945 		dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
946 	union dp_mon_desc_list_elem_t *desc_list = NULL;
947 	union dp_mon_desc_list_elem_t *tail = NULL;
948 	struct dp_mon_desc *mon_desc;
949 	uint8_t idx;
950 	void *buf;
951 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
952 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
953 	struct dp_mon_desc_pool *rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
954 	uint8_t work_done = 0;
955 	uint16_t status_buf_count;
956 
957 	if (!mon_pdev_be->desc_count) {
958 		dp_mon_info("no of status buffer count is zero: %pK", pdev);
959 		return;
960 	}
961 
962 	status_buf_count = mon_pdev_be->desc_count;
963 	for (idx = 0; idx < status_buf_count; idx++) {
964 		mon_desc = mon_pdev_be->status[idx];
965 		if (!mon_desc) {
966 			qdf_assert_always(0);
967 			return;
968 		}
969 
970 		buf = mon_desc->buf_addr;
971 
972 		dp_mon_add_to_free_desc_list(&desc_list, &tail, mon_desc);
973 		work_done++;
974 
975 		/* set status buffer pointer to NULL */
976 		mon_pdev_be->status[idx] = NULL;
977 		mon_pdev_be->desc_count--;
978 
979 		qdf_frag_free(buf);
980 		DP_STATS_INC(mon_soc, frag_free, 1);
981 	}
982 
983 	if (work_done) {
984 		mon_pdev->rx_mon_stats.mon_rx_bufs_replenished_dest +=
985 			work_done;
986 		dp_mon_buffers_replenish(soc, &soc->rxdma_mon_buf_ring[0],
987 					 rx_mon_desc_pool,
988 					 work_done,
989 					 &desc_list, &tail, NULL);
990 	}
991 }
992 
993 /**
994  * dp_rx_mon_handle_flush_n_trucated_ppdu () - Handle flush and truncated ppdu
995  *
996  * @soc: DP soc handle
997  * @pdev: pdev handle
998  * @mon_desc: mon sw desc
999  */
1000 static inline void
1001 dp_rx_mon_handle_flush_n_trucated_ppdu(struct dp_soc *soc,
1002 				       struct dp_pdev *pdev,
1003 				       struct dp_mon_desc *mon_desc)
1004 {
1005 	union dp_mon_desc_list_elem_t *desc_list = NULL;
1006 	union dp_mon_desc_list_elem_t *tail = NULL;
1007 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1008 	struct dp_mon_soc_be *mon_soc_be =
1009 			dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1010 	struct dp_mon_desc_pool *rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
1011 	uint16_t work_done;
1012 
1013 	/* Flush status buffers in queue */
1014 	dp_rx_mon_flush_status_buf_queue(pdev);
1015 	qdf_frag_free(mon_desc->buf_addr);
1016 	DP_STATS_INC(mon_soc, frag_free, 1);
1017 	dp_mon_add_to_free_desc_list(&desc_list, &tail, mon_desc);
1018 	work_done = 1;
1019 	dp_mon_buffers_replenish(soc, &soc->rxdma_mon_buf_ring[0],
1020 				 rx_mon_desc_pool,
1021 				 work_done,
1022 				 &desc_list, &tail, NULL);
1023 }
1024 
/**
 * dp_rx_mon_process_tlv_status() - Act on one parsed RX monitor status TLV
 *
 * @pdev: DP pdev handle
 * @ppdu_info: HAL PPDU info state being assembled for the current PPDU
 * @status_frag: status buffer fragment the TLV was parsed from
 * @tlv_status: TLV type code returned by the HAL status parser
 * @desc_list: [in/out] free-descriptor list head for reaped packet buffers
 * @tail: [in/out] tail of @desc_list
 *
 * Builds per-user MPDU nbufs from RX_HEADER and packet-buffer TLVs and
 * propagates MPDU/MSDU metadata on the corresponding start/end TLVs.
 * Packet-buffer descriptors reaped here are appended to @desc_list so
 * the caller can replenish them in one batch.
 *
 * Return: number of packet buffers reaped (0 or 1)
 */
uint8_t dp_rx_mon_process_tlv_status(struct dp_pdev *pdev,
				     struct hal_rx_ppdu_info *ppdu_info,
				     void *status_frag,
				     uint16_t tlv_status,
				     union dp_mon_desc_list_elem_t **desc_list,
				     union dp_mon_desc_list_elem_t **tail)
{
	struct dp_soc *soc  = pdev->soc;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	qdf_nbuf_t nbuf, tmp_nbuf;
	qdf_frag_t addr;
	uint8_t user_id = ppdu_info->user_id;
	uint8_t mpdu_idx = ppdu_info->mpdu_count[user_id];
	uint16_t num_frags;
	uint8_t num_buf_reaped = 0;
	QDF_STATUS status;

	/* Nothing to do unless monitor mode or lite monitor RX is active */
	if (!mon_pdev->monitor_configured &&
	    !dp_lite_mon_is_rx_enabled(mon_pdev)) {
		return num_buf_reaped;
	}

	switch (tlv_status) {
	case HAL_TLV_STATUS_HEADER: {
		/* If this is first RX_HEADER for MPDU, allocate skb
		 * else add frag to already allocated skb
		 */

		if (!ppdu_info->mpdu_info[user_id].mpdu_start_received) {

			/* First RX_HEADER of this MPDU: allocate the parent
			 * nbuf that will carry all msdus as frags.
			 */
			nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
					      DP_RX_MON_TLV_ROOM +
					      DP_RX_MON_MAX_RADIO_TAP_HDR,
					      DP_RX_MON_TLV_ROOM +
					      DP_RX_MON_MAX_RADIO_TAP_HDR,
					      4, FALSE);

			/* Set *head_msdu->next as NULL as all msdus are
			 *                          * mapped via nr frags
			 *                                                   */
			if (qdf_unlikely(!nbuf)) {
				dp_mon_err("malloc failed pdev: %pK ", pdev);
				return num_buf_reaped;
			}

			mon_pdev->rx_mon_stats.parent_buf_alloc++;

			dp_rx_mon_set_zero(nbuf);

			qdf_nbuf_set_next(nbuf, NULL);

			qdf_nbuf_queue_add(&ppdu_info->mpdu_q[user_id], nbuf);

			/* Attach the RX header portion of the status frag
			 * (past the TLV header) as the first frag.
			 */
			status = dp_rx_mon_nbuf_add_rx_frag(nbuf, status_frag,
							    ppdu_info->hdr_len - DP_RX_MON_RX_HDR_OFFSET,
							    ppdu_info->data - (unsigned char *)status_frag + 4,
							    DP_MON_DATA_BUFFER_SIZE, true);
			if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
				dp_mon_err("num_frags exceeding MAX frags");
				qdf_assert_always(0);
			}
			ppdu_info->mpdu_info[ppdu_info->user_id].mpdu_start_received = true;
			ppdu_info->mpdu_info[user_id].first_rx_hdr_rcvd = true;
			/* initialize decap type to invalid, this will be set to appropriate
			 * value once the mpdu start tlv is received
			 */
			ppdu_info->mpdu_info[user_id].decap_type = DP_MON_DECAP_FORMAT_INVALID;
		} else {
			/* Subsequent RX_HEADER of the same MPDU. Raw decap
			 * MPDUs carry no per-msdu headers to append.
			 */
			if (ppdu_info->mpdu_info[user_id].decap_type ==
					HAL_HW_RX_DECAP_FORMAT_RAW) {
				return num_buf_reaped;
			}

			/* For lite monitor above-msdu levels, skip per-msdu
			 * header accumulation.
			 */
			if (dp_lite_mon_is_rx_enabled(mon_pdev) &&
			    !dp_lite_mon_is_level_msdu(mon_pdev))
				break;

			nbuf = qdf_nbuf_queue_last(&ppdu_info->mpdu_q[user_id]);
			if (qdf_unlikely(!nbuf)) {
				dp_mon_debug("nbuf is NULL");
				return num_buf_reaped;
			}

			/* Find an nbuf in the frag chain that still has a
			 * free frag slot; extend the chain if all are full.
			 */
			tmp_nbuf = qdf_get_nbuf_valid_frag(nbuf);

			if (!tmp_nbuf) {
				tmp_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
							  DP_RX_MON_MAX_MONITOR_HEADER,
							  DP_RX_MON_MAX_MONITOR_HEADER,
							  4, FALSE);
				if (qdf_unlikely(!tmp_nbuf)) {
					dp_mon_err("nbuf is NULL");
					qdf_assert_always(0);
				}
				mon_pdev->rx_mon_stats.parent_buf_alloc++;
				/* add new skb to frag list */
				qdf_nbuf_append_ext_list(nbuf, tmp_nbuf,
							 qdf_nbuf_len(tmp_nbuf));
			}
			dp_rx_mon_nbuf_add_rx_frag(tmp_nbuf, status_frag,
						   ppdu_info->hdr_len - DP_RX_MON_RX_HDR_OFFSET,
						   ppdu_info->data - (unsigned char *)status_frag + 4,
						   DP_MON_DATA_BUFFER_SIZE,
						   true);
		}
		ppdu_info->rx_hdr_rcvd[user_id] = true;
	}
	break;
	case HAL_TLV_STATUS_MON_BUF_ADDR:
	{
		/* HW handed over a packet buffer; its sw_cookie is the
		 * dp_mon_desc pointer for the buffer.
		 */
		struct hal_rx_mon_msdu_info *buf_info;
		struct hal_mon_packet_info *packet_info = &ppdu_info->packet_info;
		struct dp_mon_desc *mon_desc = (struct dp_mon_desc *)(uintptr_t)ppdu_info->packet_info.sw_cookie;
		struct hal_rx_mon_mpdu_info *mpdu_info;
		uint16_t frag_idx = 0;

		qdf_assert_always(mon_desc);

		if (mon_desc->magic != DP_MON_DESC_MAGIC)
			qdf_assert_always(0);

		addr = mon_desc->buf_addr;
		qdf_assert_always(addr);

		mpdu_info = &ppdu_info->mpdu_info[user_id];
		/* Unmap the buffer before the CPU touches it */
		if (!mon_desc->unmapped) {
			qdf_mem_unmap_page(soc->osdev,
					   (qdf_dma_addr_t)mon_desc->paddr,
				   DP_MON_DATA_BUFFER_SIZE,
					   QDF_DMA_FROM_DEVICE);
			mon_desc->unmapped = 1;
		}
		dp_mon_add_to_free_desc_list(desc_list, tail, mon_desc);
		num_buf_reaped++;

		mon_pdev->rx_mon_stats.pkt_buf_count++;

		if (qdf_unlikely(!ppdu_info->rx_hdr_rcvd[user_id])) {

			/* WAR: RX_HDR is not received for this MPDU, drop this frame */
			mon_pdev->rx_mon_stats.rx_hdr_not_received++;
			DP_STATS_INC(mon_soc, frag_free, 1);
			qdf_frag_free(addr);
			return num_buf_reaped;
		}

		nbuf = qdf_nbuf_queue_last(&ppdu_info->mpdu_q[user_id]);

		if (mpdu_info->decap_type == DP_MON_DECAP_FORMAT_INVALID) {
			/* decap type is invalid, drop the frame */
			mon_pdev->rx_mon_stats.mpdu_decap_type_invalid++;
			DP_STATS_INC(mon_soc, frag_free, 1);
			mon_pdev->rx_mon_stats.parent_buf_free++;
			qdf_frag_free(addr);
			qdf_nbuf_queue_remove_last(&ppdu_info->mpdu_q[user_id]);
			qdf_nbuf_free(nbuf);
			/* we have freed the nbuf mark the q entry null */
			return num_buf_reaped;
		}

		tmp_nbuf = qdf_get_nbuf_valid_frag(nbuf);

		if (!tmp_nbuf) {
			/* Frag slots exhausted: chain a fresh nbuf; on
			 * failure drop the whole MPDU under assembly.
			 */
			tmp_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
						  DP_RX_MON_MAX_MONITOR_HEADER,
						  DP_RX_MON_MAX_MONITOR_HEADER,
						  4, FALSE);
			if (qdf_unlikely(!tmp_nbuf)) {
				dp_mon_err("nbuf is NULL");
				DP_STATS_INC(mon_soc, frag_free, 1);
				mon_pdev->rx_mon_stats.parent_buf_free++;
				qdf_frag_free(addr);
				/* remove this nbuf from queue */
				qdf_nbuf_queue_remove_last(&ppdu_info->mpdu_q[user_id]);
				qdf_nbuf_free(nbuf);
				return num_buf_reaped;
			}
			mon_pdev->rx_mon_stats.parent_buf_alloc++;
			/* add new skb to frag list */
			qdf_nbuf_append_ext_list(nbuf, tmp_nbuf,
						 qdf_nbuf_len(tmp_nbuf));
		}
		mpdu_info->full_pkt = true;

		if (mpdu_info->decap_type == HAL_HW_RX_DECAP_FORMAT_RAW) {
			/* Raw decap: the first packet buffer replaces the
			 * RX header frag attached on HAL_TLV_STATUS_HEADER.
			 */
			if (mpdu_info->first_rx_hdr_rcvd) {
				qdf_nbuf_remove_frag(nbuf, frag_idx, DP_MON_DATA_BUFFER_SIZE);
				dp_rx_mon_nbuf_add_rx_frag(nbuf, addr,
							   packet_info->dma_length,
							   DP_RX_MON_PACKET_OFFSET,
							   DP_MON_DATA_BUFFER_SIZE,
							   false);
				DP_STATS_INC(mon_soc, frag_free, 1);
				mpdu_info->first_rx_hdr_rcvd = false;
			} else {
				dp_rx_mon_nbuf_add_rx_frag(tmp_nbuf, addr,
							   packet_info->dma_length,
							   DP_RX_MON_PACKET_OFFSET,
							   DP_MON_DATA_BUFFER_SIZE,
							   false);
				DP_STATS_INC(mon_soc, frag_free, 1);
			}
		} else {
			/* Non-raw decap: account for the L2 header pad and
			 * stash per-msdu metadata at the buffer start.
			 */
			dp_rx_mon_nbuf_add_rx_frag(tmp_nbuf, addr,
						   packet_info->dma_length,
						   DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE +
						   DP_RX_MON_PACKET_OFFSET,
						   DP_MON_DATA_BUFFER_SIZE,
						   false);
				DP_STATS_INC(mon_soc, frag_free, 1);
			buf_info = addr;

			if (!ppdu_info->msdu[user_id].first_buffer) {
				buf_info->first_buffer = true;
				ppdu_info->msdu[user_id].first_buffer = true;
			} else {
				buf_info->first_buffer = false;
			}

			if (packet_info->msdu_continuation)
				buf_info->last_buffer = false;
			else
				buf_info->last_buffer = true;

			buf_info->frag_len = packet_info->dma_length;
		}
		if (qdf_unlikely(packet_info->truncated))
			mpdu_info->truncated = true;
	}
	break;
	case HAL_TLV_STATUS_MSDU_END:
	{
		struct hal_rx_mon_msdu_info *msdu_info = &ppdu_info->msdu[user_id];
		struct hal_rx_mon_msdu_info *last_buf_info;
		/* update msdu metadata at last buffer of msdu in MPDU */
		if (qdf_unlikely(!ppdu_info->rx_hdr_rcvd[user_id])) {
			/* reset msdu info for next msdu for same user */
			qdf_mem_zero(msdu_info, sizeof(*msdu_info));
			dp_mon_debug(" <%d> nbuf is NULL, return user: %d mpdu_idx: %d",
				     __LINE__, user_id, mpdu_idx);
			break;
		}
		nbuf = qdf_nbuf_queue_last(&ppdu_info->mpdu_q[user_id]);
		num_frags = qdf_nbuf_get_nr_frags(nbuf);
		if (ppdu_info->mpdu_info[user_id].decap_type ==
				HAL_HW_RX_DECAP_FORMAT_RAW) {
			break;
		}
		/* This points to last buffer of MSDU . update metadata here */
		addr = qdf_nbuf_get_frag_addr(nbuf, num_frags - 1) -
					      (DP_RX_MON_PACKET_OFFSET +
					       DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE);
		last_buf_info = addr;

		last_buf_info->first_msdu = msdu_info->first_msdu;
		last_buf_info->last_msdu = msdu_info->last_msdu;
		last_buf_info->decap_type = msdu_info->decap_type;
		last_buf_info->msdu_index = msdu_info->msdu_index;
		last_buf_info->user_rssi = msdu_info->user_rssi;
		last_buf_info->reception_type = msdu_info->reception_type;
		last_buf_info->msdu_len = msdu_info->msdu_len;

		dp_rx_mon_pf_tag_to_buf_headroom_2_0(nbuf, ppdu_info, pdev,
						     soc);
		/* reset msdu info for next msdu for same user */
		qdf_mem_zero(msdu_info, sizeof(*msdu_info));

		/* If flow classification is enabled,
		 * update cce_metadata and fse_metadata
		 */
	}
	break;
	case HAL_TLV_STATUS_MPDU_START:
	{
		/* Propagate the decap type into the mpdu metadata stored
		 * at the head of the parent nbuf.
		 */
		struct hal_rx_mon_mpdu_info *mpdu_info, *mpdu_meta;

		if (qdf_unlikely(!ppdu_info->rx_hdr_rcvd[user_id])) {
			dp_mon_debug(" <%d> nbuf is NULL, return user: %d mpdu_idx: %d", __LINE__, user_id, mpdu_idx);
			break;
		}
		nbuf = qdf_nbuf_queue_last(&ppdu_info->mpdu_q[user_id]);
		mpdu_meta = (struct hal_rx_mon_mpdu_info *)qdf_nbuf_data(nbuf);
		mpdu_info = &ppdu_info->mpdu_info[user_id];
		mpdu_meta->decap_type = mpdu_info->decap_type;
		ppdu_info->mpdu_info[ppdu_info->user_id].mpdu_start_received = true;
	break;
	}
	case HAL_TLV_STATUS_MPDU_END:
	{
		/* Copy error/completion flags into the mpdu metadata and
		 * reset per-mpdu state for the next MPDU of this user.
		 */
		struct hal_rx_mon_mpdu_info *mpdu_info, *mpdu_meta;
		mpdu_info = &ppdu_info->mpdu_info[user_id];
		if (qdf_unlikely(!ppdu_info->rx_hdr_rcvd[user_id])) {
			/* reset mpdu info for next mpdu for same user */
			qdf_mem_zero(mpdu_info, sizeof(*mpdu_info));
			dp_mon_debug(" <%d> nbuf is NULL, return user: %d mpdu_idx: %d",
				     __LINE__, user_id, mpdu_idx);
			break;
		}
		nbuf = qdf_nbuf_queue_last(&ppdu_info->mpdu_q[user_id]);
		mpdu_meta = (struct hal_rx_mon_mpdu_info *)qdf_nbuf_data(nbuf);
		mpdu_meta->mpdu_length_err = mpdu_info->mpdu_length_err;
		mpdu_meta->fcs_err = mpdu_info->fcs_err;
		ppdu_info->rx_status.rs_fcs_err = mpdu_info->fcs_err;
		mpdu_meta->overflow_err = mpdu_info->overflow_err;
		mpdu_meta->decrypt_err = mpdu_info->decrypt_err;
		mpdu_meta->full_pkt = mpdu_info->full_pkt;
		mpdu_meta->truncated = mpdu_info->truncated;

		/* reset mpdu info for next mpdu for same user */
		qdf_mem_zero(mpdu_info, sizeof(*mpdu_info));
		ppdu_info->mpdu_info[ppdu_info->user_id].mpdu_start_received = false;
		ppdu_info->mpdu_count[user_id]++;
		ppdu_info->rx_hdr_rcvd[user_id] = false;
	}
	break;
	}
	return num_buf_reaped;
}
1344 
1345 /**
1346  * dp_rx_mon_process_status_tlv () - Handle mon status process TLV
1347  *
1348  * @pdev: DP pdev handle
1349  *
1350  * Return
1351  */
1352 static inline struct hal_rx_ppdu_info *
1353 dp_rx_mon_process_status_tlv(struct dp_pdev *pdev)
1354 {
1355 	struct dp_soc *soc = pdev->soc;
1356 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1357 	struct dp_mon_pdev_be *mon_pdev_be =
1358 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1359 	union dp_mon_desc_list_elem_t *desc_list = NULL;
1360 	union dp_mon_desc_list_elem_t *tail = NULL;
1361 	struct dp_mon_desc *mon_desc;
1362 	uint8_t idx, user;
1363 	void *buf;
1364 	struct hal_rx_ppdu_info *ppdu_info;
1365 	uint8_t *rx_tlv;
1366 	uint8_t *rx_tlv_start;
1367 	uint16_t end_offset = 0;
1368 	uint16_t tlv_status = HAL_TLV_STATUS_BUF_DONE;
1369 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1370 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1371 	struct dp_mon_desc_pool *rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
1372 	uint8_t work_done = 0;
1373 	uint16_t status_buf_count;
1374 
1375 	if (!mon_pdev_be->desc_count) {
1376 		dp_mon_err("no of status buffer count is zero: %pK", pdev);
1377 		return NULL;
1378 	}
1379 
1380 	ppdu_info = dp_rx_mon_get_ppdu_info(mon_pdev);
1381 
1382 	if (!ppdu_info) {
1383 		dp_mon_err("ppdu_info malloc failed pdev: %pK", pdev);
1384 		dp_rx_mon_flush_status_buf_queue(pdev);
1385 		return NULL;
1386 	}
1387 	mon_pdev->rx_mon_stats.total_ppdu_info_alloc++;
1388 
1389 	for (user = 0; user < HAL_MAX_UL_MU_USERS; user++)
1390 		qdf_nbuf_queue_init(&ppdu_info->mpdu_q[user]);
1391 
1392 	status_buf_count = mon_pdev_be->desc_count;
1393 	for (idx = 0; idx < status_buf_count; idx++) {
1394 		mon_desc = mon_pdev_be->status[idx];
1395 		if (!mon_desc) {
1396 			qdf_assert_always(0);
1397 			return NULL;
1398 		}
1399 
1400 		buf = mon_desc->buf_addr;
1401 		end_offset = mon_desc->end_offset;
1402 
1403 		dp_mon_add_to_free_desc_list(&desc_list, &tail, mon_desc);
1404 		work_done++;
1405 
1406 		rx_tlv = buf;
1407 		rx_tlv_start = buf;
1408 
1409 		do {
1410 			tlv_status = hal_rx_status_get_tlv_info(rx_tlv,
1411 								ppdu_info,
1412 								pdev->soc->hal_soc,
1413 								buf);
1414 
1415 			work_done += dp_rx_mon_process_tlv_status(pdev,
1416 								  ppdu_info,
1417 								  buf,
1418 								  tlv_status,
1419 								  &desc_list,
1420 								  &tail);
1421 			rx_tlv = hal_rx_status_get_next_tlv(rx_tlv, 1);
1422 
1423 			/* HW provides end_offset (how many bytes HW DMA'ed)
1424 			 * as part of descriptor, use this as delimiter for
1425 			 * status buffer
1426 			 */
1427 			if ((rx_tlv - rx_tlv_start) >= (end_offset + 1))
1428 				break;
1429 
1430 	} while ((tlv_status == HAL_TLV_STATUS_PPDU_NOT_DONE) ||
1431 			(tlv_status == HAL_TLV_STATUS_HEADER) ||
1432 			(tlv_status == HAL_TLV_STATUS_MPDU_END) ||
1433 			(tlv_status == HAL_TLV_STATUS_MSDU_END) ||
1434 			(tlv_status == HAL_TLV_STATUS_MON_BUF_ADDR) ||
1435 			(tlv_status == HAL_TLV_STATUS_MPDU_START));
1436 
1437 		/* set status buffer pointer to NULL */
1438 		mon_pdev_be->status[idx] = NULL;
1439 		mon_pdev_be->desc_count--;
1440 
1441 		qdf_frag_free(buf);
1442 		DP_STATS_INC(mon_soc, frag_free, 1);
1443 		mon_pdev->rx_mon_stats.status_buf_count++;
1444 	}
1445 
1446 	if (work_done) {
1447 		mon_pdev->rx_mon_stats.mon_rx_bufs_replenished_dest +=
1448 				work_done;
1449 		dp_mon_buffers_replenish(soc, &soc->rxdma_mon_buf_ring[0],
1450 					 rx_mon_desc_pool,
1451 					 work_done,
1452 					 &desc_list, &tail, NULL);
1453 	}
1454 
1455 	ppdu_info->rx_status.tsft = ppdu_info->rx_status.tsft +
1456 				    pdev->timestamp.mlo_offset_lo_us +
1457 				    ((uint64_t)pdev->timestamp.mlo_offset_hi_us
1458 				    << 32);
1459 
1460 	return ppdu_info;
1461 }
1462 
1463 /**
1464  * dp_rx_mon_update_peer_id() - Update sw_peer_id with link peer_id
1465  *
1466  * @pdev: DP pdev handle
1467  * @ppdu_info: HAL PPDU Info buffer
1468  *
1469  * Return: none
1470  */
1471 #ifdef WLAN_FEATURE_11BE_MLO
1472 #define DP_PEER_ID_MASK 0x3FFF
1473 static inline
1474 void dp_rx_mon_update_peer_id(struct dp_pdev *pdev,
1475 			      struct hal_rx_ppdu_info *ppdu_info)
1476 {
1477 	uint32_t i;
1478 	uint16_t peer_id;
1479 	struct dp_soc *soc = pdev->soc;
1480 	uint32_t num_users = ppdu_info->com_info.num_users;
1481 
1482 	for (i = 0; i < num_users; i++) {
1483 		peer_id = ppdu_info->rx_user_status[i].sw_peer_id;
1484 		if (peer_id == HTT_INVALID_PEER)
1485 			continue;
1486 		/*
1487 		+---------------------------------------------------------------------+
1488 		| 15 | 14 | 13 | 12 | 11 | 10 | 9 | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
1489 		+---------------------------------------------------------------------+
1490 		| CHIP ID | ML |                     PEER ID                          |
1491 		+---------------------------------------------------------------------+
1492 		*/
1493 		peer_id &= DP_PEER_ID_MASK;
1494 		peer_id = dp_get_link_peer_id_by_lmac_id(soc, peer_id,
1495 							 pdev->lmac_id);
1496 		ppdu_info->rx_user_status[i].sw_peer_id = peer_id;
1497 	}
1498 }
1499 #else
1500 static inline
1501 void dp_rx_mon_update_peer_id(struct dp_pdev *pdev,
1502 			      struct hal_rx_ppdu_info *ppdu_info)
1503 {
1504 }
1505 #endif
1506 
1507 static inline uint32_t
1508 dp_rx_mon_srng_process_2_0(struct dp_soc *soc, struct dp_intr *int_ctx,
1509 			   uint32_t mac_id, uint32_t quota)
1510 {
1511 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1512 	struct dp_mon_pdev *mon_pdev;
1513 	struct dp_mon_pdev_be *mon_pdev_be;
1514 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1515 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1516 	struct dp_mon_desc_pool *rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
1517 	hal_soc_handle_t hal_soc = soc->hal_soc;
1518 	void *rx_mon_dst_ring_desc;
1519 	void *mon_dst_srng;
1520 	uint32_t work_done = 0;
1521 	struct hal_rx_ppdu_info *ppdu_info = NULL;
1522 	QDF_STATUS status;
1523 
1524 	if (!pdev) {
1525 		dp_mon_err("%pK: pdev is null for mac_id = %d", soc, mac_id);
1526 		return work_done;
1527 	}
1528 
1529 	mon_pdev = pdev->monitor_pdev;
1530 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1531 	mon_dst_srng = soc->rxdma_mon_dst_ring[mac_id].hal_srng;
1532 
1533 	if (!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)) {
1534 		dp_mon_err("%pK: : HAL Monitor Destination Ring Init Failed -- %pK",
1535 			   soc, mon_dst_srng);
1536 		return work_done;
1537 	}
1538 
1539 	hal_soc = soc->hal_soc;
1540 
1541 	qdf_assert((hal_soc && pdev));
1542 
1543 	qdf_spin_lock_bh(&mon_pdev->mon_lock);
1544 
1545 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, mon_dst_srng))) {
1546 		dp_mon_err("%s %d : HAL Mon Dest Ring access Failed -- %pK",
1547 			   __func__, __LINE__, mon_dst_srng);
1548 		qdf_spin_unlock_bh(&mon_pdev->mon_lock);
1549 		return work_done;
1550 	}
1551 
1552 	while (qdf_likely((rx_mon_dst_ring_desc =
1553 			  (void *)hal_srng_dst_peek(hal_soc, mon_dst_srng))
1554 				&& quota--)) {
1555 		struct hal_mon_desc hal_mon_rx_desc = {0};
1556 		struct dp_mon_desc *mon_desc;
1557 		hal_be_get_mon_dest_status(soc->hal_soc,
1558 					   rx_mon_dst_ring_desc,
1559 					   &hal_mon_rx_desc);
1560 		/* If it's empty descriptor, skip processing
1561 		 * and process next hW desc
1562 		 */
1563 		if (hal_mon_rx_desc.empty_descriptor == 1) {
1564 			dp_mon_debug("empty descriptor found mon_pdev: %pK",
1565 				     mon_pdev);
1566 			rx_mon_dst_ring_desc =
1567 				hal_srng_dst_get_next(hal_soc, mon_dst_srng);
1568 			mon_pdev->rx_mon_stats.empty_desc_ppdu++;
1569 			continue;
1570 		}
1571 		mon_desc = (struct dp_mon_desc *)(uintptr_t)(hal_mon_rx_desc.buf_addr);
1572 		qdf_assert_always(mon_desc);
1573 
1574 		if ((mon_desc == mon_pdev_be->prev_rxmon_desc) &&
1575 		    (mon_desc->cookie == mon_pdev_be->prev_rxmon_cookie)) {
1576 			dp_mon_err("duplicate descritout found mon_pdev: %pK mon_desc: %pK cookie: %d",
1577 				   mon_pdev, mon_desc, mon_desc->cookie);
1578 			mon_pdev->rx_mon_stats.dup_mon_buf_cnt++;
1579 			hal_srng_dst_get_next(hal_soc, mon_dst_srng);
1580 			continue;
1581 		}
1582 		mon_pdev_be->prev_rxmon_desc = mon_desc;
1583 		mon_pdev_be->prev_rxmon_cookie = mon_desc->cookie;
1584 
1585 		if (!mon_desc->unmapped) {
1586 			qdf_mem_unmap_page(soc->osdev, mon_desc->paddr,
1587 					   rx_mon_desc_pool->buf_size,
1588 					   QDF_DMA_FROM_DEVICE);
1589 			mon_desc->unmapped = 1;
1590 		}
1591 		mon_desc->end_offset = hal_mon_rx_desc.end_offset;
1592 
1593 		/* Flush and truncated status buffers content
1594 		 * need to discarded
1595 		 */
1596 		if (hal_mon_rx_desc.end_reason == HAL_MON_FLUSH_DETECTED ||
1597 		    hal_mon_rx_desc.end_reason == HAL_MON_PPDU_TRUNCATED) {
1598 			dp_mon_debug("end_resaon: %d mon_pdev: %pK",
1599 				     hal_mon_rx_desc.end_reason, mon_pdev);
1600 			mon_pdev->rx_mon_stats.status_ppdu_drop++;
1601 			dp_rx_mon_handle_flush_n_trucated_ppdu(soc,
1602 							       pdev,
1603 							       mon_desc);
1604 			rx_mon_dst_ring_desc = hal_srng_dst_get_next(hal_soc,
1605 							mon_dst_srng);
1606 			continue;
1607 		}
1608 		if (mon_pdev_be->desc_count >= DP_MON_MAX_STATUS_BUF)
1609 			qdf_assert_always(0);
1610 
1611 		mon_pdev_be->status[mon_pdev_be->desc_count++] = mon_desc;
1612 
1613 		rx_mon_dst_ring_desc = hal_srng_dst_get_next(hal_soc, mon_dst_srng);
1614 
1615 		status = dp_rx_process_pktlog_be(soc, pdev, ppdu_info,
1616 						 mon_desc->buf_addr,
1617 						 hal_mon_rx_desc.end_offset);
1618 
1619 		if (hal_mon_rx_desc.end_reason == HAL_MON_STATUS_BUFFER_FULL)
1620 			continue;
1621 
1622 		mon_pdev->rx_mon_stats.status_ppdu_done++;
1623 
1624 		ppdu_info = dp_rx_mon_process_status_tlv(pdev);
1625 
1626 		if (ppdu_info)
1627 			dp_rx_mon_update_peer_id(pdev, ppdu_info);
1628 
1629 		/* Call enhanced stats update API */
1630 		if (mon_pdev->enhanced_stats_en && ppdu_info)
1631 			dp_rx_handle_ppdu_stats(soc, pdev, ppdu_info);
1632 		else if (dp_cfr_rcc_mode_status(pdev) && ppdu_info)
1633 			dp_rx_handle_cfr(soc, pdev, ppdu_info);
1634 
1635 		status = dp_rx_mon_add_ppdu_info_to_wq(pdev, ppdu_info);
1636 		if (status != QDF_STATUS_SUCCESS) {
1637 			if (ppdu_info)
1638 				__dp_rx_mon_free_ppdu_info(mon_pdev, ppdu_info);
1639 		}
1640 
1641 		work_done++;
1642 
1643 		/* desc_count should be zero  after PPDU status processing */
1644 		if (mon_pdev_be->desc_count > 0)
1645 			qdf_assert_always(0);
1646 
1647 		mon_pdev_be->desc_count = 0;
1648 	}
1649 	dp_srng_access_end(int_ctx, soc, mon_dst_srng);
1650 
1651 	qdf_spin_unlock_bh(&mon_pdev->mon_lock);
1652 	dp_mon_info("mac_id: %d, work_done:%d", mac_id, work_done);
1653 	return work_done;
1654 }
1655 
1656 uint32_t
1657 dp_rx_mon_process_2_0(struct dp_soc *soc, struct dp_intr *int_ctx,
1658 		      uint32_t mac_id, uint32_t quota)
1659 {
1660 	uint32_t work_done;
1661 
1662 	work_done = dp_rx_mon_srng_process_2_0(soc, int_ctx, mac_id, quota);
1663 
1664 	return work_done;
1665 }
1666 
1667 void
1668 dp_rx_mon_buf_desc_pool_deinit(struct dp_soc *soc)
1669 {
1670 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1671 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1672 
1673 	/* Drain page frag cachce before pool deinit */
1674 	qdf_frag_cache_drain(&mon_soc_be->rx_desc_mon.pf_cache);
1675 	dp_mon_desc_pool_deinit(&mon_soc_be->rx_desc_mon);
1676 }
1677 
1678 QDF_STATUS
1679 dp_rx_mon_buf_desc_pool_init(struct dp_soc *soc)
1680 {
1681 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1682 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1683 	uint32_t num_entries;
1684 
1685 	num_entries =
1686 		wlan_cfg_get_dp_soc_rx_mon_buf_ring_size(soc->wlan_cfg_ctx);
1687 	return dp_mon_desc_pool_init(&mon_soc_be->rx_desc_mon, num_entries);
1688 }
1689 
1690 void dp_rx_mon_buf_desc_pool_free(struct dp_soc *soc)
1691 {
1692 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1693 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1694 
1695 	if (mon_soc)
1696 		dp_mon_desc_pool_free(&mon_soc_be->rx_desc_mon);
1697 }
1698 
1699 QDF_STATUS
1700 dp_rx_mon_buf_desc_pool_alloc(struct dp_soc *soc)
1701 {
1702 	struct dp_srng *mon_buf_ring;
1703 	struct dp_mon_desc_pool *rx_mon_desc_pool;
1704 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1705 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1706 	int entries;
1707 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
1708 
1709 	soc_cfg_ctx = soc->wlan_cfg_ctx;
1710 
1711 	entries = wlan_cfg_get_dp_soc_rx_mon_buf_ring_size(soc_cfg_ctx);
1712 	mon_buf_ring = &soc->rxdma_mon_buf_ring[0];
1713 
1714 	rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
1715 
1716 	qdf_print("%s:%d rx mon buf desc pool entries: %d", __func__, __LINE__, entries);
1717 	return dp_mon_desc_pool_alloc(entries, rx_mon_desc_pool);
1718 }
1719 
1720 void
1721 dp_rx_mon_buffers_free(struct dp_soc *soc)
1722 {
1723 	struct dp_mon_desc_pool *rx_mon_desc_pool;
1724 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1725 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1726 
1727 	rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
1728 
1729 	dp_mon_pool_frag_unmap_and_free(soc, rx_mon_desc_pool);
1730 }
1731 
1732 QDF_STATUS
1733 dp_rx_mon_buffers_alloc(struct dp_soc *soc, uint32_t size)
1734 {
1735 	struct dp_srng *mon_buf_ring;
1736 	struct dp_mon_desc_pool *rx_mon_desc_pool;
1737 	union dp_mon_desc_list_elem_t *desc_list = NULL;
1738 	union dp_mon_desc_list_elem_t *tail = NULL;
1739 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1740 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1741 
1742 	mon_buf_ring = &soc->rxdma_mon_buf_ring[0];
1743 
1744 	rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
1745 
1746 	return dp_mon_buffers_replenish(soc, mon_buf_ring,
1747 					rx_mon_desc_pool,
1748 					size,
1749 					&desc_list, &tail, NULL);
1750 }
1751 
1752 #ifdef QCA_ENHANCED_STATS_SUPPORT
1753 void
1754 dp_rx_mon_populate_ppdu_usr_info_2_0(struct mon_rx_user_status *rx_user_status,
1755 				     struct cdp_rx_stats_ppdu_user *ppdu_user)
1756 {
1757 	ppdu_user->mpdu_retries = rx_user_status->retry_mpdu;
1758 }
1759 
1760 #ifdef WLAN_FEATURE_11BE
1761 void dp_rx_mon_stats_update_2_0(struct dp_mon_peer *mon_peer,
1762 				struct cdp_rx_indication_ppdu *ppdu,
1763 				struct cdp_rx_stats_ppdu_user *ppdu_user)
1764 {
1765 	uint8_t mcs, preamble, ppdu_type, punc_mode;
1766 	uint32_t num_msdu;
1767 
1768 	preamble = ppdu->u.preamble;
1769 	ppdu_type = ppdu->u.ppdu_type;
1770 	num_msdu = ppdu_user->num_msdu;
1771 	punc_mode = ppdu->punc_bw;
1772 
1773 	if (ppdu_type == HAL_RX_TYPE_SU)
1774 		mcs = ppdu->u.mcs;
1775 	else
1776 		mcs = ppdu_user->mcs;
1777 
1778 	DP_STATS_INC(mon_peer, rx.mpdu_retry_cnt, ppdu_user->mpdu_retries);
1779 	DP_STATS_INC(mon_peer, rx.punc_bw[punc_mode], num_msdu);
1780 	DP_STATS_INCC(mon_peer,
1781 		      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
1782 		      ((mcs >= MAX_MCS_11BE) && (preamble == DOT11_BE)));
1783 	DP_STATS_INCC(mon_peer,
1784 		      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
1785 		      ((mcs < MAX_MCS_11BE) && (preamble == DOT11_BE)));
1786 	DP_STATS_INCC(mon_peer,
1787 		      rx.su_be_ppdu_cnt.mcs_count[MAX_MCS - 1], 1,
1788 		      ((mcs >= (MAX_MCS_11BE)) && (preamble == DOT11_BE) &&
1789 		      (ppdu_type == HAL_RX_TYPE_SU)));
1790 	DP_STATS_INCC(mon_peer,
1791 		      rx.su_be_ppdu_cnt.mcs_count[mcs], 1,
1792 		      ((mcs < (MAX_MCS_11BE)) && (preamble == DOT11_BE) &&
1793 		      (ppdu_type == HAL_RX_TYPE_SU)));
1794 	DP_STATS_INCC(mon_peer,
1795 		      rx.mu_be_ppdu_cnt[TXRX_TYPE_MU_OFDMA].mcs_count[MAX_MCS - 1],
1796 		      1, ((mcs >= (MAX_MCS_11BE)) &&
1797 		      (preamble == DOT11_BE) &&
1798 		      (ppdu_type == HAL_RX_TYPE_MU_OFDMA)));
1799 	DP_STATS_INCC(mon_peer,
1800 		      rx.mu_be_ppdu_cnt[TXRX_TYPE_MU_OFDMA].mcs_count[mcs],
1801 		      1, ((mcs < (MAX_MCS_11BE)) &&
1802 		      (preamble == DOT11_BE) &&
1803 		      (ppdu_type == HAL_RX_TYPE_MU_OFDMA)));
1804 	DP_STATS_INCC(mon_peer,
1805 		      rx.mu_be_ppdu_cnt[TXRX_TYPE_MU_MIMO].mcs_count[MAX_MCS - 1],
1806 		      1, ((mcs >= (MAX_MCS_11BE)) &&
1807 		      (preamble == DOT11_BE) &&
1808 		      (ppdu_type == HAL_RX_TYPE_MU_MIMO)));
1809 	DP_STATS_INCC(mon_peer,
1810 		      rx.mu_be_ppdu_cnt[TXRX_TYPE_MU_MIMO].mcs_count[mcs],
1811 		      1, ((mcs < (MAX_MCS_11BE)) &&
1812 		      (preamble == DOT11_BE) &&
1813 		      (ppdu_type == HAL_RX_TYPE_MU_MIMO)));
1814 }
1815 
1816 void
1817 dp_rx_mon_populate_ppdu_info_2_0(struct hal_rx_ppdu_info *hal_ppdu_info,
1818 				 struct cdp_rx_indication_ppdu *ppdu)
1819 {
1820 	uint16_t puncture_pattern;
1821 	enum cdp_punctured_modes punc_mode;
1822 
1823 	/* Align bw value as per host data structures */
1824 	if (hal_ppdu_info->rx_status.bw == HAL_FULL_RX_BW_320)
1825 		ppdu->u.bw = CMN_BW_320MHZ;
1826 	else
1827 		ppdu->u.bw = hal_ppdu_info->rx_status.bw;
1828 	if (hal_ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11BE) {
1829 		/* Align preamble value as per host data structures */
1830 		ppdu->u.preamble = DOT11_BE;
1831 		ppdu->u.stbc = hal_ppdu_info->rx_status.is_stbc;
1832 		ppdu->u.dcm = hal_ppdu_info->rx_status.dcm;
1833 	} else {
1834 		ppdu->u.preamble = hal_ppdu_info->rx_status.preamble_type;
1835 	}
1836 
1837 	puncture_pattern = hal_ppdu_info->rx_status.punctured_pattern;
1838 	punc_mode = dp_mon_get_puncture_type(puncture_pattern,
1839 					     ppdu->u.bw);
1840 	ppdu->punc_bw = punc_mode;
1841 }
1842 #else
/* Non-11BE build: only the MPDU retry count is tracked per peer */
void dp_rx_mon_stats_update_2_0(struct dp_mon_peer *mon_peer,
				struct cdp_rx_indication_ppdu *ppdu,
				struct cdp_rx_stats_ppdu_user *ppdu_user)
{
	DP_STATS_INC(mon_peer, rx.mpdu_retry_cnt, ppdu_user->mpdu_retries);
}
1849 
/* Non-11BE build: puncturing is an 11BE feature, so report none */
void
dp_rx_mon_populate_ppdu_info_2_0(struct hal_rx_ppdu_info *hal_ppdu_info,
				 struct cdp_rx_indication_ppdu *ppdu)
{
	ppdu->punc_bw = NO_PUNCTURE;
}
1856 #endif
1857 void dp_mon_rx_print_advanced_stats_2_0(struct dp_soc *soc,
1858 					struct dp_pdev *pdev)
1859 {
1860 	struct cdp_pdev_mon_stats *rx_mon_stats;
1861 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1862 	struct dp_mon_soc *mon_soc = pdev->soc->monitor_soc;
1863 	struct dp_mon_pdev_be *mon_pdev_be =
1864 				dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1865 
1866 	rx_mon_stats = &mon_pdev->rx_mon_stats;
1867 
1868 	DP_PRINT_STATS("total_ppdu_info_alloc = %d",
1869 		       rx_mon_stats->total_ppdu_info_alloc);
1870 	DP_PRINT_STATS("total_ppdu_info_free = %d",
1871 		       rx_mon_stats->total_ppdu_info_free);
1872 	DP_PRINT_STATS("total_ppdu_info_enq = %d",
1873 		       rx_mon_stats->total_ppdu_info_enq);
1874 	DP_PRINT_STATS("total_ppdu_info_drop = %d",
1875 		       rx_mon_stats->total_ppdu_info_drop);
1876 	DP_PRINT_STATS("rx_hdr_not_received = %d",
1877 		       rx_mon_stats->rx_hdr_not_received);
1878 	DP_PRINT_STATS("parent_buf_alloc = %d",
1879 		       rx_mon_stats->parent_buf_alloc);
1880 	DP_PRINT_STATS("parent_buf_free = %d",
1881 		       rx_mon_stats->parent_buf_free);
1882 	DP_PRINT_STATS("mpdus_buf_to_stack = %d",
1883 		       rx_mon_stats->mpdus_buf_to_stack);
1884 	DP_PRINT_STATS("frag_alloc = %d",
1885 		       mon_soc->stats.frag_alloc);
1886 	DP_PRINT_STATS("frag_free = %d",
1887 		       mon_soc->stats.frag_free);
1888 	DP_PRINT_STATS("status_buf_count = %d",
1889 		       rx_mon_stats->status_buf_count);
1890 	DP_PRINT_STATS("pkt_buf_count = %d",
1891 		       rx_mon_stats->pkt_buf_count);
1892 	DP_PRINT_STATS("rx_mon_queue_depth= %d",
1893 		       mon_pdev_be->rx_mon_queue_depth);
1894 	DP_PRINT_STATS("empty_desc= %d",
1895 		       mon_pdev->rx_mon_stats.empty_desc_ppdu);
1896 	DP_PRINT_STATS("mpdu_dropped_due_invalid_decap= %d",
1897 		       mon_pdev->rx_mon_stats.mpdu_decap_type_invalid);
1898 	DP_PRINT_STATS("total_free_elem= %d",
1899 		       mon_pdev_be->total_free_elem);
1900 }
1901 #endif
1902