xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/monitor/2.0/dp_rx_mon_2.0.c (revision d0c05845839e5f2ba5a8dcebe0cd3e4cd4e8dfcf)
1 /*
2  * Copyright (c) 2021, The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include "hal_be_hw_headers.h"
19 #include "dp_types.h"
20 #include "hal_be_rx.h"
21 #include "hal_api.h"
22 #include "qdf_trace.h"
23 #include "hal_be_api_mon.h"
24 #include "dp_internal.h"
25 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
26 #include <qdf_flex_mem.h>
27 #include "qdf_nbuf_frag.h"
28 #include "dp_mon.h"
29 #include <dp_rx_mon.h>
30 #include <dp_mon_2.0.h>
31 #include <dp_rx_mon.h>
32 #include <dp_rx_mon_2.0.h>
33 #include <dp_rx.h>
34 #include <dp_be.h>
35 #include <hal_be_api_mon.h>
36 #ifdef QCA_SUPPORT_LITE_MONITOR
37 #include "dp_lite_mon.h"
38 #endif
39 
40 #define F_MASK 0xFFFF
41 #define TEST_MASK 0xCBF
42 
43 #if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\
44 	    defined(WLAN_SUPPORT_RX_FLOW_TAG)
45 
46 #ifdef QCA_TEST_MON_PF_TAGS_STATS
47 
48 static
49 void dp_rx_mon_print_tag_buf(uint8_t *buf, uint16_t test, uint16_t room)
50 {
51 	if (test != TEST_MASK)
52 		return;
53 	print_hex_dump(KERN_ERR, "TLV BUFFER: ", DUMP_PREFIX_NONE,
54 		       32, 2, buf, room, false);
55 }
56 
57 static
58 void dp_rx_mon_enable_pf_test(uint16_t **nbuf)
59 {
60 	uint16_t *nbuf_head = *nbuf;
61 
62 	*((uint16_t *)nbuf_head) = TEST_MASK;
63 	nbuf_head += sizeof(uint16_t);
64 
65 	*nbuf = nbuf_head;
66 }
67 
68 #else
static
void dp_rx_mon_print_tag_buf(uint8_t *buf, uint16_t test, uint16_t room)
{
	/* No-op: dumping is only built with QCA_TEST_MON_PF_TAGS_STATS */
}
73 
74 static
75 void dp_rx_mon_enable_pf_test(uint8_t **nbuf)
76 {
77 	uint8_t *nbuf_head = *nbuf;
78 
79 	nbuf_head += sizeof(uint16_t);
80 	*nbuf = nbuf_head;
81 }
82 #endif
83 
static
void dp_rx_mon_set_zero(qdf_nbuf_t nbuf)
{
	/* Clear the reserved TLV headroom so stale tag bytes from a
	 * recycled buffer are never parsed as valid PF-tag data.
	 */
	qdf_mem_zero(qdf_nbuf_head(nbuf), DP_RX_MON_TLV_ROOM);
}
89 
90 /**
91  * dp_rx_mon_nbuf_add_rx_frag () -  Add frag to SKB
92  *
93  * @nbuf: SKB to which frag is going to be added
94  * @frag: frag to be added to SKB
95  * @frag_len: frag length
96  * @offset: frag offset
97  * @buf_size: buffer size
98  * @frag_ref: take frag ref
99  *
100  * Return: QDF_STATUS
101  */
102 static inline QDF_STATUS
103 dp_rx_mon_nbuf_add_rx_frag(qdf_nbuf_t nbuf, qdf_frag_t *frag,
104 			   uint16_t frag_len, uint16_t offset,
105 			   uint16_t buf_size, bool frag_ref)
106 {
107 	uint8_t num_frags;
108 
109 	num_frags = qdf_nbuf_get_nr_frags(nbuf);
110 	if (num_frags < QDF_NBUF_MAX_FRAGS) {
111 		qdf_nbuf_add_rx_frag(frag, nbuf,
112 				     offset,
113 				     frag_len,
114 				     buf_size,
115 				     frag_ref);
116 		return QDF_STATUS_SUCCESS;
117 	}
118 	return QDF_STATUS_E_FAILURE;
119 }
120 
/**
 * dp_mon_free_parent_nbuf() - Free a parent SKB and account the free
 *
 * @mon_pdev: monitor pdev owning the parent-buffer counters
 * @nbuf: SKB to be freed
 *
 * Return: void
 */
void
dp_mon_free_parent_nbuf(struct dp_mon_pdev *mon_pdev,
			qdf_nbuf_t nbuf)
{
	/* Keep parent_buf_alloc / parent_buf_free accounting balanced */
	mon_pdev->rx_mon_stats.parent_buf_free++;
	qdf_nbuf_free(nbuf);
}
136 
/**
 * dp_rx_mon_shift_pf_tag_in_headroom() - Wrap the PF-tag scratch data in
 * the nbuf headroom into a marker + length + TLV layout ahead of the data
 *
 * @nbuf: MPDU nbuf whose headroom carries per-MSDU protocol/flow tags
 * @soc: DP soc handle (asserted non-NULL)
 * @ppdu_info: PPDU info (not referenced in this body)
 *
 * Return: void
 */
void
dp_rx_mon_shift_pf_tag_in_headroom(qdf_nbuf_t nbuf, struct dp_soc *soc,
				   struct hal_rx_ppdu_info *ppdu_info)
{
	uint32_t test = 0;
	uint32_t room = 0;
	uint16_t msdu_count = 0;
	uint16_t *dp = NULL;
	uint16_t *hp = NULL;
	uint16_t tlv_data_len, total_tlv_len;
	uint32_t bytes = 0;

	if (qdf_unlikely(!soc)) {
		dp_mon_err("Soc[%pK] Null. Can't update pftag to nbuf headroom",
			   soc);
		qdf_assert_always(0);
	}

	/* PF tagging disabled: headroom carries no tag data */
	if (!wlan_cfg_is_rx_mon_protocol_flow_tag_enabled(soc->wlan_cfg_ctx))
		return;

	if (qdf_unlikely(!nbuf))
		return;

	/* Headroom must be have enough space for tlv to be added*/
	if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < DP_RX_MON_TLV_ROOM)) {
		dp_mon_err("Headroom[%d] < DP_RX_MON_TLV_ROOM[%d]",
			   qdf_nbuf_headroom(nbuf), DP_RX_MON_TLV_ROOM);
		return;
	}

	/* First 16 bits of headroom: optional test marker (test builds) */
	hp = (uint16_t *)qdf_nbuf_head(nbuf);
	test = *hp & F_MASK;
	/* NOTE(review): hp is uint16_t *, so this advances 2 elements
	 * (4 bytes), while the writer side steps a uint8_t cursor by
	 * sizeof(uint16_t) (2 bytes) before storing msdu_count — confirm
	 * the intended msdu_count offset against the headroom layout.
	 */
	hp += sizeof(uint16_t);
	msdu_count = *hp;

	/* No MSDUs were tagged for this MPDU: nothing to emit */
	if (qdf_unlikely(!msdu_count))
		return;

	dp_mon_debug("msdu_count: %d", msdu_count);

	/* TLV payload = msdu-count field + one tag record per MSDU */
	room = DP_RX_MON_PF_TAG_LEN_PER_FRAG * msdu_count;
	tlv_data_len = DP_RX_MON_TLV_MSDU_CNT + (room);
	total_tlv_len = DP_RX_MON_TLV_HDR_LEN + tlv_data_len;

	//1. store space for MARKER
	dp = (uint16_t *)qdf_nbuf_push_head(nbuf, sizeof(uint16_t));
	if (qdf_likely(dp)) {
		*(uint16_t *)dp = DP_RX_MON_TLV_HDR_MARKER;
		bytes += sizeof(uint16_t);
	}

	//2. store space for total size
	dp = (uint16_t *)qdf_nbuf_push_head(nbuf, sizeof(uint16_t));
	if (qdf_likely(dp)) {
		*(uint16_t *)dp = total_tlv_len;
		bytes += sizeof(uint16_t);
	}

	//create TLV
	bytes += dp_mon_rx_add_tlv(DP_RX_MON_TLV_PF_ID, tlv_data_len, hp, nbuf);

	dp_rx_mon_print_tag_buf(qdf_nbuf_data(nbuf), test, total_tlv_len);

	/* Restore nbuf data pointer; TLV stays in headroom for consumers */
	qdf_nbuf_pull_head(nbuf, bytes);

}
204 
205 void
206 dp_rx_mon_pf_tag_to_buf_headroom_2_0(void *nbuf,
207 				     struct hal_rx_ppdu_info *ppdu_info,
208 				     struct dp_pdev *pdev, struct dp_soc *soc)
209 {
210 	uint8_t *nbuf_head = NULL;
211 	uint8_t user_id;
212 	struct hal_rx_mon_msdu_info *msdu_info;
213 	uint16_t flow_id;
214 	uint16_t cce_metadata;
215 	uint16_t protocol_tag = 0;
216 	uint32_t flow_tag;
217 	uint8_t invalid_cce = 0, invalid_fse = 0;
218 
219 	if (qdf_unlikely(!soc)) {
220 		dp_mon_err("Soc[%pK] Null. Can't update pftag to nbuf headroom",
221 			   soc);
222 		qdf_assert_always(0);
223 	}
224 
225 	if (!wlan_cfg_is_rx_mon_protocol_flow_tag_enabled(soc->wlan_cfg_ctx))
226 		return;
227 
228 	if (qdf_unlikely(!nbuf))
229 		return;
230 
231 	/* Headroom must be have enough space for tlv to be added*/
232 	if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < DP_RX_MON_TLV_ROOM)) {
233 		dp_mon_err("Headroom[%d] < DP_RX_MON_TLV_ROOM[%d]",
234 			   qdf_nbuf_headroom(nbuf), DP_RX_MON_TLV_ROOM);
235 		return;
236 	}
237 
238 	user_id = ppdu_info->user_id;
239 	if (qdf_unlikely(user_id > HAL_MAX_UL_MU_USERS)) {
240 		dp_mon_debug("Invalid user_id user_id: %d pdev: %pK", user_id, pdev);
241 		return;
242 	}
243 
244 	msdu_info = &ppdu_info->msdu[user_id];
245 	flow_id = ppdu_info->rx_msdu_info[user_id].flow_idx;
246 	cce_metadata = ppdu_info->rx_msdu_info[user_id].cce_metadata -
247 		       RX_PROTOCOL_TAG_START_OFFSET;
248 
249 	flow_tag = ppdu_info->rx_msdu_info[user_id].fse_metadata & F_MASK;
250 
251 	if (qdf_unlikely((cce_metadata > RX_PROTOCOL_TAG_MAX - 1) ||
252 			 (cce_metadata > 0 && cce_metadata < 4))) {
253 		dp_mon_debug("Invalid user_id cce_metadata: %d pdev: %pK", cce_metadata, pdev);
254 		invalid_cce = 1;
255 		protocol_tag = cce_metadata;
256 	} else {
257 		protocol_tag = pdev->rx_proto_tag_map[cce_metadata].tag;
258 		dp_mon_rx_update_rx_protocol_tag_stats(pdev, cce_metadata);
259 	}
260 
261 	if (flow_tag > 0) {
262 		dp_mon_rx_update_rx_flow_tag_stats(pdev, flow_id);
263 	} else {
264 		dp_mon_debug("Invalid flow_tag: %d pdev: %pK ", flow_tag, pdev);
265 		invalid_fse = 1;
266 	}
267 
268 	if (invalid_cce && invalid_fse)
269 		return;
270 
271 	if (msdu_info->msdu_index >= DP_RX_MON_MAX_MSDU) {
272 		dp_mon_err("msdu_index causes overflow in headroom");
273 		return;
274 	}
275 
276 	dp_mon_debug("protocol_tag: %d, cce_metadata: %d, flow_tag: %d",
277 		     protocol_tag, cce_metadata, flow_tag);
278 
279 	dp_mon_debug("msdu_index: %d", msdu_info->msdu_index);
280 
281 
282 	nbuf_head = qdf_nbuf_head(nbuf);
283 	dp_rx_mon_enable_pf_test(&nbuf_head);
284 
285 	*((uint16_t *)nbuf_head) = msdu_info->msdu_index + 1;
286 	nbuf_head += DP_RX_MON_TLV_MSDU_CNT;
287 
288 	nbuf_head += ((msdu_info->msdu_index) * DP_RX_MON_PF_TAG_SIZE);
289 	if (!invalid_cce)
290 		*((uint16_t *)nbuf_head) = protocol_tag;
291 	nbuf_head += sizeof(uint16_t);
292 	if (!invalid_fse)
293 		*((uint16_t *)nbuf_head) = flow_tag;
294 }
295 
296 #else
297 
static
void dp_rx_mon_set_zero(qdf_nbuf_t nbuf)
{
	/* No-op: PF tagging support not compiled in */
}
302 
static
void dp_rx_mon_shift_pf_tag_in_headroom(qdf_nbuf_t nbuf, struct dp_soc *soc,
					struct hal_rx_ppdu_info *ppdu_info)
{
	/* No-op: PF tagging support not compiled in */
}
308 
static
void dp_rx_mon_pf_tag_to_buf_headroom_2_0(void *nbuf,
					  struct hal_rx_ppdu_info *ppdu_info,
					  struct dp_pdev *pdev,
					  struct dp_soc *soc)
{
	/* No-op: PF tagging support not compiled in */
}
316 
317 #endif
318 
319 /**
320  * dp_rx_mon_free_ppdu_info () - Free PPDU info
321  * @pdev: DP pdev
322  * @ppdu_info: PPDU info
323  *
324  * Return: Void
325  */
326 static void
327 dp_rx_mon_free_ppdu_info(struct dp_pdev *pdev,
328 			 struct hal_rx_ppdu_info *ppdu_info)
329 {
330 	uint8_t user;
331 	struct dp_mon_pdev *mon_pdev;
332 
333 	mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
334 	for (user = 0; user < ppdu_info->com_info.num_users; user++) {
335 		uint16_t mpdu_count  = ppdu_info->mpdu_count[user];
336 		uint16_t mpdu_idx;
337 		qdf_nbuf_t mpdu;
338 
339 		for (mpdu_idx = 0; mpdu_idx < mpdu_count; mpdu_idx++) {
340 			mpdu = (qdf_nbuf_t)ppdu_info->mpdu_q[user][mpdu_idx];
341 
342 			if (!mpdu)
343 				continue;
344 			dp_mon_free_parent_nbuf(mon_pdev, mpdu);
345 		}
346 	}
347 }
348 
349 void dp_rx_mon_drain_wq(struct dp_pdev *pdev)
350 {
351 	struct dp_mon_pdev *mon_pdev;
352 	struct hal_rx_ppdu_info *ppdu_info = NULL;
353 	struct hal_rx_ppdu_info *temp_ppdu_info = NULL;
354 	struct dp_mon_pdev_be *mon_pdev_be;
355 
356 	if (qdf_unlikely(!pdev)) {
357 		dp_mon_debug("Pdev is NULL");
358 		return;
359 	}
360 
361 	mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
362 	if (qdf_unlikely(!mon_pdev)) {
363 		dp_mon_debug("monitor pdev is NULL");
364 		return;
365 	}
366 
367 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
368 
369 	qdf_spin_lock_bh(&mon_pdev_be->rx_mon_wq_lock);
370 	TAILQ_FOREACH_SAFE(ppdu_info,
371 			   &mon_pdev_be->rx_mon_queue,
372 			   ppdu_list_elem,
373 			   temp_ppdu_info) {
374 		mon_pdev_be->rx_mon_queue_depth--;
375 		TAILQ_REMOVE(&mon_pdev_be->rx_mon_queue,
376 			     ppdu_info, ppdu_list_elem);
377 
378 		dp_rx_mon_free_ppdu_info(pdev, ppdu_info);
379 	}
380 	qdf_spin_unlock_bh(&mon_pdev_be->rx_mon_wq_lock);
381 }
382 
383 /**
384  * dp_rx_mon_deliver_mpdu() - Deliver MPDU to osif layer
385  *
386  * @mon_pdev: monitor pdev
387  * @mpdu: MPDU nbuf
388  * @status: monitor status
389  *
390  * Return: QDF_STATUS
391  */
392 static QDF_STATUS
393 dp_rx_mon_deliver_mpdu(struct dp_mon_pdev *mon_pdev,
394 		       qdf_nbuf_t mpdu,
395 		       struct mon_rx_status *rx_status)
396 {
397 	qdf_nbuf_t nbuf;
398 
399 	if (mon_pdev->mvdev && mon_pdev->mvdev->monitor_vdev->osif_rx_mon) {
400 		mon_pdev->rx_mon_stats.mpdus_buf_to_stack++;
401 		nbuf = qdf_nbuf_get_ext_list(mpdu);
402 
403 		while (nbuf) {
404 			mon_pdev->rx_mon_stats.mpdus_buf_to_stack++;
405 			nbuf = nbuf->next;
406 		}
407 		mon_pdev->mvdev->monitor_vdev->osif_rx_mon(mon_pdev->mvdev->osif_vdev,
408 							   mpdu,
409 							   rx_status);
410 	} else {
411 		return QDF_STATUS_E_FAILURE;
412 	}
413 
414 	return QDF_STATUS_SUCCESS;
415 }
416 
/**
 * dp_rx_mon_process_ppdu_info () - Process PPDU info
 * @pdev: DP pdev
 * @ppdu_info: PPDU info
 *
 * For every MPDU of every user: hand it to lite monitor when lite-mon RX
 * is enabled; otherwise restitch full packets, stamp radiotap and PF-tag
 * TLVs, and deliver to the osif layer. Any MPDU that fails a stage is
 * freed here, so ownership never leaks.
 *
 * Return: Void
 */
static void
dp_rx_mon_process_ppdu_info(struct dp_pdev *pdev,
			    struct hal_rx_ppdu_info *ppdu_info)
{
	struct dp_mon_pdev *mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
	uint8_t user;

	if (!ppdu_info)
		return;

	for (user = 0; user < ppdu_info->com_info.num_users; user++) {
		uint16_t mpdu_count  = ppdu_info->mpdu_count[user];
		uint16_t mpdu_idx;
		qdf_nbuf_t mpdu;
		struct hal_rx_mon_mpdu_info *mpdu_meta;
		QDF_STATUS status;

		for (mpdu_idx = 0; mpdu_idx < mpdu_count; mpdu_idx++) {
			mpdu = (qdf_nbuf_t)ppdu_info->mpdu_q[user][mpdu_idx];

			if (!mpdu)
				continue;

			/* MPDU metadata lives at the start of nbuf data */
			mpdu_meta = (struct hal_rx_mon_mpdu_info *)qdf_nbuf_data(mpdu);

			if (dp_lite_mon_is_rx_enabled(mon_pdev)) {
				/* Lite monitor owns delivery; free on error */
				status = dp_lite_mon_rx_mpdu_process(pdev, ppdu_info,
								     mpdu, mpdu_idx, user);
				if (status != QDF_STATUS_SUCCESS) {
					dp_mon_free_parent_nbuf(mon_pdev, mpdu);
					continue;
				}
			} else {
				if (mpdu_meta->full_pkt) {
					/* Truncated full packets are dropped */
					if (qdf_unlikely(mpdu_meta->truncated)) {
						dp_mon_free_parent_nbuf(mon_pdev, mpdu);
						continue;
					}

					status = dp_rx_mon_handle_full_mon(pdev,
									   ppdu_info, mpdu);
					if (status != QDF_STATUS_SUCCESS) {
						dp_mon_free_parent_nbuf(mon_pdev, mpdu);
						continue;
					}
				} else {
					/* Not a full packet: nothing to deliver */
					dp_mon_free_parent_nbuf(mon_pdev, mpdu);
					continue;
				}

				/* reset mpdu metadata and apply radiotap header over MPDU */
				qdf_mem_zero(mpdu_meta, sizeof(struct hal_rx_mon_mpdu_info));
				if (!qdf_nbuf_update_radiotap(&ppdu_info->rx_status,
							      mpdu,
							      qdf_nbuf_headroom(mpdu))) {
					dp_mon_err("failed to update radiotap pdev: %pK",
						   pdev);
				}

				/* Emit the PF-tag TLV ahead of the data */
				dp_rx_mon_shift_pf_tag_in_headroom(mpdu,
								   pdev->soc,
								   ppdu_info);

				/* Deliver MPDU to osif layer */
				status = dp_rx_mon_deliver_mpdu(mon_pdev,
								mpdu,
								&ppdu_info->rx_status);
				if (status != QDF_STATUS_SUCCESS)
					dp_mon_free_parent_nbuf(mon_pdev, mpdu);
			}
		}
	}
}
497 
498 /**
499  * dp_rx_mon_process_ppdu ()-  Deferred monitor processing
500  * This workqueue API handles:
501  * a. Full monitor
502  * b. Lite monitor
503  *
504  * @context: Opaque work context
505  *
506  * Return: none
507  */
508 void dp_rx_mon_process_ppdu(void *context)
509 {
510 	struct dp_pdev *pdev = (struct dp_pdev *)context;
511 	struct dp_mon_pdev *mon_pdev;
512 	struct hal_rx_ppdu_info *ppdu_info = NULL;
513 	struct hal_rx_ppdu_info *temp_ppdu_info = NULL;
514 	struct dp_mon_pdev_be *mon_pdev_be;
515 
516 	if (qdf_unlikely(!pdev)) {
517 		dp_mon_debug("Pdev is NULL");
518 		return;
519 	}
520 
521 	mon_pdev = (struct dp_mon_pdev *)pdev->monitor_pdev;
522 	if (qdf_unlikely(!mon_pdev)) {
523 		dp_mon_debug("monitor pdev is NULL");
524 		return;
525 	}
526 
527 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
528 
529 	qdf_spin_lock_bh(&mon_pdev_be->rx_mon_wq_lock);
530 	TAILQ_FOREACH_SAFE(ppdu_info,
531 			   &mon_pdev_be->rx_mon_queue,
532 			   ppdu_list_elem, temp_ppdu_info) {
533 		TAILQ_REMOVE(&mon_pdev_be->rx_mon_queue,
534 			     ppdu_info, ppdu_list_elem);
535 
536 		mon_pdev_be->rx_mon_queue_depth--;
537 		dp_rx_mon_process_ppdu_info(pdev, ppdu_info);
538 		qdf_mem_free(ppdu_info);
539 	}
540 	qdf_spin_unlock_bh(&mon_pdev_be->rx_mon_wq_lock);
541 }
542 
543 /**
544  * dp_rx_mon_add_ppdu_info_to_wq () - Add PPDU info to workqueue
545  *
546  * @mon_pdev: monitor pdev
547  * @ppdu_info: ppdu info to be added to workqueue
548  *
549  * Return: SUCCESS or FAILIRE
550  */
551 
552 QDF_STATUS
553 dp_rx_mon_add_ppdu_info_to_wq(struct dp_mon_pdev *mon_pdev,
554 			      struct hal_rx_ppdu_info *ppdu_info)
555 {
556 	struct dp_mon_pdev_be *mon_pdev_be =
557 		dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
558 
559 	/* Full monitor or lite monitor mode is not enabled, return */
560 	if (!mon_pdev->monitor_configured &&
561 	    !dp_lite_mon_is_rx_enabled(mon_pdev))
562 		return QDF_STATUS_E_FAILURE;
563 
564 	if (qdf_likely(ppdu_info)) {
565 		qdf_spin_lock_bh(&mon_pdev_be->rx_mon_wq_lock);
566 		TAILQ_INSERT_TAIL(&mon_pdev_be->rx_mon_queue,
567 				  ppdu_info, ppdu_list_elem);
568 		mon_pdev_be->rx_mon_queue_depth++;
569 		qdf_spin_unlock_bh(&mon_pdev_be->rx_mon_wq_lock);
570 
571 		if (mon_pdev_be->rx_mon_queue_depth > DP_MON_QUEUE_DEPTH_MAX) {
572 			qdf_queue_work(0, mon_pdev_be->rx_mon_workqueue,
573 				       &mon_pdev_be->rx_mon_work);
574 		}
575 	}
576 	return QDF_STATUS_SUCCESS;
577 }
578 
/**
 * dp_rx_mon_handle_full_mon() - Restitch a full monitor MPDU in place
 *
 * @pdev: DP pdev
 * @ppdu_info: PPDU info for this MPDU
 * @mpdu: MPDU nbuf whose page frags alternate rx_hdr / rx_msdu data
 *
 * For non-raw decap, trims and shifts the page-frag offsets so that the
 * 802.11 header, LLC/SNAP header and MSDU payloads form a contiguous
 * A-MSDU-style frame, inserting 4-byte alignment padding between MSDUs
 * where the buffer has room. Raw decap only strips the trailing FCS.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_mon_handle_full_mon(struct dp_pdev *pdev,
			  struct hal_rx_ppdu_info *ppdu_info,
			  qdf_nbuf_t mpdu)
{
	uint32_t wifi_hdr_len, sec_hdr_len, msdu_llc_len,
		 mpdu_buf_len, decap_hdr_pull_bytes, dir,
		 is_amsdu, amsdu_pad, frag_size, tot_msdu_len;
	struct hal_rx_mon_mpdu_info *mpdu_meta;
	struct hal_rx_mon_msdu_info *msdu_meta;
	char *hdr_desc;
	uint8_t num_frags, frag_iter, l2_hdr_offset;
	struct ieee80211_frame *wh;
	struct ieee80211_qoscntl *qos;
	void *hdr_frag_addr;
	uint32_t hdr_frag_size, frag_page_offset, pad_byte_pholder,
		 msdu_len;
	qdf_nbuf_t head_msdu, msdu_cur;
	void *frag_addr;
	bool prev_msdu_end_received = false;
	bool is_nbuf_head = true;

	/***************************************************************************
	 *********************** Non-raw packet ************************************
	 ---------------------------------------------------------------------------
	 |      | frag-0   | frag-1    | frag - 2 | frag - 3  | frag - 4 | frag - 5  |
	 | skb  | rx_hdr-1 | rx_msdu-1 | rx_hdr-2 | rx_msdu-2 | rx_hdr-3 | rx-msdu-3 |
	 ---------------------------------------------------------------------------
	 **************************************************************************/

	if (!mpdu) {
		dp_mon_debug("nbuf is NULL, return");
		return QDF_STATUS_E_FAILURE;
	}

	head_msdu = mpdu;

	mpdu_meta = (struct hal_rx_mon_mpdu_info *)qdf_nbuf_data(mpdu);

	/* Raw decap: just chop the FCS off the last frag and finish */
	if (mpdu_meta->decap_type == HAL_HW_RX_DECAP_FORMAT_RAW) {
		qdf_nbuf_trim_add_frag_size(mpdu,
					    qdf_nbuf_get_nr_frags(mpdu) - 1,
					    -HAL_RX_FCS_LEN, 0);
		return QDF_STATUS_SUCCESS;
	}

	num_frags = qdf_nbuf_get_nr_frags(mpdu);
	if (qdf_unlikely(num_frags < DP_MON_MIN_FRAGS_FOR_RESTITCH)) {
		dp_mon_debug("not enough frags(%d) for restitch", num_frags);
		return QDF_STATUS_E_FAILURE;
	}

	l2_hdr_offset = DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE;

	/* hdr_desc points to 80211 hdr */
	hdr_desc = qdf_nbuf_get_frag_addr(mpdu, 0);

	/* Calculate Base header size */
	wifi_hdr_len = sizeof(struct ieee80211_frame);
	wh = (struct ieee80211_frame *)hdr_desc;

	dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;

	/* 4-address (WDS) frames carry an extra 6-byte addr4 field */
	if (dir == IEEE80211_FC1_DIR_DSTODS)
		wifi_hdr_len += 6;

	is_amsdu = 0;
	if (wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) {
		qos = (struct ieee80211_qoscntl *)
			(hdr_desc + wifi_hdr_len);
		wifi_hdr_len += 2;

		is_amsdu = (qos->i_qos[0] & IEEE80211_QOS_AMSDU);
	}

	/*Calculate security header length based on 'Protected'
	 * and 'EXT_IV' flag
	 */
	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
		char *iv = (char *)wh + wifi_hdr_len;

		if (iv[3] & KEY_EXTIV)
			sec_hdr_len = 8;
		else
			sec_hdr_len = 4;
	} else {
		sec_hdr_len = 0;
	}
	wifi_hdr_len += sec_hdr_len;

	/* MSDU related stuff LLC - AMSDU subframe header etc */
	msdu_llc_len = is_amsdu ? (DP_RX_MON_DECAP_HDR_SIZE +
				   DP_RX_MON_LLC_SIZE +
				   DP_RX_MON_SNAP_SIZE) :
				   (DP_RX_MON_LLC_SIZE + DP_RX_MON_SNAP_SIZE);

	mpdu_buf_len = wifi_hdr_len + msdu_llc_len;

	/* "Decap" header to remove from MSDU buffer */
	decap_hdr_pull_bytes = DP_RX_MON_DECAP_HDR_SIZE;

	amsdu_pad = 0;
	tot_msdu_len = 0;
	/* NOTE(review): duplicate assignment, harmless */
	tot_msdu_len = 0;

	/*
	 * Update protocol and flow tag for MSDU
	 * update frag index in ctx_idx field.
	 * Reset head pointer data of nbuf before updating.
	 */
	QDF_NBUF_CB_RX_CTX_ID(mpdu) = 0;

	/* Construct destination address */
	hdr_frag_addr = qdf_nbuf_get_frag_addr(mpdu, 0);
	hdr_frag_size = qdf_nbuf_get_frag_size_by_idx(mpdu, 0);

	/* Adjust page frag offset to point to 802.11 header */
	qdf_nbuf_trim_add_frag_size(head_msdu, 0, -(hdr_frag_size - mpdu_buf_len), 0);

	/* MSDU metadata sits just before the packet data in frag 1 */
	msdu_meta = (struct hal_rx_mon_msdu_info *)(qdf_nbuf_get_frag_addr(mpdu, 1) - DP_RX_MON_PACKET_OFFSET + DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE);

	/* NOTE(review): msdu_len is not referenced after this assignment */
	msdu_len = msdu_meta->msdu_len;

	/* Adjust page frag offset to appropriate after decap header */
	frag_page_offset =
		decap_hdr_pull_bytes;
	qdf_nbuf_move_frag_page_offset(head_msdu, 1, frag_page_offset);

	frag_size = qdf_nbuf_get_frag_size_by_idx(head_msdu, 1);
	pad_byte_pholder =
		RX_MONITOR_BUFFER_SIZE - (frag_size + DP_RX_MON_PACKET_OFFSET + DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE);

	if (msdu_meta->first_buffer && msdu_meta->last_buffer) {
		/* MSDU with single bufffer */
		amsdu_pad = frag_size & 0x3;
		amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;
		if (amsdu_pad && (amsdu_pad <= pad_byte_pholder)) {
			char *frag_addr_temp;

			/* Grow the frag by the pad and zero the pad bytes */
			qdf_nbuf_trim_add_frag_size(mpdu, 1, amsdu_pad, 0);
			frag_addr_temp =
				(char *)qdf_nbuf_get_frag_addr(mpdu, 1);
			frag_addr_temp = (frag_addr_temp +
					  qdf_nbuf_get_frag_size_by_idx(mpdu, 1)) -
				amsdu_pad;
			qdf_mem_zero(frag_addr_temp, amsdu_pad);
			amsdu_pad = 0;
		}
	} else {
		/* Multi-buffer MSDU: accumulate length across buffers */
		tot_msdu_len = frag_size;
		amsdu_pad = 0;
	}

	pad_byte_pholder = 0;
	for (msdu_cur = mpdu; msdu_cur;) {
		/* frag_iter will start from 0 for second skb onwards */
		if (msdu_cur == mpdu)
			frag_iter = 2;
		else
			frag_iter = 0;

		num_frags = qdf_nbuf_get_nr_frags(msdu_cur);

		for (; frag_iter < num_frags; frag_iter++) {
			/* Construct destination address
			 *  ----------------------------------------------------------
			 * |            | L2_HDR_PAD   |   Decap HDR | Payload | Pad  |
			 * |            | (First buffer)             |         |      |
			 * |            |                            /        /       |
			 * |            >Frag address points here   /        /        |
			 * |            \                          /        /         |
			 * |             \ This bytes needs to    /        /          |
			 * |              \  removed to frame pkt/        /           |
			 * |               ----------------------        /            |
			 * |                                     |     /     Add      |
			 * |                                     |    /   amsdu pad   |
			 * |   LLC HDR will be added here      <-|    |   Byte for    |
			 * |        |                            |    |   last frame  |
			 * |         >Dest addr will point       |    |    if space   |
			 * |            somewhere in this area   |    |    available  |
			 * |  And amsdu_pad will be created if   |    |               |
			 * | dint get added in last buffer       |    |               |
			 * |       (First Buffer)                |    |               |
			 *  ----------------------------------------------------------
			 */
			/* If previous msdu end has received, modify next frag's offset to point to LLC */
			if (prev_msdu_end_received) {
				hdr_frag_size = qdf_nbuf_get_frag_size_by_idx(msdu_cur, frag_iter);
				/* Adjust page frag offset to point to llc/snap header */
				qdf_nbuf_trim_add_frag_size(msdu_cur, frag_iter, -(hdr_frag_size - msdu_llc_len), 0);
				prev_msdu_end_received = false;
				continue;
			}

			/* Per-MSDU metadata precedes the frag's packet data */
			frag_addr =
				qdf_nbuf_get_frag_addr(msdu_cur, frag_iter) -
						       (DP_RX_MON_PACKET_OFFSET +
						       DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE);
			msdu_meta = (struct hal_rx_mon_msdu_info *)frag_addr;

			/*
			 * Update protocol and flow tag for MSDU
			 * update frag index in ctx_idx field
			 */
			QDF_NBUF_CB_RX_CTX_ID(msdu_cur) = frag_iter;

			frag_size = qdf_nbuf_get_frag_size_by_idx(msdu_cur,
					frag_iter);

			/* If Middle buffer, dont add any header */
			if ((!msdu_meta->first_buffer) &&
					(!msdu_meta->last_buffer)) {
				tot_msdu_len += frag_size;
				amsdu_pad = 0;
				pad_byte_pholder = 0;
				continue;
			}

			/* Calculate if current buffer has placeholder
			 * to accommodate amsdu pad byte
			 */
			pad_byte_pholder =
				RX_MONITOR_BUFFER_SIZE - (frag_size + (DP_RX_MON_PACKET_OFFSET +
							  DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE));
			/*
			 * We will come here only only three condition:
			 * 1. Msdu with single Buffer
			 * 2. First buffer in case MSDU is spread in multiple
			 *    buffer
			 * 3. Last buffer in case MSDU is spread in multiple
			 *    buffer
			 *
			 *         First buffER | Last buffer
			 * Case 1:      1       |     1
			 * Case 2:      1       |     0
			 * Case 3:      0       |     1
			 *
			 * In 3rd case only l2_hdr_padding byte will be Zero and
			 * in other case, It will be 2 Bytes.
			 */
			if (msdu_meta->first_buffer)
				l2_hdr_offset =
					DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE;
			else
				l2_hdr_offset = DP_RX_MON_RAW_L2_HDR_PAD_BYTE;

			if (msdu_meta->first_buffer) {
				/* Adjust page frag offset to point to 802.11 header */
				hdr_frag_size = qdf_nbuf_get_frag_size_by_idx(msdu_cur, frag_iter-1);
				qdf_nbuf_trim_add_frag_size(msdu_cur, frag_iter - 1, -(hdr_frag_size - (msdu_llc_len + amsdu_pad)), 0);

				/* Adjust page frag offset to appropriate after decap header */
				frag_page_offset =
					(decap_hdr_pull_bytes + l2_hdr_offset);
				if (frag_size > (decap_hdr_pull_bytes + l2_hdr_offset)) {
					qdf_nbuf_move_frag_page_offset(msdu_cur, frag_iter, frag_page_offset);
					frag_size = frag_size - (l2_hdr_offset + decap_hdr_pull_bytes);
				}


				/*
				 * Calculate new page offset and create hole
				 * if amsdu_pad required.
				 */
				tot_msdu_len = frag_size;
				/*
				 * No amsdu padding required for first frame of
				 * continuation buffer
				 */
				if (!msdu_meta->last_buffer) {
					amsdu_pad = 0;
					continue;
				}
			} else {
				tot_msdu_len += frag_size;
			}

			/* Will reach to this place in only two case:
			 * 1. Single buffer MSDU
			 * 2. Last buffer of MSDU in case of multiple buf MSDU
			 */

			/* This flag is used to identify msdu boundry */
			prev_msdu_end_received = true;
			/* Check size of buffer if amsdu padding required */
			amsdu_pad = tot_msdu_len & 0x3;
			amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;

			/* Create placeholder if current bufer can
			 * accommodate padding.
			 */
			if (amsdu_pad && (amsdu_pad <= pad_byte_pholder)) {
				char *frag_addr_temp;

				qdf_nbuf_trim_add_frag_size(msdu_cur,
						frag_iter,
						amsdu_pad, 0);
				frag_addr_temp = (char *)qdf_nbuf_get_frag_addr(msdu_cur,
						frag_iter);
				frag_addr_temp = (frag_addr_temp +
						qdf_nbuf_get_frag_size_by_idx(msdu_cur, frag_iter)) -
					amsdu_pad;
				qdf_mem_zero(frag_addr_temp, amsdu_pad);
				amsdu_pad = 0;
			}

			/* reset tot_msdu_len */
			tot_msdu_len = 0;
		}
		/* First continuation lives on the ext list; the rest are
		 * chained via the nbuf queue next pointer.
		 */
		if (is_nbuf_head) {
			msdu_cur = qdf_nbuf_get_ext_list(msdu_cur);
			is_nbuf_head = false;
		} else {
			msdu_cur = qdf_nbuf_queue_next(msdu_cur);
		}
	}

	return QDF_STATUS_SUCCESS;
}
898 
899 /**
900  * dp_rx_mon_flush_status_buf_queue () - Flush status buffer queue
901  *
902  * @pdev: DP pdev handle
903  *
904  *Return: void
905  */
906 static inline void
907 dp_rx_mon_flush_status_buf_queue(struct dp_pdev *pdev)
908 {
909 	struct dp_soc *soc = pdev->soc;
910 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
911 	struct dp_mon_pdev_be *mon_pdev_be =
912 		dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
913 	union dp_mon_desc_list_elem_t *desc_list = NULL;
914 	union dp_mon_desc_list_elem_t *tail = NULL;
915 	struct dp_mon_desc *mon_desc;
916 	uint8_t idx;
917 	void *buf;
918 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
919 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
920 	struct dp_mon_desc_pool *rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
921 	uint8_t work_done = 0;
922 	uint16_t status_buf_count;
923 
924 	if (!mon_pdev_be->desc_count) {
925 		dp_mon_info("no of status buffer count is zero: %pK", pdev);
926 		return;
927 	}
928 
929 	status_buf_count = mon_pdev_be->desc_count;
930 	for (idx = 0; idx < status_buf_count; idx++) {
931 		mon_desc = mon_pdev_be->status[idx];
932 		if (!mon_desc) {
933 			qdf_assert_always(0);
934 			return;
935 		}
936 
937 		buf = mon_desc->buf_addr;
938 
939 		dp_mon_add_to_free_desc_list(&desc_list, &tail, mon_desc);
940 		work_done++;
941 
942 		/* set status buffer pointer to NULL */
943 		mon_pdev_be->status[idx] = NULL;
944 		mon_pdev_be->desc_count--;
945 
946 		qdf_frag_free(buf);
947 		DP_STATS_INC(mon_soc, frag_free, 1);
948 	}
949 
950 	if (work_done) {
951 		mon_pdev->rx_mon_stats.mon_rx_bufs_replenished_dest +=
952 			work_done;
953 		dp_mon_buffers_replenish(soc, &soc->rxdma_mon_buf_ring[0],
954 					 rx_mon_desc_pool,
955 					 work_done,
956 					 &desc_list, &tail, NULL);
957 	}
958 }
959 
960 /**
961  * dp_rx_mon_handle_flush_n_trucated_ppdu () - Handle flush and truncated ppdu
962  *
963  * @soc: DP soc handle
964  * @pdev: pdev handle
965  * @mon_desc: mon sw desc
966  */
967 static inline void
968 dp_rx_mon_handle_flush_n_trucated_ppdu(struct dp_soc *soc,
969 				       struct dp_pdev *pdev,
970 				       struct dp_mon_desc *mon_desc)
971 {
972 	union dp_mon_desc_list_elem_t *desc_list = NULL;
973 	union dp_mon_desc_list_elem_t *tail = NULL;
974 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
975 	struct dp_mon_soc_be *mon_soc_be =
976 			dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
977 	struct dp_mon_desc_pool *rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
978 	uint16_t work_done;
979 
980 	/* Flush status buffers in queue */
981 	dp_rx_mon_flush_status_buf_queue(pdev);
982 	qdf_frag_free(mon_desc->buf_addr);
983 	DP_STATS_INC(mon_soc, frag_free, 1);
984 	dp_mon_add_to_free_desc_list(&desc_list, &tail, mon_desc);
985 	work_done = 1;
986 	dp_mon_buffers_replenish(soc, &soc->rxdma_mon_buf_ring[0],
987 				 rx_mon_desc_pool,
988 				 work_done,
989 				 &desc_list, &tail, NULL);
990 }
991 
/**
 * dp_rx_mon_process_tlv_status() - Act on one parsed status TLV: build or
 * extend the per-user MPDU nbuf queue (ppdu_info->mpdu_q) and reap packet
 * buffer descriptors referenced by the TLV stream
 *
 * @pdev: DP pdev handle
 * @ppdu_info: HAL PPDU info holding parsed TLV state for the current PPDU
 * @status_frag: status buffer frag containing the TLV payload
 * @tlv_status: TLV type code returned by the HAL status parser
 * @desc_list: [out] list of reaped mon descriptors, for later replenish
 * @tail: [out] tail of @desc_list
 *
 * Return: number of packet buffers reaped from the mon buffer ring
 */
uint8_t dp_rx_mon_process_tlv_status(struct dp_pdev *pdev,
				     struct hal_rx_ppdu_info *ppdu_info,
				     void *status_frag,
				     uint16_t tlv_status,
				     union dp_mon_desc_list_elem_t **desc_list,
				     union dp_mon_desc_list_elem_t **tail)
{
	struct dp_soc *soc  = pdev->soc;
	struct dp_mon_soc *mon_soc = soc->monitor_soc;
	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
	qdf_nbuf_t nbuf, tmp_nbuf;
	qdf_frag_t addr;
	uint8_t user_id = ppdu_info->user_id;
	uint8_t mpdu_idx = ppdu_info->mpdu_count[user_id];
	uint16_t num_frags;
	uint8_t num_buf_reaped = 0;
	QDF_STATUS status;

	/* Nothing to do unless monitor mode or lite-monitor Rx is active */
	if (!mon_pdev->monitor_configured &&
	    !dp_lite_mon_is_rx_enabled(mon_pdev)) {
		return num_buf_reaped;
	}

	switch (tlv_status) {
	case HAL_TLV_STATUS_HEADER: {
		/* If this is first RX_HEADER for MPDU, allocate skb
		 * else add frag to already allocated skb
		 */

		if (!ppdu_info->mpdu_info[user_id].mpdu_start_received) {

			nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
					      DP_RX_MON_TLV_ROOM +
					      DP_RX_MON_MAX_RADIO_TAP_HDR,
					      DP_RX_MON_TLV_ROOM +
					      DP_RX_MON_MAX_RADIO_TAP_HDR,
					      4, FALSE);

			/* All msdus of this MPDU are attached to this parent
			 * nbuf via nr_frags; its ->next stays NULL (set below).
			 */
			if (qdf_unlikely(!nbuf)) {
				dp_mon_err("malloc failed pdev: %pK ", pdev);
				return num_buf_reaped;
			}

			mon_pdev->rx_mon_stats.parent_buf_alloc++;

			dp_rx_mon_set_zero(nbuf);

			qdf_nbuf_set_next(nbuf, NULL);

			ppdu_info->mpdu_q[user_id][mpdu_idx] = nbuf;

			/* Attach the RX_HDR bytes from the status frag as the
			 * first frag of the new parent nbuf.
			 */
			status = dp_rx_mon_nbuf_add_rx_frag(nbuf, status_frag,
							    ppdu_info->hdr_len - DP_RX_MON_RX_HDR_OFFSET,
							    ppdu_info->data - (unsigned char *)status_frag + 4,
							    DP_MON_DATA_BUFFER_SIZE, true);
			if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
				dp_mon_err("num_frags exceeding MAX frags");
				qdf_assert_always(0);
			}
			ppdu_info->mpdu_info[ppdu_info->user_id].mpdu_start_received = true;
			ppdu_info->mpdu_info[user_id].first_rx_hdr_rcvd = true;
			/* initialize decap type to invalid, this will be set to appropriate
			 * value once the mpdu start tlv is received
			 */
			ppdu_info->mpdu_info[user_id].decap_type = DP_MON_DECAP_FORMAT_INVALID;
		} else {
			/* Subsequent RX_HEADER for an MPDU already started */
			if (ppdu_info->mpdu_info[user_id].decap_type ==
					HAL_HW_RX_DECAP_FORMAT_RAW) {
				return num_buf_reaped;
			}

			/* For lite monitor at levels above MSDU, only the
			 * first RX_HDR per MPDU needs to be captured.
			 */
			if (dp_lite_mon_is_rx_enabled(mon_pdev) &&
			    !dp_lite_mon_is_level_msdu(mon_pdev))
				break;

			nbuf = ppdu_info->mpdu_q[user_id][mpdu_idx];
			if (qdf_unlikely(!nbuf)) {
				dp_mon_debug("nbuf is NULL");
				return num_buf_reaped;
			}

			/* Find an nbuf in the frag list with frag slots left */
			tmp_nbuf = qdf_get_nbuf_valid_frag(nbuf);

			if (!tmp_nbuf) {
				tmp_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
							  DP_RX_MON_MAX_MONITOR_HEADER,
							  DP_RX_MON_MAX_MONITOR_HEADER,
							  4, FALSE);
				if (qdf_unlikely(!tmp_nbuf)) {
					dp_mon_err("nbuf is NULL");
					qdf_assert_always(0);
				}
				mon_pdev->rx_mon_stats.parent_buf_alloc++;
				/* add new skb to frag list */
				qdf_nbuf_append_ext_list(nbuf, tmp_nbuf,
							 qdf_nbuf_len(tmp_nbuf));
			}
			dp_rx_mon_nbuf_add_rx_frag(tmp_nbuf, status_frag,
						   ppdu_info->hdr_len - DP_RX_MON_RX_HDR_OFFSET,
						   ppdu_info->data - (unsigned char *)status_frag + 4,
						   DP_MON_DATA_BUFFER_SIZE,
						   true);
		}
	}
	break;
	case HAL_TLV_STATUS_MON_BUF_ADDR:
	{
		/* Packet buffer reference TLV: reap the mon descriptor named
		 * by sw_cookie, unmap its buffer and attach the buffer as a
		 * frag of the current MPDU's nbuf.
		 */
		struct hal_rx_mon_msdu_info *buf_info;
		struct hal_mon_packet_info *packet_info = &ppdu_info->packet_info;
		struct dp_mon_desc *mon_desc = (struct dp_mon_desc *)(uintptr_t)ppdu_info->packet_info.sw_cookie;
		struct hal_rx_mon_mpdu_info *mpdu_info;
		uint16_t frag_idx = 0;

		qdf_assert_always(mon_desc);

		if (mon_desc->magic != DP_MON_DESC_MAGIC)
			qdf_assert_always(0);

		addr = mon_desc->buf_addr;
		qdf_assert_always(addr);

		mpdu_info = &ppdu_info->mpdu_info[user_id];
		if (!mon_desc->unmapped) {
			qdf_mem_unmap_page(soc->osdev,
					   (qdf_dma_addr_t)mon_desc->paddr,
				   DP_MON_DATA_BUFFER_SIZE,
					   QDF_DMA_FROM_DEVICE);
			mon_desc->unmapped = 1;
		}
		dp_mon_add_to_free_desc_list(desc_list, tail, mon_desc);
		num_buf_reaped++;

		mon_pdev->rx_mon_stats.pkt_buf_count++;
		nbuf = ppdu_info->mpdu_q[user_id][mpdu_idx];

		if (qdf_unlikely(!nbuf)) {

			/* WAR: RX_HDR is not received for this MPDU, drop this frame */
			mon_pdev->rx_mon_stats.rx_hdr_not_received++;
			DP_STATS_INC(mon_soc, frag_free, 1);
			qdf_frag_free(addr);
			return num_buf_reaped;
		}

		if (mpdu_info->decap_type == DP_MON_DECAP_FORMAT_INVALID) {
			/* decap type is invalid, drop the frame */
			mon_pdev->rx_mon_stats.mpdu_decap_type_invalid++;
			DP_STATS_INC(mon_soc, frag_free, 1);
			mon_pdev->rx_mon_stats.parent_buf_free++;
			qdf_frag_free(addr);
			qdf_nbuf_free(nbuf);
			/* we have freed the nbuf mark the q entry null */
			ppdu_info->mpdu_q[user_id][mpdu_idx] = NULL;
			return num_buf_reaped;
		}

		tmp_nbuf = qdf_get_nbuf_valid_frag(nbuf);

		if (!tmp_nbuf) {
			tmp_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
						  DP_RX_MON_MAX_MONITOR_HEADER,
						  DP_RX_MON_MAX_MONITOR_HEADER,
						  4, FALSE);
			if (qdf_unlikely(!tmp_nbuf)) {
				dp_mon_err("nbuf is NULL");
				DP_STATS_INC(mon_soc, frag_free, 1);
				mon_pdev->rx_mon_stats.parent_buf_free++;
				qdf_frag_free(addr);
				qdf_nbuf_free(nbuf);
				ppdu_info->mpdu_q[user_id][mpdu_idx] = NULL;
				return num_buf_reaped;
			}
			mon_pdev->rx_mon_stats.parent_buf_alloc++;
			/* add new skb to frag list */
			qdf_nbuf_append_ext_list(nbuf, tmp_nbuf,
						 qdf_nbuf_len(tmp_nbuf));
		}
		mpdu_info->full_pkt = true;

		if (mpdu_info->decap_type == HAL_HW_RX_DECAP_FORMAT_RAW) {
			/* RAW decap: on the first packet buffer, the RX_HDR
			 * frag at index 0 is replaced by the packet buffer.
			 */
			if (mpdu_info->first_rx_hdr_rcvd) {
				qdf_nbuf_remove_frag(nbuf, frag_idx, DP_MON_DATA_BUFFER_SIZE);
				dp_rx_mon_nbuf_add_rx_frag(nbuf, addr,
							   packet_info->dma_length,
							   DP_RX_MON_PACKET_OFFSET,
							   DP_MON_DATA_BUFFER_SIZE,
							   false);
				DP_STATS_INC(mon_soc, frag_free, 1);
				mpdu_info->first_rx_hdr_rcvd = false;
			} else {
				dp_rx_mon_nbuf_add_rx_frag(tmp_nbuf, addr,
							   packet_info->dma_length,
							   DP_RX_MON_PACKET_OFFSET,
							   DP_MON_DATA_BUFFER_SIZE,
							   false);
				DP_STATS_INC(mon_soc, frag_free, 1);
			}
		} else {
			dp_rx_mon_nbuf_add_rx_frag(tmp_nbuf, addr,
						   packet_info->dma_length,
						   DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE +
						   DP_RX_MON_PACKET_OFFSET,
						   DP_MON_DATA_BUFFER_SIZE,
						   false);
				DP_STATS_INC(mon_soc, frag_free, 1);
			/* Record per-buffer msdu metadata at the start of
			 * the packet buffer itself.
			 */
			buf_info = addr;

			if (!ppdu_info->msdu[user_id].first_buffer) {
				buf_info->first_buffer = true;
				ppdu_info->msdu[user_id].first_buffer = true;
			} else {
				buf_info->first_buffer = false;
			}

			/* msdu_continuation set => more buffers follow */
			if (packet_info->msdu_continuation)
				buf_info->last_buffer = false;
			else
				buf_info->last_buffer = true;

			buf_info->frag_len = packet_info->dma_length;
		}
		if (qdf_unlikely(packet_info->truncated))
			mpdu_info->truncated = true;
	}
	break;
	case HAL_TLV_STATUS_MSDU_END:
	{
		/* MSDU boundary: copy the accumulated msdu metadata into the
		 * head of the msdu's last buffer (non-RAW decap only).
		 */
		struct hal_rx_mon_msdu_info *msdu_info = &ppdu_info->msdu[user_id];
		struct hal_rx_mon_msdu_info *last_buf_info;
		/* update msdu metadata at last buffer of msdu in MPDU */
		nbuf = ppdu_info->mpdu_q[user_id][mpdu_idx];
		if (!nbuf) {
			/* reset msdu info for next msdu for same user */
			qdf_mem_zero(msdu_info, sizeof(*msdu_info));
			dp_mon_debug(" <%d> nbuf is NULL, return user: %d mpdu_idx: %d",
				     __LINE__, user_id, mpdu_idx);
			break;
		}
		num_frags = qdf_nbuf_get_nr_frags(nbuf);
		if (ppdu_info->mpdu_info[user_id].decap_type ==
				HAL_HW_RX_DECAP_FORMAT_RAW) {
			break;
		}
		/* This points to last buffer of MSDU . update metadata here */
		addr = qdf_nbuf_get_frag_addr(nbuf, num_frags - 1) -
					      (DP_RX_MON_PACKET_OFFSET +
					       DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE);
		last_buf_info = addr;

		last_buf_info->first_msdu = msdu_info->first_msdu;
		last_buf_info->last_msdu = msdu_info->last_msdu;
		last_buf_info->decap_type = msdu_info->decap_type;
		last_buf_info->msdu_index = msdu_info->msdu_index;
		last_buf_info->user_rssi = msdu_info->user_rssi;
		last_buf_info->reception_type = msdu_info->reception_type;
		last_buf_info->msdu_len = msdu_info->msdu_len;

		dp_rx_mon_pf_tag_to_buf_headroom_2_0(nbuf, ppdu_info, pdev,
						     soc);
		/* reset msdu info for next msdu for same user */
		qdf_mem_zero(msdu_info, sizeof(*msdu_info));

		/* If flow classification is enabled,
		 * update cce_metadata and fse_metadata
		 */
	}
	break;
	case HAL_TLV_STATUS_MPDU_START:
	{
		/* MPDU start: publish the decap type into the nbuf headroom
		 * metadata and mark the MPDU as started.
		 */
		struct hal_rx_mon_mpdu_info *mpdu_info, *mpdu_meta;

		nbuf = ppdu_info->mpdu_q[user_id][mpdu_idx];
		if (!nbuf) {
			dp_mon_debug(" <%d> nbuf is NULL, return user: %d mpdu_idx: %d",
				     __LINE__, user_id, mpdu_idx);
			break;
		}
		mpdu_meta = (struct hal_rx_mon_mpdu_info *)qdf_nbuf_data(nbuf);
		mpdu_info = &ppdu_info->mpdu_info[user_id];
		mpdu_meta->decap_type = mpdu_info->decap_type;
		ppdu_info->mpdu_info[ppdu_info->user_id].mpdu_start_received = true;
	break;
	}
	case HAL_TLV_STATUS_MPDU_END:
	{
		/* MPDU end: copy the error/truncation flags into the nbuf
		 * headroom metadata, reset per-MPDU state and advance the
		 * per-user MPDU count.
		 */
		struct hal_rx_mon_mpdu_info *mpdu_info, *mpdu_meta;
		mpdu_info = &ppdu_info->mpdu_info[user_id];
		nbuf = ppdu_info->mpdu_q[user_id][mpdu_idx];
		if (!nbuf) {
			/* reset mpdu info for next mpdu for same user */
			qdf_mem_zero(mpdu_info, sizeof(*mpdu_info));
			dp_mon_debug(" <%d> nbuf is NULL, return user: %d mpdu_idx: %d",
				     __LINE__, user_id, mpdu_idx);
			break;
		}
		mpdu_meta = (struct hal_rx_mon_mpdu_info *)qdf_nbuf_data(nbuf);
		mpdu_meta->mpdu_length_err = mpdu_info->mpdu_length_err;
		mpdu_meta->fcs_err = mpdu_info->fcs_err;
		ppdu_info->rx_status.rs_fcs_err = mpdu_info->fcs_err;
		mpdu_meta->overflow_err = mpdu_info->overflow_err;
		mpdu_meta->decrypt_err = mpdu_info->decrypt_err;
		mpdu_meta->full_pkt = mpdu_info->full_pkt;
		mpdu_meta->truncated = mpdu_info->truncated;

		ppdu_info->mpdu_q[user_id][mpdu_idx] = nbuf;
		/* reset mpdu info for next mpdu for same user */
		qdf_mem_zero(mpdu_info, sizeof(*mpdu_info));
		ppdu_info->mpdu_info[ppdu_info->user_id].mpdu_start_received = false;
		ppdu_info->mpdu_count[user_id]++;
	}
	break;
	}
	return num_buf_reaped;
}
1309 
1310 /**
1311  * dp_rx_mon_process_status_tlv () - Handle mon status process TLV
1312  *
1313  * @pdev: DP pdev handle
1314  *
1315  * Return
1316  */
1317 static inline struct hal_rx_ppdu_info *
1318 dp_rx_mon_process_status_tlv(struct dp_pdev *pdev)
1319 {
1320 	struct dp_soc *soc = pdev->soc;
1321 	struct dp_mon_pdev *mon_pdev = pdev->monitor_pdev;
1322 	struct dp_mon_pdev_be *mon_pdev_be =
1323 			dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1324 	union dp_mon_desc_list_elem_t *desc_list = NULL;
1325 	union dp_mon_desc_list_elem_t *tail = NULL;
1326 	struct dp_mon_desc *mon_desc;
1327 	uint8_t idx;
1328 	void *buf;
1329 	struct hal_rx_ppdu_info *ppdu_info;
1330 	uint8_t *rx_tlv;
1331 	uint8_t *rx_tlv_start;
1332 	uint16_t end_offset = 0;
1333 	uint16_t tlv_status = HAL_TLV_STATUS_BUF_DONE;
1334 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1335 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1336 	struct dp_mon_desc_pool *rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
1337 	uint8_t work_done = 0;
1338 	uint16_t status_buf_count;
1339 
1340 	if (!mon_pdev_be->desc_count) {
1341 		dp_mon_err("no of status buffer count is zero: %pK", pdev);
1342 		return NULL;
1343 	}
1344 
1345 	ppdu_info = &mon_pdev->ppdu_info;
1346 
1347 	if (!ppdu_info) {
1348 		dp_mon_err("ppdu_info malloc failed pdev: %pK", pdev);
1349 		return NULL;
1350 	}
1351 
1352 	status_buf_count = mon_pdev_be->desc_count;
1353 	for (idx = 0; idx < status_buf_count; idx++) {
1354 		mon_desc = mon_pdev_be->status[idx];
1355 		if (!mon_desc) {
1356 			qdf_assert_always(0);
1357 			return NULL;
1358 		}
1359 
1360 		buf = mon_desc->buf_addr;
1361 		end_offset = mon_desc->end_offset;
1362 
1363 		dp_mon_add_to_free_desc_list(&desc_list, &tail, mon_desc);
1364 		work_done++;
1365 
1366 		rx_tlv = buf;
1367 		rx_tlv_start = buf;
1368 
1369 		do {
1370 			tlv_status = hal_rx_status_get_tlv_info(rx_tlv,
1371 								ppdu_info,
1372 								pdev->soc->hal_soc,
1373 								buf);
1374 
1375 			work_done += dp_rx_mon_process_tlv_status(pdev,
1376 								  ppdu_info,
1377 								  buf,
1378 								  tlv_status,
1379 								  &desc_list,
1380 								  &tail);
1381 			rx_tlv = hal_rx_status_get_next_tlv(rx_tlv, 1);
1382 
1383 			/* HW provides end_offset (how many bytes HW DMA'ed)
1384 			 * as part of descriptor, use this as delimiter for
1385 			 * status buffer
1386 			 */
1387 			if ((rx_tlv - rx_tlv_start) >= (end_offset + 1))
1388 				break;
1389 
1390 	} while ((tlv_status == HAL_TLV_STATUS_PPDU_NOT_DONE) ||
1391 			(tlv_status == HAL_TLV_STATUS_HEADER) ||
1392 			(tlv_status == HAL_TLV_STATUS_MPDU_END) ||
1393 			(tlv_status == HAL_TLV_STATUS_MSDU_END) ||
1394 			(tlv_status == HAL_TLV_STATUS_MON_BUF_ADDR) ||
1395 			(tlv_status == HAL_TLV_STATUS_MPDU_START));
1396 
1397 		/* set status buffer pointer to NULL */
1398 		mon_pdev_be->status[idx] = NULL;
1399 		mon_pdev_be->desc_count--;
1400 
1401 		qdf_frag_free(buf);
1402 		DP_STATS_INC(mon_soc, frag_free, 1);
1403 		mon_pdev->rx_mon_stats.status_buf_count++;
1404 	}
1405 
1406 	if (work_done) {
1407 		mon_pdev->rx_mon_stats.mon_rx_bufs_replenished_dest +=
1408 				work_done;
1409 		dp_mon_buffers_replenish(soc, &soc->rxdma_mon_buf_ring[0],
1410 					 rx_mon_desc_pool,
1411 					 work_done,
1412 					 &desc_list, &tail, NULL);
1413 	}
1414 
1415 	ppdu_info->rx_status.tsft = ppdu_info->rx_status.tsft +
1416 				    pdev->timestamp.mlo_offset_lo_us +
1417 				    ((uint64_t)pdev->timestamp.mlo_offset_hi_us
1418 				    << 32);
1419 
1420 	return ppdu_info;
1421 }
1422 
1423 /**
1424  * dp_rx_mon_update_peer_id() - Update sw_peer_id with link peer_id
1425  *
1426  * @pdev: DP pdev handle
1427  * @ppdu_info: HAL PPDU Info buffer
1428  *
1429  * Return: none
1430  */
1431 #ifdef WLAN_FEATURE_11BE_MLO
1432 #define DP_PEER_ID_MASK 0x3FFF
1433 static inline
1434 void dp_rx_mon_update_peer_id(struct dp_pdev *pdev,
1435 			      struct hal_rx_ppdu_info *ppdu_info)
1436 {
1437 	uint32_t i;
1438 	uint16_t peer_id;
1439 	struct dp_soc *soc = pdev->soc;
1440 	uint32_t num_users = ppdu_info->com_info.num_users;
1441 
1442 	for (i = 0; i < num_users; i++) {
1443 		peer_id = ppdu_info->rx_user_status[i].sw_peer_id;
1444 		if (peer_id == HTT_INVALID_PEER)
1445 			continue;
1446 		/*
1447 		+---------------------------------------------------------------------+
1448 		| 15 | 14 | 13 | 12 | 11 | 10 | 9 | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
1449 		+---------------------------------------------------------------------+
1450 		| CHIP ID | ML |                     PEER ID                          |
1451 		+---------------------------------------------------------------------+
1452 		*/
1453 		peer_id &= DP_PEER_ID_MASK;
1454 		peer_id = dp_get_link_peer_id_by_lmac_id(soc, peer_id,
1455 							 pdev->lmac_id);
1456 		ppdu_info->rx_user_status[i].sw_peer_id = peer_id;
1457 	}
1458 }
1459 #else
static inline
void dp_rx_mon_update_peer_id(struct dp_pdev *pdev,
			      struct hal_rx_ppdu_info *ppdu_info)
{
	/* No-op: link peer-id translation only applies with 11BE MLO */
}
1465 #endif
1466 
1467 static inline uint32_t
1468 dp_rx_mon_srng_process_2_0(struct dp_soc *soc, struct dp_intr *int_ctx,
1469 			   uint32_t mac_id, uint32_t quota)
1470 {
1471 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
1472 	struct dp_mon_pdev *mon_pdev;
1473 	struct dp_mon_pdev_be *mon_pdev_be;
1474 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1475 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1476 	struct dp_mon_desc_pool *rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
1477 	hal_soc_handle_t hal_soc = soc->hal_soc;
1478 	void *rx_mon_dst_ring_desc;
1479 	void *mon_dst_srng;
1480 	uint32_t work_done = 0;
1481 	struct hal_rx_ppdu_info *ppdu_info = NULL;
1482 	QDF_STATUS status;
1483 
1484 	if (!pdev) {
1485 		dp_mon_err("%pK: pdev is null for mac_id = %d", soc, mac_id);
1486 		return work_done;
1487 	}
1488 
1489 	mon_pdev = pdev->monitor_pdev;
1490 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1491 	mon_dst_srng = soc->rxdma_mon_dst_ring[mac_id].hal_srng;
1492 
1493 	if (!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)) {
1494 		dp_mon_err("%pK: : HAL Monitor Destination Ring Init Failed -- %pK",
1495 			   soc, mon_dst_srng);
1496 		return work_done;
1497 	}
1498 
1499 	hal_soc = soc->hal_soc;
1500 
1501 	qdf_assert((hal_soc && pdev));
1502 
1503 	qdf_spin_lock_bh(&mon_pdev->mon_lock);
1504 
1505 	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, mon_dst_srng))) {
1506 		dp_mon_err("%s %d : HAL Mon Dest Ring access Failed -- %pK",
1507 			   __func__, __LINE__, mon_dst_srng);
1508 		qdf_spin_unlock_bh(&mon_pdev->mon_lock);
1509 		return work_done;
1510 	}
1511 
1512 	while (qdf_likely((rx_mon_dst_ring_desc =
1513 			  (void *)hal_srng_dst_peek(hal_soc, mon_dst_srng))
1514 				&& quota--)) {
1515 		struct hal_mon_desc hal_mon_rx_desc = {0};
1516 		struct dp_mon_desc *mon_desc;
1517 		hal_be_get_mon_dest_status(soc->hal_soc,
1518 					   rx_mon_dst_ring_desc,
1519 					   &hal_mon_rx_desc);
1520 		/* If it's empty descriptor, skip processing
1521 		 * and process next hW desc
1522 		 */
1523 		if (hal_mon_rx_desc.empty_descriptor == 1) {
1524 			dp_mon_debug("empty descriptor found mon_pdev: %pK",
1525 				     mon_pdev);
1526 			rx_mon_dst_ring_desc =
1527 				hal_srng_dst_get_next(hal_soc, mon_dst_srng);
1528 			mon_pdev->rx_mon_stats.empty_desc_ppdu++;
1529 			continue;
1530 		}
1531 		mon_desc = (struct dp_mon_desc *)(uintptr_t)(hal_mon_rx_desc.buf_addr);
1532 		qdf_assert_always(mon_desc);
1533 
1534 		if ((mon_desc == mon_pdev_be->prev_rxmon_desc) &&
1535 		    (mon_desc->cookie == mon_pdev_be->prev_rxmon_cookie)) {
1536 			dp_mon_err("duplicate descritout found mon_pdev: %pK mon_desc: %pK cookie: %d",
1537 				   mon_pdev, mon_desc, mon_desc->cookie);
1538 			mon_pdev->rx_mon_stats.dup_mon_buf_cnt++;
1539 			hal_srng_dst_get_next(hal_soc, mon_dst_srng);
1540 			continue;
1541 		}
1542 		mon_pdev_be->prev_rxmon_desc = mon_desc;
1543 		mon_pdev_be->prev_rxmon_cookie = mon_desc->cookie;
1544 
1545 		if (!mon_desc->unmapped) {
1546 			qdf_mem_unmap_page(soc->osdev, mon_desc->paddr,
1547 					   rx_mon_desc_pool->buf_size,
1548 					   QDF_DMA_FROM_DEVICE);
1549 			mon_desc->unmapped = 1;
1550 		}
1551 		mon_desc->end_offset = hal_mon_rx_desc.end_offset;
1552 
1553 		/* Flush and truncated status buffers content
1554 		 * need to discarded
1555 		 */
1556 		if (hal_mon_rx_desc.end_reason == HAL_MON_FLUSH_DETECTED ||
1557 		    hal_mon_rx_desc.end_reason == HAL_MON_PPDU_TRUNCATED) {
1558 			dp_mon_debug("end_resaon: %d mon_pdev: %pK",
1559 				     hal_mon_rx_desc.end_reason, mon_pdev);
1560 			mon_pdev->rx_mon_stats.status_ppdu_drop++;
1561 			dp_rx_mon_handle_flush_n_trucated_ppdu(soc,
1562 							       pdev,
1563 							       mon_desc);
1564 			rx_mon_dst_ring_desc = hal_srng_dst_get_next(hal_soc,
1565 							mon_dst_srng);
1566 			continue;
1567 		}
1568 		if (mon_pdev_be->desc_count >= DP_MON_MAX_STATUS_BUF)
1569 			qdf_assert_always(0);
1570 
1571 		mon_pdev_be->status[mon_pdev_be->desc_count++] = mon_desc;
1572 
1573 		rx_mon_dst_ring_desc = hal_srng_dst_get_next(hal_soc, mon_dst_srng);
1574 
1575 		status = dp_rx_process_pktlog_be(soc, pdev, ppdu_info,
1576 						 mon_desc->buf_addr,
1577 						 hal_mon_rx_desc.end_offset);
1578 
1579 		if (hal_mon_rx_desc.end_reason == HAL_MON_STATUS_BUFFER_FULL)
1580 			continue;
1581 
1582 		mon_pdev->rx_mon_stats.status_ppdu_done++;
1583 
1584 		ppdu_info = dp_rx_mon_process_status_tlv(pdev);
1585 
1586 		if (ppdu_info)
1587 			dp_rx_mon_update_peer_id(pdev, ppdu_info);
1588 
1589 		/* Call enhanced stats update API */
1590 		if (mon_pdev->enhanced_stats_en && ppdu_info)
1591 			dp_rx_handle_ppdu_stats(soc, pdev, ppdu_info);
1592 		else if (dp_cfr_rcc_mode_status(pdev) && ppdu_info)
1593 			dp_rx_handle_cfr(soc, pdev, ppdu_info);
1594 
1595 		dp_rx_mon_process_ppdu_info(pdev, ppdu_info);
1596 
1597 		work_done++;
1598 
1599 		/* desc_count should be zero  after PPDU status processing */
1600 		if (mon_pdev_be->desc_count > 0)
1601 			qdf_assert_always(0);
1602 
1603 		mon_pdev_be->desc_count = 0;
1604 	}
1605 	dp_srng_access_end(int_ctx, soc, mon_dst_srng);
1606 
1607 	qdf_spin_unlock_bh(&mon_pdev->mon_lock);
1608 	dp_mon_info("mac_id: %d, work_done:%d", mac_id, work_done);
1609 	return work_done;
1610 }
1611 
uint32_t
dp_rx_mon_process_2_0(struct dp_soc *soc, struct dp_intr *int_ctx,
		      uint32_t mac_id, uint32_t quota)
{
	/* Thin wrapper: delegate to the SRNG-based 2.0 Rx mon processor */
	return dp_rx_mon_srng_process_2_0(soc, int_ctx, mac_id, quota);
}
1622 
1623 void
1624 dp_rx_mon_buf_desc_pool_deinit(struct dp_soc *soc)
1625 {
1626 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1627 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1628 
1629 	/* Drain page frag cachce before pool deinit */
1630 	qdf_frag_cache_drain(&mon_soc_be->rx_desc_mon.pf_cache);
1631 	dp_mon_desc_pool_deinit(&mon_soc_be->rx_desc_mon);
1632 }
1633 
1634 QDF_STATUS
1635 dp_rx_mon_buf_desc_pool_init(struct dp_soc *soc)
1636 {
1637 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1638 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1639 	uint32_t num_entries;
1640 
1641 	num_entries =
1642 		wlan_cfg_get_dp_soc_rx_mon_buf_ring_size(soc->wlan_cfg_ctx);
1643 	return dp_mon_desc_pool_init(&mon_soc_be->rx_desc_mon, num_entries);
1644 }
1645 
1646 void dp_rx_mon_buf_desc_pool_free(struct dp_soc *soc)
1647 {
1648 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1649 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1650 
1651 	if (mon_soc)
1652 		dp_mon_desc_pool_free(&mon_soc_be->rx_desc_mon);
1653 }
1654 
1655 QDF_STATUS
1656 dp_rx_mon_buf_desc_pool_alloc(struct dp_soc *soc)
1657 {
1658 	struct dp_srng *mon_buf_ring;
1659 	struct dp_mon_desc_pool *rx_mon_desc_pool;
1660 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1661 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1662 	int entries;
1663 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
1664 
1665 	soc_cfg_ctx = soc->wlan_cfg_ctx;
1666 
1667 	entries = wlan_cfg_get_dp_soc_rx_mon_buf_ring_size(soc_cfg_ctx);
1668 	mon_buf_ring = &soc->rxdma_mon_buf_ring[0];
1669 
1670 	rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
1671 
1672 	qdf_print("%s:%d rx mon buf desc pool entries: %d", __func__, __LINE__, entries);
1673 	return dp_mon_desc_pool_alloc(entries, rx_mon_desc_pool);
1674 }
1675 
1676 void
1677 dp_rx_mon_buffers_free(struct dp_soc *soc)
1678 {
1679 	struct dp_mon_desc_pool *rx_mon_desc_pool;
1680 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1681 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1682 
1683 	rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
1684 
1685 	dp_mon_pool_frag_unmap_and_free(soc, rx_mon_desc_pool);
1686 }
1687 
1688 QDF_STATUS
1689 dp_rx_mon_buffers_alloc(struct dp_soc *soc, uint32_t size)
1690 {
1691 	struct dp_srng *mon_buf_ring;
1692 	struct dp_mon_desc_pool *rx_mon_desc_pool;
1693 	union dp_mon_desc_list_elem_t *desc_list = NULL;
1694 	union dp_mon_desc_list_elem_t *tail = NULL;
1695 	struct dp_mon_soc *mon_soc = soc->monitor_soc;
1696 	struct dp_mon_soc_be *mon_soc_be = dp_get_be_mon_soc_from_dp_mon_soc(mon_soc);
1697 
1698 	mon_buf_ring = &soc->rxdma_mon_buf_ring[0];
1699 
1700 	rx_mon_desc_pool = &mon_soc_be->rx_desc_mon;
1701 
1702 	return dp_mon_buffers_replenish(soc, mon_buf_ring,
1703 					rx_mon_desc_pool,
1704 					size,
1705 					&desc_list, &tail, NULL);
1706 }
1707 
1708 #ifdef QCA_ENHANCED_STATS_SUPPORT
1709 void
1710 dp_rx_mon_populate_ppdu_usr_info_2_0(struct mon_rx_user_status *rx_user_status,
1711 				     struct cdp_rx_stats_ppdu_user *ppdu_user)
1712 {
1713 	ppdu_user->mpdu_retries = rx_user_status->retry_mpdu;
1714 }
1715 
1716 #ifdef WLAN_FEATURE_11BE
/**
 * dp_rx_mon_stats_update_2_0() - Update per-peer Rx monitor stats including
 * 11BE (EHT) MCS buckets and punctured-bandwidth counters
 *
 * @mon_peer: monitor peer whose stats are updated
 * @ppdu: PPDU-level Rx indication
 * @ppdu_user: per-user stats of this PPDU
 */
void dp_rx_mon_stats_update_2_0(struct dp_mon_peer *mon_peer,
				struct cdp_rx_indication_ppdu *ppdu,
				struct cdp_rx_stats_ppdu_user *ppdu_user)
{
	uint8_t mcs, preamble, ppdu_type, punc_mode;
	uint32_t num_msdu;

	preamble = ppdu->u.preamble;
	ppdu_type = ppdu->u.ppdu_type;
	num_msdu = ppdu_user->num_msdu;
	punc_mode = ppdu->punc_bw;

	/* SU PPDUs carry MCS at PPDU level, MU PPDUs at user level */
	if (ppdu_type == HAL_RX_TYPE_SU)
		mcs = ppdu->u.mcs;
	else
		mcs = ppdu_user->mcs;

	DP_STATS_INC(mon_peer, rx.mpdu_retry_cnt, ppdu_user->mpdu_retries);
	DP_STATS_INC(mon_peer, rx.punc_bw[punc_mode], num_msdu);
	/* For 11BE, MCS values beyond MAX_MCS_11BE are folded into the
	 * MAX_MCS - 1 overflow bucket; in-range values count normally.
	 */
	DP_STATS_INCC(mon_peer,
		      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11BE) && (preamble == DOT11_BE)));
	DP_STATS_INCC(mon_peer,
		      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11BE) && (preamble == DOT11_BE)));
	/* Per-PPDU (not per-msdu) MCS distribution, split by PPDU type */
	DP_STATS_INCC(mon_peer,
		      rx.su_be_ppdu_cnt.mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= (MAX_MCS_11BE)) && (preamble == DOT11_BE) &&
		      (ppdu_type == HAL_RX_TYPE_SU)));
	DP_STATS_INCC(mon_peer,
		      rx.su_be_ppdu_cnt.mcs_count[mcs], 1,
		      ((mcs < (MAX_MCS_11BE)) && (preamble == DOT11_BE) &&
		      (ppdu_type == HAL_RX_TYPE_SU)));
	DP_STATS_INCC(mon_peer,
		      rx.mu_be_ppdu_cnt[TXRX_TYPE_MU_OFDMA].mcs_count[MAX_MCS - 1],
		      1, ((mcs >= (MAX_MCS_11BE)) &&
		      (preamble == DOT11_BE) &&
		      (ppdu_type == HAL_RX_TYPE_MU_OFDMA)));
	DP_STATS_INCC(mon_peer,
		      rx.mu_be_ppdu_cnt[TXRX_TYPE_MU_OFDMA].mcs_count[mcs],
		      1, ((mcs < (MAX_MCS_11BE)) &&
		      (preamble == DOT11_BE) &&
		      (ppdu_type == HAL_RX_TYPE_MU_OFDMA)));
	DP_STATS_INCC(mon_peer,
		      rx.mu_be_ppdu_cnt[TXRX_TYPE_MU_MIMO].mcs_count[MAX_MCS - 1],
		      1, ((mcs >= (MAX_MCS_11BE)) &&
		      (preamble == DOT11_BE) &&
		      (ppdu_type == HAL_RX_TYPE_MU_MIMO)));
	DP_STATS_INCC(mon_peer,
		      rx.mu_be_ppdu_cnt[TXRX_TYPE_MU_MIMO].mcs_count[mcs],
		      1, ((mcs < (MAX_MCS_11BE)) &&
		      (preamble == DOT11_BE) &&
		      (ppdu_type == HAL_RX_TYPE_MU_MIMO)));
}
1771 
1772 void
1773 dp_rx_mon_populate_ppdu_info_2_0(struct hal_rx_ppdu_info *hal_ppdu_info,
1774 				 struct cdp_rx_indication_ppdu *ppdu)
1775 {
1776 	uint16_t puncture_pattern;
1777 	enum cdp_punctured_modes punc_mode;
1778 
1779 	/* Align bw value as per host data structures */
1780 	if (hal_ppdu_info->rx_status.bw == HAL_FULL_RX_BW_320)
1781 		ppdu->u.bw = CMN_BW_320MHZ;
1782 	else
1783 		ppdu->u.bw = hal_ppdu_info->rx_status.bw;
1784 	if (hal_ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11BE) {
1785 		/* Align preamble value as per host data structures */
1786 		ppdu->u.preamble = DOT11_BE;
1787 		ppdu->u.stbc = hal_ppdu_info->rx_status.is_stbc;
1788 		ppdu->u.dcm = hal_ppdu_info->rx_status.dcm;
1789 	} else {
1790 		ppdu->u.preamble = hal_ppdu_info->rx_status.preamble_type;
1791 	}
1792 
1793 	puncture_pattern = hal_ppdu_info->rx_status.punctured_pattern;
1794 	punc_mode = dp_mon_get_puncture_type(puncture_pattern,
1795 					     ppdu->u.bw);
1796 	ppdu->punc_bw = punc_mode;
1797 }
1798 #else
void dp_rx_mon_stats_update_2_0(struct dp_mon_peer *mon_peer,
				struct cdp_rx_indication_ppdu *ppdu,
				struct cdp_rx_stats_ppdu_user *ppdu_user)
{
	/* Without 11BE support, only the MPDU retry count is tracked */
	DP_STATS_INC(mon_peer, rx.mpdu_retry_cnt, ppdu_user->mpdu_retries);
}
1805 
void
dp_rx_mon_populate_ppdu_info_2_0(struct hal_rx_ppdu_info *hal_ppdu_info,
				 struct cdp_rx_indication_ppdu *ppdu)
{
	/* 11BE disabled: puncturing does not apply */
	ppdu->punc_bw = NO_PUNCTURE;
}
1812 #endif
1813 #endif
1814