1 /*
2  * Copyright (c) 2021, The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 #include "dp_types.h"
18 #include "qdf_nbuf.h"
19 #include "dp_internal.h"
20 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
21 #include <dp_be.h>
22 #include <qdf_nbuf_frag.h>
23 #include <hal_be_api_mon.h>
24 #include <dp_mon.h>
25 #include <dp_tx_mon_2.0.h>
26 #include <dp_mon_2.0.h>
27 #include <dp_lite_mon.h>
28 
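/*
 * Maximum number of tx ppdu info entries (queued plus deferred) allowed to be
 * outstanding for post processing; once exceeded, the status buffers of a
 * completed PPDU are dropped instead of parsed
 * (see dp_tx_mon_process_status_tlv()).
 */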
29 #define MAX_PPDU_INFO_LIST_DEPTH 64
30 
31 /**
32  * dp_tx_mon_status_free_packet_buf() - API to free packet buffer
33  * @pdev: pdev Handle
34  * @status_frag: status frag
35  * @end_offset: status fragment end offset
36  * @mon_desc_list_ref: tx monitor descriptor list reference
37  *
38  * Return: void
39  */
40 void
41 dp_tx_mon_status_free_packet_buf(struct dp_pdev *pdev,
42 				 qdf_frag_t status_frag, uint32_t end_offset,
43 				 struct dp_tx_mon_desc_list *mon_desc_list_ref)
44 {
45 	struct dp_mon_pdev *mon_pdev;
46 	struct dp_mon_pdev_be *mon_pdev_be;
47 	struct dp_pdev_tx_monitor_be *tx_mon_be;
48 	struct hal_mon_packet_info packet_info = {0};
49 	uint8_t *tx_tlv;
50 	uint8_t *mon_buf_tx_tlv;
51 	uint8_t *tx_tlv_start;
52 
53 	if (qdf_unlikely(!pdev))
54 		return;
55 
56 	mon_pdev = pdev->monitor_pdev;
57 	if (qdf_unlikely(!mon_pdev))
58 		return;
59 
60 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
61 	if (qdf_unlikely(!mon_pdev_be))
62 		return;
63 
64 	tx_mon_be = &mon_pdev_be->tx_monitor_be;
65 	tx_tlv = status_frag;
66 	tx_tlv_start = tx_tlv;
67 	/*
68 	 * parse each status buffer and find packet buffer in it
69 	 */
70 	do {
71 		if (hal_txmon_is_mon_buf_addr_tlv(pdev->soc->hal_soc, tx_tlv)) {
72 			struct dp_mon_desc *mon_desc = NULL;
73 			qdf_frag_t packet_buffer = NULL;
74 
75 			mon_buf_tx_tlv = ((uint8_t *)tx_tlv +
76 					  HAL_RX_TLV64_HDR_SIZE);
77 			hal_txmon_populate_packet_info(pdev->soc->hal_soc,
78 						       mon_buf_tx_tlv,
79 						       &packet_info);
80 
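			/*
			 * sw_cookie in the packet info carries the
			 * dp_mon_desc of the payload buffer attached to this
			 * TLV; recover it so the buffer can be unmapped and
			 * freed below.
			 */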
81 			mon_desc = (struct dp_mon_desc *)(uintptr_t)packet_info.sw_cookie;
82 
83 			qdf_assert_always(mon_desc);
84 
85 			if (mon_desc->magic != DP_MON_DESC_MAGIC)
86 				qdf_assert_always(0);
87 
88 			if (!mon_desc->unmapped) {
89 				qdf_mem_unmap_page(pdev->soc->osdev,
90 						   (qdf_dma_addr_t)mon_desc->paddr,
91 						   DP_MON_DATA_BUFFER_SIZE,
92 						   QDF_DMA_FROM_DEVICE);
93 				mon_desc->unmapped = 1;
94 			}
95 
96 			packet_buffer = (qdf_frag_t)(mon_desc->buf_addr);
97 			mon_desc->buf_addr = NULL;
98 
99 			qdf_assert_always(packet_buffer);
100 			/* increment reap count */
101 			mon_desc_list_ref->tx_mon_reap_cnt++;
102 
103 			/* add the mon_desc to free list */
104 			dp_mon_add_to_free_desc_list(&mon_desc_list_ref->desc_list,
105 						     &mon_desc_list_ref->tail,
106 						     mon_desc);
107 
108 			tx_mon_be->stats.pkt_buf_recv++;
109 			tx_mon_be->stats.pkt_buf_free++;
110 
111 			/* free buffer, mapped to descriptor */
112 			qdf_frag_free(packet_buffer);
113 		}
114 
115 		/* need api definition for hal_tx_status_get_next_tlv */
116 		tx_tlv = hal_tx_status_get_next_tlv(tx_tlv);
117 	} while ((tx_tlv - tx_tlv_start) < end_offset);
118 }
119 
120 #if defined(WLAN_TX_PKT_CAPTURE_ENH_BE) && defined(QCA_MONITOR_2_0_SUPPORT)
121 /**
122  * dp_tx_mon_status_queue_free() - API to free status buffer
123  * @pdev: pdev Handle
124  * @tx_mon_be: pointer to tx_monitor_be
125  * @mon_desc_list_ref: tx monitor descriptor list reference
126  *
127  * Return: void
128  */
129 static void
130 dp_tx_mon_status_queue_free(struct dp_pdev *pdev,
131 			    struct dp_pdev_tx_monitor_be *tx_mon_be,
132 			    struct dp_tx_mon_desc_list *mon_desc_list_ref)
133 {
134 	uint8_t last_frag_q_idx = tx_mon_be->last_frag_q_idx;
135 	qdf_frag_t status_frag = NULL;
136 	uint8_t i = tx_mon_be->cur_frag_q_idx;
137 	uint32_t end_offset = 0;
138 
139 	for (; i < last_frag_q_idx; i++) {
140 		status_frag = tx_mon_be->frag_q_vec[i].frag_buf;
141 
142 		if (qdf_unlikely(!status_frag))
143 			continue;
144 
145 		end_offset = tx_mon_be->frag_q_vec[i].end_offset;
146 		dp_tx_mon_status_free_packet_buf(pdev, status_frag, end_offset,
147 						 mon_desc_list_ref);
148 		tx_mon_be->stats.status_buf_free++;
149 		qdf_frag_free(status_frag);
150 		tx_mon_be->frag_q_vec[i].frag_buf = NULL;
151 		tx_mon_be->frag_q_vec[i].end_offset = 0;
152 	}
153 	tx_mon_be->last_frag_q_idx = 0;
154 	tx_mon_be->cur_frag_q_idx = 0;
155 }
156 
157 /**
 * dp_tx_mon_enqueue_mpdu_nbuf() - API to enqueue nbuf to the per user mpdu queue
 * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 * @user_id: user index
 * @mpdu_nbuf: nbuf to be enqueued
162  *
163  * Return: void
164  */
165 static void
166 dp_tx_mon_enqueue_mpdu_nbuf(struct dp_pdev *pdev,
167 			    struct dp_tx_ppdu_info *tx_ppdu_info,
168 			    uint8_t user_id, qdf_nbuf_t mpdu_nbuf)
169 {
170 	qdf_nbuf_t radiotap = NULL;
171 	/* enqueue mpdu_nbuf to the per user mpdu_q */
172 	qdf_nbuf_queue_t *usr_mpdu_q = NULL;
173 
174 	if (!TXMON_PPDU_HAL(tx_ppdu_info, rx_user_status) ||
175 	    !TXMON_PPDU_HAL(tx_ppdu_info, num_users))
176 		QDF_BUG(0);
177 
178 	usr_mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, user_id, mpdu_q);
179 
	radiotap = qdf_nbuf_alloc(pdev->soc->osdev, MAX_MONITOR_HEADER,
				  MAX_MONITOR_HEADER,
				  4, FALSE);
	if (qdf_unlikely(!radiotap)) {
		/* radiotap nbuf allocation failed, drop this mpdu */
		qdf_nbuf_free(mpdu_nbuf);
		return;
	}

	/* append the 802.11 mpdu to the radiotap nbuf as an ext list */
	qdf_nbuf_append_ext_list(radiotap, mpdu_nbuf, qdf_nbuf_len(mpdu_nbuf));
186 	qdf_nbuf_queue_add(usr_mpdu_q, radiotap);
187 }
188 
189 /*
190  * TX MONITOR
191  *
192  * frame format
193  * -------------------------------------------------------------------------
194  *  FUNC   | ToDS | FromDS | ADDRESS 1 | ADDRESS 2 | ADDRESS 3 | ADDRESS 4 |
195  *  ------------------------------------------------------------------------
196  *  IBSS   |  0   |    0   | DA        | SA        | BSSID     | NOT USED  |
197  *  TO AP  |  1   |    0   | BSSID     | SA        | DA        | NOT USED  |
198  *  From AP|  0   |    1   | DA        | BSSID     | SA        | NOT USED  |
199  *  WDS    |  1   |    1   | RA        | TA        | DA        | SA        |
200  *  ------------------------------------------------------------------------
201  *
202  *  HOST GENERATED FRAME:
203  *  =====================
204  *     1. RTS
205  *     2. CTS
206  *     3. ACK
207  *     4. BA
208  *     5. Multi STA BA
209  *
210  *  control frame
211  *  ------------------------------------------------------------
212  *  | protocol 2b | Type 2b | subtype 4b | ToDS 1b | FromDS 1b |
213  *                | Morefrag 1b | Retry 1b | pwr_mgmt 1b | More data 1b |
214  *                              | protected frm 1b | order 1b |
215  *  -----------------------------------------------------------
 *  control frames originate from a wireless station, so ToDS = FromDS = 0
217  *
218  *  RTS
219  *  ---------------------------------------------------------------------------
220  *  | FrameCtl 2 | Duration 2 | Receiver Address 6 | Transmit address 6 | FCS |
221  *  ---------------------------------------------------------------------------
222  *  subtype in FC is RTS - 1101
223  *  type in FC is control frame - 10
224  *
225  *  CTS
226  *  --------------------------------------------------------
227  *  | FrameCtl 2 | Duration 2 | Receiver Address 6 | FCS 4 |
228  *  --------------------------------------------------------
229  *  subtype in FC is CTS - 0011
230  *  type in FC is control frame - 10
231  *
232  *  ACK
233  *  --------------------------------------------------------
234  *  | FrameCtl 2 | Duration 2 | Receiver Address 6 | FCS 4 |
235  *  --------------------------------------------------------
236  *  subtype in FC is ACK - 1011
237  *  type in FC is control frame - 10
238  *
239  *  Block ACK
240  *  --------------------------------------------------------------------------
241  *  | FC 2 | Dur 2 | RA 6 | TA 6 | BA CTRL 2 | BA Information variable | FCS |
242  *  --------------------------------------------------------------------------
243  *
244  *	Block Ack control
245  *	---------------------------------------------------------------
246  *	| BA ACK POLICY B0 | BA TYPE B1-B4 | Rsv B5-B11 | TID B12-B15 |
247  *	---------------------------------------------------------------
248  *
249  *	BA ack policy
250  *	0 - Normal Ack
251  *	1 - No Ack
252  *
253  *	Block Ack Type
254  *	0     - Reserved
255  *	1     - extended compressed
256  *	2     - compressed
257  *	3     - Multi TID
258  *	4-5   - Reserved
259  *	6     - GCR
260  *	7-9   - Reserved
261  *	10    - GLK-GCR
262  *	11    - Multi-STA
263  *	12-15 - Reserved
264  *
265  *	Block Ack information
266  *	----------------------------------------------------------
267  *	| Block ack start seq ctrl 2 | Block ack bitmap variable |
268  *	----------------------------------------------------------
269  *
270  *	Multi STA Block Ack Information
271  *	-----------------------------------------------------------------
272  *	| Per STA TID info 2 | BA start seq ctrl 2 | BA bitmap variable |
273  *	-----------------------------------------------------------------
274  *
275  *		Per STA TID info
276  *		------------------------------------
277  *		| AID11 11b | Ack Type 1b | TID 4b |
278  *		------------------------------------
279  *		AID11 - 2045 means unassociated STA, then ACK Type and TID 0, 15
280  *
281  *		Mgmt/PS-POLL frame ack
282  *		Ack type - 1 and TID - 15, BA_seq_ctrl & BA_bitmap - not present
283  *
284  *		All ack context - with no bitmap (all AMPDU success)
285  *		Ack type - 1 and TID - 14, BA_seq_ctrl & BA_bitmap - not present
286  *
287  *		Block ack context
288  *		Ack type - 0 and  TID - 0~7 BA_seq_ctrl & BA_bitmap - present
289  *
290  *		Ack context
291  *		Ack type - 1 and TID - 0~7 BA_seq_ctrl & BA_bitmap - not present
292  *
293  *
294  */
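
/*
 * Worked example (illustrative only): the Multi-STA per-STA TID info field
 * above packs AID in B0-B10, Ack type in B11 and TID in B12-B15.  For AID 5,
 * Ack type 1 and TID 3 the 16-bit value is
 * (3 << 12) | (1 << 11) | (5 & 0x7FF) = 0x3805.
 */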
295 
296 /**
297  * dp_tx_mon_generate_cts2self_frm() - API to generate cts2self frame
298  * @pdev: pdev Handle
299  * @tx_ppdu_info: pointer to tx ppdu info structure
300  *
301  * Return: void
302  */
303 static void
304 dp_tx_mon_generate_cts2self_frm(struct dp_pdev *pdev,
305 				struct dp_tx_ppdu_info *tx_ppdu_info)
306 {
307 	/* allocate and populate CTS/ CTS2SELF frame */
308 	/* enqueue 802.11 payload to per user mpdu_q */
309 	struct dp_mon_pdev *mon_pdev;
310 	struct dp_mon_pdev_be *mon_pdev_be;
311 	struct dp_pdev_tx_monitor_be *tx_mon_be;
312 	struct hal_tx_status_info *tx_status_info;
313 	uint16_t duration_le = 0;
314 	struct ieee80211_frame_min_one *wh_min = NULL;
315 	qdf_nbuf_t mpdu_nbuf = NULL;
316 
317 	/* sanity check */
318 	if (qdf_unlikely(!pdev))
319 		return;
320 
321 	mon_pdev = pdev->monitor_pdev;
322 	if (qdf_unlikely(!mon_pdev))
323 		return;
324 
325 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
326 	if (qdf_unlikely(!mon_pdev_be))
327 		return;
328 
329 	tx_mon_be = &mon_pdev_be->tx_monitor_be;
330 	tx_status_info = &tx_mon_be->prot_status_info;
331 
	/*
	 * for radiotap we allocate a new skb,
	 * so we do not need to reserve skb headroom here
	 */
336 	mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
337 				   MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
338 	if (!mpdu_nbuf)
339 		return;
340 
341 	wh_min = (struct ieee80211_frame_min_one *)qdf_nbuf_data(mpdu_nbuf);
342 	qdf_mem_zero(wh_min, MAX_DUMMY_FRM_BODY);
343 
344 	wh_min->i_fc[1] = 0;
345 	wh_min->i_fc[0] = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL |
346 			   IEEE80211_FC0_SUBTYPE_CTS);
347 	duration_le = qdf_cpu_to_le16(TXMON_PPDU_COM(tx_ppdu_info, duration));
348 	wh_min->i_dur[1] = (duration_le & 0xFF00) >> 8;
349 	wh_min->i_dur[0] = (duration_le & 0xFF);
350 
351 	qdf_mem_copy(wh_min->i_addr1,
352 		     TXMON_STATUS_INFO(tx_status_info, addr1),
353 		     QDF_MAC_ADDR_SIZE);
354 
355 	qdf_nbuf_set_pktlen(mpdu_nbuf, sizeof(*wh_min));
356 	dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, 0, mpdu_nbuf);
357 	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
358 }
359 
360 /**
361  * dp_tx_mon_generate_rts_frm() - API to generate rts frame
362  * @pdev: pdev Handle
363  * @tx_ppdu_info: pointer to tx ppdu info structure
364  *
365  * Return: void
366  */
367 static void
368 dp_tx_mon_generate_rts_frm(struct dp_pdev *pdev,
369 			   struct dp_tx_ppdu_info *tx_ppdu_info)
370 {
371 	/* allocate and populate RTS frame */
372 	/* enqueue 802.11 payload to per user mpdu_q */
373 	struct dp_mon_pdev *mon_pdev;
374 	struct dp_mon_pdev_be *mon_pdev_be;
375 	struct dp_pdev_tx_monitor_be *tx_mon_be;
376 	struct hal_tx_status_info *tx_status_info;
377 	uint16_t duration_le = 0;
378 	struct ieee80211_ctlframe_addr2 *wh_min = NULL;
379 	qdf_nbuf_t mpdu_nbuf = NULL;
380 
381 	/* sanity check */
382 	if (qdf_unlikely(!pdev))
383 		return;
384 
385 	mon_pdev = pdev->monitor_pdev;
386 	if (qdf_unlikely(!mon_pdev))
387 		return;
388 
389 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
390 	if (qdf_unlikely(!mon_pdev_be))
391 		return;
392 
393 	tx_mon_be = &mon_pdev_be->tx_monitor_be;
394 	tx_status_info = &tx_mon_be->prot_status_info;
	/*
	 * for radiotap we allocate a new skb,
	 * so we do not need to reserve skb headroom here
	 */
399 	mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
400 				   MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
401 	if (!mpdu_nbuf)
402 		return;
403 
404 	wh_min = (struct ieee80211_ctlframe_addr2 *)qdf_nbuf_data(mpdu_nbuf);
405 	qdf_mem_zero(wh_min, MAX_DUMMY_FRM_BODY);
406 
407 	wh_min->i_fc[1] = 0;
408 	wh_min->i_fc[0] = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL |
409 			   IEEE80211_FC0_SUBTYPE_RTS);
410 	duration_le = qdf_cpu_to_le16(TXMON_PPDU_COM(tx_ppdu_info, duration));
411 	wh_min->i_aidordur[1] = (duration_le & 0xFF00) >> 8;
412 	wh_min->i_aidordur[0] = (duration_le & 0xFF);
413 
414 	if (!tx_status_info->protection_addr)
415 		tx_status_info = &tx_mon_be->data_status_info;
416 	qdf_mem_copy(wh_min->i_addr1,
417 		     TXMON_STATUS_INFO(tx_status_info, addr1),
418 		     QDF_MAC_ADDR_SIZE);
419 	qdf_mem_copy(wh_min->i_addr2,
420 		     TXMON_STATUS_INFO(tx_status_info, addr2),
421 		     QDF_MAC_ADDR_SIZE);
422 
423 	qdf_nbuf_set_pktlen(mpdu_nbuf, sizeof(*wh_min));
424 	dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, 0, mpdu_nbuf);
425 	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
426 }
427 
428 /**
429  * dp_tx_mon_generate_ack_frm() - API to generate ack frame
430  * @pdev: pdev Handle
431  * @tx_ppdu_info: pointer to tx ppdu info structure
432  *
433  * Return: void
434  */
435 static void
436 dp_tx_mon_generate_ack_frm(struct dp_pdev *pdev,
437 			   struct dp_tx_ppdu_info *tx_ppdu_info)
438 {
439 	/* allocate and populate ACK frame */
440 	/* enqueue 802.11 payload to per user mpdu_q */
441 	struct dp_mon_pdev *mon_pdev;
442 	struct dp_mon_pdev_be *mon_pdev_be;
443 	struct dp_pdev_tx_monitor_be *tx_mon_be;
444 	struct hal_tx_status_info *tx_status_info;
445 	struct ieee80211_frame_min_one *wh_addr1 = NULL;
446 	qdf_nbuf_t mpdu_nbuf = NULL;
447 	uint8_t user_id = TXMON_PPDU(tx_ppdu_info, cur_usr_idx);
448 
449 	/* sanity check */
450 	if (qdf_unlikely(!pdev))
451 		return;
452 
453 	mon_pdev = pdev->monitor_pdev;
454 	if (qdf_unlikely(!mon_pdev))
455 		return;
456 
457 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
458 	if (qdf_unlikely(!mon_pdev_be))
459 		return;
460 
461 	tx_mon_be = &mon_pdev_be->tx_monitor_be;
462 	tx_status_info = &tx_mon_be->data_status_info;
	/*
	 * for radiotap we allocate a new skb,
	 * so we do not need to reserve skb headroom here
	 */
467 	mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
468 				   MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
469 	if (!mpdu_nbuf)
470 		return;
471 
472 	wh_addr1 = (struct ieee80211_frame_min_one *)qdf_nbuf_data(mpdu_nbuf);
473 	wh_addr1->i_fc[1] = 0;
474 	wh_addr1->i_fc[0] = (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_CTL |
475 			     IEEE80211_FC0_SUBTYPE_ACK);
476 	qdf_mem_copy(wh_addr1->i_addr1,
477 		     TXMON_STATUS_INFO(tx_status_info, addr1),
478 		     QDF_MAC_ADDR_SIZE);
479 	/* set duration zero for ack frame */
480 	*(u_int16_t *)(&wh_addr1->i_dur) = qdf_cpu_to_le16(0x0000);
481 
482 	qdf_nbuf_set_pktlen(mpdu_nbuf, sizeof(*wh_addr1));
483 
484 	dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, user_id, mpdu_nbuf);
485 	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
486 }
487 
488 /**
489  * dp_tx_mon_generate_3addr_qos_null_frm() - API to generate
490  * 3 address qosnull frame
491  *
492  * @pdev: pdev Handle
493  * @tx_ppdu_info: pointer to tx ppdu info structure
494  *
495  * Return: void
496  */
497 static void
498 dp_tx_mon_generate_3addr_qos_null_frm(struct dp_pdev *pdev,
499 				      struct dp_tx_ppdu_info *tx_ppdu_info)
500 {
501 	/* allocate and populate 3 address qos null frame */
502 	/* enqueue 802.11 payload to per user mpdu_q */
503 	struct dp_mon_pdev *mon_pdev;
504 	struct dp_mon_pdev_be *mon_pdev_be;
505 	struct dp_pdev_tx_monitor_be *tx_mon_be;
506 	struct hal_tx_status_info *tx_status_info;
507 	struct ieee80211_qosframe *wh_addr3 = NULL;
508 	qdf_nbuf_t mpdu_nbuf = NULL;
509 	uint16_t duration_le = 0;
510 	uint8_t num_users = 0;
511 
512 	/* sanity check */
513 	if (qdf_unlikely(!pdev))
514 		return;
515 
516 	mon_pdev = pdev->monitor_pdev;
517 	if (qdf_unlikely(!mon_pdev))
518 		return;
519 
520 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
521 	if (qdf_unlikely(!mon_pdev_be))
522 		return;
523 
524 	tx_mon_be = &mon_pdev_be->tx_monitor_be;
525 	tx_status_info = &tx_mon_be->data_status_info;
	/*
	 * for radiotap we allocate a new skb,
	 * so we do not need to reserve skb headroom here
	 */
530 	mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
531 				   MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
532 	if (!mpdu_nbuf)
533 		return;
534 
535 	wh_addr3 = (struct ieee80211_qosframe *)qdf_nbuf_data(mpdu_nbuf);
536 	qdf_mem_zero(wh_addr3, sizeof(struct ieee80211_qosframe));
537 	wh_addr3->i_fc[0] = 0;
538 	wh_addr3->i_fc[1] = 0;
539 	wh_addr3->i_fc[0] = (IEEE80211_FC0_VERSION_0 |
540 			     IEEE80211_FC0_TYPE_DATA |
541 			     IEEE80211_FC0_SUBTYPE_QOS_NULL);
542 
543 	duration_le = qdf_cpu_to_le16(TXMON_PPDU_COM(tx_ppdu_info, duration));
544 	wh_addr3->i_dur[1] = (duration_le & 0xFF00) >> 8;
545 	wh_addr3->i_dur[0] = (duration_le & 0xFF);
546 
547 	qdf_mem_copy(wh_addr3->i_addr1,
548 		     TXMON_STATUS_INFO(tx_status_info, addr1),
549 		     QDF_MAC_ADDR_SIZE);
550 	qdf_mem_copy(wh_addr3->i_addr2,
551 		     TXMON_STATUS_INFO(tx_status_info, addr2),
552 		     QDF_MAC_ADDR_SIZE);
553 	qdf_mem_copy(wh_addr3->i_addr3,
554 		     TXMON_STATUS_INFO(tx_status_info, addr3),
555 		     QDF_MAC_ADDR_SIZE);
556 
557 	qdf_nbuf_set_pktlen(mpdu_nbuf, sizeof(*wh_addr3));
558 	dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, num_users, mpdu_nbuf);
559 	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
560 }
561 
562 /**
563  * dp_tx_mon_generate_4addr_qos_null_frm() - API to generate
564  * 4 address qos null frame
565  *
566  * @pdev: pdev Handle
567  * @tx_ppdu_info: pointer to tx ppdu info structure
568  *
569  * Return: void
570  */
571 static void
572 dp_tx_mon_generate_4addr_qos_null_frm(struct dp_pdev *pdev,
573 				      struct dp_tx_ppdu_info *tx_ppdu_info)
574 {
575 	/* allocate and populate 4 address qos null frame */
576 	/* enqueue 802.11 payload to per user mpdu_q */
577 	struct dp_mon_pdev *mon_pdev;
578 	struct dp_mon_pdev_be *mon_pdev_be;
579 	struct dp_pdev_tx_monitor_be *tx_mon_be;
580 	struct hal_tx_status_info *tx_status_info;
581 	struct ieee80211_qosframe_addr4 *wh_addr4 = NULL;
582 	qdf_nbuf_t mpdu_nbuf = NULL;
583 	uint16_t duration_le = 0;
584 	uint8_t num_users = 0;
585 
586 	/* sanity check */
587 	if (qdf_unlikely(!pdev))
588 		return;
589 
590 	mon_pdev = pdev->monitor_pdev;
591 	if (qdf_unlikely(!mon_pdev))
592 		return;
593 
594 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
595 	if (qdf_unlikely(!mon_pdev_be))
596 		return;
597 
598 	tx_mon_be = &mon_pdev_be->tx_monitor_be;
599 	tx_status_info = &tx_mon_be->data_status_info;
	/*
	 * for radiotap we allocate a new skb,
	 * so we do not need to reserve skb headroom here
	 */
604 	mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
605 				   MAX_DUMMY_FRM_BODY, 0, 4, FALSE);
606 	if (!mpdu_nbuf)
607 		return;
608 
609 	wh_addr4 = (struct ieee80211_qosframe_addr4 *)qdf_nbuf_data(mpdu_nbuf);
610 	qdf_mem_zero(wh_addr4, sizeof(struct ieee80211_qosframe_addr4));
611 	wh_addr4->i_fc[1] = 0;
612 	wh_addr4->i_fc[0] = (IEEE80211_FC0_VERSION_0 |
613 			     IEEE80211_FC0_TYPE_DATA |
614 			     IEEE80211_FC0_SUBTYPE_QOS_NULL);
615 
616 	duration_le = qdf_cpu_to_le16(TXMON_PPDU_COM(tx_ppdu_info, duration));
617 	wh_addr4->i_dur[1] = (duration_le & 0xFF00) >> 8;
618 	wh_addr4->i_dur[0] = (duration_le & 0xFF);
619 
620 	qdf_mem_copy(wh_addr4->i_addr1,
621 		     TXMON_STATUS_INFO(tx_status_info, addr1),
622 		     QDF_MAC_ADDR_SIZE);
623 	qdf_mem_copy(wh_addr4->i_addr2,
624 		     TXMON_STATUS_INFO(tx_status_info, addr2),
625 		     QDF_MAC_ADDR_SIZE);
626 	qdf_mem_copy(wh_addr4->i_addr3,
627 		     TXMON_STATUS_INFO(tx_status_info, addr3),
628 		     QDF_MAC_ADDR_SIZE);
629 	qdf_mem_copy(wh_addr4->i_addr4,
630 		     TXMON_STATUS_INFO(tx_status_info, addr4),
631 		     QDF_MAC_ADDR_SIZE);
632 
633 	qdf_nbuf_set_pktlen(mpdu_nbuf, sizeof(*wh_addr4));
634 	dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, num_users, mpdu_nbuf);
635 	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
636 }
637 
638 #define TXMON_BA_CTRL_SZ		2
639 #define TXMON_BA_INFO_SZ(bitmap_sz)	((4 * (bitmap_sz)) + 6)
640 #define TXMON_MU_BA_ACK_FRAME_SZ(bitmap_sz)		\
641 		(sizeof(struct ieee80211_ctlframe_addr2) +\
642 		 TXMON_BA_CTRL_SZ + (bitmap_sz))
643 
644 #define TXMON_BA_ACK_FRAME_SZ(bitmap_sz)		\
645 		(sizeof(struct ieee80211_ctlframe_addr2) +\
646 		 TXMON_BA_CTRL_SZ + TXMON_BA_INFO_SZ(bitmap_sz))
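
/*
 * Sizing example (illustrative only): with bitmap_sz = 2 and assuming
 * sizeof(struct ieee80211_ctlframe_addr2) is 16 bytes (fc + dur/aid + two
 * addresses), TXMON_BA_ACK_FRAME_SZ(2) = 16 + 2 + (4 * 2 + 6) = 32 bytes of
 * buffer for the generated block ack frame.
 */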
647 
648 /**
649  * dp_tx_mon_generate_mu_block_ack_frm() - API to generate MU block ack frame
650  * @pdev: pdev Handle
651  * @tx_ppdu_info: pointer to tx ppdu info structure
652  *
653  * Return: void
654  */
655 static void
656 dp_tx_mon_generate_mu_block_ack_frm(struct dp_pdev *pdev,
657 				    struct dp_tx_ppdu_info *tx_ppdu_info)
658 {
659 	/* allocate and populate MU block ack frame */
660 	/* enqueue 802.11 payload to per user mpdu_q */
661 	struct dp_mon_pdev *mon_pdev;
662 	struct dp_mon_pdev_be *mon_pdev_be;
663 	struct dp_pdev_tx_monitor_be *tx_mon_be;
664 	struct hal_tx_status_info *tx_status_info;
665 	struct ieee80211_ctlframe_addr2 *wh_addr2 = NULL;
666 	qdf_nbuf_t mpdu_nbuf = NULL;
667 	uint8_t *frm = NULL;
668 	uint32_t ba_sz = 0;
669 	uint8_t num_users = TXMON_PPDU_HAL(tx_ppdu_info, num_users);
670 	uint8_t i = 0;
671 
672 	/* sanity check */
673 	if (qdf_unlikely(!pdev))
674 		return;
675 
676 	mon_pdev = pdev->monitor_pdev;
677 	if (qdf_unlikely(!mon_pdev))
678 		return;
679 
680 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
681 	if (qdf_unlikely(!mon_pdev_be))
682 		return;
683 
684 	tx_mon_be = &mon_pdev_be->tx_monitor_be;
685 	tx_status_info = &tx_mon_be->data_status_info;
686 	for (i = 0; i < num_users; i++)
687 		ba_sz += (4 << TXMON_BA_INFO_SZ(TXMON_PPDU_USR(tx_ppdu_info,
688 							       i,
689 							       ba_bitmap_sz)));
690 
	/*
	 * TODO: for multi sta block ack, decide whether to increase the size
	 * or copy the info at subsequent frame offsets
	 *
	 * for radiotap we allocate a new skb,
	 * so we do not need to reserve skb headroom here
	 */
698 	mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
699 				   TXMON_MU_BA_ACK_FRAME_SZ(ba_sz), 0, 4,
700 				   FALSE);
701 	if (!mpdu_nbuf) {
702 		/* TODO: update status and break */
703 		return;
704 	}
705 
706 	wh_addr2 = (struct ieee80211_ctlframe_addr2 *)qdf_nbuf_data(mpdu_nbuf);
707 	qdf_mem_zero(wh_addr2, DP_BA_ACK_FRAME_SIZE);
708 
709 	wh_addr2->i_fc[0] = 0;
710 	wh_addr2->i_fc[1] = 0;
711 	wh_addr2->i_fc[0] = (IEEE80211_FC0_VERSION_0 |
712 			     IEEE80211_FC0_TYPE_CTL |
713 			     IEEE80211_FC0_BLOCK_ACK);
714 	*(u_int16_t *)(&wh_addr2->i_aidordur) = qdf_cpu_to_le16(0x0000);
715 
716 	qdf_mem_copy(wh_addr2->i_addr2,
717 		     TXMON_STATUS_INFO(tx_status_info, addr2),
718 		     QDF_MAC_ADDR_SIZE);
719 	qdf_mem_copy(wh_addr2->i_addr1,
720 		     TXMON_STATUS_INFO(tx_status_info, addr1),
721 		     QDF_MAC_ADDR_SIZE);
722 
723 	frm = (uint8_t *)&wh_addr2[1];
724 
725 	/* BA control */
726 	*((uint16_t *)frm) = qdf_cpu_to_le16(TXMON_PPDU_USR(tx_ppdu_info,
727 							    0, ba_control));
728 	frm += 2;
729 
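	/*
	 * per user record: per STA TID info (TID and AID), BA starting
	 * sequence control, then the variable-size BA bitmap
	 */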
730 	for (i = 0; i < num_users; i++) {
731 		*((uint16_t *)frm) =
732 			qdf_cpu_to_le16((TXMON_PPDU_USR(tx_ppdu_info, i, tid) <<
733 					 DP_IEEE80211_BAR_CTL_TID_S) |
734 					(TXMON_PPDU_USR(tx_ppdu_info, i,
735 							aid) & 0x7FF));
736 		frm += 2;
		*((uint16_t *)frm) =
			qdf_cpu_to_le16(TXMON_PPDU_USR(tx_ppdu_info, i,
						       start_seq) & 0xFFF);
739 		frm += 2;
740 		qdf_mem_copy(frm,
741 			     TXMON_PPDU_USR(tx_ppdu_info, i, ba_bitmap),
742 					    4 <<
743 					    TXMON_PPDU_USR(tx_ppdu_info,
744 							   i, ba_bitmap_sz));
745 		frm += 4 << TXMON_PPDU_USR(tx_ppdu_info, i, ba_bitmap_sz);
746 	}
747 
748 	qdf_nbuf_set_pktlen(mpdu_nbuf,
749 			    (frm - (uint8_t *)qdf_nbuf_data(mpdu_nbuf)));
750 
751 	/* always enqueue to first active user */
752 	dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, 0, mpdu_nbuf);
753 	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
754 }
755 
756 /**
757  * dp_tx_mon_generate_block_ack_frm() - API to generate block ack frame
758  * @pdev: pdev Handle
759  * @tx_ppdu_info: pointer to tx ppdu info structure
760  *
761  * Return: void
762  */
763 static void
764 dp_tx_mon_generate_block_ack_frm(struct dp_pdev *pdev,
765 				 struct dp_tx_ppdu_info *tx_ppdu_info)
766 {
767 	/* allocate and populate block ack frame */
768 	/* enqueue 802.11 payload to per user mpdu_q */
769 	struct dp_mon_pdev *mon_pdev;
770 	struct dp_mon_pdev_be *mon_pdev_be;
771 	struct dp_pdev_tx_monitor_be *tx_mon_be;
772 	struct hal_tx_status_info *tx_status_info;
773 	struct ieee80211_ctlframe_addr2 *wh_addr2 = NULL;
774 	qdf_nbuf_t mpdu_nbuf = NULL;
775 	uint8_t *frm = NULL;
776 	uint8_t user_id = TXMON_PPDU(tx_ppdu_info, cur_usr_idx);
777 	uint32_t ba_bitmap_sz = TXMON_PPDU_USR(tx_ppdu_info,
778 					       user_id, ba_bitmap_sz);
779 
780 	/* sanity check */
781 	if (qdf_unlikely(!pdev))
782 		return;
783 
784 	mon_pdev = pdev->monitor_pdev;
785 	if (qdf_unlikely(!mon_pdev))
786 		return;
787 
788 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
789 	if (qdf_unlikely(!mon_pdev_be))
790 		return;
791 
792 	tx_mon_be = &mon_pdev_be->tx_monitor_be;
793 	tx_status_info = &tx_mon_be->data_status_info;
	/*
	 * TODO: for multi sta block ack, decide whether to increase the size
	 * or copy the info at subsequent frame offsets
	 *
	 * for radiotap we allocate a new skb,
	 * so we do not need to reserve skb headroom here
	 */
801 	mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
802 				   TXMON_BA_ACK_FRAME_SZ(ba_bitmap_sz),
803 				   0, 4, FALSE);
804 	if (!mpdu_nbuf) {
805 		/* TODO: update status and break */
806 		return;
807 	}
808 
809 	/*
810 	 * BA CONTROL
811 	 * fields required to construct block ack information
812 	 * B0 - BA ACK POLICY
813 	 *	0 - Normal ACK
814 	 *	1 - No ACK
815 	 * B1 - MULTI TID
816 	 * B2 - COMPRESSED BITMAP
	 *	B1-B2 combination:
818 	 *	00 - Basic block ack
819 	 *	01 - Compressed block ack
820 	 *	10 - Reserved
821 	 *	11 - Multi tid block ack
822 	 * B3-B11 - Reserved
823 	 * B12-B15 - TID info
824 	 *
825 	 * BA INFORMATION
826 	 * Per sta tid info
827 	 *	AID: 11 bits
828 	 *	ACK type: 1 bit
829 	 *	TID: 4 bits
830 	 *
831 	 * BA SEQ CTRL
832 	 *
833 	 * BA bitmap
834 	 *
835 	 */
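	/*
	 * Example (illustrative only): a compressed block ack for TID 4 with
	 * normal ack policy carries BA control
	 * (4 << 12) | (1 << 2) = 0x4004, with B2 set for the compressed
	 * bitmap.
	 */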
836 
837 	wh_addr2 = (struct ieee80211_ctlframe_addr2 *)qdf_nbuf_data(mpdu_nbuf);
838 	qdf_mem_zero(wh_addr2, DP_BA_ACK_FRAME_SIZE);
839 
840 	wh_addr2->i_fc[0] = 0;
841 	wh_addr2->i_fc[1] = 0;
842 	wh_addr2->i_fc[0] = (IEEE80211_FC0_VERSION_0 |
843 			     IEEE80211_FC0_TYPE_CTL |
844 			     IEEE80211_FC0_BLOCK_ACK);
845 	/* duration */
846 	*(u_int16_t *)(&wh_addr2->i_aidordur) = qdf_cpu_to_le16(0x0020);
847 
848 	qdf_mem_copy(wh_addr2->i_addr2,
849 		     TXMON_STATUS_INFO(tx_status_info, addr2),
850 		     QDF_MAC_ADDR_SIZE);
851 	qdf_mem_copy(wh_addr2->i_addr1,
852 		     TXMON_STATUS_INFO(tx_status_info, addr1),
853 		     QDF_MAC_ADDR_SIZE);
854 
855 	frm = (uint8_t *)&wh_addr2[1];
856 	/* BA control */
857 	*((uint16_t *)frm) = qdf_cpu_to_le16(TXMON_PPDU_USR(tx_ppdu_info,
858 							    user_id,
859 							    ba_control));
860 	frm += 2;
861 	*((uint16_t *)frm) = qdf_cpu_to_le16(TXMON_PPDU_USR(tx_ppdu_info,
862 							    user_id,
863 							    start_seq) & 0xFFF);
864 	frm += 2;
865 	qdf_mem_copy(frm,
866 		     TXMON_PPDU_USR(tx_ppdu_info, user_id, ba_bitmap),
867 		     4 << TXMON_PPDU_USR(tx_ppdu_info, user_id, ba_bitmap_sz));
868 	frm += (4 << TXMON_PPDU_USR(tx_ppdu_info, user_id, ba_bitmap_sz));
869 
870 	qdf_nbuf_set_pktlen(mpdu_nbuf,
871 			    (frm - (uint8_t *)qdf_nbuf_data(mpdu_nbuf)));
872 
873 	dp_tx_mon_enqueue_mpdu_nbuf(pdev, tx_ppdu_info, 0, mpdu_nbuf);
874 
875 	TXMON_PPDU_HAL(tx_ppdu_info, is_used) = 1;
876 }
877 
878 /**
 * dp_tx_mon_alloc_mpdu() - API to allocate an mpdu nbuf and add it to the
 * current user's mpdu queue
881  *
882  * @pdev: pdev Handle
883  * @tx_ppdu_info: pointer to tx ppdu info structure
884  *
885  * Return: void
886  */
887 static void
888 dp_tx_mon_alloc_mpdu(struct dp_pdev *pdev, struct dp_tx_ppdu_info *tx_ppdu_info)
889 {
890 	qdf_nbuf_t mpdu_nbuf = NULL;
891 	qdf_nbuf_queue_t *usr_mpdu_q = NULL;
892 	uint32_t usr_idx = 0;
893 
894 	/*
	 * payload will be added as a frag to this buffer
	 * and we allocate a new skb for the radiotap header,
	 * so a dummy buffer size is allocated here
898 	 */
899 	mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
900 				   MAX_MONITOR_HEADER, MAX_MONITOR_HEADER,
901 				   4, FALSE);
	if (!mpdu_nbuf) {
		qdf_err("%s: %d No memory to allocate mpdu_nbuf!\n",
			__func__, __LINE__);
		return;
	}
906 
907 	usr_idx = TXMON_PPDU(tx_ppdu_info, cur_usr_idx);
908 	usr_mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, usr_idx, mpdu_q);
909 
910 	qdf_nbuf_queue_add(usr_mpdu_q, mpdu_nbuf);
911 }
912 
913 /**
914  * dp_tx_mon_generate_data_frm() - API to generate data frame
915  * @pdev: pdev Handle
 * @tx_ppdu_info: pointer to tx ppdu info structure
 * @take_ref: flag indicating whether a reference is taken on the payload frag
 *
918  * Return: void
919  */
920 static void
921 dp_tx_mon_generate_data_frm(struct dp_pdev *pdev,
922 			    struct dp_tx_ppdu_info *tx_ppdu_info,
923 			    bool take_ref)
924 {
925 	struct dp_mon_pdev *mon_pdev;
926 	struct dp_mon_pdev_be *mon_pdev_be;
927 	struct dp_pdev_tx_monitor_be *tx_mon_be;
928 	struct hal_tx_status_info *tx_status_info;
929 	qdf_nbuf_t mpdu_nbuf = NULL;
930 	qdf_nbuf_queue_t *usr_mpdu_q = NULL;
931 	uint32_t usr_idx = 0;
932 
933 	/* sanity check */
934 	if (qdf_unlikely(!pdev))
935 		return;
936 
937 	mon_pdev = pdev->monitor_pdev;
938 	if (qdf_unlikely(!mon_pdev))
939 		return;
940 
941 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
942 	if (qdf_unlikely(!mon_pdev_be))
943 		return;
944 
945 	tx_mon_be = &mon_pdev_be->tx_monitor_be;
946 
947 	tx_status_info = &tx_mon_be->data_status_info;
948 	usr_idx = TXMON_PPDU(tx_ppdu_info, cur_usr_idx);
949 	usr_mpdu_q = &TXMON_PPDU_USR(tx_ppdu_info, usr_idx, mpdu_q);
950 	mpdu_nbuf = qdf_nbuf_queue_last(usr_mpdu_q);
951 
952 	if (!mpdu_nbuf)
953 		QDF_BUG(0);
954 
955 	tx_mon_be->stats.pkt_buf_processed++;
956 
	/* attach the payload frag from the status path to the mpdu nbuf */
958 	qdf_nbuf_add_frag(pdev->soc->osdev,
959 			  TXMON_STATUS_INFO(tx_status_info, buffer),
960 			  mpdu_nbuf,
961 			  TXMON_STATUS_INFO(tx_status_info, offset),
962 			  TXMON_STATUS_INFO(tx_status_info, length),
963 			  DP_MON_DATA_BUFFER_SIZE,
964 			  take_ref, TXMON_NO_BUFFER_SZ);
965 }
966 
967 /**
968  * dp_tx_mon_generate_prot_frm() - API to generate protection frame
969  * @pdev: pdev Handle
970  * @tx_ppdu_info: pointer to tx ppdu info structure
971  *
972  * Return: void
973  */
974 static void
975 dp_tx_mon_generate_prot_frm(struct dp_pdev *pdev,
976 			    struct dp_tx_ppdu_info *tx_ppdu_info)
977 {
978 	struct dp_mon_pdev *mon_pdev;
979 	struct dp_mon_pdev_be *mon_pdev_be;
980 	struct dp_pdev_tx_monitor_be *tx_mon_be;
981 	struct hal_tx_status_info *tx_status_info;
982 
983 	/* sanity check */
984 	if (qdf_unlikely(!pdev))
985 		return;
986 
987 	mon_pdev = pdev->monitor_pdev;
988 	if (qdf_unlikely(!mon_pdev))
989 		return;
990 
991 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
992 	if (qdf_unlikely(!mon_pdev_be))
993 		return;
994 
995 	tx_mon_be = &mon_pdev_be->tx_monitor_be;
996 	tx_status_info = &tx_mon_be->prot_status_info;
997 
998 	/* update medium prot type from data */
999 	TXMON_STATUS_INFO(tx_status_info, medium_prot_type) =
1000 		tx_mon_be->data_status_info.medium_prot_type;
1001 
1002 	switch (TXMON_STATUS_INFO(tx_status_info, medium_prot_type)) {
1003 	case TXMON_MEDIUM_NO_PROTECTION:
1004 	{
1005 		/* no protection frame - do nothing */
1006 		break;
1007 	}
1008 	case TXMON_MEDIUM_RTS_LEGACY:
1009 	case TXMON_MEDIUM_RTS_11AC_STATIC_BW:
1010 	case TXMON_MEDIUM_RTS_11AC_DYNAMIC_BW:
1011 	{
1012 		dp_tx_mon_generate_rts_frm(pdev, tx_ppdu_info);
1013 		break;
1014 	}
1015 	case TXMON_MEDIUM_CTS2SELF:
1016 	{
1017 		dp_tx_mon_generate_cts2self_frm(pdev, tx_ppdu_info);
1018 		break;
1019 	}
1020 	case TXMON_MEDIUM_QOS_NULL_NO_ACK_3ADDR:
1021 	{
1022 		dp_tx_mon_generate_3addr_qos_null_frm(pdev, tx_ppdu_info);
1023 		break;
1024 	}
1025 	case TXMON_MEDIUM_QOS_NULL_NO_ACK_4ADDR:
1026 	{
1027 		dp_tx_mon_generate_4addr_qos_null_frm(pdev, tx_ppdu_info);
1028 		break;
1029 	}
1030 	}
1031 }
1032 
1033 /**
1034  * dp_lite_mon_filter_subtype() - filter frames with subtype
1035  * @mon_pdev_be: mon pdev Handle
1036  * @ppdu_info: pointer to hal_tx_ppdu_info structure
1037  *
1038  * Return: QDF_STATUS
1039  */
1040 static inline QDF_STATUS
1041 dp_lite_mon_filter_subtype(struct dp_mon_pdev_be *mon_pdev_be,
1042 			   struct hal_tx_ppdu_info *ppdu_info)
1043 {
1044 	struct dp_mon_pdev *mon_pdev = &mon_pdev_be->mon_pdev;
1045 	uint16_t frame_control;
1046 	struct dp_lite_mon_tx_config *lite_mon_tx_config =
1047 			mon_pdev_be->lite_mon_tx_config;
1048 	uint16_t mgmt_filter, ctrl_filter, data_filter, type, subtype;
1049 
1050 	if (!dp_lite_mon_is_tx_enabled(mon_pdev))
1051 		return QDF_STATUS_SUCCESS;
1052 
1053 	if (!TXMON_HAL_STATUS(ppdu_info, frame_control_info_valid)) {
1054 		dp_mon_err("Queue extension is invalid");
1055 		return QDF_STATUS_E_ABORTED;
1056 	}
1057 
1058 	frame_control = TXMON_HAL_STATUS(ppdu_info, frame_control);
1059 	qdf_spin_lock_bh(&lite_mon_tx_config->lite_mon_tx_lock);
1060 	mgmt_filter = lite_mon_tx_config->tx_config.mgmt_filter[DP_MON_FRM_FILTER_MODE_FP];
1061 	ctrl_filter = lite_mon_tx_config->tx_config.ctrl_filter[DP_MON_FRM_FILTER_MODE_FP];
1062 	data_filter = lite_mon_tx_config->tx_config.data_filter[DP_MON_FRM_FILTER_MODE_FP];
1063 	qdf_spin_unlock_bh(&lite_mon_tx_config->lite_mon_tx_lock);
1064 
1065 	type = (frame_control & FRAME_CONTROL_TYPE_MASK) >>
1066 		FRAME_CONTROL_TYPE_SHIFT;
1067 	subtype = (frame_control & FRAME_CONTROL_SUBTYPE_MASK) >>
1068 		FRAME_CONTROL_SUBTYPE_SHIFT;
1069 
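	/*
	 * Illustrative example, assuming the standard 802.11 frame control
	 * layout (type in bits 2-3, subtype in bits 4-7): an RTS frame
	 * (type 1, subtype 0xB) is passed through only when bit 0xB of
	 * ctrl_filter is set, i.e. (ctrl_filter >> 0xB) & 0x1.
	 */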
1070 	switch (type) {
1071 	case FRAME_CTRL_TYPE_MGMT:
1072 		if (mgmt_filter >> subtype & 0x1)
1073 			return QDF_STATUS_SUCCESS;
1074 		else
1075 			return QDF_STATUS_E_ABORTED;
1076 	case FRAME_CTRL_TYPE_CTRL:
1077 		if (ctrl_filter >> subtype & 0x1)
1078 			return QDF_STATUS_SUCCESS;
1079 		else
1080 			return QDF_STATUS_E_ABORTED;
1081 	case FRAME_CTRL_TYPE_DATA:
1082 		/* Allowing all data frames */
1083 		return QDF_STATUS_SUCCESS;
1084 	default:
1085 		dp_mon_err("Unknown frame type in framecontrol\n");
1086 		return QDF_STATUS_E_INVAL;
1087 	}
1088 }
1089 
1090 /**
1091  * dp_tx_mon_generated_response_frm() - API to handle generated response frame
1092  * @pdev: pdev Handle
1093  * @tx_ppdu_info: pointer to tx ppdu info structure
1094  *
1095  * Return: QDF_STATUS
1096  */
1097 static QDF_STATUS
1098 dp_tx_mon_generated_response_frm(struct dp_pdev *pdev,
1099 				 struct dp_tx_ppdu_info *tx_ppdu_info)
1100 {
1101 	struct dp_mon_pdev *mon_pdev;
1102 	struct dp_mon_pdev_be *mon_pdev_be;
1103 	struct dp_pdev_tx_monitor_be *tx_mon_be;
1104 	struct hal_tx_status_info *tx_status_info;
1105 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1106 	uint8_t gen_response = 0;
1107 
1108 	/* sanity check */
1109 	if (qdf_unlikely(!pdev))
1110 		return QDF_STATUS_E_NOMEM;
1111 
1112 	mon_pdev = pdev->monitor_pdev;
1113 	if (qdf_unlikely(!mon_pdev))
1114 		return QDF_STATUS_E_NOMEM;
1115 
1116 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1117 	if (qdf_unlikely(!mon_pdev_be))
1118 		return QDF_STATUS_E_NOMEM;
1119 
1120 	tx_mon_be = &mon_pdev_be->tx_monitor_be;
1121 
1122 	tx_status_info = &tx_mon_be->data_status_info;
1123 	gen_response = TXMON_STATUS_INFO(tx_status_info, generated_response);
1124 
1125 	switch (gen_response) {
1126 	case TXMON_GEN_RESP_SELFGEN_ACK:
1127 	{
1128 		TXMON_PPDU_COM(tx_ppdu_info,
1129 			       frame_control) = ((IEEE80211_FC0_TYPE_CTL <<
1130 						  IEEE80211_FC0_TYPE_SHIFT) |
1131 						 (IEEE80211_FC0_SUBTYPE_ACK <<
1132 						  IEEE80211_FC0_SUBTYPE_SHIFT));
1133 		TXMON_PPDU_COM(tx_ppdu_info,
1134 			       frame_control_info_valid) = 1;
1135 		dp_tx_mon_generate_ack_frm(pdev, tx_ppdu_info);
1136 		break;
1137 	}
1138 	case TXMON_GEN_RESP_SELFGEN_CTS:
1139 	{
1140 		TXMON_PPDU_COM(tx_ppdu_info,
1141 			       frame_control) = ((IEEE80211_FC0_TYPE_CTL <<
1142 						  IEEE80211_FC0_TYPE_SHIFT) |
1143 						 (IEEE80211_FC0_SUBTYPE_CTS <<
1144 						  IEEE80211_FC0_SUBTYPE_SHIFT));
1145 		TXMON_PPDU_COM(tx_ppdu_info,
1146 			       frame_control_info_valid) = 1;
1147 		dp_tx_mon_generate_cts2self_frm(pdev, tx_ppdu_info);
1148 		break;
1149 	}
1150 	case TXMON_GEN_RESP_SELFGEN_BA:
1151 	{
1152 		TXMON_PPDU_COM(tx_ppdu_info,
1153 			       frame_control) = ((IEEE80211_FC0_TYPE_CTL <<
1154 						  IEEE80211_FC0_TYPE_SHIFT) |
1155 						 (IEEE80211_FC0_BLOCK_ACK <<
1156 						  IEEE80211_FC0_SUBTYPE_SHIFT));
1157 		TXMON_PPDU_COM(tx_ppdu_info,
1158 			       frame_control_info_valid) = 1;
1159 		dp_tx_mon_generate_block_ack_frm(pdev, tx_ppdu_info);
1160 		break;
1161 	}
1162 	case TXMON_GEN_RESP_SELFGEN_MBA:
1163 	{
1164 		break;
1165 	}
1166 	case TXMON_GEN_RESP_SELFGEN_CBF:
1167 	{
1168 		break;
1169 	}
1170 	case TXMON_GEN_RESP_SELFGEN_TRIG:
1171 	{
1172 		break;
1173 	}
1174 	case TXMON_GEN_RESP_SELFGEN_NDP_LMR:
1175 	{
1176 		break;
1177 	}
1178 	};
1179 
1180 	return status;
1181 }
1182 
1183 /**
 * dp_tx_mon_update_ppdu_info_status() - API to update the tx ppdu info
 * structures from the parsed TLV status; information is stored only for the
 * current PPDU processing
1186  *
1187  * @pdev: pdev Handle
1188  * @tx_data_ppdu_info: pointer to data tx ppdu info
1189  * @tx_prot_ppdu_info: pointer to protection tx ppdu info
1190  * @tx_tlv_hdr: pointer to tx_tlv_hdr
1191  * @status_frag: pointer to fragment
1192  * @tlv_status: tlv status return from hal api
1193  * @mon_desc_list_ref: tx monitor descriptor list reference
1194  *
1195  * Return: QDF_STATUS
1196  */
1197 static QDF_STATUS
1198 dp_tx_mon_update_ppdu_info_status(struct dp_pdev *pdev,
1199 				  struct dp_tx_ppdu_info *tx_data_ppdu_info,
1200 				  struct dp_tx_ppdu_info *tx_prot_ppdu_info,
1201 				  void *tx_tlv_hdr,
1202 				  qdf_frag_t status_frag,
1203 				  uint32_t tlv_status,
1204 				  struct dp_tx_mon_desc_list *mon_desc_list_ref)
1205 {
1206 	struct dp_mon_pdev *mon_pdev;
1207 	struct dp_mon_pdev_be *mon_pdev_be;
1208 	struct dp_pdev_tx_monitor_be *tx_mon_be;
1209 	struct hal_tx_status_info *tx_status_info;
1210 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1211 
1212 	/* sanity check */
1213 	if (qdf_unlikely(!pdev))
1214 		return QDF_STATUS_E_NOMEM;
1215 
1216 	mon_pdev = pdev->monitor_pdev;
1217 	if (qdf_unlikely(!mon_pdev))
1218 		return QDF_STATUS_E_NOMEM;
1219 
1220 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1221 	if (qdf_unlikely(!mon_pdev_be))
1222 		return QDF_STATUS_E_NOMEM;
1223 
1224 	tx_mon_be = &mon_pdev_be->tx_monitor_be;
1225 
1226 	switch (tlv_status) {
1227 	case HAL_MON_TX_FES_SETUP:
1228 	{
1229 		/*
1230 		 * start of initiator window
1231 		 *
1232 		 * got number of user count from  fes setup tlv
1233 		 */
1234 		break;
1235 	}
1236 	case HAL_MON_RX_RESPONSE_REQUIRED_INFO:
1237 	{
1238 		/*
1239 		 * start of Response window
1240 		 *
1241 		 * response window start and follow with
1242 		 * RTS(sta) - cts(AP)
1243 		 * BlockAckReq(sta) - BlockAck(AP)
1244 		 */
1245 		tx_status_info = &tx_mon_be->data_status_info;
1246 		if (TXMON_STATUS_INFO(tx_status_info, reception_type) ==
1247 		    TXMON_RESP_CTS)
1248 			dp_tx_mon_generate_cts2self_frm(pdev,
1249 							tx_data_ppdu_info);
1250 		break;
1251 	}
1252 	case HAL_MON_TX_FES_STATUS_START_PROT:
1253 	{
1254 		/* update tsft to local */
1255 		break;
1256 	}
1257 	case HAL_MON_TX_FES_STATUS_START_PPDU:
1258 	{
1259 		/* update tsft to local */
1260 		break;
1261 	}
1262 	case HAL_MON_TX_FES_STATUS_PROT:
1263 	{
1264 		TXMON_PPDU_HAL(tx_prot_ppdu_info, is_used) = 1;
1265 		TXMON_PPDU_COM(tx_prot_ppdu_info, ppdu_timestamp) =
1266 			TXMON_PPDU_COM(tx_prot_ppdu_info, ppdu_timestamp) << 1;
1267 
1268 		/* based on medium protection type we need to generate frame */
1269 		dp_tx_mon_generate_prot_frm(pdev, tx_prot_ppdu_info);
1270 		break;
1271 	}
1272 	case HAL_MON_RX_FRAME_BITMAP_ACK:
1273 	{
1274 		/* this comes for each user */
1275 		dp_tx_mon_generate_ack_frm(pdev, tx_data_ppdu_info);
1276 		break;
1277 	}
1278 	case HAL_MON_RX_FRAME_BITMAP_BLOCK_ACK_256:
1279 	case HAL_MON_RX_FRAME_BITMAP_BLOCK_ACK_1K:
1280 	{
1281 		/*
1282 		 * this comes for each user
1283 		 * BlockAck is not same as ACK, single frame can hold
1284 		 * multiple BlockAck info
1285 		 */
1286 		tx_status_info = &tx_mon_be->data_status_info;
1287 
1288 		if (TXMON_PPDU_HAL(tx_data_ppdu_info, num_users))
1289 			dp_tx_mon_generate_block_ack_frm(pdev,
1290 							 tx_data_ppdu_info);
1291 		else
1292 			dp_tx_mon_generate_mu_block_ack_frm(pdev,
1293 							    tx_data_ppdu_info);
1294 
1295 		break;
1296 	}
1297 	case HAL_MON_TX_MPDU_START:
1298 	{
1299 		dp_tx_mon_alloc_mpdu(pdev, tx_data_ppdu_info);
1300 		TXMON_PPDU_HAL(tx_data_ppdu_info, is_used) = 1;
1301 		break;
1302 	}
1303 	case HAL_MON_TX_MSDU_START:
1304 	{
1305 		break;
1306 	}
1307 	case HAL_MON_TX_DATA:
1308 	{
1309 		TXMON_PPDU_HAL(tx_data_ppdu_info, is_used) = 1;
1310 		dp_tx_mon_generate_data_frm(pdev, tx_data_ppdu_info, true);
1311 		break;
1312 	}
1313 	case HAL_MON_TX_BUFFER_ADDR:
1314 	{
1315 		struct hal_mon_packet_info *packet_info = NULL;
1316 		struct dp_mon_desc *mon_desc = NULL;
1317 		qdf_frag_t packet_buffer = NULL;
1318 		uint32_t end_offset = 0;
1319 
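		/*
		 * sw_cookie in the packet info points back to the dp_mon_desc
		 * owning the payload buffer: unmap it, move the descriptor to
		 * the free list and hand the payload frag over through
		 * tx_status_info so dp_tx_mon_generate_data_frm() can attach
		 * it to the current mpdu as a frag.
		 */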
1320 		tx_status_info = &tx_mon_be->data_status_info;
1321 		/* update buffer from packet info */
1322 		packet_info = &TXMON_PPDU_HAL(tx_data_ppdu_info, packet_info);
1323 		mon_desc = (struct dp_mon_desc *)(uintptr_t)packet_info->sw_cookie;
1324 
1325 		qdf_assert_always(mon_desc);
1326 
1327 		if (mon_desc->magic != DP_MON_DESC_MAGIC)
1328 			qdf_assert_always(0);
1329 
1330 		qdf_assert_always(mon_desc->buf_addr);
1331 		tx_mon_be->stats.pkt_buf_recv++;
1332 
1333 		if (!mon_desc->unmapped) {
1334 			qdf_mem_unmap_page(pdev->soc->osdev,
1335 					   (qdf_dma_addr_t)mon_desc->paddr,
1336 					   DP_MON_DATA_BUFFER_SIZE,
1337 					   QDF_DMA_FROM_DEVICE);
1338 			mon_desc->unmapped = 1;
1339 		}
1340 
1341 		packet_buffer = mon_desc->buf_addr;
1342 		mon_desc->buf_addr = NULL;
1343 
1344 		/* increment reap count */
1345 		mon_desc_list_ref->tx_mon_reap_cnt++;
1346 
1347 		/* add the mon_desc to free list */
1348 		dp_mon_add_to_free_desc_list(&mon_desc_list_ref->desc_list,
1349 					     &mon_desc_list_ref->tail,
1350 					     mon_desc);
1351 
1352 		TXMON_STATUS_INFO(tx_status_info, buffer) = packet_buffer;
1353 		TXMON_STATUS_INFO(tx_status_info, offset) = end_offset;
1354 		TXMON_STATUS_INFO(tx_status_info,
1355 				  length) = packet_info->dma_length;
1356 
1357 		TXMON_PPDU_HAL(tx_data_ppdu_info, is_used) = 1;
1358 		dp_tx_mon_generate_data_frm(pdev, tx_data_ppdu_info, false);
1359 		break;
1360 	}
1361 	case HAL_MON_TX_FES_STATUS_END:
1362 	{
1363 		break;
1364 	}
1365 	case HAL_MON_RESPONSE_END_STATUS_INFO:
1366 	{
1367 		dp_tx_mon_generated_response_frm(pdev, tx_data_ppdu_info);
1368 		status = dp_lite_mon_filter_subtype(mon_pdev_be,
1369 						    &tx_data_ppdu_info->hal_txmon);
1370 		break;
1371 	}
1372 	case HAL_MON_TX_FES_STATUS_START:
1373 	{
1374 		/* update the medium protection type */
1375 		break;
1376 	}
1377 	case HAL_MON_TX_QUEUE_EXTENSION:
1378 	{
1379 		status = dp_lite_mon_filter_subtype(mon_pdev_be,
1380 						    &tx_data_ppdu_info->hal_txmon);
1381 		break;
1382 	}
1383 	default:
1384 	{
1385 		/* return or break in default case */
1386 		break;
1387 	}
1388 	};
1389 
1390 	return status;
1391 }
1392 
1393 /*
 * dp_tx_mon_process_tlv_2_0() - API to parse a PPDU worth of information
 * @pdev: DP_PDEV handle
1396  * @mon_desc_list_ref: tx monitor descriptor list reference
1397  *
1398  * Return: status
1399  */
1400 QDF_STATUS
1401 dp_tx_mon_process_tlv_2_0(struct dp_pdev *pdev,
1402 			  struct dp_tx_mon_desc_list *mon_desc_list_ref)
1403 {
1404 	struct dp_mon_pdev *mon_pdev;
1405 	struct dp_mon_pdev_be *mon_pdev_be;
1406 	struct dp_pdev_tx_monitor_be *tx_mon_be;
1407 	struct dp_tx_ppdu_info *tx_prot_ppdu_info = NULL;
1408 	struct dp_tx_ppdu_info *tx_data_ppdu_info = NULL;
1409 	struct hal_tx_status_info *tx_status_prot;
1410 	struct hal_tx_status_info *tx_status_data;
1411 	qdf_frag_t status_frag = NULL;
1412 	uint32_t end_offset = 0;
1413 	uint32_t tlv_status;
1414 	uint32_t status = QDF_STATUS_SUCCESS;
1415 	uint8_t *tx_tlv;
1416 	uint8_t *tx_tlv_start;
1417 	uint8_t num_users = 0;
1418 	uint8_t cur_frag_q_idx;
1419 	bool schedule_wrq = false;
1420 
1421 	/* sanity check */
1422 	if (qdf_unlikely(!pdev))
1423 		return QDF_STATUS_E_NOMEM;
1424 
1425 	mon_pdev = pdev->monitor_pdev;
1426 	if (qdf_unlikely(!mon_pdev))
1427 		return QDF_STATUS_E_NOMEM;
1428 
1429 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1430 	if (qdf_unlikely(!mon_pdev_be))
1431 		return QDF_STATUS_E_NOMEM;
1432 
1433 	tx_mon_be = &mon_pdev_be->tx_monitor_be;
1434 	cur_frag_q_idx = tx_mon_be->cur_frag_q_idx;
1435 
1436 	tx_status_prot = &tx_mon_be->prot_status_info;
1437 	tx_status_data = &tx_mon_be->data_status_info;
1438 
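	/*
	 * Each PPDU is tracked with two ppdu_info records: one for the
	 * protection exchange (RTS/CTS etc.) and one for the data frames.
	 * Both are populated from the same status TLV stream below and
	 * queued independently for post processing.
	 */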
1439 	tx_prot_ppdu_info = dp_tx_mon_get_ppdu_info(pdev, TX_PROT_PPDU_INFO,
1440 						    1, tx_mon_be->be_ppdu_id);
1441 
1442 	if (!tx_prot_ppdu_info) {
		dp_mon_info("tx prot ppdu info allocation failed!");
1444 		return QDF_STATUS_E_NOMEM;
1445 	}
1446 
1447 	status_frag = tx_mon_be->frag_q_vec[cur_frag_q_idx].frag_buf;
1448 	end_offset = tx_mon_be->frag_q_vec[cur_frag_q_idx].end_offset;
1449 	tx_tlv = status_frag;
1450 	dp_mon_debug("last_frag_q_idx: %d status_frag:%pK",
1451 		     tx_mon_be->last_frag_q_idx, status_frag);
1452 
1453 	/* get number of user from tlv window */
1454 	tlv_status = hal_txmon_status_get_num_users(pdev->soc->hal_soc,
1455 						    tx_tlv, &num_users);
1456 	if (tlv_status == HAL_MON_TX_STATUS_PPDU_NOT_DONE || !num_users) {
1457 		dp_mon_err("window open with tlv_tag[0x%x] num_users[%d]!\n",
1458 			   hal_tx_status_get_tlv_tag(tx_tlv), num_users);
1459 		return QDF_STATUS_E_INVAL;
1460 	}
1461 
1462 	/* allocate tx_data_ppdu_info based on num_users */
1463 	tx_data_ppdu_info = dp_tx_mon_get_ppdu_info(pdev, TX_DATA_PPDU_INFO,
1464 						    num_users,
1465 						    tx_mon_be->be_ppdu_id);
1466 	if (!tx_data_ppdu_info) {
		dp_mon_info("tx data ppdu info allocation failed!");
1468 		return QDF_STATUS_E_NOMEM;
1469 	}
1470 
1471 	/* iterate status buffer queue */
1472 	while (tx_mon_be->cur_frag_q_idx < tx_mon_be->last_frag_q_idx &&
1473 	       status == QDF_STATUS_SUCCESS) {
1474 		/* get status buffer from frag_q_vec */
1475 		status_frag = tx_mon_be->frag_q_vec[cur_frag_q_idx].frag_buf;
1476 		end_offset = tx_mon_be->frag_q_vec[cur_frag_q_idx].end_offset;
1477 		if (qdf_unlikely(!status_frag)) {
1478 			dp_mon_err("status frag is NULL\n");
1479 			QDF_BUG(0);
1480 		}
1481 
1482 		tx_tlv = status_frag;
1483 		tx_tlv_start = tx_tlv;
1484 		/*
1485 		 * parse each status buffer and populate the information to
1486 		 * dp_tx_ppdu_info
1487 		 */
1488 		do {
1489 			tlv_status = hal_txmon_status_parse_tlv(
1490 					pdev->soc->hal_soc,
1491 					&tx_data_ppdu_info->hal_txmon,
1492 					&tx_prot_ppdu_info->hal_txmon,
1493 					tx_status_data,
1494 					tx_status_prot,
1495 					tx_tlv, status_frag);
1496 
1497 			status =
1498 				dp_tx_mon_update_ppdu_info_status(
1499 							pdev,
1500 							tx_data_ppdu_info,
1501 							tx_prot_ppdu_info,
1502 							tx_tlv,
1503 							status_frag,
1504 							tlv_status,
1505 							mon_desc_list_ref);
1506 
1507 			if (status != QDF_STATUS_SUCCESS) {
1508 				dp_tx_mon_status_free_packet_buf(pdev,
1509 							status_frag,
1510 							end_offset,
1511 							mon_desc_list_ref);
1512 				break;
1513 			}
1514 
1515 			/* need api definition for hal_tx_status_get_next_tlv */
1516 			tx_tlv = hal_tx_status_get_next_tlv(tx_tlv);
1517 			if ((tx_tlv - tx_tlv_start) >= end_offset)
1518 				break;
1519 		} while ((tx_tlv - tx_tlv_start) < end_offset);
1520 
1521 		/*
		 * free status buffer after parsing
		 * (if status_frag is mapped to an mpdu, make sure that is
		 * handled before the free below)
1524 		 */
1525 		tx_mon_be->stats.status_buf_free++;
1526 		qdf_frag_free(status_frag);
1527 		tx_mon_be->frag_q_vec[cur_frag_q_idx].frag_buf = NULL;
1528 		tx_mon_be->frag_q_vec[cur_frag_q_idx].end_offset = 0;
1529 		cur_frag_q_idx = ++tx_mon_be->cur_frag_q_idx;
1530 	}
1531 
1532 	/* clear the unreleased frag array */
1533 	dp_tx_mon_status_queue_free(pdev, tx_mon_be, mon_desc_list_ref);
1534 
1535 	if (TXMON_PPDU_HAL(tx_prot_ppdu_info, is_used)) {
1536 		if (qdf_unlikely(!TXMON_PPDU_COM(tx_prot_ppdu_info,
1537 						 chan_num))) {
1538 			/* update channel number, if not fetched properly */
1539 			TXMON_PPDU_COM(tx_prot_ppdu_info,
1540 				       chan_num) = mon_pdev->mon_chan_num;
1541 		}
1542 
1543 		if (qdf_unlikely(!TXMON_PPDU_COM(tx_prot_ppdu_info,
1544 						 chan_freq))) {
1545 			/* update channel frequency, if not fetched properly */
1546 			TXMON_PPDU_COM(tx_prot_ppdu_info,
1547 				       chan_freq) = mon_pdev->mon_chan_freq;
1548 		}
1549 
1550 		/*
1551 		 * add dp_tx_ppdu_info to pdev queue
1552 		 * for post processing
1553 		 *
1554 		 * TODO: add a threshold check and drop the ppdu info
1555 		 */
1556 		qdf_spin_lock_bh(&tx_mon_be->tx_mon_list_lock);
1557 		tx_mon_be->last_prot_ppdu_info =
1558 					tx_mon_be->tx_prot_ppdu_info;
1559 		STAILQ_INSERT_TAIL(&tx_mon_be->tx_ppdu_info_queue,
1560 				   tx_prot_ppdu_info,
1561 				   tx_ppdu_info_queue_elem);
1562 		tx_mon_be->tx_ppdu_info_list_depth++;
1563 
1564 		tx_mon_be->tx_prot_ppdu_info = NULL;
1565 		qdf_spin_unlock_bh(&tx_mon_be->tx_mon_list_lock);
1566 		schedule_wrq = true;
1567 	} else {
1568 		dp_tx_mon_free_ppdu_info(tx_prot_ppdu_info, tx_mon_be);
1569 		tx_mon_be->tx_prot_ppdu_info = NULL;
1570 		tx_prot_ppdu_info = NULL;
1571 	}
1572 
1573 	if (TXMON_PPDU_HAL(tx_data_ppdu_info, is_used)) {
1574 		if (qdf_unlikely(!TXMON_PPDU_COM(tx_data_ppdu_info,
1575 						 chan_num))) {
1576 			/* update channel number, if not fetched properly */
1577 			TXMON_PPDU_COM(tx_data_ppdu_info,
1578 				       chan_num) = mon_pdev->mon_chan_num;
1579 		}
1580 
1581 		if (qdf_unlikely(!TXMON_PPDU_COM(tx_data_ppdu_info,
1582 						 chan_freq))) {
1583 			/* update channel frequency, if not fetched properly */
1584 			TXMON_PPDU_COM(tx_data_ppdu_info,
1585 				       chan_freq) = mon_pdev->mon_chan_freq;
1586 		}
1587 
1588 		/*
1589 		 * add dp_tx_ppdu_info to pdev queue
1590 		 * for post processing
1591 		 *
1592 		 * TODO: add a threshold check and drop the ppdu info
1593 		 */
1594 		qdf_spin_lock_bh(&tx_mon_be->tx_mon_list_lock);
1595 		tx_mon_be->last_data_ppdu_info =
1596 					tx_mon_be->tx_data_ppdu_info;
1597 		STAILQ_INSERT_TAIL(&tx_mon_be->tx_ppdu_info_queue,
1598 				   tx_data_ppdu_info,
1599 				   tx_ppdu_info_queue_elem);
1600 		tx_mon_be->tx_ppdu_info_list_depth++;
1601 
1602 		tx_mon_be->tx_data_ppdu_info = NULL;
1603 		qdf_spin_unlock_bh(&tx_mon_be->tx_mon_list_lock);
1604 		schedule_wrq = true;
1605 	} else {
1606 		dp_tx_mon_free_ppdu_info(tx_data_ppdu_info, tx_mon_be);
1607 		tx_mon_be->tx_data_ppdu_info = NULL;
1608 		tx_data_ppdu_info = NULL;
1609 	}
1610 
1611 	if (schedule_wrq)
1612 		qdf_queue_work(NULL, tx_mon_be->post_ppdu_workqueue,
1613 			       &tx_mon_be->post_ppdu_work);
1614 
1615 	return QDF_STATUS_SUCCESS;
1616 }
1617 
1618 /**
1619  * dp_tx_mon_update_end_reason() - API to update end reason
1620  *
1621  * @mon_pdev - DP_MON_PDEV handle
1622  * @ppdu_id - ppdu_id
 * @end_reason - monitor destination descriptor end reason
1624  *
1625  * Return: void
1626  */
1627 void dp_tx_mon_update_end_reason(struct dp_mon_pdev *mon_pdev,
1628 				 int ppdu_id, int end_reason)
1629 {
1630 	struct dp_mon_pdev_be *mon_pdev_be;
1631 	struct dp_pdev_tx_monitor_be *tx_mon_be;
1632 
1633 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1634 	if (qdf_unlikely(!mon_pdev_be))
1635 		return;
1636 
1637 	tx_mon_be = &mon_pdev_be->tx_monitor_be;
1638 
1639 	tx_mon_be->be_end_reason_bitmap |= (1 << end_reason);
1640 }
1641 
1642 /*
 * dp_tx_mon_process_status_tlv() - API to process status TLVs
1644  * invoked from interrupt handler
1645  *
1646  * @soc - DP_SOC handle
1647  * @pdev - DP_PDEV handle
1648  * @mon_ring_desc - descriptor status info
 * @status_frag - status buffer frag address
1650  * @end_offset - end offset of buffer that has valid buffer
1651  * @mon_desc_list_ref: tx monitor descriptor list reference
1652  *
1653  * Return: QDF_STATUS
1654  */
1655 QDF_STATUS
1656 dp_tx_mon_process_status_tlv(struct dp_soc *soc,
1657 			     struct dp_pdev *pdev,
1658 			     struct hal_mon_desc *mon_ring_desc,
1659 			     qdf_frag_t status_frag,
1660 			     uint32_t end_offset,
1661 			     struct dp_tx_mon_desc_list *mon_desc_list_ref)
1662 {
1663 	struct dp_mon_pdev *mon_pdev;
1664 	struct dp_mon_pdev_be *mon_pdev_be;
	struct dp_pdev_tx_monitor_be *tx_mon_be = NULL;
1666 	uint8_t last_frag_q_idx = 0;
1667 
1668 	/* sanity check */
1669 	if (qdf_unlikely(!pdev))
1670 		goto free_status_buffer;
1671 
1672 	mon_pdev = pdev->monitor_pdev;
1673 	if (qdf_unlikely(!mon_pdev))
1674 		goto free_status_buffer;
1675 
1676 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1677 	if (qdf_unlikely(!mon_pdev_be))
1678 		goto free_status_buffer;
1679 
1680 	tx_mon_be = &mon_pdev_be->tx_monitor_be;
1681 
1682 	if (qdf_unlikely(tx_mon_be->last_frag_q_idx >
1683 			 MAX_STATUS_BUFFER_IN_PPDU)) {
		dp_mon_err("status frag queue for a ppdu[%d] exceeds %d\n",
1685 			   tx_mon_be->be_ppdu_id,
1686 			   MAX_STATUS_BUFFER_IN_PPDU);
1687 		dp_tx_mon_status_queue_free(pdev, tx_mon_be, mon_desc_list_ref);
1688 		goto free_status_buffer;
1689 	}
1690 
1691 	if (tx_mon_be->mode == TX_MON_BE_DISABLE &&
1692 	    !dp_lite_mon_is_tx_enabled(mon_pdev)) {
1693 		dp_tx_mon_status_queue_free(pdev, tx_mon_be,
1694 					    mon_desc_list_ref);
1695 		goto free_status_buffer;
1696 	}
1697 
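	/*
	 * A new ppdu_id while fragments of the previous PPDU are still queued
	 * means the end of that PPDU was never processed; drop its status
	 * queue based on the recorded end reason (flush/truncation) before
	 * accumulating the new PPDU.
	 */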
1698 	if (tx_mon_be->be_ppdu_id != mon_ring_desc->ppdu_id &&
1699 	    tx_mon_be->last_frag_q_idx) {
1700 		if (tx_mon_be->be_end_reason_bitmap &
1701 		    (1 << HAL_MON_FLUSH_DETECTED)) {
1702 			tx_mon_be->stats.ppdu_info_drop_flush++;
1703 			dp_tx_mon_status_queue_free(pdev, tx_mon_be,
1704 						    mon_desc_list_ref);
1705 		} else if (tx_mon_be->be_end_reason_bitmap &
1706 			   (1 << HAL_MON_PPDU_TRUNCATED)) {
1707 			tx_mon_be->stats.ppdu_info_drop_trunc++;
1708 			dp_tx_mon_status_queue_free(pdev, tx_mon_be,
1709 						    mon_desc_list_ref);
1710 		} else {
1711 			dp_mon_err("End of ppdu not seen PID:%d cur_pid:%d idx:%d",
1712 				   tx_mon_be->be_ppdu_id,
1713 				   mon_ring_desc->ppdu_id,
1714 				   tx_mon_be->last_frag_q_idx);
1715 			/* schedule ppdu worth information */
1716 			dp_tx_mon_status_queue_free(pdev, tx_mon_be,
1717 						    mon_desc_list_ref);
1718 		}
1719 
1720 		/* reset end reason bitmap */
1721 		tx_mon_be->be_end_reason_bitmap = 0;
1722 		tx_mon_be->last_frag_q_idx = 0;
1723 		tx_mon_be->cur_frag_q_idx = 0;
1724 	}
1725 
1726 	tx_mon_be->be_ppdu_id = mon_ring_desc->ppdu_id;
1727 	tx_mon_be->be_end_reason_bitmap |= (1 << mon_ring_desc->end_reason);
1728 
1729 	last_frag_q_idx = tx_mon_be->last_frag_q_idx;
1730 
1731 	tx_mon_be->frag_q_vec[last_frag_q_idx].frag_buf = status_frag;
1732 	tx_mon_be->frag_q_vec[last_frag_q_idx].end_offset = end_offset;
1733 	tx_mon_be->last_frag_q_idx++;
1734 
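	/*
	 * fragments are only accumulated here; the PPDU worth of TLVs is
	 * parsed once HAL_MON_END_OF_PPDU is reported for this ppdu_id
	 */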
1735 	if (mon_ring_desc->end_reason == HAL_MON_END_OF_PPDU) {
1736 		/* drop processing of tlv, if ppdu info list exceed threshold */
1737 		if ((tx_mon_be->defer_ppdu_info_list_depth +
1738 		     tx_mon_be->tx_ppdu_info_list_depth) >
1739 		    MAX_PPDU_INFO_LIST_DEPTH) {
1740 			tx_mon_be->stats.ppdu_info_drop_th++;
1741 			dp_tx_mon_status_queue_free(pdev, tx_mon_be,
1742 						    mon_desc_list_ref);
1743 			return QDF_STATUS_E_PENDING;
1744 		}
1745 
1746 		if (dp_tx_mon_process_tlv_2_0(pdev,
1747 					      mon_desc_list_ref) !=
1748 		    QDF_STATUS_SUCCESS)
1749 			dp_tx_mon_status_queue_free(pdev, tx_mon_be,
1750 						    mon_desc_list_ref);
1751 	}
1752 
1753 	return QDF_STATUS_SUCCESS;
1754 
1755 free_status_buffer:
1756 	dp_tx_mon_status_free_packet_buf(pdev, status_frag, end_offset,
1757 					 mon_desc_list_ref);
	if (qdf_likely(tx_mon_be))
		tx_mon_be->stats.status_buf_free++;
1759 	qdf_frag_free(status_frag);
1760 
1761 	return QDF_STATUS_E_NOMEM;
1762 }
1763 
1764 #else
1765 
1766 /**
 * dp_tx_mon_process_status_tlv() - API to process status TLVs
1768  * invoked from interrupt handler
1769  *
1770  * @soc - DP_SOC handle
1771  * @pdev - DP_PDEV handle
1772  * @mon_ring_desc - descriptor status info
 * @status_frag - status buffer frag address
1774  * @end_offset - end offset of buffer that has valid buffer
1775  * @mon_desc_list_ref: tx monitor descriptor list reference
1776  *
1777  * Return: QDF_STATUS
1778  */
1779 QDF_STATUS
1780 dp_tx_mon_process_status_tlv(struct dp_soc *soc,
1781 			     struct dp_pdev *pdev,
1782 			     struct hal_mon_desc *mon_ring_desc,
1783 			     qdf_frag_t status_frag,
1784 			     uint32_t end_offset,
1785 			     struct dp_tx_mon_desc_list *mon_desc_list_ref)
1786 {
1787 	struct dp_mon_pdev *mon_pdev;
1788 	struct dp_mon_pdev_be *mon_pdev_be;
1789 	struct dp_pdev_tx_monitor_be *tx_mon_be;
1790 
1791 	/* sanity check */
1792 	if (qdf_unlikely(!pdev))
1793 		return QDF_STATUS_E_INVAL;
1794 
1795 	mon_pdev = pdev->monitor_pdev;
1796 	if (qdf_unlikely(!mon_pdev))
1797 		return QDF_STATUS_E_INVAL;
1798 
1799 	mon_pdev_be = dp_get_be_mon_pdev_from_dp_mon_pdev(mon_pdev);
1800 	if (qdf_unlikely(!mon_pdev_be))
1801 		return QDF_STATUS_E_INVAL;
1802 
1803 	tx_mon_be = &mon_pdev_be->tx_monitor_be;
1804 
1805 	dp_tx_mon_status_free_packet_buf(pdev, status_frag, end_offset,
1806 					 mon_desc_list_ref);
1807 	tx_mon_be->stats.status_buf_free++;
1808 	qdf_frag_free(status_frag);
1809 
1810 	return QDF_STATUS_E_INVAL;
1811 }
1812 
1813 /**
1814  * dp_tx_mon_update_end_reason() - API to update end reason
1815  *
1816  * @mon_pdev - DP_MON_PDEV handle
1817  * @ppdu_id - ppdu_id
 * @end_reason - monitor destination descriptor end reason
1819  *
1820  * Return: void
1821  */
1822 void dp_tx_mon_update_end_reason(struct dp_mon_pdev *mon_pdev,
1823 				 int ppdu_id, int end_reason)
1824 {
1825 }
1826 #endif
1827