xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/monitor/dp_rx_mon.h (revision 2f4b444fb7e689b83a4ab0e7b3b38f0bf4def8e0)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _DP_RX_MON_H_
20 #define _DP_RX_MON_H_
21 
22 #include "dp_mon.h"
23 
24 /*
25  * MON_BUF_MIN_ENTRIES macro defines minimum number of network buffers
26  * to be refilled in the RXDMA monitor buffer ring at init, remaining
27  * buffers are replenished at the time of monitor vap creation
28  */
29 #define MON_BUF_MIN_ENTRIES 64
30 
31 /*
32  * The below macro defines the maximum number of ring entries that would
33  * be processed in a single instance when processing each of the non-monitoring
34  * RXDMA2SW ring.
35  */
36 #define MON_DROP_REAP_LIMIT 64
37 
38 /*
39  * The maximum headroom reserved for monitor destination buffer to
40  * accomodate radiotap header and protocol flow tag
41  */
42 #ifdef DP_RX_MON_MEM_FRAG
43 /*
44  *  -------------------------------------------------
45  * |       Protocol & Flow TAG      | Radiotap header|
46  * |                                |  Length(128 B) |
47  * |  ((4* QDF_NBUF_MAX_FRAGS) * 2) |                |
48  *  -------------------------------------------------
49  */
50 #define DP_RX_MON_MAX_RADIO_TAP_HDR (128)
51 #define DP_RX_MON_PF_TAG_LEN_PER_FRAG (4)
52 #define DP_RX_MON_TOT_PF_TAG_LEN \
53 	((DP_RX_MON_PF_TAG_LEN_PER_FRAG) * (QDF_NBUF_MAX_FRAGS))
54 #define DP_RX_MON_MAX_MONITOR_HEADER \
55 	((DP_RX_MON_TOT_PF_TAG_LEN * 2) + (DP_RX_MON_MAX_RADIO_TAP_HDR))
56 #endif
57 
58 /* The maximum buffer length allocated for radiotap for monitor status buffer */
59 #define MAX_MONITOR_HEADER (512)
60 
61 /* l2 header pad byte in case of Raw frame is Zero and 2 in non raw */
62 #define DP_RX_MON_RAW_L2_HDR_PAD_BYTE (0)
63 #define DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE (2)
64 
65 #define dp_rx_mon_dest_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX_MON_DEST, params)
66 #define dp_rx_mon_dest_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX_MON_DEST, params)
67 #define dp_rx_mon_dest_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX_MON_DEST, params)
68 #define dp_rx_mon_dest_info(params...) \
69 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_MON_DEST, ## params)
70 #define dp_rx_mon_dest_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX_MON_DEST, params)
71 
72 /**
73  * enum dp_mon_reap_status - monitor status ring ppdu status
74  *
75  * @DP_MON_STATUS_NO_DMA - DMA not done for status ring entry
 * @DP_MON_STATUS_MATCH - status and dest ppdu id matches
77  * @DP_MON_STATUS_LAG - status ppdu id is lagging
78  * @DP_MON_STATUS_LEAD - status ppdu id is leading
79  * @DP_MON_STATUS_REPLENISH - status ring entry is NULL
80  */
81 enum dp_mon_reap_status {
82 	DP_MON_STATUS_NO_DMA,
83 	DP_MON_STATUS_MATCH,
84 	DP_MON_STATUS_LAG,
85 	DP_MON_STATUS_LEAD,
86 	DP_MON_STATUS_REPLENISH
87 };
88 
89 /*
90  * dp_rx_mon_status_process() - Process monitor status ring and
91  *			TLV in status ring.
92  *
93  * @soc: core txrx main context
94  * @int_ctx: interrupt context
95  * @mac_id: mac_id which is one of 3 mac_ids
96  * @quota: No. of ring entry that can be serviced in one shot.
 *
98  * Return: uint32_t: No. of ring entry that is processed.
99  */
100 uint32_t
101 dp_rx_mon_status_process(struct dp_soc *soc, struct dp_intr *int_ctx,
102 			 uint32_t mac_id, uint32_t quota);
103 
/**
 * dp_rx_mon_dest_process() - Brain of the Rx processing functionality
 *	Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @soc: core txrx main context
 * @int_ctx: interrupt context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the core of Rx functionality. This is
 * expected to handle only non-error frames.
 *
 * Return: none
 */
117 QDF_STATUS dp_rx_pdev_mon_desc_pool_alloc(struct dp_pdev *pdev);
118 QDF_STATUS dp_rx_pdev_mon_buffers_alloc(struct dp_pdev *pdev);
119 void dp_rx_pdev_mon_buffers_free(struct dp_pdev *pdev);
120 void dp_rx_pdev_mon_desc_pool_init(struct dp_pdev *pdev);
121 void dp_rx_pdev_mon_desc_pool_deinit(struct dp_pdev *pdev);
122 void dp_rx_pdev_mon_desc_pool_free(struct dp_pdev *pdev);
123 #ifdef QCA_MONITOR_PKT_SUPPORT
124 void dp_rx_mon_dest_process(struct dp_soc *soc, struct dp_intr *int_ctx,
125 			    uint32_t mac_id, uint32_t quota);
126 
127 void dp_rx_pdev_mon_buf_buffers_free(struct dp_pdev *pdev, uint32_t mac_id);
128 QDF_STATUS
129 dp_rx_pdev_mon_buf_buffers_alloc(struct dp_pdev *pdev, uint32_t mac_id,
130 				 bool delayed_replenish);
131 QDF_STATUS
132 dp_rx_pdev_mon_buf_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_id);
133 void
134 dp_rx_pdev_mon_buf_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_id);
135 #else
136 static inline
137 void dp_rx_mon_dest_process(struct dp_soc *soc, struct dp_intr *int_ctx,
138 			    uint32_t mac_id, uint32_t quota)
139 {
140 }
141 
142 static inline
143 void dp_rx_pdev_mon_buf_buffers_free(struct dp_pdev *pdev, uint32_t mac_id)
144 {
145 }
146 
147 static inline QDF_STATUS
148 dp_rx_pdev_mon_buf_buffers_alloc(struct dp_pdev *pdev, uint32_t mac_id,
149 				 bool delayed_replenish)
150 {
151 	return QDF_STATUS_SUCCESS;
152 }
153 
154 static inline QDF_STATUS
155 dp_rx_pdev_mon_buf_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_id)
156 {
157 	return QDF_STATUS_SUCCESS;
158 }
159 
160 static inline void
161 dp_rx_pdev_mon_buf_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_id)
162 {
163 }
164 #endif
165 
166 QDF_STATUS dp_rx_pdev_mon_status_buffers_alloc(struct dp_pdev *pdev,
167 					       uint32_t mac_id);
168 QDF_STATUS dp_rx_pdev_mon_status_desc_pool_alloc(struct dp_pdev *pdev,
169 						 uint32_t mac_id);
170 void dp_rx_pdev_mon_status_desc_pool_init(struct dp_pdev *pdev,
171 					  uint32_t mac_id);
172 void dp_rx_pdev_mon_status_desc_pool_deinit(struct dp_pdev *pdev,
173 					    uint32_t mac_id);
174 void dp_rx_pdev_mon_status_desc_pool_free(struct dp_pdev *pdev,
175 					  uint32_t mac_id);
176 void dp_rx_pdev_mon_status_buffers_free(struct dp_pdev *pdev, uint32_t mac_id);
177 
178 /*
179  * dp_rx_populate_cbf_hdr - Send CBF frame with htt header
180  * @soc: Datapath soc handle
181  * @mac_id: Datapath mac id
182  * @event: WDI event
 * @data: mpdu buffer
 * @msdu_timestamp: time stamp
185  *
186  * Return: QDF_STATUS
187  */
188 QDF_STATUS dp_rx_populate_cbf_hdr(struct dp_soc *soc,
189 				  uint32_t mac_id, uint32_t event,
190 				  qdf_nbuf_t data, uint32_t msdu_timestamp);
191 
192 /**
193  * dp_rx_mon_handle_status_buf_done () - Handle DMA not done case for
194  * monitor status ring
195  *
196  * @pdev: DP pdev handle
197  * @mon_status_srng: Monitor status SRNG
198  *
199  * Return: enum dp_mon_reap_status
200  */
201 enum dp_mon_reap_status
202 dp_rx_mon_handle_status_buf_done(struct dp_pdev *pdev,
203 				 void *mon_status_srng);
204 
205 #ifdef QCA_SUPPORT_FULL_MON
206 
207 /**
208  * dp_full_mon_attach() - Full monitor mode attach
 * This API initialises full monitor mode resources
210  *
211  * @pdev: dp pdev object
212  *
213  * Return: void
214  *
215  */
216 void dp_full_mon_attach(struct dp_pdev *pdev);
217 
218 /**
 * dp_full_mon_detach() - Full monitor mode detach
 * This API deinitialises full monitor mode resources
221  *
222  * @pdev: dp pdev object
223  *
224  * Return: void
225  *
226  */
227 void dp_full_mon_detach(struct dp_pdev *pdev);
228 
229 /**
230  * dp_rx_mon_process ()- API to process monitor destination ring for
231  * full monitor mode
232  *
233  * @soc: dp soc handle
234  * @int_ctx: interrupt context
235  * @mac_id: lmac id
236  * @quota: No. of ring entry that can be serviced in one shot.
237  */
238 
239 uint32_t dp_rx_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx,
240 			   uint32_t mac_id, uint32_t quota);
241 
242 #else
243 /**
244  * dp_full_mon_attach() - attach full monitor mode resources
245  * @pdev: Datapath PDEV handle
246  *
247  * Return: void
248  */
249 static inline void dp_full_mon_attach(struct dp_pdev *pdev)
250 {
251 }
252 
253 /**
254  * dp_full_mon_detach() - detach full monitor mode resources
255  * @pdev: Datapath PDEV handle
256  *
257  * Return: void
258  *
259  */
260 static inline void dp_full_mon_detach(struct dp_pdev *pdev)
261 {
262 }
263 #endif
264 
/**
 * dp_reset_monitor_mode() - Disable monitor mode
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of Datapath PDEV handle
 * @smart_monitor: smart monitor flag
 *
 * Return: QDF_STATUS
 */
271 QDF_STATUS dp_reset_monitor_mode(struct cdp_soc_t *soc_hdl,
272 				 uint8_t pdev_id,
273 				 uint8_t smart_monitor);
274 
275 /**
276  * dp_mon_link_free() - free monitor link desc pool
277  * @pdev: core txrx pdev context
278  *
279  * This function will release DP link desc pool for monitor mode from
280  * main device context.
281  *
282  * Return: QDF_STATUS_SUCCESS: success
283  *         QDF_STATUS_E_RESOURCES: Error return
284  */
285 QDF_STATUS dp_mon_link_free(struct dp_pdev *pdev);
286 
287 
288 /**
289  * dp_mon_process() - Main monitor mode processing roution.
290  * @soc: core txrx main context
291  * @int_ctx: interrupt context
292  * @mac_id: mac_id which is one of 3 mac_ids
293  * @quota: No. of status ring entry that can be serviced in one shot.
294  *
295  * This call monitor status ring process then monitor
296  * destination ring process.
297  * Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
298  *
299  * Return: uint32_t: No. of ring entry that is processed.
300  */
301 uint32_t dp_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx,
302 			uint32_t mac_id, uint32_t quota);
303 
304 /**
305  * dp_mon_drop_packets_for_mac() - Drop the mon status ring and
306  *  dest ring packets for a given mac. Packets in status ring and
307  *  dest ring are dropped independently.
308  * @pdev: DP pdev
309  * @mac_id: mac id
310  * @quota: max number of status ring entries that can be processed
311  *
312  * Return: work done
313  */
314 uint32_t dp_mon_drop_packets_for_mac(struct dp_pdev *pdev, uint32_t mac_id,
315 				     uint32_t quota);
316 
317 QDF_STATUS dp_rx_mon_deliver(struct dp_soc *soc, uint32_t mac_id,
318 	qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu);
319 /*
320  * dp_rx_mon_deliver_non_std() - deliver frames for non standard path
 * @soc: core txrx main context
322  * @mac_id: MAC ID
323  *
324  * This function delivers the radio tap and dummy MSDU
325  * into user layer application for preamble only PPDU.
326  *
327  * Return: Operation status
328  */
329 QDF_STATUS dp_rx_mon_deliver_non_std(struct dp_soc *soc, uint32_t mac_id);
330 
331 /**
332  * dp_rxdma_err_process() - RxDMA error processing functionality
 * @int_ctx: interrupt context
 * @soc: core txrx main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: No. of units (packets) that can be serviced in one shot.
337  *
338  * Return: num of buffers processed
339  */
340 uint32_t dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
341 			      uint32_t mac_id, uint32_t quota);
342 
343 /**
344  * dp_mon_buf_delayed_replenish() - Helper routine to replenish monitor dest buf
345  * @pdev: DP pdev object
346  *
347  * Return: None
348  */
349 void dp_mon_buf_delayed_replenish(struct dp_pdev *pdev);
350 
351 #ifdef QCA_MONITOR_PKT_SUPPORT
352 /**
353  * dp_rx_mon_link_desc_return() - Return a MPDU link descriptor to HW
354  *			      (WBM), following error handling
355  *
356  * @dp_pdev: core txrx pdev context
357  * @buf_addr_info: void pointer to monitor link descriptor buf addr info
358  * Return: QDF_STATUS
359  */
360 QDF_STATUS
361 dp_rx_mon_link_desc_return(struct dp_pdev *dp_pdev,
362 			   hal_buff_addrinfo_t buf_addr_info,
363 			   int mac_id);
364 #else
365 static inline QDF_STATUS
366 dp_rx_mon_link_desc_return(struct dp_pdev *dp_pdev,
367 			   hal_buff_addrinfo_t buf_addr_info,
368 			   int mac_id)
369 {
370 	return QDF_STATUS_SUCCESS;
371 }
372 #endif
373 
374 /**
375  * dp_mon_adjust_frag_len() - MPDU and MSDU may spread across
376  *				multiple nbufs. This function
377  *                              is to return data length in
378  *				fragmented buffer
379  * @soc: Datapath soc handle
380  * @total_len: pointer to remaining data length.
381  * @frag_len: pointer to data length in this fragment.
382  * @l2_hdr_pad: l2 header padding
383  */
384 static inline void dp_mon_adjust_frag_len(struct dp_soc *soc,
385 					  uint32_t *total_len,
386 					  uint32_t *frag_len,
387 					  uint16_t l2_hdr_pad)
388 {
389 	uint32_t rx_pkt_tlv_len = soc->rx_pkt_tlv_size;
390 
391 	if (*total_len >= (RX_MONITOR_BUFFER_SIZE - rx_pkt_tlv_len)) {
392 		*frag_len = RX_MONITOR_BUFFER_SIZE - rx_pkt_tlv_len -
393 					l2_hdr_pad;
394 		*total_len -= *frag_len;
395 	} else {
396 		*frag_len = *total_len;
397 		*total_len = 0;
398 	}
399 }
400 
401 /**
402  * dp_rx_mon_frag_adjust_frag_len() - MPDU and MSDU may spread across
403  * multiple nbufs. This function is to return data length in
404  * fragmented buffer.
405  * It takes input as max_limit for any buffer(as it changes based
406  * on decap type and buffer sequence in MSDU.
407  *
408  * If MSDU is divided into multiple buffer then below format will
409  * be max limit.
410  * Decap type Non-Raw
411  *--------------------------------
412  *|  1st  |  2nd  | ...  | Last   |
413  *| 1662  |  1664 | 1664 | <=1664 |
414  *--------------------------------
415  * Decap type Raw
416  *--------------------------------
417  *|  1st  |  2nd  | ...  | Last   |
418  *| 1664  |  1664 | 1664 | <=1664 |
419  *--------------------------------
420  *
421  * It also calculate if current buffer has placeholder to keep padding byte.
422  *  --------------------------------
423  * |       MAX LIMIT(1662/1664)     |
424  *  --------------------------------
425  * | Actual Data | Pad byte Pholder |
426  *  --------------------------------
427  *
428  * @total_len: Remaining data length.
429  * @frag_len:  Data length in this fragment.
430  * @max_limit: Max limit of current buffer/MSDU.
431 */
432 #ifdef DP_RX_MON_MEM_FRAG
static inline
void dp_rx_mon_frag_adjust_frag_len(uint32_t *total_len, uint32_t *frag_len,
				    uint32_t max_limit)
{
	uint32_t remaining = *total_len;

	if (remaining < max_limit) {
		/* Final fragment: it carries all remaining data */
		*frag_len = remaining;
		*total_len = 0;
		return;
	}

	/* Fragment filled to its limit; more data follows */
	*frag_len = max_limit;
	*total_len = remaining - max_limit;
}
445 
446 /**
447  * DP_RX_MON_GET_NBUF_FROM_DESC() - Get nbuf from desc
448  */
449 #define DP_RX_MON_GET_NBUF_FROM_DESC(rx_desc) \
450 	NULL
451 
/**
 * dp_rx_mon_add_msdu_to_list_failure_handler() - Handler for nbuf buffer
 *                                                  attach failure
 *
 * Releases the rx TLV fragment and the partially-built msdu chain, then
 * NULLs out the caller's list pointers so the MPDU is dropped cleanly.
 *
 * @rx_tlv_hdr: rx TLV fragment to free
 * @pdev: struct dp_pdev * (stats context)
 * @last: skb pointing to last skb in chained list at any moment
 * @head_msdu: parent skb in the chained list
 * @tail_msdu: Last skb in the chained list
 * @func_name: caller function name (for the error log)
 *
 * Return: void
 */
static inline void
dp_rx_mon_add_msdu_to_list_failure_handler(void *rx_tlv_hdr,
					   struct dp_pdev *pdev,
					   qdf_nbuf_t *last,
					   qdf_nbuf_t *head_msdu,
					   qdf_nbuf_t *tail_msdu,
					   const char *func_name)
{
	/* Count the allocation failure against this pdev */
	DP_STATS_INC(pdev, replenish.nbuf_alloc_fail, 1);
	qdf_frag_free(rx_tlv_hdr);
	/* Free the whole chain built so far; nothing can be delivered */
	if (head_msdu)
		qdf_nbuf_list_free(*head_msdu);
	dp_err("[%s] failed to allocate subsequent parent buffer to hold all frag\n",
	       func_name);
	/* Reset caller pointers so stale references are not reused */
	if (head_msdu)
		*head_msdu = NULL;
	if (last)
		*last = NULL;
	if (tail_msdu)
		*tail_msdu = NULL;
}
486 
/**
 * dp_rx_mon_get_paddr_from_desc() - Get paddr from desc
 * @rx_desc: Rx descriptor
 *
 * Return: physical address of the buffer start recorded in the descriptor
 */
static inline
qdf_dma_addr_t dp_rx_mon_get_paddr_from_desc(struct dp_rx_desc *rx_desc)
{
	return rx_desc->paddr_buf_start;
}
495 
496 /**
497  * DP_RX_MON_IS_BUFFER_ADDR_NULL() - Is Buffer received from hw is NULL
498  */
499 #define DP_RX_MON_IS_BUFFER_ADDR_NULL(rx_desc) \
500 	(!(rx_desc->rx_buf_start))
501 
502 #define DP_RX_MON_IS_MSDU_NOT_NULL(msdu) \
503 	true
504 
/**
 * dp_rx_mon_buffer_free() - Free nbuf or frag memory
 * Free nbuf if feature is disabled, else free frag.
 *
 * In this (DP_RX_MON_MEM_FRAG) variant the descriptor owns a raw frag,
 * so only the frag is released.
 *
 * @rx_desc: Rx desc
 */
static inline void
dp_rx_mon_buffer_free(struct dp_rx_desc *rx_desc)
{
	qdf_frag_free(rx_desc->rx_buf_start);
}
516 
/**
 * dp_rx_mon_buffer_unmap() - Unmap nbuf or frag memory
 * Unmap nbuf if feature is disabled, else unmap frag.
 *
 * In this (DP_RX_MON_MEM_FRAG) variant the buffer was mapped page-wise,
 * so the page mapping is torn down from the recorded physical address.
 *
 * @soc: struct dp_soc *
 * @rx_desc: struct dp_rx_desc *
 * @size: Size to be unmapped
 */
static inline void
dp_rx_mon_buffer_unmap(struct dp_soc *soc, struct dp_rx_desc *rx_desc,
		       uint16_t size)
{
	qdf_mem_unmap_page(soc->osdev, rx_desc->paddr_buf_start,
			   size, QDF_DMA_FROM_DEVICE);
}
532 
/**
 * dp_rx_mon_alloc_parent_buffer() - Allocate parent buffer to hold
 * radiotap header and accommodate all frag memory in nr_frag.
 *
 * @head_msdu: Ptr to hold allocated Msdu
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE if the
 *	   nbuf allocation fails.
 */
static inline
QDF_STATUS dp_rx_mon_alloc_parent_buffer(qdf_nbuf_t *head_msdu)
{
	/*
	 * Headroom should accommodate radiotap header
	 * and protocol and flow tag for all frag
	 * Length reserved to accommodate Radiotap header
	 * is 128 bytes and length reserved for Protocol
	 * flow tag will vary based on QDF_NBUF_MAX_FRAGS.
	 */
	/*  -------------------------------------------------
	 * |       Protocol & Flow TAG      | Radiotap header|
	 * |                                |  Length(128 B) |
	 * |  ((4* QDF_NBUF_MAX_FRAGS) * 2) |                |
	 *  -------------------------------------------------
	 */

	*head_msdu = qdf_nbuf_alloc_no_recycler(DP_RX_MON_MAX_MONITOR_HEADER,
						DP_RX_MON_MAX_MONITOR_HEADER, 4);

	if (!(*head_msdu))
		return QDF_STATUS_E_FAILURE;

	/* Zero the headroom so stale data never leaks into radiotap/PF tags */
	qdf_mem_zero(qdf_nbuf_head(*head_msdu), qdf_nbuf_headroom(*head_msdu));

	/* Set *head_msdu->next as NULL as all msdus are
	 * mapped via nr frags
	 */
	qdf_nbuf_set_next(*head_msdu, NULL);

	return QDF_STATUS_SUCCESS;
}
573 
/**
 * dp_rx_mon_parse_desc_buffer() - Parse desc buffer based.
 *
 * Below code will parse desc buffer, handle continuation frame,
 * adjust frag length and update l2_hdr_padding. It also stamps
 * first/last-buffer and decap-format info into the TLV private area
 * so later restitch logic can reassemble the MSDU.
 *
 * @dp_soc              : struct dp_soc*
 * @msdu_info          : struct hal_rx_msdu_desc_info*
 * @is_frag_p          : is_frag * (in/out: MSDU-spans-buffers state)
 * @total_frag_len_p   : Remaining frag len to be updated
 * @frag_len_p         : frag len
 * @l2_hdr_offset_p    : l2 hdr offset
 * @rx_desc_tlv        : rx_desc_tlv
 * @is_frag_non_raw_p  : Non raw frag (in/out: sticky across continuation)
 * @data               : NBUF Data
 */
static inline void
dp_rx_mon_parse_desc_buffer(struct dp_soc *dp_soc,
			    struct hal_rx_msdu_desc_info *msdu_info,
			    bool *is_frag_p, uint32_t *total_frag_len_p,
			    uint32_t *frag_len_p, uint16_t *l2_hdr_offset_p,
			    qdf_frag_t rx_desc_tlv,
			    bool *is_frag_non_raw_p, void *data)
{
	struct hal_rx_mon_dest_buf_info frame_info;
	/* Max payload a single monitor buffer can carry after the TLVs */
	uint16_t tot_payload_len =
			RX_MONITOR_BUFFER_SIZE - dp_soc->rx_pkt_tlv_size;

	if (msdu_info->msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION) {
		/* First buffer of MSDU */
		if (!(*is_frag_p)) {
			/* Set total frag_len from msdu_len */
			*total_frag_len_p = msdu_info->msdu_len;

			*is_frag_p = true;
			if (HAL_HW_RX_DECAP_FORMAT_RAW ==
			    hal_rx_tlv_decap_format_get(dp_soc->hal_soc,
							rx_desc_tlv)) {
				*l2_hdr_offset_p =
					DP_RX_MON_RAW_L2_HDR_PAD_BYTE;
				frame_info.is_decap_raw = 1;
			} else {
				*l2_hdr_offset_p =
					DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE;
				frame_info.is_decap_raw = 0;
				*is_frag_non_raw_p = true;
			}
			/* First buffer max limit shrinks by the l2 pad */
			dp_rx_mon_frag_adjust_frag_len(total_frag_len_p,
						       frag_len_p,
						       tot_payload_len -
						       *l2_hdr_offset_p);

			frame_info.first_buffer = 1;
			frame_info.last_buffer = 0;
			hal_rx_priv_info_set_in_tlv(dp_soc->hal_soc,
						    rx_desc_tlv,
						    (uint8_t *)&frame_info,
						    sizeof(frame_info));
		} else {
			/*
			 * Continuation Middle frame
			 * Here max limit will be same for Raw and Non raw case.
			 */
			*l2_hdr_offset_p = DP_RX_MON_RAW_L2_HDR_PAD_BYTE;
			dp_rx_mon_frag_adjust_frag_len(total_frag_len_p,
						       frag_len_p,
						       tot_payload_len);

			/* Update frame info if is non raw frame */
			if (*is_frag_non_raw_p)
				frame_info.is_decap_raw = 0;
			else
				frame_info.is_decap_raw = 1;

			frame_info.first_buffer = 0;
			frame_info.last_buffer = 0;
			hal_rx_priv_info_set_in_tlv(dp_soc->hal_soc,
						    rx_desc_tlv,
						    (uint8_t *)&frame_info,
						    sizeof(frame_info));
		}
	} else {
		/*
		 * Last buffer of MSDU spread among multiple buffer
		 * Here max limit will be same for Raw and Non raw case.
		 */
		if (*is_frag_p) {
			*l2_hdr_offset_p = DP_RX_MON_RAW_L2_HDR_PAD_BYTE;

			dp_rx_mon_frag_adjust_frag_len(total_frag_len_p,
						       frag_len_p,
						       tot_payload_len);

			/* Update frame info if is non raw frame */
			if (*is_frag_non_raw_p)
				frame_info.is_decap_raw = 0;
			else
				frame_info.is_decap_raw = 1;

			frame_info.first_buffer = 0;
			frame_info.last_buffer = 1;
			hal_rx_priv_info_set_in_tlv(dp_soc->hal_soc,
						    rx_desc_tlv,
						    (uint8_t *)&frame_info,
						    sizeof(frame_info));
		} else {
			/* MSDU with single buffer */
			*frag_len_p = msdu_info->msdu_len;
			if (HAL_HW_RX_DECAP_FORMAT_RAW ==
			    hal_rx_tlv_decap_format_get(dp_soc->hal_soc,
							rx_desc_tlv)) {
				*l2_hdr_offset_p =
					DP_RX_MON_RAW_L2_HDR_PAD_BYTE;
				frame_info.is_decap_raw = 1;
			} else {
				*l2_hdr_offset_p =
					DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE;
				frame_info.is_decap_raw = 0;
			}

			frame_info.first_buffer = 1;
			frame_info.last_buffer = 1;
			hal_rx_priv_info_set_in_tlv(dp_soc->hal_soc,
						    rx_desc_tlv,
						    (uint8_t *)&frame_info,
						    sizeof(frame_info));
		}
		/* Reset bool after complete processing of MSDU */
		*is_frag_p = false;
		*is_frag_non_raw_p = false;
	}
}
706 
/**
 * dp_rx_mon_buffer_set_pktlen() - set pktlen for buffer
 * @msdu: msdu (unused in this variant)
 * @size: packet length (unused in this variant)
 *
 * No-op when DP_RX_MON_MEM_FRAG is enabled: the length is tracked per
 * fragment via qdf_nbuf_add_rx_frag() instead of the skb data length.
 */
static inline void dp_rx_mon_buffer_set_pktlen(qdf_nbuf_t msdu, uint32_t size)
{
}
713 
/**
 * dp_rx_mon_add_msdu_to_list()- Add msdu to list and update head_msdu
 *      It will add reaped buffer frag to nr frag of parent msdu.
 *      When the current parent runs out of frag slots, a new parent
 *      nbuf is allocated and chained after *last.
 * @soc: DP soc handle
 * @head_msdu: NULL if first time called else &msdu
 * @msdu: Msdu where frag address needs to be added via nr_frag
 * @last: Used to traverse in list if this feature is disabled.
 * @rx_desc_tlv: Frag address
 * @frag_len: Frag len
 * @l2_hdr_offset: l2 hdr padding
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on bad
 *	   input or parent-buffer allocation failure.
 */
static inline
QDF_STATUS dp_rx_mon_add_msdu_to_list(struct dp_soc *soc, qdf_nbuf_t *head_msdu,
				      qdf_nbuf_t msdu, qdf_nbuf_t *last,
				      qdf_frag_t rx_desc_tlv, uint32_t frag_len,
				      uint32_t l2_hdr_offset)
{
	uint32_t num_frags;
	qdf_nbuf_t msdu_curr;

	/* Here head_msdu and *head_msdu must not be NULL */
	/* Dont add frag to skb if frag length is zero. Drop frame */
	if (qdf_unlikely(!frag_len || !head_msdu || !(*head_msdu))) {
		dp_err("[%s] frag_len[%d] || head_msdu[%pK] || *head_msdu is Null while adding frag to skb\n",
		       __func__, frag_len, head_msdu);
		return QDF_STATUS_E_FAILURE;
	}

	/* In case of first desc of MPDU, assign curr msdu to *head_msdu */
	if (!qdf_nbuf_get_nr_frags(*head_msdu))
		msdu_curr = *head_msdu;
	else
		msdu_curr = *last;

	/* Current msdu must not be NULL */
	if (qdf_unlikely(!msdu_curr)) {
		dp_err("[%s] Current msdu can't be Null while adding frag to skb\n",
		       __func__);
		return QDF_STATUS_E_FAILURE;
	}

	/* Fast path: current parent still has a free frag slot */
	num_frags = qdf_nbuf_get_nr_frags(msdu_curr);
	if (num_frags < QDF_NBUF_MAX_FRAGS) {
		qdf_nbuf_add_rx_frag(rx_desc_tlv, msdu_curr,
				     soc->rx_mon_pkt_tlv_size,
				     frag_len + l2_hdr_offset,
				     RX_MONITOR_BUFFER_SIZE,
				     false);
		if (*last != msdu_curr)
			*last = msdu_curr;
		return QDF_STATUS_SUCCESS;
	}

	/* Execution will reach here only if num_frags == QDF_NBUF_MAX_FRAGS */
	msdu_curr = NULL;
	if ((dp_rx_mon_alloc_parent_buffer(&msdu_curr))
	    != QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_FAILURE;

	qdf_nbuf_add_rx_frag(rx_desc_tlv, msdu_curr, soc->rx_mon_pkt_tlv_size,
			     frag_len + l2_hdr_offset, RX_MONITOR_BUFFER_SIZE,
			     false);

	/* Add allocated nbuf in the chain */
	qdf_nbuf_set_next(*last, msdu_curr);

	/* Assign current msdu to last to avoid traversal */
	*last = msdu_curr;

	return QDF_STATUS_SUCCESS;
}
785 
/**
 * dp_rx_mon_init_tail_msdu() - Initialize tail msdu
 *
 * In the frag variant the tail is the last parent nbuf in the chain
 * (@last), not the per-buffer @msdu.
 *
 * @head_msdu: Parent buffer to hold MPDU data
 * @msdu: Msdu to be updated in tail_msdu (unused here)
 * @last: last msdu in the chain
 * @tail_msdu: out - set to @last, or NULL if there is no valid head
 */
static inline
void dp_rx_mon_init_tail_msdu(qdf_nbuf_t *head_msdu, qdf_nbuf_t msdu,
			      qdf_nbuf_t last, qdf_nbuf_t *tail_msdu)
{
	if (!head_msdu || !(*head_msdu)) {
		*tail_msdu = NULL;
		return;
	}

	/* Terminate the chain at the last parent buffer */
	if (last)
		qdf_nbuf_set_next(last, NULL);
	*tail_msdu = last;
}
807 
/**
 * dp_rx_mon_remove_raw_frame_fcs_len() - Remove FCS length for Raw Frame
 *
 * If feature is disabled, then removal happens in restitch logic.
 *
 * The decap format is read from the TLV area that precedes the first
 * frag of the head msdu; if it is RAW, the last frag of the tail msdu
 * is trimmed by HAL_RX_FCS_LEN.
 *
 * @soc: Datapath soc handle
 * @head_msdu: Head msdu
 * @tail_msdu: Tail msdu
 */
static inline
void dp_rx_mon_remove_raw_frame_fcs_len(struct dp_soc *soc,
					qdf_nbuf_t *head_msdu,
					qdf_nbuf_t *tail_msdu)
{
	qdf_frag_t addr;

	if (qdf_unlikely(!head_msdu || !tail_msdu || !(*head_msdu)))
		return;

	/* If *head_msdu is valid, then *tail_msdu must be valid */
	/* If head_msdu is valid, then it must have nr_frags */
	/* If tail_msdu is valid, then it must have nr_frags */

	/* Strip FCS_LEN for Raw frame */
	/* Step back to the TLV header that precedes the first frag's data */
	addr = qdf_nbuf_get_frag_addr(*head_msdu, 0);
	addr -= soc->rx_mon_pkt_tlv_size;
	if (hal_rx_tlv_decap_format_get(soc->hal_soc, addr) ==
		HAL_HW_RX_DECAP_FORMAT_RAW) {
		qdf_nbuf_trim_add_frag_size(*tail_msdu,
			qdf_nbuf_get_nr_frags(*tail_msdu) - 1,
					-HAL_RX_FCS_LEN, 0);
	}
}
841 
/**
 * dp_rx_mon_get_buffer_data()- Get data from desc buffer
 * @rx_desc: desc
 *
 * In the frag variant the TLV content lives at the start of the raw
 * frag buffer tracked by the descriptor.
 *
 * Return address containing actual tlv content
 */
static inline
uint8_t *dp_rx_mon_get_buffer_data(struct dp_rx_desc *rx_desc)
{
	return rx_desc->rx_buf_start;
}
853 
/**
 * dp_rx_mon_get_nbuf_80211_hdr() - Get 80211 hdr from nbuf
 * @nbuf: qdf_nbuf_t
 *
 * This function must be called after moving radiotap header.
 *
 * Return: Ptr pointing to 80211 header or NULL.
 */
static inline
qdf_frag_t dp_rx_mon_get_nbuf_80211_hdr(qdf_nbuf_t nbuf)
{
	/* Return NULL if nr_frag is Zero */
	if (!qdf_nbuf_get_nr_frags(nbuf))
		return NULL;

	/* First frag holds the start of the 802.11 frame */
	return qdf_nbuf_get_frag_addr(nbuf, 0);
}
871 #else
872 
873 #define DP_RX_MON_GET_NBUF_FROM_DESC(rx_desc) \
874 	(rx_desc->nbuf)
875 
/* No-op: the parent-buffer attach path cannot fail when the
 * DP_RX_MON_MEM_FRAG feature is disabled, so there is nothing to clean up.
 */
static inline void
dp_rx_mon_add_msdu_to_list_failure_handler(void *rx_tlv_hdr,
					   struct dp_pdev *pdev,
					   qdf_nbuf_t *last,
					   qdf_nbuf_t *head_msdu,
					   qdf_nbuf_t *tail_msdu,
					   const char *func_name)
{
}
885 
886 static inline
887 qdf_dma_addr_t dp_rx_mon_get_paddr_from_desc(struct dp_rx_desc *rx_desc)
888 {
889 	qdf_dma_addr_t paddr = 0;
890 	qdf_nbuf_t msdu = NULL;
891 
892 	msdu = rx_desc->nbuf;
893 	if (msdu)
894 		paddr = qdf_nbuf_get_frag_paddr(msdu, 0);
895 
896 	return paddr;
897 }
898 
899 #define DP_RX_MON_IS_BUFFER_ADDR_NULL(rx_desc) \
900 	(!(rx_desc->nbuf))
901 
902 #define DP_RX_MON_IS_MSDU_NOT_NULL(msdu) \
903 	(msdu)
904 
/* Free the descriptor's nbuf (non-frag variant of dp_rx_mon_buffer_free) */
static inline void
dp_rx_mon_buffer_free(struct dp_rx_desc *rx_desc)
{
	qdf_nbuf_free(rx_desc->nbuf);
}
910 
/* Unmap @size bytes of the descriptor's nbuf (non-frag variant) */
static inline void
dp_rx_mon_buffer_unmap(struct dp_soc *soc, struct dp_rx_desc *rx_desc,
		       uint16_t size)
{
	qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
				     QDF_DMA_FROM_DEVICE, size);
}
918 
/* No-op: without DP_RX_MON_MEM_FRAG the reaped nbuf itself is the parent,
 * so no separate parent buffer is allocated.
 */
static inline
QDF_STATUS dp_rx_mon_alloc_parent_buffer(qdf_nbuf_t *head_msdu)
{
	return QDF_STATUS_SUCCESS;
}
924 
/* Non-frag variant: derive the l2 header offset from the TLVs and split
 * msdu_len across buffers for continuation MSDUs; frame_info stamping is
 * not needed since the restitch path handles decap there.
 */
static inline void
dp_rx_mon_parse_desc_buffer(struct dp_soc *dp_soc,
			    struct hal_rx_msdu_desc_info *msdu_info,
			    bool *is_frag_p, uint32_t *total_frag_len_p,
			    uint32_t *frag_len_p, uint16_t *l2_hdr_offset_p,
			    qdf_frag_t rx_desc_tlv,
			    bool *is_frag_non_raw_p, void *data)
{
	/*
	 * HW structures call this L3 header padding
	 * -- even though this is actually the offset
	 * from the buffer beginning where the L2
	 * header begins.
	 */
	*l2_hdr_offset_p =
	hal_rx_msdu_end_l3_hdr_padding_get(dp_soc->hal_soc, data);

	if (msdu_info->msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION) {
		/* First continuation buffer: latch total length from msdu_len */
		if (!*(is_frag_p)) {
			*total_frag_len_p = msdu_info->msdu_len;
			*is_frag_p = true;
		}
		dp_mon_adjust_frag_len(dp_soc, total_frag_len_p, frag_len_p,
				       *l2_hdr_offset_p);
	} else {
		if (*is_frag_p) {
			/* Last buffer of a multi-buffer MSDU */
			dp_mon_adjust_frag_len(dp_soc, total_frag_len_p,
					       frag_len_p,
					       *l2_hdr_offset_p);
		} else {
			/* Single-buffer MSDU */
			*frag_len_p = msdu_info->msdu_len;
		}
		*is_frag_p = false;
	}
}
960 
/* Set the skb packet length directly (non-frag variant) */
static inline void dp_rx_mon_buffer_set_pktlen(qdf_nbuf_t msdu, uint32_t size)
{
	qdf_nbuf_set_pktlen(msdu, size);
}
965 
966 static inline
967 QDF_STATUS dp_rx_mon_add_msdu_to_list(struct dp_soc *soc, qdf_nbuf_t *head_msdu,
968 				      qdf_nbuf_t msdu, qdf_nbuf_t *last,
969 				      qdf_frag_t rx_desc_tlv, uint32_t frag_len,
970 				      uint32_t l2_hdr_offset)
971 {
972 	if (head_msdu && !*head_msdu) {
973 		*head_msdu = msdu;
974 	} else {
975 		if (*last)
976 			qdf_nbuf_set_next(*last, msdu);
977 	}
978 	*last = msdu;
979 	return QDF_STATUS_SUCCESS;
980 }
981 
/* Non-frag variant: terminate the chain after @last and report @msdu as
 * the tail of the list.
 */
static inline
void dp_rx_mon_init_tail_msdu(qdf_nbuf_t *head_msdu, qdf_nbuf_t msdu,
			      qdf_nbuf_t last, qdf_nbuf_t *tail_msdu)
{
	if (last)
		qdf_nbuf_set_next(last, NULL);

	*tail_msdu = msdu;
}
991 
/**
 * dp_rx_mon_remove_raw_frame_fcs_len() - Strip FCS length from a raw frame
 * @soc: DP soc handle
 * @head_msdu: pointer to head of the MSDU chain
 * @tail_msdu: pointer to tail of the MSDU chain
 *
 * No-op in this build configuration; FCS trimming is handled elsewhere
 * (presumably only needed in the frag-based monitor path -- confirm).
 *
 * Return: None
 */
static inline
void dp_rx_mon_remove_raw_frame_fcs_len(struct dp_soc *soc,
					qdf_nbuf_t *head_msdu,
					qdf_nbuf_t *tail_msdu)
{
}
998 
999 static inline
1000 uint8_t *dp_rx_mon_get_buffer_data(struct dp_rx_desc *rx_desc)
1001 {
1002 	qdf_nbuf_t msdu = NULL;
1003 	uint8_t *data = NULL;
1004 
1005 	msdu = rx_desc->nbuf;
1006 	if (qdf_likely(msdu))
1007 		data = qdf_nbuf_data(msdu);
1008 	return data;
1009 }
1010 
/**
 * dp_rx_mon_get_nbuf_80211_hdr() - Get the 802.11 header address from an nbuf
 * @nbuf: network buffer
 *
 * Return: address of the start of nbuf data, where the 802.11 header resides
 */
static inline
qdf_frag_t dp_rx_mon_get_nbuf_80211_hdr(qdf_nbuf_t nbuf)
{
	return qdf_nbuf_data(nbuf);
}
1016 #endif
1017 
1018 /**
1019  * dp_rx_cookie_2_mon_link_desc() - Retrieve Link descriptor based on target
1020  * @pdev: core physical device context
1021  * @hal_buf_info: structure holding the buffer info
1022  * mac_id: mac number
1023  *
1024  * Return: link descriptor address
1025  */
1026 static inline
1027 void *dp_rx_cookie_2_mon_link_desc(struct dp_pdev *pdev,
1028 				   struct hal_buf_info buf_info,
1029 				   uint8_t mac_id)
1030 {
1031 	if (pdev->soc->wlan_cfg_ctx->rxdma1_enable)
1032 		return dp_rx_cookie_2_mon_link_desc_va(pdev, &buf_info,
1033 						       mac_id);
1034 
1035 	return dp_rx_cookie_2_link_desc_va(pdev->soc, &buf_info);
1036 }
1037 
1038 /**
1039  * dp_rx_monitor_link_desc_return() - Return Link descriptor based on target
1040  * @pdev: core physical device context
1041  * @p_last_buf_addr_info: MPDU Link descriptor
1042  * mac_id: mac number
1043  *
1044  * Return: QDF_STATUS
1045  */
1046 static inline
1047 QDF_STATUS dp_rx_monitor_link_desc_return(struct dp_pdev *pdev,
1048 					  hal_buff_addrinfo_t
1049 					  p_last_buf_addr_info,
1050 					  uint8_t mac_id, uint8_t bm_action)
1051 {
1052 	if (pdev->soc->wlan_cfg_ctx->rxdma1_enable)
1053 		return dp_rx_mon_link_desc_return(pdev, p_last_buf_addr_info,
1054 						  mac_id);
1055 
1056 	return dp_rx_link_desc_return_by_addr(pdev->soc, p_last_buf_addr_info,
1057 				      bm_action);
1058 }
1059 
1060 /**
1061  * dp_rxdma_get_mon_dst_ring() - Return the pointer to rxdma_err_dst_ring
1062  *					or mon_dst_ring based on the target
1063  * @pdev: core physical device context
1064  * @mac_for_pdev: mac_id number
1065  *
1066  * Return: ring address
1067  */
1068 static inline
1069 void *dp_rxdma_get_mon_dst_ring(struct dp_pdev *pdev,
1070 				uint8_t mac_for_pdev)
1071 {
1072 	if (pdev->soc->wlan_cfg_ctx->rxdma1_enable)
1073 		return pdev->soc->rxdma_mon_dst_ring[mac_for_pdev].hal_srng;
1074 
1075 	return pdev->soc->rxdma_err_dst_ring[mac_for_pdev].hal_srng;
1076 }
1077 
1078 /**
1079  * dp_rxdma_get_mon_buf_ring() - Return monitor buf ring address
1080  *				    based on target
1081  * @pdev: core physical device context
1082  * @mac_for_pdev: mac id number
1083  *
1084  * Return: ring address
1085  */
1086 static inline
1087 struct dp_srng *dp_rxdma_get_mon_buf_ring(struct dp_pdev *pdev,
1088 					  uint8_t mac_for_pdev)
1089 {
1090 	if (pdev->soc->wlan_cfg_ctx->rxdma1_enable)
1091 		return &pdev->soc->rxdma_mon_buf_ring[mac_for_pdev];
1092 
1093 	/* For MCL there is only 1 rx refill ring */
1094 	return &pdev->soc->rx_refill_buf_ring[0];
1095 }
1096 
1097 /**
1098  * dp_rx_get_mon_desc_pool() - Return monitor descriptor pool
1099  *			       based on target
1100  * @soc: soc handle
1101  * @mac_id: mac id number
1102  * @pdev_id: pdev id number
1103  *
1104  * Return: descriptor pool address
1105  */
1106 static inline
1107 struct rx_desc_pool *dp_rx_get_mon_desc_pool(struct dp_soc *soc,
1108 					     uint8_t mac_id,
1109 					     uint8_t pdev_id)
1110 {
1111 	if (soc->wlan_cfg_ctx->rxdma1_enable)
1112 		return &soc->rx_desc_mon[mac_id];
1113 
1114 	return &soc->rx_desc_buf[pdev_id];
1115 }
1116 
1117 /**
1118  * dp_rx_get_mon_desc() - Return Rx descriptor based on target
1119  * @soc: soc handle
1120  * @cookie: cookie value
1121  *
1122  * Return: Rx descriptor
1123  */
1124 static inline
1125 struct dp_rx_desc *dp_rx_get_mon_desc(struct dp_soc *soc,
1126 				      uint32_t cookie)
1127 {
1128 	if (soc->wlan_cfg_ctx->rxdma1_enable)
1129 		return dp_rx_cookie_2_va_mon_buf(soc, cookie);
1130 
1131 	return dp_rx_cookie_2_va_rxdma_buf(soc, cookie);
1132 }
1133 
1134 #ifndef REMOVE_MON_DBG_STATS
1135 /*
1136  * dp_rx_mon_update_dbg_ppdu_stats() - Update status ring TLV count
1137  * @ppdu_info: HAL RX PPDU info retrieved from status ring TLV
1138  * @rx_mon_stats: monitor mode status/destination ring PPDU and MPDU count
1139  *
1140  * Update status ring PPDU start and end count. Keep track TLV state on
1141  * PPDU start and end to find out if start and end is matching. Keep
1142  * track missing PPDU start and end count. Keep track matching PPDU
1143  * start and end count.
1144  *
1145  * Return: None
1146  */
1147 static inline void
1148 dp_rx_mon_update_dbg_ppdu_stats(struct hal_rx_ppdu_info *ppdu_info,
1149 				struct cdp_pdev_mon_stats *rx_mon_stats)
1150 {
1151 	if (ppdu_info->rx_state ==
1152 		HAL_RX_MON_PPDU_START) {
1153 		rx_mon_stats->status_ppdu_start++;
1154 		if (rx_mon_stats->status_ppdu_state
1155 			!= CDP_MON_PPDU_END)
1156 			rx_mon_stats->status_ppdu_end_mis++;
1157 		rx_mon_stats->status_ppdu_state
1158 			= CDP_MON_PPDU_START;
1159 		ppdu_info->rx_state = HAL_RX_MON_PPDU_RESET;
1160 	} else if (ppdu_info->rx_state ==
1161 		HAL_RX_MON_PPDU_END) {
1162 		rx_mon_stats->status_ppdu_end++;
1163 		if (rx_mon_stats->status_ppdu_state
1164 			!= CDP_MON_PPDU_START)
1165 			rx_mon_stats->status_ppdu_start_mis++;
1166 		else
1167 			rx_mon_stats->status_ppdu_compl++;
1168 		rx_mon_stats->status_ppdu_state
1169 			= CDP_MON_PPDU_END;
1170 		ppdu_info->rx_state = HAL_RX_MON_PPDU_RESET;
1171 	}
1172 }
1173 
1174 /*
1175  * dp_rx_mon_init_dbg_ppdu_stats() - initialization for monitor mode stats
1176  * @ppdu_info: HAL RX PPDU info retrieved from status ring TLV
1177  * @rx_mon_stats: monitor mode status/destination ring PPDU and MPDU count
1178  *
1179  * Return: None
1180  */
1181 static inline void
1182 dp_rx_mon_init_dbg_ppdu_stats(struct hal_rx_ppdu_info *ppdu_info,
1183 			      struct cdp_pdev_mon_stats *rx_mon_stats)
1184 {
1185 	ppdu_info->rx_state = HAL_RX_MON_PPDU_END;
1186 	rx_mon_stats->status_ppdu_state
1187 		= CDP_MON_PPDU_END;
1188 }
1189 
1190 #else
/* No-op stub: TLV debug stats are compiled out (REMOVE_MON_DBG_STATS) */
static inline void
dp_rx_mon_update_dbg_ppdu_stats(struct hal_rx_ppdu_info *ppdu_info,
				struct cdp_pdev_mon_stats *rx_mon_stats)
{
}
1196 
/* No-op stub: TLV debug stats are compiled out (REMOVE_MON_DBG_STATS) */
static inline void
dp_rx_mon_init_dbg_ppdu_stats(struct hal_rx_ppdu_info *ppdu_info,
			      struct cdp_pdev_mon_stats *rx_mon_stats)
{
}
1202 
1203 #endif
1204 
1205 #if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
1206 /**
1207  * dp_mon_dest_srng_drop_for_mac() - Drop the mon dest ring packets for
1208  *  a given mac
1209  * @pdev: DP pdev
1210  * @mac_id: mac id
1211  *
 * Return: Number of ring entries dropped
1213  */
1214 uint32_t
1215 dp_mon_dest_srng_drop_for_mac(struct dp_pdev *pdev, uint32_t mac_id);
1216 #endif
1217 #endif
1218