1 /*
2  * Copyright (c) 2021, The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 #ifndef _DP_RX_MON_1_0_H_
18 #define _DP_RX_MON_1_0_H_
19 
20 #include <dp_rx.h>
21 /*
22  * MON_BUF_MIN_ENTRIES macro defines the minimum number of network buffers
23  * to be refilled in the RXDMA monitor buffer ring at init; the remaining
24  * buffers are replenished at the time of monitor vap creation
25  */
26 #define MON_BUF_MIN_ENTRIES 64
27 
28 /*
29  * The below macro defines the maximum number of ring entries that would
30  * be processed in a single instance when processing each of the non-monitoring
31  * RXDMA2SW rings.
32  */
33 #define MON_DROP_REAP_LIMIT 64
34 
35 QDF_STATUS dp_rx_pdev_mon_status_buffers_alloc(struct dp_pdev *pdev,
36 					       uint32_t mac_id);
37 QDF_STATUS dp_rx_pdev_mon_status_desc_pool_alloc(struct dp_pdev *pdev,
38 						 uint32_t mac_id);
39 void dp_rx_pdev_mon_status_desc_pool_init(struct dp_pdev *pdev,
40 					  uint32_t mac_id);
41 void dp_rx_pdev_mon_status_desc_pool_deinit(struct dp_pdev *pdev,
42 					    uint32_t mac_id);
43 void dp_rx_pdev_mon_status_desc_pool_free(struct dp_pdev *pdev,
44 					  uint32_t mac_id);
45 void dp_rx_pdev_mon_status_buffers_free(struct dp_pdev *pdev, uint32_t mac_id);
46 
47 QDF_STATUS dp_rx_pdev_mon_desc_pool_alloc(struct dp_pdev *pdev);
48 QDF_STATUS dp_rx_pdev_mon_buffers_alloc(struct dp_pdev *pdev);
49 void dp_rx_pdev_mon_buffers_free(struct dp_pdev *pdev);
50 void dp_rx_pdev_mon_desc_pool_init(struct dp_pdev *pdev);
51 void dp_rx_pdev_mon_desc_pool_deinit(struct dp_pdev *pdev);
52 void dp_rx_pdev_mon_desc_pool_free(struct dp_pdev *pdev);
53 
54 /**
55  * dp_rx_mon_dest_process() - Brain of the Rx processing functionality
56  *	Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
57  * @soc: core txrx main context
58  * @int_ctx: interrupt context
59  * @mac_id: mac id of the lmac whose monitor destination ring is to be processed
60  * @quota: No. of units (packets) that can be serviced in one shot.
61  *
62  * This function implements the core of Rx functionality. This is
63  * expected to handle only non-error frames.
64  *
65  * Return: none
66  */
67 #ifdef QCA_MONITOR_PKT_SUPPORT
68 void dp_rx_mon_dest_process(struct dp_soc *soc, struct dp_intr *int_ctx,
69 			    uint32_t mac_id, uint32_t quota);
70 
71 void dp_rx_pdev_mon_buf_buffers_free(struct dp_pdev *pdev, uint32_t mac_id);
72 QDF_STATUS
73 dp_rx_pdev_mon_buf_buffers_alloc(struct dp_pdev *pdev, uint32_t mac_id,
74 				 bool delayed_replenish);
75 QDF_STATUS
76 dp_rx_pdev_mon_buf_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_id);
77 void
78 dp_rx_pdev_mon_buf_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_id);
79 #else
80 static inline
81 void dp_rx_mon_dest_process(struct dp_soc *soc, struct dp_intr *int_ctx,
82 			    uint32_t mac_id, uint32_t quota)
83 {
84 }
85 
86 static inline
87 void dp_rx_pdev_mon_buf_buffers_free(struct dp_pdev *pdev, uint32_t mac_id)
88 {
89 }
90 
91 static inline QDF_STATUS
92 dp_rx_pdev_mon_buf_buffers_alloc(struct dp_pdev *pdev, uint32_t mac_id,
93 				 bool delayed_replenish)
94 {
95 	return QDF_STATUS_SUCCESS;
96 }
97 
98 static inline QDF_STATUS
99 dp_rx_pdev_mon_buf_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_id)
100 {
101 	return QDF_STATUS_SUCCESS;
102 }
103 
104 static inline void
105 dp_rx_pdev_mon_buf_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_id)
106 {
107 }
108 #endif
109 
110 #if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
111 /**
112  * dp_mon_dest_srng_drop_for_mac() - Drop the mon dest ring packets for
113  *  a given mac
114  * @pdev: DP pdev
115  * @mac_id: mac id
116  *
117  * Return: number of ring entries dropped
118  */
119 uint32_t
120 dp_mon_dest_srng_drop_for_mac(struct dp_pdev *pdev, uint32_t mac_id);
121 #endif
122 
123 /**
124  * dp_rxdma_err_process() - RxDMA error processing functionality
125  * @int_ctx: interrupt context
126  * @soc: core txrx main context
127  * @mac_id: mac id which is one of 3 mac_ids
128  * @quota: No. of units (packets) that can be serviced in one shot.
129  *
130  * Return: num of buffers processed
131  */
132 uint32_t dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
133 			      uint32_t mac_id, uint32_t quota);
134 
135 /**
136  * dp_mon_buf_delayed_replenish() - Helper routine to replenish monitor dest buf
137  * @pdev: DP pdev object
138  *
139  * Return: None
140  */
141 void dp_mon_buf_delayed_replenish(struct dp_pdev *pdev);
142 
143 #ifdef QCA_MONITOR_PKT_SUPPORT
144 /**
145  * dp_rx_mon_link_desc_return() - Return an MPDU link descriptor to HW
146  *			      (WBM), following error handling
147  *
148  * @dp_pdev: core txrx pdev context
149  * @buf_addr_info: void pointer to monitor link descriptor buf addr info
150  * @mac_id: mac id
 *
 * Return: QDF_STATUS
151  */
152 QDF_STATUS
153 dp_rx_mon_link_desc_return(struct dp_pdev *dp_pdev,
154 			   hal_buff_addrinfo_t buf_addr_info,
155 			   int mac_id);
156 #else
157 static inline QDF_STATUS
158 dp_rx_mon_link_desc_return(struct dp_pdev *dp_pdev,
159 			   hal_buff_addrinfo_t buf_addr_info,
160 			   int mac_id)
161 {
162 	return QDF_STATUS_SUCCESS;
163 }
164 #endif
165 
166 /**
167  * dp_mon_adjust_frag_len() - MPDU and MSDU may spread across
168  *				multiple nbufs. This function
169  *				returns the data length carried
170  *				in the current fragment
171  * @soc: Datapath soc handle
172  * @total_len: pointer to remaining data length.
173  * @frag_len: pointer to data length in this fragment.
174  * @l2_hdr_pad: l2 header padding
175  */
176 static inline void dp_mon_adjust_frag_len(struct dp_soc *soc,
177 					  uint32_t *total_len,
178 					  uint32_t *frag_len,
179 					  uint16_t l2_hdr_pad)
180 {
181 	uint32_t rx_pkt_tlv_len = soc->rx_pkt_tlv_size;
182 
183 	if (*total_len >= (RX_MONITOR_BUFFER_SIZE - rx_pkt_tlv_len)) {
184 		*frag_len = RX_MONITOR_BUFFER_SIZE - rx_pkt_tlv_len -
185 					l2_hdr_pad;
186 		*total_len -= *frag_len;
187 	} else {
188 		*frag_len = *total_len;
189 		*total_len = 0;
190 	}
191 }
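
/*
 * Worked example for dp_mon_adjust_frag_len() above (illustrative only,
 * assuming RX_MONITOR_BUFFER_SIZE of 2048 and soc->rx_pkt_tlv_size of 384,
 * i.e. 1664 payload bytes per monitor buffer; actual values are target
 * dependent). For an MSDU of 4000 bytes with l2_hdr_pad of 2:
 *	1st call: *frag_len = 1664 - 2 = 1662, *total_len = 4000 - 1662 = 2338
 *	2nd call: *frag_len = 1662,            *total_len = 2338 - 1662 = 676
 *	3rd call: *frag_len = 676,             *total_len = 0
 */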
192 
193 /**
194  * dp_rx_mon_frag_adjust_frag_len() - MPDU and MSDU may spread across
195  * multiple nbufs. This function returns the data length carried in
196  * the current fragment.
197  * It takes max_limit as an input for the current buffer (the limit changes
198  * based on the decap type and the buffer's position within the MSDU).
199  *
200  * If the MSDU is divided into multiple buffers, the max limit for each
201  * buffer is as shown below.
202  * Decap type Non-Raw
203  *--------------------------------
204  *|  1st  |  2nd  | ...  | Last   |
205  *| 1662  |  1664 | 1664 | <=1664 |
206  *--------------------------------
207  * Decap type Raw
208  *--------------------------------
209  *|  1st  |  2nd  | ...  | Last   |
210  *| 1664  |  1664 | 1664 | <=1664 |
211  *--------------------------------
212  *
213  * It also calculates whether the current buffer has a placeholder for the padding byte.
214  *  --------------------------------
215  * |       MAX LIMIT(1662/1664)     |
216  *  --------------------------------
217  * | Actual Data | Pad byte Pholder |
218  *  --------------------------------
219  *
220  * @total_len: Remaining data length.
221  * @frag_len:  Data length in this fragment.
222  * @max_limit: Max limit of current buffer/MSDU.
223  */
224 #ifdef DP_RX_MON_MEM_FRAG
225 static inline
226 void dp_rx_mon_frag_adjust_frag_len(uint32_t *total_len, uint32_t *frag_len,
227 				    uint32_t max_limit)
228 {
229 	if (*total_len >= max_limit) {
230 		*frag_len = max_limit;
231 		*total_len -= *frag_len;
232 	} else {
233 		*frag_len = *total_len;
234 		*total_len = 0;
235 	}
236 }
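
/*
 * Worked example for dp_rx_mon_frag_adjust_frag_len() using the limits from
 * the table above (non-raw decap, illustrative MSDU of 4000 bytes): the
 * caller passes max_limit = 1662 for the first buffer and 1664 afterwards.
 *	1st call: *frag_len = 1662, *total_len = 2338
 *	2nd call: *frag_len = 1664, *total_len = 674
 *	3rd call: *frag_len = 674,  *total_len = 0
 */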
237 
238 /**
239  * DP_RX_MON_GET_NBUF_FROM_DESC() - Get nbuf from desc; NULL in the frag path
240  */
241 #define DP_RX_MON_GET_NBUF_FROM_DESC(rx_desc) \
242 	NULL
243 
244 /**
245  * dp_rx_mon_add_msdu_to_list_failure_handler() - Handler for nbuf buffer
246  *                                                  attach failure
247  *
248  * @rx_tlv_hdr: rx_tlv_hdr
249  * @pdev: struct dp_pdev *
250  * @last: skb pointing to last skb in chained list at any moment
251  * @head_msdu: parent skb in the chained list
252  * @tail_msdu: Last skb in the chained list
253  * @func_name: caller function name
254  *
255  * Return: void
256  */
257 static inline void
258 dp_rx_mon_add_msdu_to_list_failure_handler(void *rx_tlv_hdr,
259 					   struct dp_pdev *pdev,
260 					   qdf_nbuf_t *last,
261 					   qdf_nbuf_t *head_msdu,
262 					   qdf_nbuf_t *tail_msdu,
263 					   const char *func_name)
264 {
265 	DP_STATS_INC(pdev, replenish.nbuf_alloc_fail, 1);
266 	qdf_frag_free(rx_tlv_hdr);
267 	if (head_msdu)
268 		qdf_nbuf_list_free(*head_msdu);
269 	dp_err("[%s] failed to allocate subsequent parent buffer to hold all frag\n",
270 	       func_name);
271 	if (head_msdu)
272 		*head_msdu = NULL;
273 	if (last)
274 		*last = NULL;
275 	if (tail_msdu)
276 		*tail_msdu = NULL;
277 }
278 
279 /**
280  * dp_rx_mon_get_paddr_from_desc() - Get paddr from desc
281  */
282 static inline
283 qdf_dma_addr_t dp_rx_mon_get_paddr_from_desc(struct dp_rx_desc *rx_desc)
284 {
285 	return rx_desc->paddr_buf_start;
286 }
287 
288 /**
289  * DP_RX_MON_IS_BUFFER_ADDR_NULL() - Check if the buffer address received from hw is NULL
290  */
291 #define DP_RX_MON_IS_BUFFER_ADDR_NULL(rx_desc) \
292 	(!(rx_desc->rx_buf_start))
293 
294 #define DP_RX_MON_IS_MSDU_NOT_NULL(msdu) \
295 	true
296 
297 /**
298  * dp_rx_mon_buffer_free() - Free nbuf or frag memory
299  * Free nbuf if feature is disabled, else free frag.
300  *
301  * @rx_desc: Rx desc
302  */
303 static inline void
304 dp_rx_mon_buffer_free(struct dp_rx_desc *rx_desc)
305 {
306 	qdf_frag_free(rx_desc->rx_buf_start);
307 }
308 
309 /**
310  * dp_rx_mon_buffer_unmap() - Unmap nbuf or frag memory
311  * Unmap nbuf if feature is disabled, else unmap frag.
312  *
313  * @soc: struct dp_soc *
314  * @rx_desc: struct dp_rx_desc *
315  * @size: Size to be unmapped
316  */
317 static inline void
318 dp_rx_mon_buffer_unmap(struct dp_soc *soc, struct dp_rx_desc *rx_desc,
319 		       uint16_t size)
320 {
321 	qdf_mem_unmap_page(soc->osdev, rx_desc->paddr_buf_start,
322 			   size, QDF_DMA_FROM_DEVICE);
323 }
324 
325 /**
326  * dp_rx_mon_alloc_parent_buffer() - Allocate parent buffer to hold
327  * radiotap header and accommodate all frag memory in nr_frag.
328  *
329  * @head_msdu: Ptr to hold allocated Msdu
330  *
331  * Return: QDF_STATUS
332  */
333 static inline
334 QDF_STATUS dp_rx_mon_alloc_parent_buffer(qdf_nbuf_t *head_msdu)
335 {
336 	/*
337 	 * Headroom should accommodate radiotap header
338 	 * and protocol and flow tag for all frag
339 	 * Length reserved to accommodate Radiotap header
340 	 * is 128 bytes and length reserved for Protocol
341 	 * flow tag will vary based on QDF_NBUF_MAX_FRAGS.
342 	 */
343 	/*  -------------------------------------------------
344 	 * |       Protocol & Flow TAG      | Radiotap header|
345 	 * |                                |  Length(128 B) |
346 	 * |  ((4* QDF_NBUF_MAX_FRAGS) * 2) |                |
347 	 *  -------------------------------------------------
348 	 */
349 
350 	*head_msdu = qdf_nbuf_alloc_no_recycler(DP_RX_MON_MAX_MONITOR_HEADER,
351 						DP_RX_MON_MAX_MONITOR_HEADER, 4);
352 
353 	if (!(*head_msdu))
354 		return QDF_STATUS_E_FAILURE;
355 
356 	qdf_mem_zero(qdf_nbuf_head(*head_msdu), qdf_nbuf_headroom(*head_msdu));
357 
358 	/* Set *head_msdu->next as NULL as all msdus are
359 	 * mapped via nr frags
360 	 */
361 	qdf_nbuf_set_next(*head_msdu, NULL);
362 
363 	return QDF_STATUS_SUCCESS;
364 }
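
/*
 * Headroom sizing sketch for dp_rx_mon_alloc_parent_buffer() above
 * (illustrative; assumes QDF_NBUF_MAX_FRAGS is 16): the protocol/flow tag
 * area is (4 * QDF_NBUF_MAX_FRAGS) * 2 = 128 bytes and the radiotap area is
 * 128 bytes, so DP_RX_MON_MAX_MONITOR_HEADER is expected to cover at least
 * 256 bytes of headroom. The trailing argument of 4 passed to
 * qdf_nbuf_alloc_no_recycler() requests 4-byte alignment of the data pointer.
 */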
365 
366 /**
367  * dp_rx_mon_parse_desc_buffer() - Parse desc buffer
368  *
369  * Below code will parse desc buffer, handle continuation frame,
370  * adjust frag length and update l2_hdr_padding
371  *
372  * @dp_soc             : struct dp_soc*
373  * @msdu_info          : struct hal_rx_msdu_desc_info*
374  * @is_frag_p          : is_frag *
375  * @total_frag_len_p   : Remaining frag len to be updated
376  * @frag_len_p         : frag len
377  * @l2_hdr_offset_p    : l2 hdr offset
378  * @rx_desc_tlv        : rx_desc_tlv
379  * @is_frag_non_raw_p  : Non raw frag
380  * @data               : NBUF Data
381  */
382 static inline void
383 dp_rx_mon_parse_desc_buffer(struct dp_soc *dp_soc,
384 			    struct hal_rx_msdu_desc_info *msdu_info,
385 			    bool *is_frag_p, uint32_t *total_frag_len_p,
386 			    uint32_t *frag_len_p, uint16_t *l2_hdr_offset_p,
387 			    qdf_frag_t rx_desc_tlv,
388 			    bool *is_frag_non_raw_p, void *data)
389 {
390 	struct hal_rx_mon_dest_buf_info frame_info;
391 	uint16_t tot_payload_len =
392 			RX_MONITOR_BUFFER_SIZE - dp_soc->rx_pkt_tlv_size;
393 
394 	if (msdu_info->msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION) {
395 		/* First buffer of MSDU */
396 		if (!(*is_frag_p)) {
397 			/* Set total frag_len from msdu_len */
398 			*total_frag_len_p = msdu_info->msdu_len;
399 
400 			*is_frag_p = true;
401 			if (HAL_HW_RX_DECAP_FORMAT_RAW ==
402 			    hal_rx_tlv_decap_format_get(dp_soc->hal_soc,
403 							rx_desc_tlv)) {
404 				*l2_hdr_offset_p =
405 					DP_RX_MON_RAW_L2_HDR_PAD_BYTE;
406 				frame_info.is_decap_raw = 1;
407 			} else {
408 				*l2_hdr_offset_p =
409 					DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE;
410 				frame_info.is_decap_raw = 0;
411 				*is_frag_non_raw_p = true;
412 			}
413 			dp_rx_mon_frag_adjust_frag_len(total_frag_len_p,
414 						       frag_len_p,
415 						       tot_payload_len -
416 						       *l2_hdr_offset_p);
417 
418 			frame_info.first_buffer = 1;
419 			frame_info.last_buffer = 0;
420 			hal_rx_priv_info_set_in_tlv(dp_soc->hal_soc,
421 						    rx_desc_tlv,
422 						    (uint8_t *)&frame_info,
423 						    sizeof(frame_info));
424 		} else {
425 			/*
426 			 * Continuation Middle frame
427 			 * Here max limit will be same for Raw and Non raw case.
428 			 */
429 			*l2_hdr_offset_p = DP_RX_MON_RAW_L2_HDR_PAD_BYTE;
430 			dp_rx_mon_frag_adjust_frag_len(total_frag_len_p,
431 						       frag_len_p,
432 						       tot_payload_len);
433 
434 			/* Update frame info if is non raw frame */
435 			if (*is_frag_non_raw_p)
436 				frame_info.is_decap_raw = 0;
437 			else
438 				frame_info.is_decap_raw = 1;
439 
440 			frame_info.first_buffer = 0;
441 			frame_info.last_buffer = 0;
442 			hal_rx_priv_info_set_in_tlv(dp_soc->hal_soc,
443 						    rx_desc_tlv,
444 						    (uint8_t *)&frame_info,
445 						    sizeof(frame_info));
446 		}
447 	} else {
448 		/*
449 		 * Last buffer of an MSDU spread across multiple buffers
450 		 * Here max limit will be same for Raw and Non raw case.
451 		 */
452 		if (*is_frag_p) {
453 			*l2_hdr_offset_p = DP_RX_MON_RAW_L2_HDR_PAD_BYTE;
454 
455 			dp_rx_mon_frag_adjust_frag_len(total_frag_len_p,
456 						       frag_len_p,
457 						       tot_payload_len);
458 
459 			/* Update frame info if is non raw frame */
460 			if (*is_frag_non_raw_p)
461 				frame_info.is_decap_raw = 0;
462 			else
463 				frame_info.is_decap_raw = 1;
464 
465 			frame_info.first_buffer = 0;
466 			frame_info.last_buffer = 1;
467 			hal_rx_priv_info_set_in_tlv(dp_soc->hal_soc,
468 						    rx_desc_tlv,
469 						    (uint8_t *)&frame_info,
470 						    sizeof(frame_info));
471 		} else {
472 			/* MSDU with single buffer */
473 			*frag_len_p = msdu_info->msdu_len;
474 			if (HAL_HW_RX_DECAP_FORMAT_RAW ==
475 			    hal_rx_tlv_decap_format_get(dp_soc->hal_soc,
476 							rx_desc_tlv)) {
477 				*l2_hdr_offset_p =
478 					DP_RX_MON_RAW_L2_HDR_PAD_BYTE;
479 				frame_info.is_decap_raw = 1;
480 			} else {
481 				*l2_hdr_offset_p =
482 					DP_RX_MON_NONRAW_L2_HDR_PAD_BYTE;
483 				frame_info.is_decap_raw = 0;
484 			}
485 
486 			frame_info.first_buffer = 1;
487 			frame_info.last_buffer = 1;
488 			hal_rx_priv_info_set_in_tlv(dp_soc->hal_soc,
489 						    rx_desc_tlv,
490 						    (uint8_t *)&frame_info,
491 						    sizeof(frame_info));
492 		}
493 		/* Reset bool after complete processing of MSDU */
494 		*is_frag_p = false;
495 		*is_frag_non_raw_p = false;
496 	}
497 }
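
/*
 * Summary of the TLV private info written by dp_rx_mon_parse_desc_buffer()
 * above for each buffer position within an MSDU (derived from the branches
 * in the function):
 *
 *	Buffer position		first_buffer	last_buffer	max payload
 *	First of multi-buffer	1		0		tot_payload_len - *l2_hdr_offset_p
 *	Middle (continuation)	0		0		tot_payload_len
 *	Last of multi-buffer	0		1		tot_payload_len
 *	Single-buffer MSDU	1		1		msdu_len
 */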
498 
499 /**
500  * dp_rx_mon_buffer_set_pktlen() - set pktlen for buffer; no-op in the frag path
501  */
502 static inline void dp_rx_mon_buffer_set_pktlen(qdf_nbuf_t msdu, uint32_t size)
503 {
504 }
505 
506 /**
507  * dp_rx_mon_add_msdu_to_list()- Add msdu to list and update head_msdu
508  *      It will add reaped buffer frag to nr frag of parent msdu.
509  * @soc: DP soc handle
510  * @head_msdu: NULL if first time called else &msdu
511  * @msdu: Msdu where frag address needs to be added via nr_frag
512  * @last: Used to traverse in list if this feature is disabled.
513  * @rx_desc_tlv: Frag address
514  * @frag_len: Frag len
515  * @l2_hdr_offset: l2 hdr padding
516  */
517 static inline
518 QDF_STATUS dp_rx_mon_add_msdu_to_list(struct dp_soc *soc, qdf_nbuf_t *head_msdu,
519 				      qdf_nbuf_t msdu, qdf_nbuf_t *last,
520 				      qdf_frag_t rx_desc_tlv, uint32_t frag_len,
521 				      uint32_t l2_hdr_offset)
522 {
523 	uint32_t num_frags;
524 	qdf_nbuf_t msdu_curr;
525 
526 	/* Here head_msdu and *head_msdu must not be NULL */
527 	/* Dont add frag to skb if frag length is zero. Drop frame */
528 	if (qdf_unlikely(!frag_len || !head_msdu || !(*head_msdu))) {
529 		dp_err("[%s] frag_len[%d] || head_msdu[%pK] || *head_msdu is Null while adding frag to skb\n",
530 		       __func__, frag_len, head_msdu);
531 		return QDF_STATUS_E_FAILURE;
532 	}
533 
534 	/* In case of first desc of MPDU, assign curr msdu to *head_msdu */
535 	if (!qdf_nbuf_get_nr_frags(*head_msdu))
536 		msdu_curr = *head_msdu;
537 	else
538 		msdu_curr = *last;
539 
540 	/* Current msdu must not be NULL */
541 	if (qdf_unlikely(!msdu_curr)) {
542 		dp_err("[%s] Current msdu can't be Null while adding frag to skb\n",
543 		       __func__);
544 		return QDF_STATUS_E_FAILURE;
545 	}
546 
547 	num_frags = qdf_nbuf_get_nr_frags(msdu_curr);
548 	if (num_frags < QDF_NBUF_MAX_FRAGS) {
549 		qdf_nbuf_add_rx_frag(rx_desc_tlv, msdu_curr,
550 				     soc->rx_mon_pkt_tlv_size,
551 				     frag_len + l2_hdr_offset,
552 				     RX_MONITOR_BUFFER_SIZE,
553 				     false);
554 		if (*last != msdu_curr)
555 			*last = msdu_curr;
556 		return QDF_STATUS_SUCCESS;
557 	}
558 
559 	/* Execution will reach here only if num_frags == QDF_NBUF_MAX_FRAGS */
560 	msdu_curr = NULL;
561 	if ((dp_rx_mon_alloc_parent_buffer(&msdu_curr))
562 	    != QDF_STATUS_SUCCESS)
563 		return QDF_STATUS_E_FAILURE;
564 
565 	qdf_nbuf_add_rx_frag(rx_desc_tlv, msdu_curr, soc->rx_mon_pkt_tlv_size,
566 			     frag_len + l2_hdr_offset, RX_MONITOR_BUFFER_SIZE,
567 			     false);
568 
569 	/* Add allocated nbuf in the chain */
570 	qdf_nbuf_set_next(*last, msdu_curr);
571 
572 	/* Assign current msdu to last to avoid traversal */
573 	*last = msdu_curr;
574 
575 	return QDF_STATUS_SUCCESS;
576 }
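
/*
 * Rough usage sketch for the frag-based helpers above (hypothetical caller;
 * the real monitor destination reap loop also walks MSDU link descriptors,
 * tracks PPDU ids and handles error/drop cases):
 *
 *	qdf_nbuf_t head_msdu = NULL, last = NULL, tail_msdu = NULL;
 *	bool is_frag = false, is_frag_non_raw = false;
 *	uint32_t total_frag_len = 0, frag_len = 0;
 *	uint16_t l2_hdr_offset = 0;
 *
 *	dp_rx_mon_alloc_parent_buffer(&head_msdu);
 *	for each buffer reaped for this MPDU {
 *		dp_rx_mon_parse_desc_buffer(soc, &msdu_info, &is_frag,
 *					    &total_frag_len, &frag_len,
 *					    &l2_hdr_offset, rx_tlv,
 *					    &is_frag_non_raw, rx_tlv);
 *		dp_rx_mon_add_msdu_to_list(soc, &head_msdu, NULL, &last,
 *					   rx_tlv, frag_len, l2_hdr_offset);
 *	}
 *	dp_rx_mon_init_tail_msdu(&head_msdu, NULL, last, &tail_msdu);
 *	dp_rx_mon_remove_raw_frame_fcs_len(soc, &head_msdu, &tail_msdu);
 */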
577 
578 /**
579  * dp_rx_mon_init_tail_msdu() - Initialize tail msdu
580  *
581  * @head_msdu: Parent buffer to hold MPDU data
582  * @msdu: Msdu to be updated in tail_msdu
583  * @last: last msdu
584  * @tail_msdu: Last msdu
585  */
586 static inline
587 void dp_rx_mon_init_tail_msdu(qdf_nbuf_t *head_msdu, qdf_nbuf_t msdu,
588 			      qdf_nbuf_t last, qdf_nbuf_t *tail_msdu)
589 {
590 	if (!head_msdu || !(*head_msdu)) {
591 		*tail_msdu = NULL;
592 		return;
593 	}
594 
595 	if (last)
596 		qdf_nbuf_set_next(last, NULL);
597 	*tail_msdu = last;
598 }
599 
600 /**
601  * dp_rx_mon_remove_raw_frame_fcs_len() - Remove FCS length for Raw Frame
602  *
603  * If feature is disabled, then removal happens in restitch logic.
604  *
605  * @soc: Datapath soc handle
606  * @head_msdu: Head msdu
607  * @tail_msdu: Tail msdu
608  */
609 static inline
610 void dp_rx_mon_remove_raw_frame_fcs_len(struct dp_soc *soc,
611 					qdf_nbuf_t *head_msdu,
612 					qdf_nbuf_t *tail_msdu)
613 {
614 	qdf_frag_t addr;
615 
616 	if (qdf_unlikely(!head_msdu || !tail_msdu || !(*head_msdu)))
617 		return;
618 
619 	/* If *head_msdu is valid, then *tail_msdu must be valid */
620 	/* If head_msdu is valid, then it must have nr_frags */
621 	/* If tail_msdu is valid, then it must have nr_frags */
622 
623 	/* Strip FCS_LEN for Raw frame */
624 	addr = qdf_nbuf_get_frag_addr(*head_msdu, 0);
625 	addr -= soc->rx_mon_pkt_tlv_size;
626 	if (hal_rx_tlv_decap_format_get(soc->hal_soc, addr) ==
627 		HAL_HW_RX_DECAP_FORMAT_RAW) {
628 		qdf_nbuf_trim_add_frag_size(*tail_msdu,
629 			qdf_nbuf_get_nr_frags(*tail_msdu) - 1,
630 					-HAL_RX_FCS_LEN, 0);
631 	}
632 }
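
/*
 * Note: for raw decap frames the trailing 4 byte 802.11 FCS (HAL_RX_FCS_LEN)
 * sits in the last frag of *tail_msdu, so the helper above trims that frag
 * rather than adjusting the nbuf length.
 */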
633 
634 /**
635  * dp_rx_mon_get_buffer_data() - Get data from desc buffer
636  * @rx_desc: desc
637  *
638  * Return: address containing actual tlv content
639  */
640 static inline
641 uint8_t *dp_rx_mon_get_buffer_data(struct dp_rx_desc *rx_desc)
642 {
643 	return rx_desc->rx_buf_start;
644 }
645 
646 #else
647 
648 #define DP_RX_MON_GET_NBUF_FROM_DESC(rx_desc) \
649 	(rx_desc->nbuf)
650 
651 static inline void
652 dp_rx_mon_add_msdu_to_list_failure_handler(void *rx_tlv_hdr,
653 					   struct dp_pdev *pdev,
654 					   qdf_nbuf_t *last,
655 					   qdf_nbuf_t *head_msdu,
656 					   qdf_nbuf_t *tail_msdu,
657 					   const char *func_name)
658 {
659 }
660 
661 static inline
662 qdf_dma_addr_t dp_rx_mon_get_paddr_from_desc(struct dp_rx_desc *rx_desc)
663 {
664 	qdf_dma_addr_t paddr = 0;
665 	qdf_nbuf_t msdu = NULL;
666 
667 	msdu = rx_desc->nbuf;
668 	if (msdu)
669 		paddr = qdf_nbuf_get_frag_paddr(msdu, 0);
670 
671 	return paddr;
672 }
673 
674 #define DP_RX_MON_IS_BUFFER_ADDR_NULL(rx_desc) \
675 	(!(rx_desc->nbuf))
676 
677 #define DP_RX_MON_IS_MSDU_NOT_NULL(msdu) \
678 	(msdu)
679 
680 static inline void
681 dp_rx_mon_buffer_free(struct dp_rx_desc *rx_desc)
682 {
683 	qdf_nbuf_free(rx_desc->nbuf);
684 }
685 
686 static inline void
687 dp_rx_mon_buffer_unmap(struct dp_soc *soc, struct dp_rx_desc *rx_desc,
688 		       uint16_t size)
689 {
690 	qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
691 				     QDF_DMA_FROM_DEVICE, size);
692 }
693 
694 static inline
695 QDF_STATUS dp_rx_mon_alloc_parent_buffer(qdf_nbuf_t *head_msdu)
696 {
697 	return QDF_STATUS_SUCCESS;
698 }
699 
700 static inline void
701 dp_rx_mon_parse_desc_buffer(struct dp_soc *dp_soc,
702 			    struct hal_rx_msdu_desc_info *msdu_info,
703 			    bool *is_frag_p, uint32_t *total_frag_len_p,
704 			    uint32_t *frag_len_p, uint16_t *l2_hdr_offset_p,
705 			    qdf_frag_t rx_desc_tlv,
706 			    bool *is_frag_non_raw_p, void *data)
707 {
708 	/*
709 	 * HW structures call this L3 header padding
710 	 * -- even though this is actually the offset
711 	 * from the buffer beginning where the L2
712 	 * header begins.
713 	 */
714 	*l2_hdr_offset_p =
715 	hal_rx_msdu_end_l3_hdr_padding_get(dp_soc->hal_soc, data);
716 
717 	if (msdu_info->msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION) {
718 		if (!*(is_frag_p)) {
719 			*total_frag_len_p = msdu_info->msdu_len;
720 			*is_frag_p = true;
721 		}
722 		dp_mon_adjust_frag_len(dp_soc, total_frag_len_p, frag_len_p,
723 				       *l2_hdr_offset_p);
724 	} else {
725 		if (*is_frag_p) {
726 			dp_mon_adjust_frag_len(dp_soc, total_frag_len_p,
727 					       frag_len_p,
728 					       *l2_hdr_offset_p);
729 		} else {
730 			*frag_len_p = msdu_info->msdu_len;
731 		}
732 		*is_frag_p = false;
733 	}
734 }
735 
736 static inline void dp_rx_mon_buffer_set_pktlen(qdf_nbuf_t msdu, uint32_t size)
737 {
738 	qdf_nbuf_set_pktlen(msdu, size);
739 }
740 
741 static inline
742 QDF_STATUS dp_rx_mon_add_msdu_to_list(struct dp_soc *soc, qdf_nbuf_t *head_msdu,
743 				      qdf_nbuf_t msdu, qdf_nbuf_t *last,
744 				      qdf_frag_t rx_desc_tlv, uint32_t frag_len,
745 				      uint32_t l2_hdr_offset)
746 {
747 	if (head_msdu && !*head_msdu) {
748 		*head_msdu = msdu;
749 	} else {
750 		if (*last)
751 			qdf_nbuf_set_next(*last, msdu);
752 	}
753 	*last = msdu;
754 	return QDF_STATUS_SUCCESS;
755 }
756 
757 static inline
758 void dp_rx_mon_init_tail_msdu(qdf_nbuf_t *head_msdu, qdf_nbuf_t msdu,
759 			      qdf_nbuf_t last, qdf_nbuf_t *tail_msdu)
760 {
761 	if (last)
762 		qdf_nbuf_set_next(last, NULL);
763 
764 	*tail_msdu = msdu;
765 }
766 
767 static inline
768 void dp_rx_mon_remove_raw_frame_fcs_len(struct dp_soc *soc,
769 					qdf_nbuf_t *head_msdu,
770 					qdf_nbuf_t *tail_msdu)
771 {
772 }
773 
774 static inline
775 uint8_t *dp_rx_mon_get_buffer_data(struct dp_rx_desc *rx_desc)
776 {
777 	qdf_nbuf_t msdu = NULL;
778 	uint8_t *data = NULL;
779 
780 	msdu = rx_desc->nbuf;
781 	if (qdf_likely(msdu))
782 		data = qdf_nbuf_data(msdu);
783 	return data;
784 }
785 
786 #endif
787 
788 /**
789  * dp_rx_cookie_2_mon_link_desc() - Retrieve Link descriptor based on target
790  * @pdev: core physical device context
791  * @buf_info: structure holding the buffer info
792  * @mac_id: mac number
793  *
794  * Return: link descriptor address
795  */
796 static inline
797 void *dp_rx_cookie_2_mon_link_desc(struct dp_pdev *pdev,
798 				   struct hal_buf_info buf_info,
799 				   uint8_t mac_id)
800 {
801 	if (pdev->soc->wlan_cfg_ctx->rxdma1_enable)
802 		return dp_rx_cookie_2_mon_link_desc_va(pdev, &buf_info,
803 						       mac_id);
804 
805 	return dp_rx_cookie_2_link_desc_va(pdev->soc, &buf_info);
806 }
807 
808 /**
809  * dp_rx_monitor_link_desc_return() - Return Link descriptor based on target
810  * @pdev: core physical device context
811  * @p_last_buf_addr_info: MPDU Link descriptor
812  * @mac_id: mac number
 * @bm_action: buffer manager action to take on the returned link descriptor
813  *
814  * Return: QDF_STATUS
815  */
816 static inline
817 QDF_STATUS dp_rx_monitor_link_desc_return(struct dp_pdev *pdev,
818 					  hal_buff_addrinfo_t
819 					  p_last_buf_addr_info,
820 					  uint8_t mac_id, uint8_t bm_action)
821 {
822 	if (pdev->soc->wlan_cfg_ctx->rxdma1_enable)
823 		return dp_rx_mon_link_desc_return(pdev, p_last_buf_addr_info,
824 						  mac_id);
825 
826 	return dp_rx_link_desc_return_by_addr(pdev->soc, p_last_buf_addr_info,
827 				      bm_action);
828 }
829 
830 static inline bool dp_is_rxdma_dst_ring_common(struct dp_pdev *pdev)
831 {
832 	struct dp_soc *soc = pdev->soc;
833 
834 	return (soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev == 1);
835 }
836 
837 /**
838  * dp_rxdma_get_mon_dst_ring() - Return the pointer to rxdma_err_dst_ring
839  *					or mon_dst_ring based on the target
840  * @pdev: core physical device context
841  * @mac_for_pdev: mac_id number
842  *
843  * Return: ring address
844  */
845 static inline
846 void *dp_rxdma_get_mon_dst_ring(struct dp_pdev *pdev,
847 				uint8_t mac_for_pdev)
848 {
849 	if (pdev->soc->wlan_cfg_ctx->rxdma1_enable)
850 		return pdev->soc->rxdma_mon_dst_ring[mac_for_pdev].hal_srng;
851 
852 	/* For targets with 1 RXDMA DST ring for both mac */
853 	if (dp_is_rxdma_dst_ring_common(pdev))
854 		return pdev->soc->rxdma_err_dst_ring[0].hal_srng;
855 
856 	return pdev->soc->rxdma_err_dst_ring[mac_for_pdev].hal_srng;
857 }
858 
859 /**
860  * dp_rxdma_get_mon_buf_ring() - Return monitor buf ring address
861  *				    based on target
862  * @pdev: core physical device context
863  * @mac_for_pdev: mac id number
864  *
865  * Return: ring address
866  */
867 static inline
868 struct dp_srng *dp_rxdma_get_mon_buf_ring(struct dp_pdev *pdev,
869 					  uint8_t mac_for_pdev)
870 {
871 	if (pdev->soc->wlan_cfg_ctx->rxdma1_enable)
872 		return &pdev->soc->rxdma_mon_buf_ring[mac_for_pdev];
873 
874 	/* For MCL there is only 1 rx refill ring */
875 	return &pdev->soc->rx_refill_buf_ring[0];
876 }
877 
878 /**
879  * dp_rx_get_mon_desc() - Return Rx descriptor based on target
880  * @soc: soc handle
881  * @cookie: cookie value
882  *
883  * Return: Rx descriptor
884  */
885 static inline
886 struct dp_rx_desc *dp_rx_get_mon_desc(struct dp_soc *soc,
887 				      uint32_t cookie)
888 {
889 	if (soc->wlan_cfg_ctx->rxdma1_enable)
890 		return dp_rx_cookie_2_va_mon_buf(soc, cookie);
891 
892 	return soc->arch_ops.dp_rx_desc_cookie_2_va(soc, cookie);
893 }
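
/*
 * The dispatch helpers above pick the ring/descriptor layout at runtime:
 * when wlan_cfg rxdma1_enable is set (targets with a dedicated monitor RXDMA
 * engine), the per-mac rxdma_mon_dst_ring, rxdma_mon_buf_ring and the
 * monitor descriptor pool are used; otherwise the monitor path shares
 * rxdma_err_dst_ring, rx_refill_buf_ring[0] and the regular Rx descriptor
 * pool with the normal Rx path.
 */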
894 
895 #ifdef QCA_MONITOR_PKT_SUPPORT
896 /*
897  * dp_mon_htt_dest_srng_setup(): monitor dest srng setup
898  * @soc: DP SOC handle
899  * @pdev: DP PDEV handle
900  * @mac_id: MAC ID
901  * @mac_for_pdev: PDEV mac
902  *
903  * Return: status: QDF_STATUS_SUCCESS - Success, non-zero: Failure
904  */
905 QDF_STATUS dp_mon_htt_dest_srng_setup(struct dp_soc *soc,
906 				      struct dp_pdev *pdev,
907 				      int mac_id,
908 				      int mac_for_pdev);
909 
910 /*
911  * dp_mon_dest_rings_deinit(): deinit monitor dest rings
912  * @pdev: DP PDEV handle
913  * @lmac_id: MAC ID
914  *
915  * Return: None
916  */
917 void dp_mon_dest_rings_deinit(struct dp_pdev *pdev, int lmac_id);
918 
919 /*
920  * dp_mon_dest_rings_free(): free monitor dest rings
921  * @pdev: DP PDEV handle
922  * @lmac_id: MAC ID
923  *
924  * Return: None
925  */
926 void dp_mon_dest_rings_free(struct dp_pdev *pdev, int lmac_id);
927 
928 /*
929  * dp_mon_dest_rings_init(): init monitor dest rings
930  * @pdev: DP PDEV handle
931  * @lmac_id: MAC ID
932  *
933  * Return: status: QDF_STATUS_SUCCESS - Success, non-zero: Failure
934  */
935 QDF_STATUS dp_mon_dest_rings_init(struct dp_pdev *pdev, int lmac_id);
936 
937 /*
938  * dp_mon_dest_rings_alloc(): allocate monitor dest rings
939  * @pdev: DP PDEV handle
940  * @lmac_id: MAC ID
941  *
942  * Return: status: QDF_STATUS_SUCCESS - Success, non-zero: Failure
943  */
944 QDF_STATUS dp_mon_dest_rings_alloc(struct dp_pdev *pdev, int lmac_id);
945 
946 #else
947 static inline QDF_STATUS dp_mon_htt_dest_srng_setup(struct dp_soc *soc,
948 				      struct dp_pdev *pdev,
949 				      int mac_id,
950 				      int mac_for_pdev)
951 {
952 	return QDF_STATUS_SUCCESS;
953 }
954 
955 static inline void dp_mon_dest_rings_deinit(struct dp_pdev *pdev, int lmac_id)
956 {
957 }
958 
959 static inline void dp_mon_dest_rings_free(struct dp_pdev *pdev, int lmac_id)
960 {
961 }
962 
963 static inline
964 QDF_STATUS dp_mon_dest_rings_init(struct dp_pdev *pdev, int lmac_id)
965 {
966 	return QDF_STATUS_SUCCESS;
967 }
968 
969 static inline
970 QDF_STATUS dp_mon_dest_rings_alloc(struct dp_pdev *pdev, int lmac_id)
971 {
972 	return QDF_STATUS_SUCCESS;
973 }
974 #endif /* QCA_MONITOR_PKT_SUPPORT */
975 
976 #endif /* _DP_RX_MON_1_0_H_ */
977