xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.h (revision eb134979c1cacbd1eb12caa116020b86fad96e1c)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _DP_RX_H
20 #define _DP_RX_H
21 
22 #include "hal_rx.h"
23 #include "dp_tx.h"
24 #include "dp_peer.h"
25 #include "dp_internal.h"
26 
27 #ifdef RXDMA_OPTIMIZATION
28 #ifndef RX_DATA_BUFFER_ALIGNMENT
29 #define RX_DATA_BUFFER_ALIGNMENT        128
30 #endif
31 #ifndef RX_MONITOR_BUFFER_ALIGNMENT
32 #define RX_MONITOR_BUFFER_ALIGNMENT     128
33 #endif
34 #else /* RXDMA_OPTIMIZATION */
35 #define RX_DATA_BUFFER_ALIGNMENT        4
36 #define RX_MONITOR_BUFFER_ALIGNMENT     4
37 #endif /* RXDMA_OPTIMIZATION */
38 
39 #ifdef QCA_HOST2FW_RXBUF_RING
40 #define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW1_BM
41 /* RBM value used for re-injecting defragmented packets into REO */
42 #define DP_DEFRAG_RBM HAL_RX_BUF_RBM_SW3_BM
43 #else
44 #define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW3_BM
45 #define DP_DEFRAG_RBM DP_WBM2SW_RBM
46 #endif /* QCA_HOST2FW_RXBUF_RING */
47 
48 #define RX_BUFFER_RESERVATION   0
49 
50 #define DP_PEER_METADATA_PEER_ID_MASK	0x0000ffff
51 #define DP_PEER_METADATA_PEER_ID_SHIFT	0
52 #define DP_PEER_METADATA_VDEV_ID_MASK	0x003f0000
53 #define DP_PEER_METADATA_VDEV_ID_SHIFT	16
54 
55 #define DP_PEER_METADATA_PEER_ID_GET(_peer_metadata)		\
56 	(((_peer_metadata) & DP_PEER_METADATA_PEER_ID_MASK)	\
57 			>> DP_PEER_METADATA_PEER_ID_SHIFT)
58 
59 #define DP_PEER_METADATA_VDEV_ID_GET(_peer_metadata)		\
60 	(((_peer_metadata) & DP_PEER_METADATA_VDEV_ID_MASK)	\
61 			>> DP_PEER_METADATA_VDEV_ID_SHIFT)
62 
63 #define DP_RX_DESC_MAGIC 0xdec0de
64 
65 /**
66  * enum dp_rx_desc_state
67  *
68  * @RX_DESC_REPLENISH: rx desc replenished
69  * @RX_DESC_FREELIST: rx desc in freelist
70  */
71 enum dp_rx_desc_state {
72 	RX_DESC_REPLENISHED,
73 	RX_DESC_IN_FREELIST,
74 };
75 
76 /**
77  * struct dp_rx_desc_dbg_info
78  *
79  * @freelist_caller: name of the function that put the
80  *  the rx desc in freelist
81  * @freelist_ts: timestamp when the rx desc is put in
82  *  a freelist
83  * @replenish_caller: name of the function that last
84  *  replenished the rx desc
85  * @replenish_ts: last replenish timestamp
86  */
87 struct dp_rx_desc_dbg_info {
88 	char freelist_caller[QDF_MEM_FUNC_NAME_SIZE];
89 	uint64_t freelist_ts;
90 	char replenish_caller[QDF_MEM_FUNC_NAME_SIZE];
91 	uint64_t replenish_ts;
92 };
93 
94 /**
95  * struct dp_rx_desc
96  *
97  * @nbuf		: VA of the "skb" posted
98  * @rx_buf_start	: VA of the original Rx buffer, before
99  *			  movement of any skb->data pointer
100  * @paddr_buf_start     : PA of the original Rx buffer, before
101  *                        movement of any frag pointer
102  * @cookie		: index into the sw array which holds
103  *			  the sw Rx descriptors
104  *			  Cookie space is 21 bits:
105  *			  lower 18 bits -- index
106  *			  upper  3 bits -- pool_id
107  * @pool_id		: pool Id for which this allocated.
108  *			  Can only be used if there is no flow
109  *			  steering
110  * @in_use		  rx_desc is in use
111  * @unmapped		  used to mark rx_desc an unmapped if the corresponding
112  *			  nbuf is already unmapped
113  * @in_err_state	: Nbuf sanity failed for this descriptor.
114  */
115 struct dp_rx_desc {
116 	qdf_nbuf_t nbuf;
117 	uint8_t *rx_buf_start;
118 	qdf_dma_addr_t paddr_buf_start;
119 	uint32_t cookie;
120 	uint8_t	 pool_id;
121 #ifdef RX_DESC_DEBUG_CHECK
122 	uint32_t magic;
123 	struct dp_rx_desc_dbg_info *dbg_info;
124 #endif
125 	uint8_t	in_use:1,
126 	unmapped:1,
127 	in_err_state:1;
128 };
129 
130 /* RX Descriptor Multi Page memory alloc related */
131 #define DP_RX_DESC_OFFSET_NUM_BITS 8
132 #define DP_RX_DESC_PAGE_ID_NUM_BITS 8
133 #define DP_RX_DESC_POOL_ID_NUM_BITS 4
134 
135 #define DP_RX_DESC_PAGE_ID_SHIFT DP_RX_DESC_OFFSET_NUM_BITS
136 #define DP_RX_DESC_POOL_ID_SHIFT \
137 		(DP_RX_DESC_OFFSET_NUM_BITS + DP_RX_DESC_PAGE_ID_NUM_BITS)
138 #define RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK \
139 	(((1 << DP_RX_DESC_POOL_ID_NUM_BITS) - 1) << DP_RX_DESC_POOL_ID_SHIFT)
140 #define RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK	\
141 			(((1 << DP_RX_DESC_PAGE_ID_NUM_BITS) - 1) << \
142 			 DP_RX_DESC_PAGE_ID_SHIFT)
143 #define RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK \
144 			((1 << DP_RX_DESC_OFFSET_NUM_BITS) - 1)
145 #define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(_cookie)		\
146 	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK) >>	\
147 			DP_RX_DESC_POOL_ID_SHIFT)
148 #define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(_cookie)		\
149 	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK) >>	\
150 			DP_RX_DESC_PAGE_ID_SHIFT)
151 #define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(_cookie)		\
152 	((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK)
153 
154 #define RX_DESC_COOKIE_INDEX_SHIFT		0
155 #define RX_DESC_COOKIE_INDEX_MASK		0x3ffff /* 18 bits */
156 #define RX_DESC_COOKIE_POOL_ID_SHIFT		18
157 #define RX_DESC_COOKIE_POOL_ID_MASK		0x1c0000
158 
159 #define DP_RX_DESC_COOKIE_MAX	\
160 	(RX_DESC_COOKIE_INDEX_MASK | RX_DESC_COOKIE_POOL_ID_MASK)
161 
162 #define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie)		\
163 	(((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >>	\
164 			RX_DESC_COOKIE_POOL_ID_SHIFT)
165 
166 #define DP_RX_DESC_COOKIE_INDEX_GET(_cookie)		\
167 	(((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >>	\
168 			RX_DESC_COOKIE_INDEX_SHIFT)
169 
170 #define dp_rx_add_to_free_desc_list(head, tail, new) \
171 	__dp_rx_add_to_free_desc_list(head, tail, new, __func__)
172 
173 #define dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
174 				num_buffers, desc_list, tail) \
175 	__dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
176 				  num_buffers, desc_list, tail, __func__)
177 
178 #ifdef DP_RX_SPECIAL_FRAME_NEED
179 /**
180  * dp_rx_is_special_frame() - check is RX frame special needed
181  *
182  * @nbuf: RX skb pointer
183  * @frame_mask: the mask for speical frame needed
184  *
185  * Check is RX frame wanted matched with mask
186  *
187  * Return: true - special frame needed, false - no
188  */
189 static inline
190 bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
191 {
192 	if (((frame_mask & FRAME_MASK_IPV4_ARP) &&
193 	     qdf_nbuf_is_ipv4_arp_pkt(nbuf)) ||
194 	    ((frame_mask & FRAME_MASK_IPV4_DHCP) &&
195 	     qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) ||
196 	    ((frame_mask & FRAME_MASK_IPV4_EAPOL) &&
197 	     qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) ||
198 	    ((frame_mask & FRAME_MASK_IPV6_DHCP) &&
199 	     qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))
200 		return true;
201 
202 	return false;
203 }
204 
205 /**
206  * dp_rx_deliver_special_frame() - Deliver the RX special frame to stack
207  *				   if matches mask
208  *
209  * @soc: Datapath soc handler
210  * @peer: pointer to DP peer
211  * @nbuf: pointer to the skb of RX frame
212  * @frame_mask: the mask for speical frame needed
213  * @rx_tlv_hdr: start of rx tlv header
214  *
215  * note: Msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and
216  * single nbuf is expected.
217  *
218  * return: true - nbuf has been delivered to stack, false - not.
219  */
220 bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer,
221 				 qdf_nbuf_t nbuf, uint32_t frame_mask,
222 				 uint8_t *rx_tlv_hdr);
223 #else
224 static inline
225 bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
226 {
227 	return false;
228 }
229 
230 static inline
231 bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer,
232 				 qdf_nbuf_t nbuf, uint32_t frame_mask,
233 				 uint8_t *rx_tlv_hdr)
234 {
235 	return false;
236 }
237 #endif
238 
239 /* DOC: Offset to obtain LLC hdr
240  *
241  * In the case of Wifi parse error
242  * to reach LLC header from beginning
243  * of VLAN tag we need to skip 8 bytes.
244  * Vlan_tag(4)+length(2)+length added
245  * by HW(2) = 8 bytes.
246  */
247 #define DP_SKIP_VLAN		8
248 
249 /**
250  * struct dp_rx_cached_buf - rx cached buffer
251  * @list: linked list node
252  * @buf: skb buffer
253  */
254 struct dp_rx_cached_buf {
255 	qdf_list_node_t node;
256 	qdf_nbuf_t buf;
257 };
258 
259 /*
260  *dp_rx_xor_block() - xor block of data
261  *@b: destination data block
262  *@a: source data block
263  *@len: length of the data to process
264  *
265  *Returns: None
266  */
267 static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len)
268 {
269 	qdf_size_t i;
270 
271 	for (i = 0; i < len; i++)
272 		b[i] ^= a[i];
273 }
274 
275 /*
276  *dp_rx_rotl() - rotate the bits left
277  *@val: unsigned integer input value
278  *@bits: number of bits
279  *
280  *Returns: Integer with left rotated by number of 'bits'
281  */
282 static inline uint32_t dp_rx_rotl(uint32_t val, int bits)
283 {
284 	return (val << bits) | (val >> (32 - bits));
285 }
286 
287 /*
288  *dp_rx_rotr() - rotate the bits right
289  *@val: unsigned integer input value
290  *@bits: number of bits
291  *
292  *Returns: Integer with right rotated by number of 'bits'
293  */
294 static inline uint32_t dp_rx_rotr(uint32_t val, int bits)
295 {
296 	return (val >> bits) | (val << (32 - bits));
297 }
298 
299 /*
300  * dp_set_rx_queue() - set queue_mapping in skb
301  * @nbuf: skb
302  * @queue_id: rx queue_id
303  *
304  * Return: void
305  */
306 #ifdef QCA_OL_RX_MULTIQ_SUPPORT
307 static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
308 {
309 	qdf_nbuf_record_rx_queue(nbuf, queue_id);
310 	return;
311 }
312 #else
313 static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
314 {
315 }
316 #endif
317 
318 /*
319  *dp_rx_xswap() - swap the bits left
320  *@val: unsigned integer input value
321  *
322  *Returns: Integer with bits swapped
323  */
324 static inline uint32_t dp_rx_xswap(uint32_t val)
325 {
326 	return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
327 }
328 
329 /*
330  *dp_rx_get_le32_split() - get little endian 32 bits split
331  *@b0: byte 0
332  *@b1: byte 1
333  *@b2: byte 2
334  *@b3: byte 3
335  *
336  *Returns: Integer with split little endian 32 bits
337  */
338 static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2,
339 					uint8_t b3)
340 {
341 	return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
342 }
343 
344 /*
345  *dp_rx_get_le32() - get little endian 32 bits
346  *@b0: byte 0
347  *@b1: byte 1
348  *@b2: byte 2
349  *@b3: byte 3
350  *
351  *Returns: Integer with little endian 32 bits
352  */
353 static inline uint32_t dp_rx_get_le32(const uint8_t *p)
354 {
355 	return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]);
356 }
357 
358 /*
359  * dp_rx_put_le32() - put little endian 32 bits
360  * @p: destination char array
361  * @v: source 32-bit integer
362  *
363  * Returns: None
364  */
365 static inline void dp_rx_put_le32(uint8_t *p, uint32_t v)
366 {
367 	p[0] = (v) & 0xff;
368 	p[1] = (v >> 8) & 0xff;
369 	p[2] = (v >> 16) & 0xff;
370 	p[3] = (v >> 24) & 0xff;
371 }
372 
373 /* Extract michal mic block of data */
374 #define dp_rx_michael_block(l, r)	\
375 	do {					\
376 		r ^= dp_rx_rotl(l, 17);	\
377 		l += r;				\
378 		r ^= dp_rx_xswap(l);		\
379 		l += r;				\
380 		r ^= dp_rx_rotl(l, 3);	\
381 		l += r;				\
382 		r ^= dp_rx_rotr(l, 2);	\
383 		l += r;				\
384 	} while (0)
385 
386 /**
387  * struct dp_rx_desc_list_elem_t
388  *
389  * @next		: Next pointer to form free list
390  * @rx_desc		: DP Rx descriptor
391  */
392 union dp_rx_desc_list_elem_t {
393 	union dp_rx_desc_list_elem_t *next;
394 	struct dp_rx_desc rx_desc;
395 };
396 
397 #ifdef RX_DESC_MULTI_PAGE_ALLOC
398 /**
399  * dp_rx_desc_find() - find dp rx descriptor from page ID and offset
400  * @page_id: Page ID
401  * @offset: Offset of the descriptor element
402  *
403  * Return: RX descriptor element
404  */
405 union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
406 					      struct rx_desc_pool *rx_pool);
407 
408 static inline
409 struct dp_rx_desc *dp_get_rx_desc_from_cookie(struct dp_soc *soc,
410 					      struct rx_desc_pool *pool,
411 					      uint32_t cookie)
412 {
413 	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
414 	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
415 	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
416 	struct rx_desc_pool *rx_desc_pool;
417 	union dp_rx_desc_list_elem_t *rx_desc_elem;
418 
419 	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
420 		return NULL;
421 
422 	rx_desc_pool = &pool[pool_id];
423 	rx_desc_elem = (union dp_rx_desc_list_elem_t *)
424 		(rx_desc_pool->desc_pages.cacheable_pages[page_id] +
425 		rx_desc_pool->elem_size * offset);
426 
427 	return &rx_desc_elem->rx_desc;
428 }
429 
430 /**
431  * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
432  *			 the Rx descriptor on Rx DMA source ring buffer
433  * @soc: core txrx main context
434  * @cookie: cookie used to lookup virtual address
435  *
436  * Return: Pointer to the Rx descriptor
437  */
438 static inline
439 struct dp_rx_desc *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc,
440 					       uint32_t cookie)
441 {
442 	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_buf[0], cookie);
443 }
444 
445 /**
446  * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
447  *			 the Rx descriptor on monitor ring buffer
448  * @soc: core txrx main context
449  * @cookie: cookie used to lookup virtual address
450  *
451  * Return: Pointer to the Rx descriptor
452  */
453 static inline
454 struct dp_rx_desc *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc,
455 					     uint32_t cookie)
456 {
457 	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_mon[0], cookie);
458 }
459 
460 /**
461  * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
462  *			 the Rx descriptor on monitor status ring buffer
463  * @soc: core txrx main context
464  * @cookie: cookie used to lookup virtual address
465  *
466  * Return: Pointer to the Rx descriptor
467  */
468 static inline
469 struct dp_rx_desc *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc,
470 						uint32_t cookie)
471 {
472 	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_status[0], cookie);
473 }
474 #else
475 
476 void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
477 			  uint32_t pool_size,
478 			  struct rx_desc_pool *rx_desc_pool);
479 
480 /**
481  * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
482  *			 the Rx descriptor on Rx DMA source ring buffer
483  * @soc: core txrx main context
484  * @cookie: cookie used to lookup virtual address
485  *
486  * Return: void *: Virtual Address of the Rx descriptor
487  */
488 static inline
489 void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie)
490 {
491 	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
492 	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
493 	struct rx_desc_pool *rx_desc_pool;
494 
495 	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
496 		return NULL;
497 
498 	rx_desc_pool = &soc->rx_desc_buf[pool_id];
499 
500 	if (qdf_unlikely(index >= rx_desc_pool->pool_size))
501 		return NULL;
502 
503 	return &(soc->rx_desc_buf[pool_id].array[index].rx_desc);
504 }
505 
506 /**
507  * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
508  *			 the Rx descriptor on monitor ring buffer
509  * @soc: core txrx main context
510  * @cookie: cookie used to lookup virtual address
511  *
512  * Return: void *: Virtual Address of the Rx descriptor
513  */
514 static inline
515 void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie)
516 {
517 	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
518 	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
519 	/* TODO */
520 	/* Add sanity for pool_id & index */
521 	return &(soc->rx_desc_mon[pool_id].array[index].rx_desc);
522 }
523 
524 /**
525  * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
526  *			 the Rx descriptor on monitor status ring buffer
527  * @soc: core txrx main context
528  * @cookie: cookie used to lookup virtual address
529  *
530  * Return: void *: Virtual Address of the Rx descriptor
531  */
532 static inline
533 void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
534 {
535 	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
536 	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
537 	/* TODO */
538 	/* Add sanity for pool_id & index */
539 	return &(soc->rx_desc_status[pool_id].array[index].rx_desc);
540 }
541 #endif /* RX_DESC_MULTI_PAGE_ALLOC */
542 
543 #ifdef DP_RX_DESC_COOKIE_INVALIDATE
544 static inline QDF_STATUS
545 dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc)
546 {
547 	if (qdf_unlikely(HAL_RX_REO_BUF_COOKIE_INVALID_GET(ring_desc)))
548 		return QDF_STATUS_E_FAILURE;
549 
550 	HAL_RX_REO_BUF_COOKIE_INVALID_SET(ring_desc);
551 	return QDF_STATUS_SUCCESS;
552 }
553 #else
554 static inline QDF_STATUS
555 dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc)
556 {
557 	return QDF_STATUS_SUCCESS;
558 }
559 #endif
560 
561 QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool);
562 QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
563 				 uint32_t pool_size,
564 				 struct rx_desc_pool *rx_desc_pool);
565 
566 void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
567 			  uint32_t pool_size,
568 			  struct rx_desc_pool *rx_desc_pool);
569 void dp_rx_pdev_mon_buf_buffers_free(struct dp_pdev *pdev, uint32_t mac_id);
570 
571 void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
572 				union dp_rx_desc_list_elem_t **local_desc_list,
573 				union dp_rx_desc_list_elem_t **tail,
574 				uint16_t pool_id,
575 				struct rx_desc_pool *rx_desc_pool);
576 
577 uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
578 				struct rx_desc_pool *rx_desc_pool,
579 				uint16_t num_descs,
580 				union dp_rx_desc_list_elem_t **desc_list,
581 				union dp_rx_desc_list_elem_t **tail);
582 
583 
584 QDF_STATUS dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev);
585 void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev);
586 
587 QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev);
588 void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev);
589 void dp_rx_desc_pool_deinit(struct dp_soc *soc,
590 			    struct rx_desc_pool *rx_desc_pool);
591 
592 QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);
593 QDF_STATUS dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev);
594 void dp_rx_pdev_buffers_free(struct dp_pdev *pdev);
595 
596 void dp_rx_pdev_detach(struct dp_pdev *pdev);
597 
598 void dp_print_napi_stats(struct dp_soc *soc);
599 
600 /**
601  * dp_rx_vdev_detach() - detach vdev from dp rx
602  * @vdev: virtual device instance
603  *
604  * Return: QDF_STATUS_SUCCESS: success
605  *         QDF_STATUS_E_RESOURCES: Error return
606  */
607 QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev);
608 
609 uint32_t
610 dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
611 	      uint8_t reo_ring_num,
612 	      uint32_t quota);
613 
614 /**
615  * dp_rx_err_process() - Processes error frames routed to REO error ring
616  * @int_ctx: pointer to DP interrupt context
617  * @soc: core txrx main context
618  * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
619  * @quota: No. of units (packets) that can be serviced in one shot.
620  *
621  * This function implements error processing and top level demultiplexer
622  * for all the frames routed to REO error ring.
623  *
624  * Return: uint32_t: No. of elements processed
625  */
626 uint32_t dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
627 			   hal_ring_handle_t hal_ring_hdl, uint32_t quota);
628 
629 /**
630  * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
631  * @int_ctx: pointer to DP interrupt context
632  * @soc: core txrx main context
633  * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
634  * @quota: No. of units (packets) that can be serviced in one shot.
635  *
636  * This function implements error processing and top level demultiplexer
637  * for all the frames routed to WBM2HOST sw release ring.
638  *
639  * Return: uint32_t: No. of elements processed
640  */
641 uint32_t
642 dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
643 		      hal_ring_handle_t hal_ring_hdl, uint32_t quota);
644 
645 /**
646  * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
647  *		     multiple nbufs.
648  * @nbuf: pointer to the first msdu of an amsdu.
649  *
650  * This function implements the creation of RX frag_list for cases
651  * where an MSDU is spread across multiple nbufs.
652  *
653  * Return: returns the head nbuf which contains complete frag_list.
654  */
655 qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf);
656 
657 
658 /*
659  * dp_rx_desc_nbuf_and_pool_free() - free the sw rx desc pool called during
660  *				     de-initialization of wifi module.
661  *
662  * @soc: core txrx main context
663  * @pool_id: pool_id which is one of 3 mac_ids
664  * @rx_desc_pool: rx descriptor pool pointer
665  *
666  * Return: None
667  */
668 void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
669 				   struct rx_desc_pool *rx_desc_pool);
670 
671 /*
672  * dp_rx_desc_nbuf_free() - free the sw rx desc nbufs called during
673  *			    de-initialization of wifi module.
674  *
675  * @soc: core txrx main context
676  * @pool_id: pool_id which is one of 3 mac_ids
677  * @rx_desc_pool: rx descriptor pool pointer
678  *
679  * Return: None
680  */
681 void dp_rx_desc_nbuf_free(struct dp_soc *soc,
682 			  struct rx_desc_pool *rx_desc_pool);
683 
684 #ifdef DP_RX_MON_MEM_FRAG
685 /*
686  * dp_rx_desc_frag_free() - free the sw rx desc frag called during
687  *			    de-initialization of wifi module.
688  *
689  * @soc: core txrx main context
690  * @rx_desc_pool: rx descriptor pool pointer
691  *
692  * Return: None
693  */
694 void dp_rx_desc_frag_free(struct dp_soc *soc,
695 			  struct rx_desc_pool *rx_desc_pool);
696 #else
697 static inline
698 void dp_rx_desc_frag_free(struct dp_soc *soc,
699 			  struct rx_desc_pool *rx_desc_pool)
700 {
701 }
702 #endif
703 /*
704  * dp_rx_desc_pool_free() - free the sw rx desc array called during
705  *			    de-initialization of wifi module.
706  *
707  * @soc: core txrx main context
708  * @rx_desc_pool: rx descriptor pool pointer
709  *
710  * Return: None
711  */
712 void dp_rx_desc_pool_free(struct dp_soc *soc,
713 			  struct rx_desc_pool *rx_desc_pool);
714 
715 void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
716 				struct dp_peer *peer);
717 
718 #ifdef RX_DESC_DEBUG_CHECK
719 /**
720  * dp_rx_desc_paddr_sanity_check() - paddr sanity for ring desc vs rx_desc
721  * @rx_desc: rx descriptor
722  * @ring_paddr: paddr obatined from the ring
723  *
724  * Returns: QDF_STATUS
725  */
726 static inline
727 bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc,
728 				   uint64_t ring_paddr)
729 {
730 	return (ring_paddr == qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0));
731 }
732 
733 /*
734  * dp_rx_desc_alloc_dbg_info() - Alloc memory for rx descriptor debug
735  *  structure
736  * @rx_desc: rx descriptor pointer
737  *
738  * Return: None
739  */
740 static inline
741 void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc)
742 {
743 	rx_desc->dbg_info = qdf_mem_malloc(sizeof(struct dp_rx_desc_dbg_info));
744 }
745 
746 /*
747  * dp_rx_desc_free_dbg_info() - Free rx descriptor debug
748  *  structure memory
749  * @rx_desc: rx descriptor pointer
750  *
751  * Return: None
752  */
753 static inline
754 void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc)
755 {
756 	qdf_mem_free(rx_desc->dbg_info);
757 }
758 
759 /*
760  * dp_rx_desc_update_dbg_info() - Update rx descriptor debug info
761  *  structure memory
762  * @rx_desc: rx descriptor pointer
763  *
764  * Return: None
765  */
766 static
767 void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc,
768 				const char *func_name, uint8_t flag)
769 {
770 	struct dp_rx_desc_dbg_info *info = rx_desc->dbg_info;
771 
772 	if (!info)
773 		return;
774 
775 	if (flag == RX_DESC_REPLENISHED) {
776 		qdf_str_lcopy(info->replenish_caller, func_name,
777 			      QDF_MEM_FUNC_NAME_SIZE);
778 		info->replenish_ts = qdf_get_log_timestamp();
779 	} else {
780 		qdf_str_lcopy(info->freelist_caller, func_name,
781 			      QDF_MEM_FUNC_NAME_SIZE);
782 		info->freelist_ts = qdf_get_log_timestamp();
783 	}
784 }
785 #else
786 
787 static inline
788 bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc,
789 				   uint64_t ring_paddr)
790 {
791 	return true;
792 }
793 
794 static inline
795 void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc)
796 {
797 }
798 
799 static inline
800 void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc)
801 {
802 }
803 
804 static inline
805 void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc,
806 				const char *func_name, uint8_t flag)
807 {
808 }
809 #endif /* RX_DESC_DEBUG_CHECK */
810 
811 /**
812  * dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list
813  *
814  * @head: pointer to the head of local free list
815  * @tail: pointer to the tail of local free list
816  * @new: new descriptor that is added to the free list
817  * @func_name: caller func name
818  *
819  * Return: void:
820  */
821 static inline
822 void __dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head,
823 				 union dp_rx_desc_list_elem_t **tail,
824 				 struct dp_rx_desc *new, const char *func_name)
825 {
826 	qdf_assert(head && new);
827 
828 	new->nbuf = NULL;
829 	new->in_use = 0;
830 
831 	((union dp_rx_desc_list_elem_t *)new)->next = *head;
832 	*head = (union dp_rx_desc_list_elem_t *)new;
833 	/* reset tail if head->next is NULL */
834 	if (!*tail || !(*head)->next)
835 		*tail = *head;
836 
837 	dp_rx_desc_update_dbg_info(new, func_name, RX_DESC_IN_FREELIST);
838 }
839 
840 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
841 				   uint8_t mac_id);
842 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
843 		qdf_nbuf_t mpdu, bool mpdu_done, uint8_t mac_id);
844 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
845 			     uint8_t *rx_tlv_hdr, struct dp_peer *peer);
846 void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
847 		       uint16_t peer_id, uint8_t tid);
848 
849 #define DP_RX_HEAD_APPEND(head, elem) \
850 	do {                                                            \
851 		qdf_nbuf_set_next((elem), (head));			\
852 		(head) = (elem);                                        \
853 	} while (0)
854 
855 
856 #define DP_RX_LIST_APPEND(head, tail, elem) \
857 	do {                                                          \
858 		if (!(head)) {                                        \
859 			(head) = (elem);                              \
860 			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) = 1;\
861 		} else {                                              \
862 			qdf_nbuf_set_next((tail), (elem));            \
863 			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head)++;  \
864 		}                                                     \
865 		(tail) = (elem);                                      \
866 		qdf_nbuf_set_next((tail), NULL);                      \
867 	} while (0)
868 
869 #define DP_RX_MERGE_TWO_LIST(phead, ptail, chead, ctail) \
870 	do {                                                          \
871 		if (!(phead)) {                                       \
872 			(phead) = (chead);                            \
873 		} else {                                              \
874 			qdf_nbuf_set_next((ptail), (chead));          \
875 			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(phead) += \
876 			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(chead);   \
877 		}                                                     \
878 		(ptail) = (ctail);                                    \
879 		qdf_nbuf_set_next((ptail), NULL);                     \
880 	} while (0)
881 
882 /*for qcn9000 emulation the pcie is complete phy and no address restrictions*/
883 #if !defined(BUILD_X86) || defined(QCA_WIFI_QCN9000)
884 static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
885 		qdf_dma_addr_t *paddr, struct rx_desc_pool *rx_desc_pool)
886 {
887 	return QDF_STATUS_SUCCESS;
888 }
889 #else
890 #define MAX_RETRY 100
891 static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
892 		qdf_dma_addr_t *paddr, struct rx_desc_pool *rx_desc_pool)
893 {
894 	uint32_t nbuf_retry = 0;
895 	int32_t ret;
896 	const uint32_t x86_phy_addr = 0x50000000;
897 	/*
898 	 * in M2M emulation platforms (x86) the memory below 0x50000000
899 	 * is reserved for target use, so any memory allocated in this
900 	 * region should not be used by host
901 	 */
902 	do {
903 		if (qdf_likely(*paddr > x86_phy_addr))
904 			return QDF_STATUS_SUCCESS;
905 		else {
906 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
907 					"phy addr %pK exceeded 0x50000000 trying again",
908 					paddr);
909 
910 			nbuf_retry++;
911 			if ((*rx_netbuf)) {
912 				qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
913 						QDF_DMA_FROM_DEVICE);
914 				/* Not freeing buffer intentionally.
915 				 * Observed that same buffer is getting
916 				 * re-allocated resulting in longer load time
917 				 * WMI init timeout.
918 				 * This buffer is anyway not useful so skip it.
919 				 **/
920 			}
921 
922 			*rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
923 						    rx_desc_pool->buf_size,
924 						    RX_BUFFER_RESERVATION,
925 						    rx_desc_pool->buf_alignment,
926 						    FALSE);
927 
928 			if (qdf_unlikely(!(*rx_netbuf)))
929 				return QDF_STATUS_E_FAILURE;
930 
931 			ret = qdf_nbuf_map_single(dp_soc->osdev, *rx_netbuf,
932 							QDF_DMA_FROM_DEVICE);
933 
934 			if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
935 				qdf_nbuf_free(*rx_netbuf);
936 				*rx_netbuf = NULL;
937 				continue;
938 			}
939 
940 			*paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0);
941 		}
942 	} while (nbuf_retry < MAX_RETRY);
943 
944 	if ((*rx_netbuf)) {
945 		qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
946 					QDF_DMA_FROM_DEVICE);
947 		qdf_nbuf_free(*rx_netbuf);
948 	}
949 
950 	return QDF_STATUS_E_FAILURE;
951 }
952 #endif
953 
954 /**
955  * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
956  *				   the MSDU Link Descriptor
957  * @soc: core txrx main context
958  * @buf_info: buf_info includes cookie that is used to lookup
959  * virtual address of link descriptor after deriving the page id
960  * and the offset or index of the desc on the associatde page.
961  *
962  * This is the VA of the link descriptor, that HAL layer later uses to
963  * retrieve the list of MSDU's for a given MPDU.
964  *
965  * Return: void *: Virtual Address of the Rx descriptor
966  */
967 static inline
968 void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
969 				  struct hal_buf_info *buf_info)
970 {
971 	void *link_desc_va;
972 	struct qdf_mem_multi_page_t *pages;
973 	uint16_t page_id = LINK_DESC_COOKIE_PAGE_ID(buf_info->sw_cookie);
974 
975 	pages = &soc->link_desc_pages;
976 	if (!pages)
977 		return NULL;
978 	if (qdf_unlikely(page_id >= pages->num_pages))
979 		return NULL;
980 	link_desc_va = pages->dma_pages[page_id].page_v_addr_start +
981 		(buf_info->paddr - pages->dma_pages[page_id].page_p_addr);
982 	return link_desc_va;
983 }
984 
985 /**
986  * dp_rx_cookie_2_mon_link_desc_va() - Converts cookie to a virtual address of
987  *				   the MSDU Link Descriptor
988  * @pdev: core txrx pdev context
989  * @buf_info: buf_info includes cookie that used to lookup virtual address of
990  * link descriptor. Normally this is just an index into a per pdev array.
991  *
992  * This is the VA of the link descriptor in monitor mode destination ring,
993  * that HAL layer later uses to retrieve the list of MSDU's for a given MPDU.
994  *
995  * Return: void *: Virtual Address of the Rx descriptor
996  */
997 static inline
998 void *dp_rx_cookie_2_mon_link_desc_va(struct dp_pdev *pdev,
999 				  struct hal_buf_info *buf_info,
1000 				  int mac_id)
1001 {
1002 	void *link_desc_va;
1003 	struct qdf_mem_multi_page_t *pages;
1004 	uint16_t page_id = LINK_DESC_COOKIE_PAGE_ID(buf_info->sw_cookie);
1005 
1006 	pages = &pdev->soc->mon_link_desc_pages[mac_id];
1007 	if (!pages)
1008 		return NULL;
1009 
1010 	if (qdf_unlikely(page_id >= pages->num_pages))
1011 		return NULL;
1012 
1013 	link_desc_va = pages->dma_pages[page_id].page_v_addr_start +
1014 		(buf_info->paddr - pages->dma_pages[page_id].page_p_addr);
1015 
1016 	return link_desc_va;
1017 }
1018 
1019 /**
1020  * dp_rx_defrag_concat() - Concatenate the fragments
1021  *
1022  * @dst: destination pointer to the buffer
1023  * @src: source pointer from where the fragment payload is to be copied
1024  *
1025  * Return: QDF_STATUS
1026  */
1027 static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src)
1028 {
1029 	/*
1030 	 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst
1031 	 * to provide space for src, the headroom portion is copied from
1032 	 * the original dst buffer to the larger new dst buffer.
1033 	 * (This is needed, because the headroom of the dst buffer
1034 	 * contains the rx desc.)
1035 	 */
1036 	if (!qdf_nbuf_cat(dst, src)) {
1037 		/*
1038 		 * qdf_nbuf_cat does not free the src memory.
1039 		 * Free src nbuf before returning
1040 		 * For failure case the caller takes of freeing the nbuf
1041 		 */
1042 		qdf_nbuf_free(src);
1043 		return QDF_STATUS_SUCCESS;
1044 	}
1045 
1046 	return QDF_STATUS_E_DEFRAG_ERROR;
1047 }
1048 
1049 #ifndef FEATURE_WDS
1050 static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc, uint16_t sa_idx, bool is_active)
1051 {
1052 	return QDF_STATUS_SUCCESS;
1053 }
1054 
1055 static inline void
1056 dp_rx_wds_srcport_learn(struct dp_soc *soc,
1057 			uint8_t *rx_tlv_hdr,
1058 			struct dp_peer *ta_peer,
1059 			qdf_nbuf_t nbuf,
1060 			struct hal_rx_msdu_metadata msdu_metadata)
1061 {
1062 }
1063 #endif
1064 
1065 /*
1066  * dp_rx_desc_dump() - dump the sw rx descriptor
1067  *
1068  * @rx_desc: sw rx descriptor
1069  */
1070 static inline void dp_rx_desc_dump(struct dp_rx_desc *rx_desc)
1071 {
1072 	dp_info("rx_desc->nbuf: %pK, rx_desc->cookie: %d, rx_desc->pool_id: %d, rx_desc->in_use: %d, rx_desc->unmapped: %d",
1073 		rx_desc->nbuf, rx_desc->cookie, rx_desc->pool_id,
1074 		rx_desc->in_use, rx_desc->unmapped);
1075 }
1076 
1077 /*
1078  * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet.
1079  *					In qwrap mode, packets originated from
1080  *					any vdev should not loopback and
1081  *					should be dropped.
1082  * @vdev: vdev on which rx packet is received
1083  * @nbuf: rx pkt
1084  *
1085  */
1086 #if ATH_SUPPORT_WRAP
1087 static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
1088 						qdf_nbuf_t nbuf)
1089 {
1090 	struct dp_vdev *psta_vdev;
1091 	struct dp_pdev *pdev = vdev->pdev;
1092 	uint8_t *data = qdf_nbuf_data(nbuf);
1093 
1094 	if (qdf_unlikely(vdev->proxysta_vdev)) {
1095 		/* In qwrap isolation mode, allow loopback packets as all
1096 		 * packets go to RootAP and Loopback on the mpsta.
1097 		 */
1098 		if (vdev->isolation_vdev)
1099 			return false;
1100 		TAILQ_FOREACH(psta_vdev, &pdev->vdev_list, vdev_list_elem) {
1101 			if (qdf_unlikely(psta_vdev->proxysta_vdev &&
1102 					 !qdf_mem_cmp(psta_vdev->mac_addr.raw,
1103 						      &data[QDF_MAC_ADDR_SIZE],
1104 						      QDF_MAC_ADDR_SIZE))) {
1105 				/* Drop packet if source address is equal to
1106 				 * any of the vdev addresses.
1107 				 */
1108 				return true;
1109 			}
1110 		}
1111 	}
1112 	return false;
1113 }
1114 #else
1115 static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
1116 						qdf_nbuf_t nbuf)
1117 {
1118 	return false;
1119 }
1120 #endif
1121 
1122 #if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\
1123 	defined(WLAN_SUPPORT_RX_TAG_STATISTICS) ||\
1124 	defined(WLAN_SUPPORT_RX_FLOW_TAG)
1125 #include "dp_rx_tag.h"
1126 #endif
1127 
1128 #ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
1129 /**
1130  * dp_rx_update_protocol_tag() - Reads CCE metadata from the RX MSDU end TLV
1131  *                              and set the corresponding tag in QDF packet
1132  * @soc: core txrx main context
1133  * @vdev: vdev on which the packet is received
1134  * @nbuf: QDF pkt buffer on which the protocol tag should be set
1135  * @rx_tlv_hdr: rBbase address where the RX TLVs starts
1136  * @ring_index: REO ring number, not used for error & monitor ring
1137  * @is_reo_exception: flag to indicate if rx from REO ring or exception ring
1138  * @is_update_stats: flag to indicate whether to update stats or not
1139  * Return: void
1140  */
1141 static inline void
1142 dp_rx_update_protocol_tag(struct dp_soc *soc, struct dp_vdev *vdev,
1143 			  qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
1144 			  uint16_t ring_index,
1145 			  bool is_reo_exception, bool is_update_stats)
1146 {
1147 }
1148 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
1149 
1150 #ifndef WLAN_SUPPORT_RX_FLOW_TAG
1151 /**
1152  * dp_rx_update_flow_tag() - Reads FSE metadata from the RX MSDU end TLV
1153  *                           and set the corresponding tag in QDF packet
1154  * @soc: core txrx main context
1155  * @vdev: vdev on which the packet is received
1156  * @nbuf: QDF pkt buffer on which the protocol tag should be set
1157  * @rx_tlv_hdr: base address where the RX TLVs starts
1158  * @is_update_stats: flag to indicate whether to update stats or not
1159  *
1160  * Return: void
1161  */
1162 static inline void
1163 dp_rx_update_flow_tag(struct dp_soc *soc, struct dp_vdev *vdev,
1164 		      qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, bool update_stats)
1165 {
1166 }
1167 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
1168 
1169 #if !defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) &&\
1170 	!defined(WLAN_SUPPORT_RX_FLOW_TAG)
1171 /**
1172  * dp_rx_mon_update_protocol_flow_tag() - Performs necessary checks for monitor
1173  *                                       mode and then tags appropriate packets
1174  * @soc: core txrx main context
1175  * @vdev: pdev on which packet is received
1176  * @msdu: QDF packet buffer on which the protocol tag should be set
1177  * @rx_desc: base address where the RX TLVs start
1178  * Return: void
1179  */
1180 static inline
1181 void dp_rx_mon_update_protocol_flow_tag(struct dp_soc *soc,
1182 					struct dp_pdev *dp_pdev,
1183 					qdf_nbuf_t msdu, void *rx_desc)
1184 {
1185 }
1186 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG || WLAN_SUPPORT_RX_FLOW_TAG */
1187 
1188 /*
1189  * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
1190  *			       called during dp rx initialization
1191  *			       and at the end of dp_rx_process.
1192  *
1193  * @soc: core txrx main context
1194  * @mac_id: mac_id which is one of 3 mac_ids
1195  * @dp_rxdma_srng: dp rxdma circular ring
1196  * @rx_desc_pool: Pointer to free Rx descriptor pool
1197  * @num_req_buffers: number of buffer to be replenished
1198  * @desc_list: list of descs if called from dp_rx_process
1199  *	       or NULL during dp rx initialization or out of buffer
1200  *	       interrupt.
1201  * @tail: tail of descs list
1202  * @func_name: name of the caller function
1203  * Return: return success or failure
1204  */
1205 QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
1206 				 struct dp_srng *dp_rxdma_srng,
1207 				 struct rx_desc_pool *rx_desc_pool,
1208 				 uint32_t num_req_buffers,
1209 				 union dp_rx_desc_list_elem_t **desc_list,
1210 				 union dp_rx_desc_list_elem_t **tail,
1211 				 const char *func_name);
1212 
1213 /*
1214  * dp_pdev_rx_buffers_attach() - replenish rxdma ring with rx nbufs
1215  *                               called during dp rx initialization
1216  *
1217  * @soc: core txrx main context
1218  * @mac_id: mac_id which is one of 3 mac_ids
1219  * @dp_rxdma_srng: dp rxdma circular ring
1220  * @rx_desc_pool: Pointer to free Rx descriptor pool
1221  * @num_req_buffers: number of buffer to be replenished
1222  *
1223  * Return: return success or failure
1224  */
1225 QDF_STATUS
1226 dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
1227 			  struct dp_srng *dp_rxdma_srng,
1228 			  struct rx_desc_pool *rx_desc_pool,
1229 			  uint32_t num_req_buffers);
1230 
1231 /**
1232  * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
1233  *			      (WBM), following error handling
1234  *
1235  * @soc: core DP main context
1236  * @buf_addr_info: opaque pointer to the REO error ring descriptor
1237  * @buf_addr_info: void pointer to the buffer_addr_info
1238  * @bm_action: put to idle_list or release to msdu_list
1239  *
1240  * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
1241  */
1242 QDF_STATUS
1243 dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
1244 		       uint8_t bm_action);
1245 
1246 /**
1247  * dp_rx_link_desc_return_by_addr - Return a MPDU link descriptor to
1248  *					(WBM) by address
1249  *
1250  * @soc: core DP main context
1251  * @link_desc_addr: link descriptor addr
1252  *
1253  * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
1254  */
1255 QDF_STATUS
1256 dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
1257 			       hal_buff_addrinfo_t link_desc_addr,
1258 			       uint8_t bm_action);
1259 
1260 /**
1261  * dp_rxdma_err_process() - RxDMA error processing functionality
1262  * @soc: core txrx main contex
1263  * @mac_id: mac id which is one of 3 mac_ids
1264  * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
1265  * @quota: No. of units (packets) that can be serviced in one shot.
1266  *
1267  * Return: num of buffers processed
1268  */
1269 uint32_t
1270 dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
1271 		     uint32_t mac_id, uint32_t quota);
1272 
1273 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1274 				uint8_t *rx_tlv_hdr, struct dp_peer *peer);
1275 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1276 					uint8_t *rx_tlv_hdr);
1277 
1278 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
1279 			   struct dp_peer *peer);
1280 
1281 /*
1282  * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
1283  *
1284  * @soc: core txrx main context
1285  * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
1286  * @ring_desc: opaque pointer to the RX ring descriptor
1287  * @rx_desc: host rs descriptor
1288  *
1289  * Return: void
1290  */
1291 void dp_rx_dump_info_and_assert(struct dp_soc *soc,
1292 				hal_ring_handle_t hal_ring_hdl,
1293 				hal_ring_desc_t ring_desc,
1294 				struct dp_rx_desc *rx_desc);
1295 
1296 void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
1297 
1298 #ifdef QCA_PEER_EXT_STATS
1299 void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
1300 			     qdf_nbuf_t nbuf);
1301 #endif /* QCA_PEER_EXT_STATS */
1302 
1303 #ifdef RX_DESC_DEBUG_CHECK
1304 /**
1305  * dp_rx_desc_check_magic() - check the magic value in dp_rx_desc
1306  * @rx_desc: rx descriptor pointer
1307  *
1308  * Return: true, if magic is correct, else false.
1309  */
1310 static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
1311 {
1312 	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
1313 		return false;
1314 
1315 	rx_desc->magic = 0;
1316 	return true;
1317 }
1318 
1319 /**
1320  * dp_rx_desc_prep() - prepare rx desc
1321  * @rx_desc: rx descriptor pointer to be prepared
1322  * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info *
1323  *
1324  * Note: assumption is that we are associating a nbuf which is mapped
1325  *
1326  * Return: none
1327  */
1328 static inline
1329 void dp_rx_desc_prep(struct dp_rx_desc *rx_desc,
1330 		     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
1331 {
1332 	rx_desc->magic = DP_RX_DESC_MAGIC;
1333 	rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf;
1334 	rx_desc->unmapped = 0;
1335 }
1336 
1337 /**
1338  * dp_rx_desc_frag_prep() - prepare rx desc
1339  * @rx_desc: rx descriptor pointer to be prepared
1340  * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info *
1341  *
1342  * Note: assumption is that we frag address is mapped
1343  *
1344  * Return: none
1345  */
1346 #ifdef DP_RX_MON_MEM_FRAG
1347 static inline
1348 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
1349 			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
1350 {
1351 	rx_desc->magic = DP_RX_DESC_MAGIC;
1352 	rx_desc->rx_buf_start =
1353 		(uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr);
1354 	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
1355 	rx_desc->unmapped = 0;
1356 }
1357 #else
1358 static inline
1359 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
1360 			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
1361 {
1362 }
1363 #endif /* DP_RX_MON_MEM_FRAG */
1364 #else
1365 
1366 static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
1367 {
1368 	return true;
1369 }
1370 
1371 static inline
1372 void dp_rx_desc_prep(struct dp_rx_desc *rx_desc,
1373 		     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
1374 {
1375 	rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf;
1376 	rx_desc->unmapped = 0;
1377 }
1378 
1379 #ifdef DP_RX_MON_MEM_FRAG
1380 static inline
1381 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
1382 			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
1383 {
1384 	rx_desc->rx_buf_start =
1385 		(uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr);
1386 	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
1387 	rx_desc->unmapped = 0;
1388 }
1389 #else
1390 static inline
1391 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
1392 			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
1393 {
1394 }
1395 #endif /* DP_RX_MON_MEM_FRAG */
1396 
1397 #endif /* RX_DESC_DEBUG_CHECK */
1398 
1399 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
1400 				bool is_mon_dest_desc);
1401 
1402 void dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
1403 			     uint8_t *rx_tlv_hdr, struct dp_peer *peer,
1404 			     uint8_t err_code, uint8_t mac_id);
1405 
1406 #ifndef QCA_MULTIPASS_SUPPORT
1407 static inline
1408 bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf, uint8_t tid)
1409 {
1410 	return false;
1411 }
1412 #else
1413 bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf,
1414 			     uint8_t tid);
1415 #endif
1416 
1417 #ifndef WLAN_RX_PKT_CAPTURE_ENH
1418 static inline
1419 QDF_STATUS dp_peer_set_rx_capture_enabled(struct dp_pdev *pdev,
1420 					  struct dp_peer *peer_handle,
1421 					  bool value, uint8_t *mac_addr)
1422 {
1423 	return QDF_STATUS_SUCCESS;
1424 }
1425 #endif
1426 
1427 /**
1428  * dp_rx_deliver_to_stack() - deliver pkts to network stack
1429  * Caller to hold peer refcount and check for valid peer
1430  * @soc: soc
1431  * @vdev: vdev
1432  * @peer: peer
1433  * @nbuf_head: skb list head
1434  * @nbuf_tail: skb list tail
1435  *
1436  * Return: None
1437  */
1438 void dp_rx_deliver_to_stack(struct dp_soc *soc,
1439 			    struct dp_vdev *vdev,
1440 			    struct dp_peer *peer,
1441 			    qdf_nbuf_t nbuf_head,
1442 			    qdf_nbuf_t nbuf_tail);
1443 
1444 #ifdef QCA_OL_RX_LOCK_LESS_ACCESS
1445 /*
1446  * dp_rx_ring_access_start()- Wrapper function to log access start of a hal ring
1447  * @int_ctx: pointer to DP interrupt context
1448  * @dp_soc - DP soc structure pointer
1449  * @hal_ring_hdl - HAL ring handle
1450  *
1451  * Return: 0 on success; error on failure
1452  */
1453 static inline int
1454 dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc,
1455 			hal_ring_handle_t hal_ring_hdl)
1456 {
1457 	return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl);
1458 }
1459 
1460 /*
1461  * dp_rx_ring_access_end()- Wrapper function to log access end of a hal ring
1462  * @int_ctx: pointer to DP interrupt context
1463  * @dp_soc - DP soc structure pointer
1464  * @hal_ring_hdl - HAL ring handle
1465  *
1466  * Return - None
1467  */
1468 static inline void
1469 dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc,
1470 		      hal_ring_handle_t hal_ring_hdl)
1471 {
1472 	hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl);
1473 }
1474 #else
1475 static inline int
1476 dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc,
1477 			hal_ring_handle_t hal_ring_hdl)
1478 {
1479 	return dp_srng_access_start(int_ctx, soc, hal_ring_hdl);
1480 }
1481 
1482 static inline void
1483 dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc,
1484 		      hal_ring_handle_t hal_ring_hdl)
1485 {
1486 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
1487 }
1488 #endif
1489 
1490 /*
1491  * dp_rx_wbm_sg_list_reset() - Initialize sg list
1492  *
1493  * This api should be called at soc init and afterevery sg processing.
1494  *@soc: DP SOC handle
1495  */
1496 static inline void dp_rx_wbm_sg_list_reset(struct dp_soc *soc)
1497 {
1498 	if (soc) {
1499 		soc->wbm_sg_param.wbm_is_first_msdu_in_sg = false;
1500 		soc->wbm_sg_param.wbm_sg_nbuf_head = NULL;
1501 		soc->wbm_sg_param.wbm_sg_nbuf_tail = NULL;
1502 		soc->wbm_sg_param.wbm_sg_desc_msdu_len = 0;
1503 	}
1504 }
1505 
1506 /*
1507  * dp_rx_wbm_sg_list_deinit() - De-initialize sg list
1508  *
1509  * This api should be called in down path, to avoid any leak.
1510  *@soc: DP SOC handle
1511  */
1512 static inline void dp_rx_wbm_sg_list_deinit(struct dp_soc *soc)
1513 {
1514 	if (soc) {
1515 		if (soc->wbm_sg_param.wbm_sg_nbuf_head)
1516 			qdf_nbuf_list_free(soc->wbm_sg_param.wbm_sg_nbuf_head);
1517 
1518 		dp_rx_wbm_sg_list_reset(soc);
1519 	}
1520 }
1521 
1522 #ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
1523 #define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \
1524 	do {								   \
1525 		if (!soc->rx_buff_pool[rx_desc->pool_id].is_initialized) { \
1526 			DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf);	   \
1527 			break;						   \
1528 		}							   \
1529 		DP_RX_LIST_APPEND(ebuf_head, ebuf_tail, rx_desc->nbuf);	   \
1530 		if (!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)) {	   \
1531 			if (!dp_rx_buffer_pool_refill(soc, ebuf_head,	   \
1532 						      rx_desc->pool_id))   \
1533 				DP_RX_MERGE_TWO_LIST(head, tail,	   \
1534 						     ebuf_head, ebuf_tail);\
1535 			ebuf_head = NULL;				   \
1536 			ebuf_tail = NULL;				   \
1537 		}							   \
1538 	} while (0)
1539 #else
1540 #define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \
1541 	DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf)
1542 #endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */
1543 
1544 /*
1545  * dp_rx_link_desc_refill_duplicate_check() - check if link desc duplicate
1546 					      to refill
1547  * @soc: DP SOC handle
1548  * @buf_info: the last link desc buf info
1549  * @ring_buf_info: current buf address pointor including link desc
1550  *
1551  * return: none.
1552  */
1553 void dp_rx_link_desc_refill_duplicate_check(
1554 				struct dp_soc *soc,
1555 				struct hal_buf_info *buf_info,
1556 				hal_buff_addrinfo_t ring_buf_info);
1557 #endif /* _DP_RX_H */
1558