/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_RX_H
#define _DP_RX_H

#include "hal_rx.h"
#include "dp_tx.h"
#include "dp_peer.h"
#include "dp_internal.h"

#ifdef RXDMA_OPTIMIZATION
#ifndef RX_DATA_BUFFER_ALIGNMENT
#define RX_DATA_BUFFER_ALIGNMENT        128
#endif
#ifndef RX_MONITOR_BUFFER_ALIGNMENT
#define RX_MONITOR_BUFFER_ALIGNMENT     128
#endif
#else /* RXDMA_OPTIMIZATION */
#define RX_DATA_BUFFER_ALIGNMENT        4
#define RX_MONITOR_BUFFER_ALIGNMENT     4
#endif /* RXDMA_OPTIMIZATION */

#ifdef QCA_HOST2FW_RXBUF_RING
#define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW1_BM
/* RBM value used for re-injecting defragmented packets into REO */
#define DP_DEFRAG_RBM HAL_RX_BUF_RBM_SW3_BM
#else
#define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW3_BM
#define DP_DEFRAG_RBM DP_WBM2SW_RBM
#endif /* QCA_HOST2FW_RXBUF_RING */

#define RX_BUFFER_RESERVATION   0
#define DP_PEER_METADATA_PEER_ID_MASK	0x0000ffff
#define DP_PEER_METADATA_PEER_ID_SHIFT	0
#define DP_PEER_METADATA_VDEV_ID_MASK	0x003f0000
#define DP_PEER_METADATA_VDEV_ID_SHIFT	16

#define DP_PEER_METADATA_PEER_ID_GET(_peer_metadata)		\
	(((_peer_metadata) & DP_PEER_METADATA_PEER_ID_MASK)	\
			>> DP_PEER_METADATA_PEER_ID_SHIFT)

#define DP_PEER_METADATA_VDEV_ID_GET(_peer_metadata)		\
	(((_peer_metadata) & DP_PEER_METADATA_VDEV_ID_MASK)	\
			>> DP_PEER_METADATA_VDEV_ID_SHIFT)

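/*
 * Illustrative sketch (not part of the driver API): decoding the 32-bit
 * peer metadata carried in the REO destination ring descriptor with the
 * accessors above. The metadata value here is hypothetical.
 *
 *	uint32_t peer_metadata = 0x00030005;
 *	uint16_t peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_metadata);
 *						// 0x0005, bits [15:0]
 *	uint8_t vdev_id = DP_PEER_METADATA_VDEV_ID_GET(peer_metadata);
 *						// 0x03, bits [21:16]
 */
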
#define DP_RX_DESC_MAGIC 0xdec0de

/**
 * enum dp_rx_desc_state
 *
 * @RX_DESC_REPLENISHED: rx desc replenished
 * @RX_DESC_IN_FREELIST: rx desc in freelist
 */
enum dp_rx_desc_state {
	RX_DESC_REPLENISHED,
	RX_DESC_IN_FREELIST,
};

/**
 * struct dp_rx_desc_dbg_info
 *
 * @freelist_caller: name of the function that put the
 *  rx desc in the freelist
 * @freelist_ts: timestamp when the rx desc was put in
 *  the freelist
 * @replenish_caller: name of the function that last
 *  replenished the rx desc
 * @replenish_ts: last replenish timestamp
 */
struct dp_rx_desc_dbg_info {
	char freelist_caller[QDF_MEM_FUNC_NAME_SIZE];
	uint64_t freelist_ts;
	char replenish_caller[QDF_MEM_FUNC_NAME_SIZE];
	uint64_t replenish_ts;
};

/**
 * struct dp_rx_desc
 *
 * @nbuf		: VA of the "skb" posted
 * @rx_buf_start	: VA of the original Rx buffer, before
 *			  movement of any skb->data pointer
 * @paddr_buf_start	: PA of the original Rx buffer, before
 *			  movement of any frag pointer
 * @cookie		: index into the sw array which holds
 *			  the sw Rx descriptors
 *			  Cookie space is 21 bits:
 *			  lower 18 bits -- index
 *			  upper  3 bits -- pool_id
 * @pool_id		: pool ID for which this descriptor was
 *			  allocated. Can only be used if there is
 *			  no flow steering
 * @in_use		: rx_desc is in use
 * @unmapped		: marks the rx_desc as unmapped when the
 *			  corresponding nbuf has already been unmapped
 * @in_err_state	: nbuf sanity failed for this descriptor
 */
struct dp_rx_desc {
	qdf_nbuf_t nbuf;
	uint8_t *rx_buf_start;
	qdf_dma_addr_t paddr_buf_start;
	uint32_t cookie;
	uint8_t	 pool_id;
#ifdef RX_DESC_DEBUG_CHECK
	uint32_t magic;
	struct dp_rx_desc_dbg_info *dbg_info;
#endif
	uint8_t	in_use:1,
	unmapped:1,
	in_err_state:1;
};

/* RX Descriptor Multi Page memory alloc related */
#define DP_RX_DESC_OFFSET_NUM_BITS 8
#define DP_RX_DESC_PAGE_ID_NUM_BITS 8
#define DP_RX_DESC_POOL_ID_NUM_BITS 4

#define DP_RX_DESC_PAGE_ID_SHIFT DP_RX_DESC_OFFSET_NUM_BITS
#define DP_RX_DESC_POOL_ID_SHIFT \
		(DP_RX_DESC_OFFSET_NUM_BITS + DP_RX_DESC_PAGE_ID_NUM_BITS)
#define RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK \
	(((1 << DP_RX_DESC_POOL_ID_NUM_BITS) - 1) << DP_RX_DESC_POOL_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK	\
			(((1 << DP_RX_DESC_PAGE_ID_NUM_BITS) - 1) << \
			 DP_RX_DESC_PAGE_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK \
			((1 << DP_RX_DESC_OFFSET_NUM_BITS) - 1)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(_cookie)		\
	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK) >>	\
			DP_RX_DESC_POOL_ID_SHIFT)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(_cookie)		\
	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK) >>	\
			DP_RX_DESC_PAGE_ID_SHIFT)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(_cookie)		\
	((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK)

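/*
 * Illustrative sketch (not part of the driver API): the multi-page cookie
 * packs pool_id into bits [19:16], page_id into bits [15:8] and the
 * element offset into bits [7:0]. Decoding a hypothetical cookie:
 *
 *	uint32_t cookie = (2 << DP_RX_DESC_POOL_ID_SHIFT) |
 *			  (5 << DP_RX_DESC_PAGE_ID_SHIFT) | 7;
 *	uint8_t pool_id =
 *		DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);	// 2
 *	uint16_t page_id =
 *		DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);	// 5
 *	uint8_t offset =
 *		DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);	// 7
 */
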
#define RX_DESC_COOKIE_INDEX_SHIFT		0
#define RX_DESC_COOKIE_INDEX_MASK		0x3ffff /* 18 bits */
#define RX_DESC_COOKIE_POOL_ID_SHIFT		18
#define RX_DESC_COOKIE_POOL_ID_MASK		0x1c0000

#define DP_RX_DESC_COOKIE_MAX	\
	(RX_DESC_COOKIE_INDEX_MASK | RX_DESC_COOKIE_POOL_ID_MASK)

#define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >>	\
			RX_DESC_COOKIE_POOL_ID_SHIFT)

#define DP_RX_DESC_COOKIE_INDEX_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >>	\
			RX_DESC_COOKIE_INDEX_SHIFT)

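/*
 * Illustrative sketch (not part of the driver API): the legacy 21-bit
 * cookie keeps the pool index in bits [17:0] and the pool ID in
 * bits [20:18].
 *
 *	uint32_t cookie = (1 << RX_DESC_COOKIE_POOL_ID_SHIFT) | 42;
 *	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);	// 1
 *	uint32_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);		// 42
 */
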
#define dp_rx_add_to_free_desc_list(head, tail, new) \
	__dp_rx_add_to_free_desc_list(head, tail, new, __func__)

#define dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
				num_buffers, desc_list, tail) \
	__dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
				  num_buffers, desc_list, tail, __func__)

#ifdef DP_RX_SPECIAL_FRAME_NEED
/**
 * dp_rx_is_special_frame() - check whether an RX frame is one of the
 *			      wanted special frame types
 *
 * @nbuf: RX skb pointer
 * @frame_mask: the mask of wanted special frame types
 *
 * Check whether the RX frame matches one of the frame types in the mask
 *
 * Return: true - special frame wanted, false - not
 */
static inline
bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
{
	if (((frame_mask & FRAME_MASK_IPV4_ARP) &&
	     qdf_nbuf_is_ipv4_arp_pkt(nbuf)) ||
	    ((frame_mask & FRAME_MASK_IPV4_DHCP) &&
	     qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) ||
	    ((frame_mask & FRAME_MASK_IPV4_EAPOL) &&
	     qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) ||
	    ((frame_mask & FRAME_MASK_IPV6_DHCP) &&
	     qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))
		return true;

	return false;
}

/**
 * dp_rx_deliver_special_frame() - Deliver the RX special frame to stack
 *				   if it matches the mask
 *
 * @soc: Datapath soc handler
 * @peer: pointer to DP peer
 * @nbuf: pointer to the skb of RX frame
 * @frame_mask: the mask of wanted special frame types
 * @rx_tlv_hdr: start of rx tlv header
 *
 * Note: msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and
 * a single nbuf is expected.
 *
 * Return: true - nbuf has been delivered to stack, false - not.
 */
bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer,
				 qdf_nbuf_t nbuf, uint32_t frame_mask,
				 uint8_t *rx_tlv_hdr);
#else
static inline
bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
{
	return false;
}

static inline
bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer,
				 qdf_nbuf_t nbuf, uint32_t frame_mask,
				 uint8_t *rx_tlv_hdr)
{
	return false;
}
#endif

/* DOC: Offset to obtain LLC hdr
 *
 * In the case of a WiFi parse error,
 * to reach the LLC header from the
 * beginning of the VLAN tag we need to
 * skip 8 bytes: VLAN tag (4) + length (2)
 * + length added by HW (2) = 8 bytes.
 */
#define DP_SKIP_VLAN		8

/**
 * struct dp_rx_cached_buf - rx cached buffer
 * @node: linked list node
 * @buf: skb buffer
 */
struct dp_rx_cached_buf {
	qdf_list_node_t node;
	qdf_nbuf_t buf;
};

/*
 * dp_rx_xor_block() - xor block of data
 * @b: destination data block
 * @a: source data block
 * @len: length of the data to process
 *
 * Return: None
 */
static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len)
{
	qdf_size_t i;

	for (i = 0; i < len; i++)
		b[i] ^= a[i];
}

/*
 * dp_rx_rotl() - rotate the bits left
 * @val: unsigned integer input value
 * @bits: number of bits
 *
 * Return: Input value rotated left by 'bits' bits
 */
static inline uint32_t dp_rx_rotl(uint32_t val, int bits)
{
	return (val << bits) | (val >> (32 - bits));
}

/*
 * dp_rx_rotr() - rotate the bits right
 * @val: unsigned integer input value
 * @bits: number of bits
 *
 * Return: Input value rotated right by 'bits' bits
 */
static inline uint32_t dp_rx_rotr(uint32_t val, int bits)
{
	return (val >> bits) | (val << (32 - bits));
}

/*
 * dp_set_rx_queue() - set queue_mapping in skb
 * @nbuf: skb
 * @queue_id: rx queue_id
 *
 * Return: void
 */
#ifdef QCA_OL_RX_MULTIQ_SUPPORT
static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
{
	qdf_nbuf_record_rx_queue(nbuf, queue_id);
}
#else
static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
{
}
#endif

/*
 * dp_rx_xswap() - swap adjacent bytes within each 16-bit half of a word
 * @val: unsigned integer input value
 *
 * Return: Integer with adjacent bytes swapped
 */
static inline uint32_t dp_rx_xswap(uint32_t val)
{
	return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
}

/*
 * dp_rx_get_le32_split() - assemble 32 bits from four little endian bytes
 * @b0: byte 0
 * @b1: byte 1
 * @b2: byte 2
 * @b3: byte 3
 *
 * Return: Integer assembled from the split little endian bytes
 */
static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2,
					uint8_t b3)
{
	return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
}

/*
 * dp_rx_get_le32() - get little endian 32 bits
 * @p: source byte array
 *
 * Return: Integer read from the array as little endian 32 bits
 */
static inline uint32_t dp_rx_get_le32(const uint8_t *p)
{
	return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]);
}

/*
 * dp_rx_put_le32() - put little endian 32 bits
 * @p: destination char array
 * @v: source 32-bit integer
 *
 * Return: None
 */
static inline void dp_rx_put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (v) & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

/* Extract one Michael MIC block of data */
#define dp_rx_michael_block(l, r)	\
	do {					\
		r ^= dp_rx_rotl(l, 17);	\
		l += r;				\
		r ^= dp_rx_xswap(l);		\
		l += r;				\
		r ^= dp_rx_rotl(l, 3);	\
		l += r;				\
		r ^= dp_rx_rotr(l, 2);	\
		l += r;				\
	} while (0)

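/*
 * Illustrative sketch (not part of the driver API): one iteration of the
 * Michael MIC over a single 8-byte message chunk, combining the
 * little-endian helpers above. 'key', 'msg' and 'mic' are hypothetical
 * byte arrays.
 *
 *	uint32_t l = dp_rx_get_le32(key);	// key[0..3]
 *	uint32_t r = dp_rx_get_le32(key + 4);	// key[4..7]
 *
 *	l ^= dp_rx_get_le32(msg);		// mix in 4 message bytes
 *	dp_rx_michael_block(l, r);		// one block round
 *	l ^= dp_rx_get_le32(msg + 4);		// next 4 bytes
 *	dp_rx_michael_block(l, r);
 *
 *	dp_rx_put_le32(mic, l);			// emit the 8-byte MIC
 *	dp_rx_put_le32(mic + 4, r);
 */
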
/**
 * union dp_rx_desc_list_elem_t
 *
 * @next		: Next pointer to form free list
 * @rx_desc		: DP Rx descriptor
 */
union dp_rx_desc_list_elem_t {
	union dp_rx_desc_list_elem_t *next;
	struct dp_rx_desc rx_desc;
};

#ifdef RX_DESC_MULTI_PAGE_ALLOC
/**
 * dp_rx_desc_find() - find dp rx descriptor from page ID and offset
 * @page_id: Page ID
 * @offset: Offset of the descriptor element
 * @rx_pool: RX descriptor pool in which to look up the element
 *
 * Return: RX descriptor element
 */
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_pool);

static inline
struct dp_rx_desc *dp_get_rx_desc_from_cookie(struct dp_soc *soc,
					      struct rx_desc_pool *pool,
					      uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
	struct rx_desc_pool *rx_desc_pool;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
		return NULL;

	rx_desc_pool = &pool[pool_id];
	rx_desc_elem = (union dp_rx_desc_list_elem_t *)
		(rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		rx_desc_pool->elem_size * offset);

	return &rx_desc_elem->rx_desc;
}

/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc,
					       uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_buf[0], cookie);
}

/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc,
					     uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_mon[0], cookie);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor status ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc,
						uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_status[0], cookie);
}
#else

void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size,
			  struct rx_desc_pool *rx_desc_pool);

/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	struct rx_desc_pool *rx_desc_pool;

	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
		return NULL;

	rx_desc_pool = &soc->rx_desc_buf[pool_id];

	if (qdf_unlikely(index >= rx_desc_pool->pool_size))
		return NULL;

	return &(soc->rx_desc_buf[pool_id].array[index].rx_desc);
}

/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_mon[pool_id].array[index].rx_desc);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor status ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_status[pool_id].array[index].rx_desc);
}
#endif /* RX_DESC_MULTI_PAGE_ALLOC */

#ifdef DP_RX_DESC_COOKIE_INVALIDATE
static inline QDF_STATUS
dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc)
{
	if (qdf_unlikely(HAL_RX_REO_BUF_COOKIE_INVALID_GET(ring_desc)))
		return QDF_STATUS_E_FAILURE;

	HAL_RX_REO_BUF_COOKIE_INVALID_SET(ring_desc);
	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS
dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc)
{
	return QDF_STATUS_SUCCESS;
}
#endif

QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool);
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t pool_size,
				 struct rx_desc_pool *rx_desc_pool);

void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size,
			  struct rx_desc_pool *rx_desc_pool);
void dp_rx_pdev_mon_buf_buffers_free(struct dp_pdev *pdev, uint32_t mac_id);

void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				union dp_rx_desc_list_elem_t **local_desc_list,
				union dp_rx_desc_list_elem_t **tail,
				uint16_t pool_id,
				struct rx_desc_pool *rx_desc_pool);

uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				struct rx_desc_pool *rx_desc_pool,
				uint16_t num_descs,
				union dp_rx_desc_list_elem_t **desc_list,
				union dp_rx_desc_list_elem_t **tail);

QDF_STATUS dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev);
void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev);

QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev);
void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev);
void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool);

QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);
QDF_STATUS dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev);
void dp_rx_pdev_buffers_free(struct dp_pdev *pdev);

void dp_rx_pdev_detach(struct dp_pdev *pdev);

void dp_print_napi_stats(struct dp_soc *soc);

/**
 * dp_rx_vdev_detach() - detach vdev from dp rx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev);

/**
 * dp_rx_process() - RX processing for frames routed to a REO
 *		     destination ring
 * @int_ctx: pointer to DP interrupt context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
 * @reo_ring_num: REO destination ring number on which frames are received
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
	      uint8_t reo_ring_num,
	      uint32_t quota);

/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be
 * serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
			   hal_ring_handle_t hal_ring_hdl, uint32_t quota);

/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be
 * serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to WBM2HOST sw release ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl, uint32_t quota);

/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *		       multiple nbufs.
 * @soc: core txrx main context
 * @nbuf: pointer to the first msdu of an amsdu.
 *
 * This function implements the creation of RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: returns the head nbuf which contains complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf);

/*
 * dp_rx_desc_nbuf_and_pool_free() - free the sw rx desc pool called during
 *				     de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool);

/*
 * dp_rx_desc_nbuf_free() - free the sw rx desc nbufs called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);

#ifdef DP_RX_MON_MEM_FRAG
/*
 * dp_rx_desc_frag_free() - free the sw rx desc frag called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_frag_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);
#else
static inline
void dp_rx_desc_frag_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
}
#endif

/*
 * dp_rx_desc_pool_free() - free the sw rx desc array called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);

void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
				struct dp_peer *peer);

#ifdef RX_DESC_DEBUG_CHECK
/**
 * dp_rx_desc_paddr_sanity_check() - paddr sanity for ring desc vs rx_desc
 * @rx_desc: rx descriptor
 * @ring_paddr: paddr obtained from the ring
 *
 * Return: true if the paddr from the ring matches the nbuf paddr,
 *	   else false
 */
static inline
bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc,
				   uint64_t ring_paddr)
{
	return (ring_paddr == qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0));
}

/*
 * dp_rx_desc_alloc_dbg_info() - Alloc memory for rx descriptor debug
 *  structure
 * @rx_desc: rx descriptor pointer
 *
 * Return: None
 */
static inline
void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc)
{
	rx_desc->dbg_info = qdf_mem_malloc(sizeof(struct dp_rx_desc_dbg_info));
}

/*
 * dp_rx_desc_free_dbg_info() - Free rx descriptor debug
 *  structure memory
 * @rx_desc: rx descriptor pointer
 *
 * Return: None
 */
static inline
void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc)
{
	qdf_mem_free(rx_desc->dbg_info);
}

/*
 * dp_rx_desc_update_dbg_info() - Update rx descriptor debug info
 *  structure memory
 * @rx_desc: rx descriptor pointer
 * @func_name: name of the caller function
 * @flag: RX_DESC_REPLENISHED or RX_DESC_IN_FREELIST
 *
 * Return: None
 */
static inline
void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc,
				const char *func_name, uint8_t flag)
{
	struct dp_rx_desc_dbg_info *info = rx_desc->dbg_info;

	if (!info)
		return;

	if (flag == RX_DESC_REPLENISHED) {
		qdf_str_lcopy(info->replenish_caller, func_name,
			      QDF_MEM_FUNC_NAME_SIZE);
		info->replenish_ts = qdf_get_log_timestamp();
	} else {
		qdf_str_lcopy(info->freelist_caller, func_name,
			      QDF_MEM_FUNC_NAME_SIZE);
		info->freelist_ts = qdf_get_log_timestamp();
	}
}
#else

static inline
bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc,
				   uint64_t ring_paddr)
{
	return true;
}

static inline
void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc)
{
}

static inline
void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc)
{
}

static inline
void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc,
				const char *func_name, uint8_t flag)
{
}
#endif /* RX_DESC_DEBUG_CHECK */

/**
 * __dp_rx_add_to_free_desc_list() - Adds a descriptor to a local free
 *				     descriptor list
 *
 * @head: pointer to the head of local free list
 * @tail: pointer to the tail of local free list
 * @new: new descriptor that is added to the free list
 * @func_name: caller func name
 *
 * Return: None
 */
static inline
void __dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head,
				 union dp_rx_desc_list_elem_t **tail,
				 struct dp_rx_desc *new, const char *func_name)
{
	qdf_assert(head && new);

	new->nbuf = NULL;
	new->in_use = 0;

	((union dp_rx_desc_list_elem_t *)new)->next = *head;
	*head = (union dp_rx_desc_list_elem_t *)new;
	/* reset tail if head->next is NULL */
	if (!*tail || !(*head)->next)
		*tail = *head;

	dp_rx_desc_update_dbg_info(new, func_name, RX_DESC_IN_FREELIST);
}
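
/*
 * Illustrative sketch (not part of the driver API): returning a batch of
 * software rx descriptors to a local free list and then handing that list
 * back to the pool. 'descs', 'num', 'pool_id' and 'rx_desc_pool' are
 * hypothetical locals of the caller.
 *
 *	union dp_rx_desc_list_elem_t *head = NULL, *tail = NULL;
 *	int i;
 *
 *	for (i = 0; i < num; i++)
 *		dp_rx_add_to_free_desc_list(&head, &tail, descs[i]);
 *
 *	dp_rx_add_desc_list_to_free_list(soc, &head, &tail, pool_id,
 *					 rx_desc_pool);
 */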

uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
				   uint8_t mac_id);
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
		qdf_nbuf_t mpdu, bool mpdu_done, uint8_t mac_id);
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer);
void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		       uint16_t peer_id, uint8_t tid);

#define DP_RX_HEAD_APPEND(head, elem) \
	do {                                                            \
		qdf_nbuf_set_next((elem), (head));			\
		(head) = (elem);                                        \
	} while (0)

#define DP_RX_LIST_APPEND(head, tail, elem) \
	do {                                                          \
		if (!(head)) {                                        \
			(head) = (elem);                              \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) = 1;\
		} else {                                              \
			qdf_nbuf_set_next((tail), (elem));            \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head)++;  \
		}                                                     \
		(tail) = (elem);                                      \
		qdf_nbuf_set_next((tail), NULL);                      \
	} while (0)

#define DP_RX_MERGE_TWO_LIST(phead, ptail, chead, ctail) \
	do {                                                          \
		if (!(phead)) {                                       \
			(phead) = (chead);                            \
		} else {                                              \
			qdf_nbuf_set_next((ptail), (chead));          \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(phead) += \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(chead);   \
		}                                                     \
		(ptail) = (ctail);                                    \
		qdf_nbuf_set_next((ptail), NULL);                     \
	} while (0)

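/*
 * Illustrative sketch (not part of the driver API): chaining received
 * nbufs with the list macros above before delivering them as one chain.
 * 'nbuf1' and 'nbuf2' are hypothetical.
 *
 *	qdf_nbuf_t head = NULL, tail = NULL;
 *
 *	DP_RX_LIST_APPEND(head, tail, nbuf1);	// head == tail == nbuf1
 *	DP_RX_LIST_APPEND(head, tail, nbuf2);	// two-element list,
 *						// element count updated
 *	// hand 'head' .. 'tail' to e.g. dp_rx_deliver_to_stack()
 */
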
/* For QCN9000 emulation the PCIe is a complete PHY and there are no
 * address restrictions.
 */
#if !defined(BUILD_X86) || defined(QCA_WIFI_QCN9000)
static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
		qdf_dma_addr_t *paddr, struct rx_desc_pool *rx_desc_pool)
{
	return QDF_STATUS_SUCCESS;
}
#else
#define MAX_RETRY 100
static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
		qdf_dma_addr_t *paddr, struct rx_desc_pool *rx_desc_pool)
{
	uint32_t nbuf_retry = 0;
	int32_t ret;
	const uint32_t x86_phy_addr = 0x50000000;
	/*
	 * In M2M emulation platforms (x86) the memory below 0x50000000
	 * is reserved for target use, so any memory allocated in this
	 * region should not be used by the host.
	 */
	do {
		if (qdf_likely(*paddr > x86_phy_addr))
			return QDF_STATUS_SUCCESS;
		else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
					"phy addr %pK is below 0x50000000, trying again",
					paddr);

			nbuf_retry++;
			if ((*rx_netbuf)) {
				qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
						QDF_DMA_FROM_DEVICE);
				/* Not freeing the buffer intentionally.
				 * Observed that the same buffer was getting
				 * re-allocated, resulting in a longer load
				 * time and a WMI init timeout.
				 * This buffer is anyway not useful, so skip
				 * it.
				 */
			}

			*rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
						    rx_desc_pool->buf_size,
						    RX_BUFFER_RESERVATION,
						    rx_desc_pool->buf_alignment,
						    FALSE);

			if (qdf_unlikely(!(*rx_netbuf)))
				return QDF_STATUS_E_FAILURE;

			ret = qdf_nbuf_map_single(dp_soc->osdev, *rx_netbuf,
							QDF_DMA_FROM_DEVICE);

			if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
				qdf_nbuf_free(*rx_netbuf);
				*rx_netbuf = NULL;
				continue;
			}

			*paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0);
		}
	} while (nbuf_retry < MAX_RETRY);

	if ((*rx_netbuf)) {
		qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
					QDF_DMA_FROM_DEVICE);
		qdf_nbuf_free(*rx_netbuf);
	}

	return QDF_STATUS_E_FAILURE;
}
#endif

/**
 * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
 *				   the MSDU Link Descriptor
 * @soc: core txrx main context
 * @buf_info: buf_info includes cookie that is used to lookup
 * virtual address of link descriptor after deriving the page id
 * and the offset or index of the desc on the associated page.
 *
 * This is the VA of the link descriptor, that HAL layer later uses to
 * retrieve the list of MSDU's for a given MPDU.
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
				  struct hal_buf_info *buf_info)
{
	void *link_desc_va;
	struct qdf_mem_multi_page_t *pages;
	uint16_t page_id = LINK_DESC_COOKIE_PAGE_ID(buf_info->sw_cookie);

	pages = &soc->link_desc_pages;
	if (!pages)
		return NULL;
	if (qdf_unlikely(page_id >= pages->num_pages))
		return NULL;
	link_desc_va = pages->dma_pages[page_id].page_v_addr_start +
		(buf_info->paddr - pages->dma_pages[page_id].page_p_addr);
	return link_desc_va;
}

/**
 * dp_rx_cookie_2_mon_link_desc_va() - Converts cookie to a virtual address of
 *				   the MSDU Link Descriptor
 * @pdev: core txrx pdev context
 * @buf_info: buf_info includes cookie that is used to lookup the virtual
 * address of the link descriptor. Normally this is just an index into a
 * per pdev array.
 * @mac_id: mac id of the interface
 *
 * This is the VA of the link descriptor in monitor mode destination ring,
 * that HAL layer later uses to retrieve the list of MSDU's for a given MPDU.
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_mon_link_desc_va(struct dp_pdev *pdev,
				  struct hal_buf_info *buf_info,
				  int mac_id)
{
	void *link_desc_va;
	struct qdf_mem_multi_page_t *pages;
	uint16_t page_id = LINK_DESC_COOKIE_PAGE_ID(buf_info->sw_cookie);

	pages = &pdev->soc->mon_link_desc_pages[mac_id];
	if (!pages)
		return NULL;

	if (qdf_unlikely(page_id >= pages->num_pages))
		return NULL;

	link_desc_va = pages->dma_pages[page_id].page_v_addr_start +
		(buf_info->paddr - pages->dma_pages[page_id].page_p_addr);

	return link_desc_va;
}

/**
 * dp_rx_defrag_concat() - Concatenate the fragments
 *
 * @dst: destination pointer to the buffer
 * @src: source pointer from where the fragment payload is to be copied
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src)
{
	/*
	 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst
	 * to provide space for src, the headroom portion is copied from
	 * the original dst buffer to the larger new dst buffer.
	 * (This is needed, because the headroom of the dst buffer
	 * contains the rx desc.)
	 */
	if (!qdf_nbuf_cat(dst, src)) {
		/*
		 * qdf_nbuf_cat does not free the src memory, so free
		 * the src nbuf before returning.
		 * In the failure case the caller takes care of freeing
		 * the nbuf.
		 */
		qdf_nbuf_free(src);
		return QDF_STATUS_SUCCESS;
	}

	return QDF_STATUS_E_DEFRAG_ERROR;
}
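
/*
 * Illustrative sketch (not part of the driver API): stitching a chain of
 * fragments onto the head buffer. 'head' and 'frag_list_head' are
 * hypothetical; on success dp_rx_defrag_concat() frees each source
 * fragment, on failure the caller still owns it.
 *
 *	qdf_nbuf_t cur = frag_list_head, next;
 *
 *	while (cur) {
 *		next = qdf_nbuf_next(cur);
 *		if (dp_rx_defrag_concat(head, cur) != QDF_STATUS_SUCCESS)
 *			break;	// caller must free 'cur' on failure
 *		cur = next;
 *	}
 */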

#ifndef FEATURE_WDS
static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc,
					      uint16_t sa_idx, bool is_active)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_rx_wds_srcport_learn(struct dp_soc *soc,
			uint8_t *rx_tlv_hdr,
			struct dp_peer *ta_peer,
			qdf_nbuf_t nbuf,
			struct hal_rx_msdu_metadata msdu_metadata)
{
}
#endif

/*
 * dp_rx_desc_dump() - dump the sw rx descriptor
 *
 * @rx_desc: sw rx descriptor
 */
static inline void dp_rx_desc_dump(struct dp_rx_desc *rx_desc)
{
	dp_info("rx_desc->nbuf: %pK, rx_desc->cookie: %d, rx_desc->pool_id: %d, rx_desc->in_use: %d, rx_desc->unmapped: %d",
		rx_desc->nbuf, rx_desc->cookie, rx_desc->pool_id,
		rx_desc->in_use, rx_desc->unmapped);
}

/*
 * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet.
 *					In qwrap mode, packets originated from
 *					any vdev should not loopback and
 *					should be dropped.
 * @vdev: vdev on which rx packet is received
 * @nbuf: rx pkt
 *
 * Return: true if the packet is a loopback packet that must be dropped,
 *	   else false
 */
#if ATH_SUPPORT_WRAP
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						qdf_nbuf_t nbuf)
{
	struct dp_vdev *psta_vdev;
	struct dp_pdev *pdev = vdev->pdev;
	uint8_t *data = qdf_nbuf_data(nbuf);

	if (qdf_unlikely(vdev->proxysta_vdev)) {
		/* In qwrap isolation mode, allow loopback packets as all
		 * packets go to RootAP and Loopback on the mpsta.
		 */
		if (vdev->isolation_vdev)
			return false;
		TAILQ_FOREACH(psta_vdev, &pdev->vdev_list, vdev_list_elem) {
			if (qdf_unlikely(psta_vdev->proxysta_vdev &&
					 !qdf_mem_cmp(psta_vdev->mac_addr.raw,
						      &data[QDF_MAC_ADDR_SIZE],
						      QDF_MAC_ADDR_SIZE))) {
				/* Drop packet if source address is equal to
				 * any of the vdev addresses.
				 */
				return true;
			}
		}
	}
	return false;
}
#else
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						qdf_nbuf_t nbuf)
{
	return false;
}
#endif

#if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\
	defined(WLAN_SUPPORT_RX_TAG_STATISTICS) ||\
	defined(WLAN_SUPPORT_RX_FLOW_TAG)
#include "dp_rx_tag.h"
#endif

#ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
/**
 * dp_rx_update_protocol_tag() - Reads CCE metadata from the RX MSDU end TLV
 *                               and sets the corresponding tag in the QDF
 *                               packet
 * @soc: core txrx main context
 * @vdev: vdev on which the packet is received
 * @nbuf: QDF pkt buffer on which the protocol tag should be set
 * @rx_tlv_hdr: base address where the RX TLVs start
 * @ring_index: REO ring number, not used for error & monitor ring
 * @is_reo_exception: flag to indicate if rx from REO ring or exception ring
 * @is_update_stats: flag to indicate whether to update stats or not
 *
 * Return: void
 */
static inline void
dp_rx_update_protocol_tag(struct dp_soc *soc, struct dp_vdev *vdev,
			  qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
			  uint16_t ring_index,
			  bool is_reo_exception, bool is_update_stats)
{
}
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */

#ifndef WLAN_SUPPORT_RX_FLOW_TAG
/**
 * dp_rx_update_flow_tag() - Reads FSE metadata from the RX MSDU end TLV
 *                           and sets the corresponding tag in the QDF packet
 * @soc: core txrx main context
 * @vdev: vdev on which the packet is received
 * @nbuf: QDF pkt buffer on which the protocol tag should be set
 * @rx_tlv_hdr: base address where the RX TLVs start
 * @update_stats: flag to indicate whether to update stats or not
 *
 * Return: void
 */
static inline void
dp_rx_update_flow_tag(struct dp_soc *soc, struct dp_vdev *vdev,
		      qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, bool update_stats)
{
}
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */

#if !defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) &&\
	!defined(WLAN_SUPPORT_RX_FLOW_TAG)
/**
 * dp_rx_mon_update_protocol_flow_tag() - Performs necessary checks for monitor
 *                                        mode and then tags appropriate packets
 * @soc: core txrx main context
 * @dp_pdev: pdev on which the packet is received
 * @msdu: QDF packet buffer on which the protocol tag should be set
 * @rx_desc: base address where the RX TLVs start
 *
 * Return: void
 */
static inline
void dp_rx_mon_update_protocol_flow_tag(struct dp_soc *soc,
					struct dp_pdev *dp_pdev,
					qdf_nbuf_t msdu, void *rx_desc)
{
}
#endif /* !WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG && !WLAN_SUPPORT_RX_FLOW_TAG */

/*
 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *			       called during dp rx initialization
 *			       and at the end of dp_rx_process.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 * @func_name: name of the caller function
 *
 * Return: return success or failure
 */
QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				 struct dp_srng *dp_rxdma_srng,
				 struct rx_desc_pool *rx_desc_pool,
				 uint32_t num_req_buffers,
				 union dp_rx_desc_list_elem_t **desc_list,
				 union dp_rx_desc_list_elem_t **tail,
				 const char *func_name);

/*
 * dp_pdev_rx_buffers_attach() - replenish rxdma ring with rx nbufs
 *                               called during dp rx initialization
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 *
 * Return: return success or failure
 */
QDF_STATUS
dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
			  struct dp_srng *dp_rxdma_srng,
			  struct rx_desc_pool *rx_desc_pool,
			  uint32_t num_req_buffers);

/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		       uint8_t bm_action);

/**
 * dp_rx_link_desc_return_by_addr() - Return a MPDU link descriptor to
 *				      HW (WBM), by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
			       hal_buff_addrinfo_t link_desc_addr,
			       uint8_t bm_action);

/**
 * dp_rxdma_err_process() - RxDMA error processing functionality
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Return: num of buffers processed
 */
uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		     uint32_t mac_id, uint32_t quota);

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				uint8_t *rx_tlv_hdr, struct dp_peer *peer);
QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					uint8_t *rx_tlv_hdr);

int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
			   struct dp_peer *peer);

/*
 * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
 *
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
 * @ring_desc: opaque pointer to the RX ring descriptor
 * @rx_desc: host rx descriptor
 *
 * Return: void
 */
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
				hal_ring_handle_t hal_ring_hdl,
				hal_ring_desc_t ring_desc,
				struct dp_rx_desc *rx_desc);

void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf);

#ifdef QCA_PEER_EXT_STATS
void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
			     qdf_nbuf_t nbuf);
#endif /* QCA_PEER_EXT_STATS */

#ifdef RX_DESC_DEBUG_CHECK
/**
 * dp_rx_desc_check_magic() - check the magic value in dp_rx_desc
 * @rx_desc: rx descriptor pointer
 *
 * Return: true, if magic is correct, else false.
 */
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
		return false;

	rx_desc->magic = 0;
	return true;
}

/**
 * dp_rx_desc_prep() - prepare rx desc
 * @rx_desc: rx descriptor pointer to be prepared
 * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info *
 *
 * Note: assumption is that we are associating a nbuf which is mapped
 *
 * Return: none
 */
static inline
void dp_rx_desc_prep(struct dp_rx_desc *rx_desc,
		     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	rx_desc->magic = DP_RX_DESC_MAGIC;
	rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf;
	rx_desc->unmapped = 0;
}

/**
 * dp_rx_desc_frag_prep() - prepare rx desc
 * @rx_desc: rx descriptor pointer to be prepared
 * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info *
 *
 * Note: assumption is that the frag address is mapped
 *
 * Return: none
 */
#ifdef DP_RX_MON_MEM_FRAG
static inline
void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	rx_desc->magic = DP_RX_DESC_MAGIC;
	rx_desc->rx_buf_start =
		(uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr);
	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
	rx_desc->unmapped = 0;
}
#else
static inline
void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
}
#endif /* DP_RX_MON_MEM_FRAG */
#else

static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	return true;
}

static inline
void dp_rx_desc_prep(struct dp_rx_desc *rx_desc,
		     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf;
	rx_desc->unmapped = 0;
}

#ifdef DP_RX_MON_MEM_FRAG
static inline
void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	rx_desc->rx_buf_start =
		(uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr);
	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
	rx_desc->unmapped = 0;
}
#else
static inline
void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
}
#endif /* DP_RX_MON_MEM_FRAG */

#endif /* RX_DESC_DEBUG_CHECK */

void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
				bool is_mon_dest_desc);

void dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer,
			     uint8_t err_code, uint8_t mac_id);

#ifndef QCA_MULTIPASS_SUPPORT
static inline
bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf, uint8_t tid)
{
	return false;
}
#else
bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf,
			     uint8_t tid);
#endif

#ifndef WLAN_RX_PKT_CAPTURE_ENH
static inline
QDF_STATUS dp_peer_set_rx_capture_enabled(struct dp_pdev *pdev,
					  struct dp_peer *peer_handle,
					  bool value, uint8_t *mac_addr)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * dp_rx_deliver_to_stack() - deliver pkts to network stack
 * Caller to hold peer refcount and check for valid peer
 * @soc: soc
 * @vdev: vdev
 * @peer: peer
 * @nbuf_head: skb list head
 * @nbuf_tail: skb list tail
 *
 * Return: None
 */
void dp_rx_deliver_to_stack(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_peer *peer,
			    qdf_nbuf_t nbuf_head,
			    qdf_nbuf_t nbuf_tail);

#ifdef QCA_OL_RX_LOCK_LESS_ACCESS
/*
 * dp_rx_srng_access_start() - Wrapper function to log access start of a hal
 *			       ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: 0 on success; error on failure
 */
static inline int
dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc,
			hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl);
}

/*
 * dp_rx_srng_access_end() - Wrapper function to log access end of a hal ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: None
 */
static inline void
dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl);
}
#else
static inline int
dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc,
			hal_ring_handle_t hal_ring_hdl)
{
	return dp_srng_access_start(int_ctx, soc, hal_ring_hdl);
}

static inline void
dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl)
{
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
}
#endif

/*
 * dp_rx_wbm_sg_list_reset() - Initialize sg list
 *
 * This api should be called at soc init and after every sg processing.
 * @soc: DP SOC handle
 */
static inline void dp_rx_wbm_sg_list_reset(struct dp_soc *soc)
{
	if (soc) {
		soc->wbm_sg_param.wbm_is_first_msdu_in_sg = false;
		soc->wbm_sg_param.wbm_sg_nbuf_head = NULL;
		soc->wbm_sg_param.wbm_sg_nbuf_tail = NULL;
		soc->wbm_sg_param.wbm_sg_desc_msdu_len = 0;
	}
}

/*
 * dp_rx_wbm_sg_list_deinit() - De-initialize sg list
 *
 * This api should be called in the down path, to avoid any leak.
 * @soc: DP SOC handle
 */
static inline void dp_rx_wbm_sg_list_deinit(struct dp_soc *soc)
{
	if (soc) {
		if (soc->wbm_sg_param.wbm_sg_nbuf_head)
			qdf_nbuf_list_free(soc->wbm_sg_param.wbm_sg_nbuf_head);

		dp_rx_wbm_sg_list_reset(soc);
	}
}

#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
#define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \
	do {								   \
		if (!soc->rx_buff_pool[rx_desc->pool_id].is_initialized) { \
			DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf);	   \
			break;						   \
		}							   \
		DP_RX_LIST_APPEND(ebuf_head, ebuf_tail, rx_desc->nbuf);	   \
		if (!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)) {	   \
			if (!dp_rx_buffer_pool_refill(soc, ebuf_head,	   \
						      rx_desc->pool_id))   \
				DP_RX_MERGE_TWO_LIST(head, tail,	   \
						     ebuf_head, ebuf_tail);\
			ebuf_head = NULL;				   \
			ebuf_tail = NULL;				   \
		}							   \
	} while (0)
#else
#define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \
	DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf)
#endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */

/*
 * dp_rx_link_desc_refill_duplicate_check() - check if the link desc to refill
 *					      is a duplicate
 * @soc: DP SOC handle
 * @buf_info: the last link desc buf info
 * @ring_buf_info: current buf address pointer including link desc
 *
 * Return: none.
 */
void dp_rx_link_desc_refill_duplicate_check(
				struct dp_soc *soc,
				struct hal_buf_info *buf_info,
				hal_buff_addrinfo_t ring_buf_info);
#endif /* _DP_RX_H */