/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_RX_H
#define _DP_RX_H

#include "hal_rx.h"
#include "dp_tx.h"
#include "dp_peer.h"
#include "dp_internal.h"

#ifdef RXDMA_OPTIMIZATION
#ifndef RX_DATA_BUFFER_ALIGNMENT
#define RX_DATA_BUFFER_ALIGNMENT        128
#endif
#ifndef RX_MONITOR_BUFFER_ALIGNMENT
#define RX_MONITOR_BUFFER_ALIGNMENT     128
#endif
#else /* RXDMA_OPTIMIZATION */
#define RX_DATA_BUFFER_ALIGNMENT        4
#define RX_MONITOR_BUFFER_ALIGNMENT     4
#endif /* RXDMA_OPTIMIZATION */

#ifdef QCA_HOST2FW_RXBUF_RING
#define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW1_BM
/* RBM value used for re-injecting defragmented packets into REO */
#define DP_DEFRAG_RBM HAL_RX_BUF_RBM_SW3_BM
#else
#define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW3_BM
#define DP_DEFRAG_RBM DP_WBM2SW_RBM
#endif /* QCA_HOST2FW_RXBUF_RING */

#define RX_BUFFER_RESERVATION   0

#define DP_PEER_METADATA_PEER_ID_MASK	0x0000ffff
#define DP_PEER_METADATA_PEER_ID_SHIFT	0
#define DP_PEER_METADATA_VDEV_ID_MASK	0x003f0000
#define DP_PEER_METADATA_VDEV_ID_SHIFT	16

#define DP_PEER_METADATA_PEER_ID_GET(_peer_metadata)		\
	(((_peer_metadata) & DP_PEER_METADATA_PEER_ID_MASK)	\
			>> DP_PEER_METADATA_PEER_ID_SHIFT)

#define DP_PEER_METADATA_VDEV_ID_GET(_peer_metadata)		\
	(((_peer_metadata) & DP_PEER_METADATA_VDEV_ID_MASK)	\
			>> DP_PEER_METADATA_VDEV_ID_SHIFT)
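
/*
 * Usage sketch (illustrative only, not part of the driver API): decoding
 * the peer metadata word carried in a REO destination ring descriptor.
 * The value 0x00120005 below is a made-up example.
 *
 *	uint32_t peer_metadata = 0x00120005;
 *	uint16_t peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_metadata);
 *	uint8_t vdev_id = DP_PEER_METADATA_VDEV_ID_GET(peer_metadata);
 *
 *	// peer_id == 0x0005, vdev_id == 0x12
 */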

#define DP_RX_DESC_MAGIC 0xdec0de

/**
 * enum dp_rx_desc_state
 *
 * @RX_DESC_REPLENISHED: rx desc replenished
 * @RX_DESC_IN_FREELIST: rx desc in freelist
 */
enum dp_rx_desc_state {
	RX_DESC_REPLENISHED,
	RX_DESC_IN_FREELIST,
};

/**
 * struct dp_rx_desc_dbg_info
 *
 * @freelist_caller: name of the function that put the
 *  rx desc in the freelist
 * @freelist_ts: timestamp when the rx desc was put in
 *  the freelist
 * @replenish_caller: name of the function that last
 *  replenished the rx desc
 * @replenish_ts: last replenish timestamp
 */
struct dp_rx_desc_dbg_info {
	char freelist_caller[QDF_MEM_FUNC_NAME_SIZE];
	uint64_t freelist_ts;
	char replenish_caller[QDF_MEM_FUNC_NAME_SIZE];
	uint64_t replenish_ts;
};

/**
 * struct dp_rx_desc
 *
 * @nbuf		: VA of the "skb" posted
 * @rx_buf_start	: VA of the original Rx buffer, before
 *			  movement of any skb->data pointer
 * @paddr_buf_start	: PA of the original Rx buffer, before
 *			  movement of any frag pointer
 * @cookie		: index into the sw array which holds
 *			  the sw Rx descriptors
 *			  Cookie space is 21 bits:
 *			  lower 18 bits -- index
 *			  upper  3 bits -- pool_id
 * @pool_id		: pool ID from which this descriptor was
 *			  allocated. Can only be used if there is no
 *			  flow steering
 * @in_use		: rx_desc is in use
 * @unmapped		: marks the rx_desc as unmapped when the
 *			  corresponding nbuf has already been unmapped
 * @in_err_state	: nbuf sanity failed for this descriptor
 */
struct dp_rx_desc {
	qdf_nbuf_t nbuf;
	uint8_t *rx_buf_start;
	qdf_dma_addr_t paddr_buf_start;
	uint32_t cookie;
	uint8_t	 pool_id;
#ifdef RX_DESC_DEBUG_CHECK
	uint32_t magic;
	struct dp_rx_desc_dbg_info *dbg_info;
#endif
	uint8_t	in_use:1,
	unmapped:1,
	in_err_state:1;
};

/* RX Descriptor Multi Page memory alloc related */
#define DP_RX_DESC_OFFSET_NUM_BITS 8
#define DP_RX_DESC_PAGE_ID_NUM_BITS 8
#define DP_RX_DESC_POOL_ID_NUM_BITS 4

#define DP_RX_DESC_PAGE_ID_SHIFT DP_RX_DESC_OFFSET_NUM_BITS
#define DP_RX_DESC_POOL_ID_SHIFT \
		(DP_RX_DESC_OFFSET_NUM_BITS + DP_RX_DESC_PAGE_ID_NUM_BITS)
#define RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK \
	(((1 << DP_RX_DESC_POOL_ID_NUM_BITS) - 1) << DP_RX_DESC_POOL_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK	\
			(((1 << DP_RX_DESC_PAGE_ID_NUM_BITS) - 1) << \
			 DP_RX_DESC_PAGE_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK \
			((1 << DP_RX_DESC_OFFSET_NUM_BITS) - 1)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(_cookie)		\
	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK) >>	\
			DP_RX_DESC_POOL_ID_SHIFT)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(_cookie)		\
	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK) >>	\
			DP_RX_DESC_PAGE_ID_SHIFT)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(_cookie)		\
	((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK)
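
/*
 * Layout sketch (illustrative only): a multi-page cookie packs
 * pool_id (4 bits) | page_id (8 bits) | offset (8 bits), so the three
 * GET macros simply mask and shift. The cookie value below is made up.
 *
 *	uint32_t cookie = (2 << DP_RX_DESC_POOL_ID_SHIFT) |
 *			  (5 << DP_RX_DESC_PAGE_ID_SHIFT) | 7;
 *
 *	// pool_id == 2, page_id == 5, offset == 7
 *	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
 *	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
 *	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
 */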

#define RX_DESC_COOKIE_INDEX_SHIFT		0
#define RX_DESC_COOKIE_INDEX_MASK		0x3ffff /* 18 bits */
#define RX_DESC_COOKIE_POOL_ID_SHIFT		18
#define RX_DESC_COOKIE_POOL_ID_MASK		0x1c0000

#define DP_RX_DESC_COOKIE_MAX	\
	(RX_DESC_COOKIE_INDEX_MASK | RX_DESC_COOKIE_POOL_ID_MASK)

#define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >>	\
			RX_DESC_COOKIE_POOL_ID_SHIFT)

#define DP_RX_DESC_COOKIE_INDEX_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >>	\
			RX_DESC_COOKIE_INDEX_SHIFT)
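
/*
 * Usage sketch (illustrative only): in the single-page scheme the 21-bit
 * cookie is pool_id (3 bits, shift 18) over index (18 bits). The cookie
 * value below is made up.
 *
 *	uint32_t cookie = (1 << RX_DESC_COOKIE_POOL_ID_SHIFT) | 42;
 *
 *	// pool_id == 1, index == 42
 *	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
 *	uint32_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
 */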

#define FRAME_MASK_IPV4_ARP   1
#define FRAME_MASK_IPV4_DHCP  2
#define FRAME_MASK_IPV4_EAPOL 4
#define FRAME_MASK_IPV6_DHCP  8

#define dp_rx_add_to_free_desc_list(head, tail, new) \
	__dp_rx_add_to_free_desc_list(head, tail, new, __func__)

#define dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
				num_buffers, desc_list, tail) \
	__dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
				  num_buffers, desc_list, tail, __func__)

#ifdef DP_RX_SPECIAL_FRAME_NEED
/**
 * dp_rx_is_special_frame() - check whether an RX frame is a special frame
 *
 * @nbuf: RX skb pointer
 * @frame_mask: the mask of special frame types wanted
 *
 * Check whether the RX frame matches any of the frame types set in the mask
 *
 * Return: true if the frame is a wanted special frame, false otherwise
 */
static inline
bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
{
	if (((frame_mask & FRAME_MASK_IPV4_ARP) &&
	     qdf_nbuf_is_ipv4_arp_pkt(nbuf)) ||
	    ((frame_mask & FRAME_MASK_IPV4_DHCP) &&
	     qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) ||
	    ((frame_mask & FRAME_MASK_IPV4_EAPOL) &&
	     qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) ||
	    ((frame_mask & FRAME_MASK_IPV6_DHCP) &&
	     qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))
		return true;

	return false;
}
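
/*
 * Usage sketch (illustrative only): building a frame mask that selects
 * ARP and EAPOL frames and testing an nbuf against it. 'nbuf' here is a
 * hypothetical received buffer.
 *
 *	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_EAPOL;
 *
 *	if (dp_rx_is_special_frame(nbuf, frame_mask)) {
 *		// deliver to stack even though normal delivery failed
 *	}
 */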

/**
 * dp_rx_deliver_special_frame() - Deliver the RX special frame to stack
 *				   if it matches the mask
 *
 * @soc: Datapath soc handler
 * @peer: pointer to DP peer
 * @nbuf: pointer to the skb of RX frame
 * @frame_mask: the mask of special frame types wanted
 * @rx_tlv_hdr: start of rx tlv header
 *
 * Note: msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and
 * a single nbuf is expected.
 *
 * Return: true - nbuf has been delivered to stack, false - not.
 */
bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer,
				 qdf_nbuf_t nbuf, uint32_t frame_mask,
				 uint8_t *rx_tlv_hdr);
#else
static inline
bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
{
	return false;
}

static inline
bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer,
				 qdf_nbuf_t nbuf, uint32_t frame_mask,
				 uint8_t *rx_tlv_hdr)
{
	return false;
}
#endif

/* DOC: Offset to obtain LLC hdr
 *
 * In the case of a WiFi parse error,
 * to reach the LLC header from the beginning
 * of the VLAN tag we need to skip 8 bytes:
 * vlan_tag(4) + length(2) + length added
 * by HW(2) = 8 bytes.
 */
#define DP_SKIP_VLAN		8

/**
 * struct dp_rx_cached_buf - rx cached buffer
 * @node: linked list node
 * @buf: skb buffer
 */
struct dp_rx_cached_buf {
	qdf_list_node_t node;
	qdf_nbuf_t buf;
};

/*
 * dp_rx_xor_block() - xor block of data
 * @b: destination data block
 * @a: source data block
 * @len: length of the data to process
 *
 * Return: None
 */
static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len)
{
	qdf_size_t i;

	for (i = 0; i < len; i++)
		b[i] ^= a[i];
}

/*
 * dp_rx_rotl() - rotate the bits left
 * @val: unsigned integer input value
 * @bits: number of bits
 *
 * Return: Input rotated left by 'bits' bits
 */
static inline uint32_t dp_rx_rotl(uint32_t val, int bits)
{
	return (val << bits) | (val >> (32 - bits));
}

/*
 * dp_rx_rotr() - rotate the bits right
 * @val: unsigned integer input value
 * @bits: number of bits
 *
 * Return: Input rotated right by 'bits' bits
 */
static inline uint32_t dp_rx_rotr(uint32_t val, int bits)
{
	return (val >> bits) | (val << (32 - bits));
}

/*
 * dp_set_rx_queue() - set queue_mapping in skb
 * @nbuf: skb
 * @queue_id: rx queue_id
 *
 * Return: void
 */
#ifdef QCA_OL_RX_MULTIQ_SUPPORT
static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
{
	qdf_nbuf_record_rx_queue(nbuf, queue_id);
}
#else
static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
{
}
#endif

/*
 * dp_rx_xswap() - swap each pair of adjacent bytes in a 32-bit word
 * @val: unsigned integer input value
 *
 * Return: Input with adjacent bytes swapped
 */
static inline uint32_t dp_rx_xswap(uint32_t val)
{
	return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
}

/*
 * dp_rx_get_le32_split() - assemble 32 bits from four little endian bytes
 * @b0: byte 0
 * @b1: byte 1
 * @b2: byte 2
 * @b3: byte 3
 *
 * Return: 32-bit value assembled from the little endian bytes
 */
static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2,
					uint8_t b3)
{
	return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
}

/*
 * dp_rx_get_le32() - read little endian 32 bits from a byte array
 * @p: source byte array
 *
 * Return: 32-bit value read in little endian order
 */
static inline uint32_t dp_rx_get_le32(const uint8_t *p)
{
	return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]);
}

/*
 * dp_rx_put_le32() - put little endian 32 bits
 * @p: destination char array
 * @v: source 32-bit integer
 *
 * Return: None
 */
static inline void dp_rx_put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (v) & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

/* One round of the Michael MIC block function */
#define dp_rx_michael_block(l, r)	\
	do {					\
		r ^= dp_rx_rotl(l, 17);	\
		l += r;				\
		r ^= dp_rx_xswap(l);		\
		l += r;				\
		r ^= dp_rx_rotl(l, 3);	\
		l += r;				\
		r ^= dp_rx_rotr(l, 2);	\
		l += r;				\
	} while (0)
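
/*
 * Usage sketch (illustrative only): the Michael MIC over a message is
 * computed by seeding (l, r) from the 8-byte MIC key and running
 * dp_rx_michael_block() once per 32-bit message word. 'key' and 'data'
 * here are hypothetical buffers.
 *
 *	uint32_t l = dp_rx_get_le32(key);
 *	uint32_t r = dp_rx_get_le32(key + 4);
 *
 *	l ^= dp_rx_get_le32(data);
 *	dp_rx_michael_block(l, r);
 */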

/**
 * union dp_rx_desc_list_elem_t
 *
 * @next		: Next pointer to form free list
 * @rx_desc		: DP Rx descriptor
 */
union dp_rx_desc_list_elem_t {
	union dp_rx_desc_list_elem_t *next;
	struct dp_rx_desc rx_desc;
};

#ifdef RX_DESC_MULTI_PAGE_ALLOC
/**
 * dp_rx_desc_find() - find dp rx descriptor from page ID and offset
 * @page_id: Page ID
 * @offset: Offset of the descriptor element
 * @rx_pool: RX descriptor pool to search
 *
 * Return: RX descriptor element
 */
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_pool);

static inline
struct dp_rx_desc *dp_get_rx_desc_from_cookie(struct dp_soc *soc,
					      struct rx_desc_pool *pool,
					      uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
	struct rx_desc_pool *rx_desc_pool;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
		return NULL;

	rx_desc_pool = &pool[pool_id];
	rx_desc_elem = (union dp_rx_desc_list_elem_t *)
		(rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		rx_desc_pool->elem_size * offset);

	return &rx_desc_elem->rx_desc;
}

/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc,
					       uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_buf[0], cookie);
}

/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc,
					     uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_mon[0], cookie);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor status ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc,
						uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_status[0], cookie);
}
#else

void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size,
			  struct rx_desc_pool *rx_desc_pool);

/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	struct rx_desc_pool *rx_desc_pool;

	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
		return NULL;

	rx_desc_pool = &soc->rx_desc_buf[pool_id];

	if (qdf_unlikely(index >= rx_desc_pool->pool_size))
		return NULL;

	return &(soc->rx_desc_buf[pool_id].array[index].rx_desc);
}

/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_mon[pool_id].array[index].rx_desc);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor status ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_status[pool_id].array[index].rx_desc);
}
#endif /* RX_DESC_MULTI_PAGE_ALLOC */

#ifdef DP_RX_DESC_COOKIE_INVALIDATE
static inline QDF_STATUS
dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc)
{
	if (qdf_unlikely(HAL_RX_REO_BUF_COOKIE_INVALID_GET(ring_desc)))
		return QDF_STATUS_E_FAILURE;

	HAL_RX_REO_BUF_COOKIE_INVALID_SET(ring_desc);
	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS
dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc)
{
	return QDF_STATUS_SUCCESS;
}
#endif

QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool);
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t pool_size,
				 struct rx_desc_pool *rx_desc_pool);

void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size,
			  struct rx_desc_pool *rx_desc_pool);
void dp_rx_pdev_mon_buf_buffers_free(struct dp_pdev *pdev, uint32_t mac_id);

void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				union dp_rx_desc_list_elem_t **local_desc_list,
				union dp_rx_desc_list_elem_t **tail,
				uint16_t pool_id,
				struct rx_desc_pool *rx_desc_pool);

uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				struct rx_desc_pool *rx_desc_pool,
				uint16_t num_descs,
				union dp_rx_desc_list_elem_t **desc_list,
				union dp_rx_desc_list_elem_t **tail);


QDF_STATUS dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev);
void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev);

QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev);
void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev);
void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool);

QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);
QDF_STATUS dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev);
void dp_rx_pdev_buffers_free(struct dp_pdev *pdev);

void dp_rx_pdev_detach(struct dp_pdev *pdev);

void dp_print_napi_stats(struct dp_soc *soc);
/**
 * dp_rx_vdev_detach() - detach vdev from dp rx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev);

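/**
 * dp_rx_process() - core RX processing for frames received on a REO
 *		     destination ring
 * @int_ctx: pointer to DP interrupt context
 * @hal_ring_hdl: opaque pointer to the HAL REO destination ring
 * @reo_ring_num: REO destination ring number on which the frames arrived
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Return: uint32_t: No. of elements processed
 */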
uint32_t
dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
	      uint8_t reo_ring_num,
	      uint32_t quota);

/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be
 * serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
			   hal_ring_handle_t hal_ring_hdl, uint32_t quota);

/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be
 * serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to WBM2HOST sw release ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl, uint32_t quota);

/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *		     multiple nbufs.
 * @nbuf: pointer to the first msdu of an amsdu.
 *
 * This function implements the creation of RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: returns the head nbuf which contains complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf);


/*
 * dp_rx_desc_nbuf_and_pool_free() - free the sw rx desc pool called during
 *				     de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool);

/*
 * dp_rx_desc_nbuf_free() - free the sw rx desc nbufs called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);

#ifdef DP_RX_MON_MEM_FRAG
/*
 * dp_rx_desc_frag_free() - free the sw rx desc frag called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_frag_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);
#else
static inline
void dp_rx_desc_frag_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
}
#endif
/*
 * dp_rx_desc_pool_free() - free the sw rx desc array called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);

void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
				struct dp_peer *peer);

#ifdef RX_DESC_DEBUG_CHECK
/**
 * dp_rx_desc_paddr_sanity_check() - paddr sanity for ring desc vs rx_desc
 * @rx_desc: rx descriptor
 * @ring_paddr: paddr obtained from the ring
 *
 * Return: true if the paddr from the ring matches the mapped nbuf paddr
 */
static inline
bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc,
				   uint64_t ring_paddr)
{
	return (ring_paddr == qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0));
}

/*
 * dp_rx_desc_alloc_dbg_info() - Alloc memory for rx descriptor debug
 *  structure
 * @rx_desc: rx descriptor pointer
 *
 * Return: None
 */
static inline
void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc)
{
	rx_desc->dbg_info = qdf_mem_malloc(sizeof(struct dp_rx_desc_dbg_info));
}

/*
 * dp_rx_desc_free_dbg_info() - Free rx descriptor debug
 *  structure memory
 * @rx_desc: rx descriptor pointer
 *
 * Return: None
 */
static inline
void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc)
{
	qdf_mem_free(rx_desc->dbg_info);
}

/*
 * dp_rx_desc_update_dbg_info() - Update rx descriptor debug info
 *  structure memory
 * @rx_desc: rx descriptor pointer
 * @func_name: name of the calling function
 * @flag: RX_DESC_REPLENISHED or RX_DESC_IN_FREELIST
 *
 * Return: None
 */
static inline
void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc,
				const char *func_name, uint8_t flag)
{
	struct dp_rx_desc_dbg_info *info = rx_desc->dbg_info;

	if (!info)
		return;

	if (flag == RX_DESC_REPLENISHED) {
		qdf_str_lcopy(info->replenish_caller, func_name,
			      QDF_MEM_FUNC_NAME_SIZE);
		info->replenish_ts = qdf_get_log_timestamp();
	} else {
		qdf_str_lcopy(info->freelist_caller, func_name,
			      QDF_MEM_FUNC_NAME_SIZE);
		info->freelist_ts = qdf_get_log_timestamp();
	}
}
#else

static inline
bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc,
				   uint64_t ring_paddr)
{
	return true;
}

static inline
void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc)
{
}

static inline
void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc)
{
}

static inline
void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc,
				const char *func_name, uint8_t flag)
{
}
#endif /* RX_DESC_DEBUG_CHECK */

/**
 * dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list
 *
 * @head: pointer to the head of local free list
 * @tail: pointer to the tail of local free list
 * @new: new descriptor that is added to the free list
 * @func_name: caller func name
 *
 * Return: None
 */
static inline
void __dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head,
				 union dp_rx_desc_list_elem_t **tail,
				 struct dp_rx_desc *new, const char *func_name)
{
	qdf_assert(head && new);

	new->nbuf = NULL;
	new->in_use = 0;

	((union dp_rx_desc_list_elem_t *)new)->next = *head;
	*head = (union dp_rx_desc_list_elem_t *)new;
	/* reset tail if head->next is NULL */
	if (!*tail || !(*head)->next)
		*tail = *head;

	dp_rx_desc_update_dbg_info(new, func_name, RX_DESC_IN_FREELIST);
}
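
/*
 * Usage sketch (illustrative only): collecting consumed descriptors on a
 * local free list inside a processing loop, via the wrapper macro that
 * records the caller name. 'rx_desc' is a hypothetical descriptor that
 * has just been processed.
 *
 *	union dp_rx_desc_list_elem_t *head = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *
 *	dp_rx_add_to_free_desc_list(&head, &tail, rx_desc);
 *	// later: hand (head, tail) back via dp_rx_buffers_replenish()
 */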

uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
				   uint8_t mac_id);
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
		qdf_nbuf_t mpdu, bool mpdu_done, uint8_t mac_id);
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer);
void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		       uint16_t peer_id, uint8_t tid);


#define DP_RX_LIST_APPEND(head, tail, elem) \
	do {                                                          \
		if (!(head)) {                                        \
			(head) = (elem);                              \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) = 1;\
		} else {                                              \
			qdf_nbuf_set_next((tail), (elem));            \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head)++;  \
		}                                                     \
		(tail) = (elem);                                      \
		qdf_nbuf_set_next((tail), NULL);                      \
	} while (0)

#define DP_RX_MERGE_TWO_LIST(phead, ptail, chead, ctail) \
	do {                                                          \
		if (!(phead)) {                                       \
			(phead) = (chead);                            \
		} else {                                              \
			qdf_nbuf_set_next((ptail), (chead));          \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(phead) += \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(chead);   \
		}                                                     \
		(ptail) = (ctail);                                    \
		qdf_nbuf_set_next((ptail), NULL);                     \
	} while (0)
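
/*
 * Usage sketch (illustrative only): building an nbuf delivery list while
 * walking ring entries, then merging a child list into a parent list.
 * 'nbuf', 'chead' and 'ctail' are hypothetical buffers/lists.
 *
 *	qdf_nbuf_t head = NULL, tail = NULL;
 *
 *	DP_RX_LIST_APPEND(head, tail, nbuf);
 *	DP_RX_MERGE_TWO_LIST(head, tail, chead, ctail);
 *	// head now carries the element count of both lists
 */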

/* For qcn9000 emulation the pcie is a complete phy with no address
 * restrictions.
 */
#if !defined(BUILD_X86) || defined(QCA_WIFI_QCN9000)
static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
		qdf_dma_addr_t *paddr, struct rx_desc_pool *rx_desc_pool)
{
	return QDF_STATUS_SUCCESS;
}
#else
#define MAX_RETRY 100
static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
		qdf_dma_addr_t *paddr, struct rx_desc_pool *rx_desc_pool)
{
	uint32_t nbuf_retry = 0;
	int32_t ret;
	const uint32_t x86_phy_addr = 0x50000000;
	/*
	 * in M2M emulation platforms (x86) the memory below 0x50000000
	 * is reserved for target use, so any memory allocated in this
	 * region should not be used by host
	 */
	do {
		if (qdf_likely(*paddr > x86_phy_addr))
			return QDF_STATUS_SUCCESS;
		else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
					"phy addr %pK is below 0x50000000, trying again",
					paddr);

			nbuf_retry++;
			if ((*rx_netbuf)) {
				qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
						QDF_DMA_FROM_DEVICE);
				/* Not freeing buffer intentionally.
				 * Observed that the same buffer keeps getting
				 * re-allocated, resulting in longer load time
				 * and WMI init timeout.
				 * This buffer is anyway not useful so skip it.
				 */
			}

			*rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
						    rx_desc_pool->buf_size,
						    RX_BUFFER_RESERVATION,
						    rx_desc_pool->buf_alignment,
						    FALSE);

			if (qdf_unlikely(!(*rx_netbuf)))
				return QDF_STATUS_E_FAILURE;

			ret = qdf_nbuf_map_single(dp_soc->osdev, *rx_netbuf,
							QDF_DMA_FROM_DEVICE);

			if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
				qdf_nbuf_free(*rx_netbuf);
				*rx_netbuf = NULL;
				continue;
			}

			*paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0);
		}
	} while (nbuf_retry < MAX_RETRY);

	if ((*rx_netbuf)) {
		qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
					QDF_DMA_FROM_DEVICE);
		qdf_nbuf_free(*rx_netbuf);
	}

	return QDF_STATUS_E_FAILURE;
}
#endif

/**
 * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
 *				   the MSDU Link Descriptor
 * @soc: core txrx main context
 * @buf_info: buf_info includes the cookie that is used to look up the
 * virtual address of the link descriptor, after deriving the page id
 * and the offset or index of the desc on the associated page.
 *
 * This is the VA of the link descriptor, that HAL layer later uses to
 * retrieve the list of MSDU's for a given MPDU.
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
				  struct hal_buf_info *buf_info)
{
	void *link_desc_va;
	struct qdf_mem_multi_page_t *pages;
	uint16_t page_id = LINK_DESC_COOKIE_PAGE_ID(buf_info->sw_cookie);

	pages = &soc->link_desc_pages;
	if (!pages)
		return NULL;
	if (qdf_unlikely(page_id >= pages->num_pages))
		return NULL;
	link_desc_va = pages->dma_pages[page_id].page_v_addr_start +
		(buf_info->paddr - pages->dma_pages[page_id].page_p_addr);
	return link_desc_va;
}

/**
 * dp_rx_cookie_2_mon_link_desc_va() - Converts cookie to a virtual address of
 *				   the MSDU Link Descriptor
 * @pdev: core txrx pdev context
 * @buf_info: buf_info includes the cookie that is used to look up the virtual
 * address of the link descriptor. Normally this is just an index into a per
 * pdev array.
 * @mac_id: mac id for which the link descriptor is looked up
 *
 * This is the VA of the link descriptor in monitor mode destination ring,
 * that HAL layer later uses to retrieve the list of MSDU's for a given MPDU.
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_mon_link_desc_va(struct dp_pdev *pdev,
				  struct hal_buf_info *buf_info,
				  int mac_id)
{
	void *link_desc_va;
	struct qdf_mem_multi_page_t *pages;
	uint16_t page_id = LINK_DESC_COOKIE_PAGE_ID(buf_info->sw_cookie);

	pages = &pdev->soc->mon_link_desc_pages[mac_id];
	if (!pages)
		return NULL;

	if (qdf_unlikely(page_id >= pages->num_pages))
		return NULL;

	link_desc_va = pages->dma_pages[page_id].page_v_addr_start +
		(buf_info->paddr - pages->dma_pages[page_id].page_p_addr);

	return link_desc_va;
}

/**
 * dp_rx_defrag_concat() - Concatenate the fragments
 *
 * @dst: destination pointer to the buffer
 * @src: source pointer from where the fragment payload is to be copied
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src)
{
	/*
	 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst
	 * to provide space for src, the headroom portion is copied from
	 * the original dst buffer to the larger new dst buffer.
	 * (This is needed, because the headroom of the dst buffer
	 * contains the rx desc.)
	 */
	if (!qdf_nbuf_cat(dst, src)) {
		/*
		 * qdf_nbuf_cat does not free the src memory.
		 * Free src nbuf before returning.
		 * For the failure case the caller takes care of freeing
		 * the nbuf.
		 */
		qdf_nbuf_free(src);
		return QDF_STATUS_SUCCESS;
	}

	return QDF_STATUS_E_DEFRAG_ERROR;
}
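
/*
 * Usage sketch (illustrative only): folding a chain of fragments into
 * the head buffer during reassembly. 'head' and 'frag' are hypothetical
 * nbufs; on success dp_rx_defrag_concat() consumes 'frag'.
 *
 *	if (dp_rx_defrag_concat(head, frag) != QDF_STATUS_SUCCESS) {
 *		// 'frag' was not consumed; the caller must free it
 *	}
 */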

#ifndef FEATURE_WDS
static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc, uint16_t sa_idx, bool is_active)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_rx_wds_srcport_learn(struct dp_soc *soc,
			uint8_t *rx_tlv_hdr,
			struct dp_peer *ta_peer,
			qdf_nbuf_t nbuf,
			struct hal_rx_msdu_metadata msdu_metadata)
{
}
#endif

/*
 * dp_rx_desc_dump() - dump the sw rx descriptor
 *
 * @rx_desc: sw rx descriptor
 */
static inline void dp_rx_desc_dump(struct dp_rx_desc *rx_desc)
{
	dp_info("rx_desc->nbuf: %pK, rx_desc->cookie: %d, rx_desc->pool_id: %d, rx_desc->in_use: %d, rx_desc->unmapped: %d",
		rx_desc->nbuf, rx_desc->cookie, rx_desc->pool_id,
		rx_desc->in_use, rx_desc->unmapped);
}

/*
 * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet.
 *					In qwrap mode, packets originated from
 *					any vdev should not loopback and
 *					should be dropped.
 * @vdev: vdev on which rx packet is received
 * @nbuf: rx pkt
 *
 * Return: true if the packet is a qwrap loopback packet that should be
 *	   dropped, false otherwise
 */
#if ATH_SUPPORT_WRAP
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						qdf_nbuf_t nbuf)
{
	struct dp_vdev *psta_vdev;
	struct dp_pdev *pdev = vdev->pdev;
	uint8_t *data = qdf_nbuf_data(nbuf);

	if (qdf_unlikely(vdev->proxysta_vdev)) {
		/* In qwrap isolation mode, allow loopback packets as all
		 * packets go to RootAP and Loopback on the mpsta.
		 */
		if (vdev->isolation_vdev)
			return false;
		TAILQ_FOREACH(psta_vdev, &pdev->vdev_list, vdev_list_elem) {
			if (qdf_unlikely(psta_vdev->proxysta_vdev &&
					 !qdf_mem_cmp(psta_vdev->mac_addr.raw,
						      &data[QDF_MAC_ADDR_SIZE],
						      QDF_MAC_ADDR_SIZE))) {
				/* Drop packet if source address is equal to
				 * any of the vdev addresses.
				 */
				return true;
			}
		}
	}
	return false;
}
#else
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						qdf_nbuf_t nbuf)
{
	return false;
}
#endif

#if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\
	defined(WLAN_SUPPORT_RX_TAG_STATISTICS) ||\
	defined(WLAN_SUPPORT_RX_FLOW_TAG)
#include "dp_rx_tag.h"
#endif

#ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
/**
 * dp_rx_update_protocol_tag() - Reads CCE metadata from the RX MSDU end TLV
 *                              and sets the corresponding tag in the QDF packet
 * @soc: core txrx main context
 * @vdev: vdev on which the packet is received
 * @nbuf: QDF pkt buffer on which the protocol tag should be set
 * @rx_tlv_hdr: base address where the RX TLVs start
 * @ring_index: REO ring number, not used for error & monitor ring
 * @is_reo_exception: flag to indicate if rx from REO ring or exception ring
 * @is_update_stats: flag to indicate whether to update stats or not
 *
 * Return: void
 */
static inline void
dp_rx_update_protocol_tag(struct dp_soc *soc, struct dp_vdev *vdev,
			  qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
			  uint16_t ring_index,
			  bool is_reo_exception, bool is_update_stats)
{
}
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */

#ifndef WLAN_SUPPORT_RX_FLOW_TAG
/**
 * dp_rx_update_flow_tag() - Reads FSE metadata from the RX MSDU end TLV
 *                           and sets the corresponding tag in the QDF packet
 * @soc: core txrx main context
 * @vdev: vdev on which the packet is received
 * @nbuf: QDF pkt buffer on which the flow tag should be set
 * @rx_tlv_hdr: base address where the RX TLVs start
 * @update_stats: flag to indicate whether to update stats or not
 *
 * Return: void
 */
static inline void
dp_rx_update_flow_tag(struct dp_soc *soc, struct dp_vdev *vdev,
		      qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, bool update_stats)
{
}
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */

#if !defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) &&\
	!defined(WLAN_SUPPORT_RX_FLOW_TAG)
/**
 * dp_rx_mon_update_protocol_flow_tag() - Performs necessary checks for monitor
 *                                       mode and then tags appropriate packets
 * @soc: core txrx main context
 * @dp_pdev: pdev on which the packet is received
 * @msdu: QDF packet buffer on which the protocol tag should be set
 * @rx_desc: base address where the RX TLVs start
 *
 * Return: void
 */
static inline
void dp_rx_mon_update_protocol_flow_tag(struct dp_soc *soc,
					struct dp_pdev *dp_pdev,
					qdf_nbuf_t msdu, void *rx_desc)
{
}
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG || WLAN_SUPPORT_RX_FLOW_TAG */

/*
 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *			       called during dp rx initialization
 *			       and at the end of dp_rx_process.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 * @func_name: name of the caller function
 *
 * Return: return success or failure
 */
QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				 struct dp_srng *dp_rxdma_srng,
				 struct rx_desc_pool *rx_desc_pool,
				 uint32_t num_req_buffers,
				 union dp_rx_desc_list_elem_t **desc_list,
				 union dp_rx_desc_list_elem_t **tail,
				 const char *func_name);
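
/*
 * Usage sketch (illustrative only): returning the descriptors collected
 * on a local free list back to the RXDMA refill ring at the end of a
 * processing pass, via the wrapper macro that records the caller name.
 * 'soc', 'mac_id', 'rxdma_srng', 'rx_desc_pool' and 'count' are
 * hypothetical values owned by the caller.
 *
 *	if (count)
 *		dp_rx_buffers_replenish(soc, mac_id, rxdma_srng,
 *					rx_desc_pool, count, &head, &tail);
 */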

/*
 * dp_pdev_rx_buffers_attach() - replenish rxdma ring with rx nbufs
 *                               called during dp rx initialization
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 *
 * Return: return success or failure
 */
QDF_STATUS
dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
			  struct dp_srng *dp_rxdma_srng,
			  struct rx_desc_pool *rx_desc_pool,
			  uint32_t num_req_buffers);

/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 *	       containing the buffer_addr_info
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		       uint8_t bm_action);

/**
 * dp_rx_link_desc_return_by_addr() - Return a MPDU link descriptor to
 *				      (WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
			       hal_buff_addrinfo_t link_desc_addr,
			       uint8_t bm_action);

/**
 * dp_rxdma_err_process() - RxDMA error processing functionality
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Return: num of buffers processed
 */
uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		     uint32_t mac_id, uint32_t quota);

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				uint8_t *rx_tlv_hdr, struct dp_peer *peer);
QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					uint8_t *rx_tlv_hdr);

int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
			   struct dp_peer *peer);

/*
 * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
 *
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
 * @ring_desc: opaque pointer to the RX ring descriptor
 * @rx_desc: host rx descriptor
 *
 * Return: void
 */
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
				hal_ring_handle_t hal_ring_hdl,
				hal_ring_desc_t ring_desc,
				struct dp_rx_desc *rx_desc);

void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf);

#ifdef QCA_PEER_EXT_STATS
void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
			     qdf_nbuf_t nbuf);
#endif /* QCA_PEER_EXT_STATS */

#ifdef RX_DESC_DEBUG_CHECK
/**
 * dp_rx_desc_check_magic() - check the magic value in dp_rx_desc
 * @rx_desc: rx descriptor pointer
 *
 * Return: true, if magic is correct, else false.
 */
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
		return false;

	rx_desc->magic = 0;
	return true;
}

/**
 * dp_rx_desc_prep() - prepare rx desc
 * @rx_desc: rx descriptor pointer to be prepared
 * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info *
 *
 * Note: assumption is that we are associating a nbuf which is mapped
 *
 * Return: none
 */
static inline
void dp_rx_desc_prep(struct dp_rx_desc *rx_desc,
		     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	rx_desc->magic = DP_RX_DESC_MAGIC;
	rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf;
	rx_desc->unmapped = 0;
}

/**
 * dp_rx_desc_frag_prep() - prepare rx desc
 * @rx_desc: rx descriptor pointer to be prepared
 * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info *
 *
 * Note: assumption is that the frag address is mapped
 *
 * Return: none
 */
#ifdef DP_RX_MON_MEM_FRAG
static inline
void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	rx_desc->magic = DP_RX_DESC_MAGIC;
	rx_desc->rx_buf_start =
		(uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr);
	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
	rx_desc->unmapped = 0;
}
#else
static inline
void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
}
#endif /* DP_RX_MON_MEM_FRAG */
#else

static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	return true;
}

static inline
void dp_rx_desc_prep(struct dp_rx_desc *rx_desc,
		     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf;
	rx_desc->unmapped = 0;
}

#ifdef DP_RX_MON_MEM_FRAG
static inline
void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	rx_desc->rx_buf_start =
		(uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr);
	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
	rx_desc->unmapped = 0;
}
#else
static inline
void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
}
#endif /* DP_RX_MON_MEM_FRAG */

#endif /* RX_DESC_DEBUG_CHECK */

void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
				bool is_mon_dest_desc);

void dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer,
			     uint8_t err_code, uint8_t mac_id);

#ifndef QCA_MULTIPASS_SUPPORT
static inline
bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf, uint8_t tid)
{
	return false;
}
#else
bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf,
			     uint8_t tid);
#endif

#ifndef WLAN_RX_PKT_CAPTURE_ENH
static inline
QDF_STATUS dp_peer_set_rx_capture_enabled(struct dp_pdev *pdev,
					  struct dp_peer *peer_handle,
					  bool value, uint8_t *mac_addr)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * dp_rx_deliver_to_stack() - deliver pkts to network stack
 * Caller to hold peer refcount and check for valid peer
 * @soc: soc
 * @vdev: vdev
 * @peer: peer
 * @nbuf_head: skb list head
 * @nbuf_tail: skb list tail
 *
 * Return: None
 */
void dp_rx_deliver_to_stack(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_peer *peer,
			    qdf_nbuf_t nbuf_head,
			    qdf_nbuf_t nbuf_tail);

#ifdef QCA_OL_RX_LOCK_LESS_ACCESS
/*
 * dp_rx_srng_access_start() - Wrapper function to log access start of a hal
 *			       ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: 0 on success; error on failure
 */
static inline int
dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc,
			hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl);
}

/*
 * dp_rx_srng_access_end() - Wrapper function to log access end of a hal ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: None
 */
static inline void
dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl);
}
#else
static inline int
dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc,
			hal_ring_handle_t hal_ring_hdl)
{
	return dp_srng_access_start(int_ctx, soc, hal_ring_hdl);
}

static inline void
dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl)
{
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
}
#endif
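
/*
 * Usage sketch (illustrative only): the bracketing pattern that ring
 * processing loops follow. 'int_ctx', 'soc' and 'hal_ring_hdl' are
 * hypothetical values owned by the caller.
 *
 *	if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc,
 *						 hal_ring_hdl)))
 *		return 0;
 *
 *	// ... reap and process ring descriptors up to the quota ...
 *
 *	dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);
 */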

/*
 * dp_rx_wbm_sg_list_reset() - Initialize sg list
 *
 * This api should be called at soc init and after every sg processing.
 * @soc: DP SOC handle
 */
static inline void dp_rx_wbm_sg_list_reset(struct dp_soc *soc)
{
	if (soc) {
		soc->wbm_sg_param.wbm_is_first_msdu_in_sg = false;
		soc->wbm_sg_param.wbm_sg_nbuf_head = NULL;
		soc->wbm_sg_param.wbm_sg_nbuf_tail = NULL;
		soc->wbm_sg_param.wbm_sg_desc_msdu_len = 0;
	}
}

/*
 * dp_rx_wbm_sg_list_deinit() - De-initialize sg list
 *
 * This api should be called in the down path, to avoid any leak.
 * @soc: DP SOC handle
 */
static inline void dp_rx_wbm_sg_list_deinit(struct dp_soc *soc)
{
	if (soc) {
		if (soc->wbm_sg_param.wbm_sg_nbuf_head)
			qdf_nbuf_list_free(soc->wbm_sg_param.wbm_sg_nbuf_head);

		dp_rx_wbm_sg_list_reset(soc);
	}
}

#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
#define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \
	do {								   \
		if (!soc->rx_buff_pool[rx_desc->pool_id].is_initialized) { \
			DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf);	   \
			break;						   \
		}							   \
		DP_RX_LIST_APPEND(ebuf_head, ebuf_tail, rx_desc->nbuf);	   \
		if (!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)) {	   \
			if (!dp_rx_buffer_pool_refill(soc, ebuf_head,	   \
						      rx_desc->pool_id))   \
				DP_RX_MERGE_TWO_LIST(head, tail,	   \
						     ebuf_head, ebuf_tail);\
			ebuf_head = NULL;				   \
			ebuf_tail = NULL;				   \
		}							   \
	} while (0)
#else
#define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \
	DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf)
#endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */

/*
 * dp_rx_link_desc_refill_duplicate_check() - check if a link desc is a
 *					      duplicate before refill
 * @soc: DP SOC handle
 * @buf_info: the last link desc buf info
 * @ring_buf_info: current buf address pointer including link desc
 *
 * Return: None
 */
void dp_rx_link_desc_refill_duplicate_check(
				struct dp_soc *soc,
				struct hal_buf_info *buf_info,
				hal_buff_addrinfo_t ring_buf_info);
#endif /* _DP_RX_H */