xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.h (revision a86b23ee68a2491aede2e03991f3fb37046f4e41)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _DP_RX_H
20 #define _DP_RX_H
21 
22 #include "hal_rx.h"
23 #include "dp_tx.h"
24 #include "dp_peer.h"
25 #include "dp_internal.h"
26 
27 #ifdef RXDMA_OPTIMIZATION
28 #ifndef RX_DATA_BUFFER_ALIGNMENT
29 #define RX_DATA_BUFFER_ALIGNMENT        128
30 #endif
31 #ifndef RX_MONITOR_BUFFER_ALIGNMENT
32 #define RX_MONITOR_BUFFER_ALIGNMENT     128
33 #endif
34 #else /* RXDMA_OPTIMIZATION */
35 #define RX_DATA_BUFFER_ALIGNMENT        4
36 #define RX_MONITOR_BUFFER_ALIGNMENT     4
37 #endif /* RXDMA_OPTIMIZATION */
38 
39 #ifdef QCA_HOST2FW_RXBUF_RING
40 #define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW1_BM
41 /* RBM value used for re-injecting defragmented packets into REO */
42 #define DP_DEFRAG_RBM HAL_RX_BUF_RBM_SW3_BM
43 #else
44 #define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW3_BM
45 #define DP_DEFRAG_RBM DP_WBM2SW_RBM
46 #endif /* QCA_HOST2FW_RXBUF_RING */
47 
48 #define RX_BUFFER_RESERVATION   0
49 
50 #define DP_PEER_METADATA_PEER_ID_MASK	0x0000ffff
51 #define DP_PEER_METADATA_PEER_ID_SHIFT	0
52 #define DP_PEER_METADATA_VDEV_ID_MASK	0x003f0000
53 #define DP_PEER_METADATA_VDEV_ID_SHIFT	16
54 
55 #define DP_PEER_METADATA_PEER_ID_GET(_peer_metadata)		\
56 	(((_peer_metadata) & DP_PEER_METADATA_PEER_ID_MASK)	\
57 			>> DP_PEER_METADATA_PEER_ID_SHIFT)
58 
59 #define DP_PEER_METADATA_VDEV_ID_GET(_peer_metadata)		\
60 	(((_peer_metadata) & DP_PEER_METADATA_VDEV_ID_MASK)	\
61 			>> DP_PEER_METADATA_VDEV_ID_SHIFT)
62 
63 #define DP_RX_DESC_MAGIC 0xdec0de
64 
65 /**
66  * enum dp_rx_desc_state
67  *
68  * @RX_DESC_REPLENISHED: rx desc has been replenished
69  * @RX_DESC_IN_FREELIST: rx desc is in the freelist
70  */
71 enum dp_rx_desc_state {
72 	RX_DESC_REPLENISHED,
73 	RX_DESC_IN_FREELIST,
74 };
75 
76 /**
77  * struct dp_rx_desc_dbg_info
78  *
79  * @freelist_caller: name of the function that put the
80  *  rx desc in the freelist
81  * @freelist_ts: timestamp when the rx desc was put in
82  *  the freelist
83  * @replenish_caller: name of the function that last
84  *  replenished the rx desc
85  * @replenish_ts: last replenish timestamp
86  */
87 struct dp_rx_desc_dbg_info {
88 	char freelist_caller[QDF_MEM_FUNC_NAME_SIZE];
89 	uint64_t freelist_ts;
90 	char replenish_caller[QDF_MEM_FUNC_NAME_SIZE];
91 	uint64_t replenish_ts;
92 };
93 
94 /**
95  * struct dp_rx_desc
96  *
97  * @nbuf		: VA of the "skb" posted
98  * @rx_buf_start	: VA of the original Rx buffer, before
99  *			  movement of any skb->data pointer
100  * @cookie		: index into the sw array which holds
101  *			  the sw Rx descriptors
102  *			  Cookie space is 21 bits:
103  *			  lower 18 bits -- index
104  *			  upper  3 bits -- pool_id
105  * @pool_id		: pool ID from which this descriptor was allocated.
106  *			  Can only be used if there is no flow
107  *			  steering
108  * @in_use		: rx_desc is in use
109  * @unmapped		: used to mark rx_desc as unmapped when the corresponding
110  *			  nbuf has already been unmapped
111  */
112 struct dp_rx_desc {
113 	qdf_nbuf_t nbuf;
114 	uint8_t *rx_buf_start;
115 	uint32_t cookie;
116 	uint8_t	 pool_id;
117 #ifdef RX_DESC_DEBUG_CHECK
118 	uint32_t magic;
119 	struct dp_rx_desc_dbg_info *dbg_info;
120 #endif
121 	uint8_t	in_use:1,
122 	unmapped:1;
123 };
124 
125 /* RX Descriptor Multi Page memory alloc related */
126 #define DP_RX_DESC_OFFSET_NUM_BITS 8
127 #define DP_RX_DESC_PAGE_ID_NUM_BITS 8
128 #define DP_RX_DESC_POOL_ID_NUM_BITS 4
129 
130 #define DP_RX_DESC_PAGE_ID_SHIFT DP_RX_DESC_OFFSET_NUM_BITS
131 #define DP_RX_DESC_POOL_ID_SHIFT \
132 		(DP_RX_DESC_OFFSET_NUM_BITS + DP_RX_DESC_PAGE_ID_NUM_BITS)
133 #define RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK \
134 	(((1 << DP_RX_DESC_POOL_ID_NUM_BITS) - 1) << DP_RX_DESC_POOL_ID_SHIFT)
135 #define RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK	\
136 			(((1 << DP_RX_DESC_PAGE_ID_NUM_BITS) - 1) << \
137 			 DP_RX_DESC_PAGE_ID_SHIFT)
138 #define RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK \
139 			((1 << DP_RX_DESC_OFFSET_NUM_BITS) - 1)
140 #define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(_cookie)		\
141 	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK) >>	\
142 			DP_RX_DESC_POOL_ID_SHIFT)
143 #define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(_cookie)		\
144 	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK) >>	\
145 			DP_RX_DESC_PAGE_ID_SHIFT)
146 #define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(_cookie)		\
147 	((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK)
148 
149 #define RX_DESC_COOKIE_INDEX_SHIFT		0
150 #define RX_DESC_COOKIE_INDEX_MASK		0x3ffff /* 18 bits */
151 #define RX_DESC_COOKIE_POOL_ID_SHIFT		18
152 #define RX_DESC_COOKIE_POOL_ID_MASK		0x1c0000
153 
154 #define DP_RX_DESC_COOKIE_MAX	\
155 	(RX_DESC_COOKIE_INDEX_MASK | RX_DESC_COOKIE_POOL_ID_MASK)
156 
157 #define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie)		\
158 	(((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >>	\
159 			RX_DESC_COOKIE_POOL_ID_SHIFT)
160 
161 #define DP_RX_DESC_COOKIE_INDEX_GET(_cookie)		\
162 	(((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >>	\
163 			RX_DESC_COOKIE_INDEX_SHIFT)
164 
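/*
 * Illustrative sketch (not part of the driver API): decoding the 21-bit
 * single-page cookie described above with the getters defined here. The
 * cookie value is a made-up example.
 *
 *	uint32_t cookie = (2 << RX_DESC_COOKIE_POOL_ID_SHIFT) | 0x1234;
 *
 *	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie); // 2
 *	uint32_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);    // 0x1234
 */
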
165 #define FRAME_MASK_IPV4_ARP   1
166 #define FRAME_MASK_IPV4_DHCP  2
167 #define FRAME_MASK_IPV4_EAPOL 4
168 #define FRAME_MASK_IPV6_DHCP  8
169 
170 #define dp_rx_add_to_free_desc_list(head, tail, new) \
171 	__dp_rx_add_to_free_desc_list(head, tail, new, __func__)
172 
173 #define dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
174 				num_buffers, desc_list, tail) \
175 	__dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
176 				  num_buffers, desc_list, tail, __func__)
177 
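/*
 * Usage sketch (illustrative only, assuming a typical rx processing loop):
 * descriptors whose nbufs have been consumed are collected on a local free
 * list and then handed back to the ring through the replenish wrapper.
 * The local variables below are hypothetical.
 *
 *	union dp_rx_desc_list_elem_t *head = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *	uint32_t count = 0;
 *
 *	// for each reaped descriptor:
 *	dp_rx_add_to_free_desc_list(&head, &tail, rx_desc);
 *	count++;
 *
 *	// once the quota has been processed:
 *	dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
 *				count, &head, &tail);
 */
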
178 #ifdef DP_RX_SPECIAL_FRAME_NEED
179 /**
180  * dp_rx_is_special_frame() - check whether the RX frame is a special frame
181  *
182  * @nbuf: RX skb pointer
183  * @frame_mask: the mask of special frame types of interest
184  *
185  * Check whether the RX frame type matches one of the types set in the mask
186  *
187  * Return: true - special frame wanted, false - not
188  */
189 static inline
190 bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
191 {
192 	if (((frame_mask & FRAME_MASK_IPV4_ARP) &&
193 	     qdf_nbuf_is_ipv4_arp_pkt(nbuf)) ||
194 	    ((frame_mask & FRAME_MASK_IPV4_DHCP) &&
195 	     qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) ||
196 	    ((frame_mask & FRAME_MASK_IPV4_EAPOL) &&
197 	     qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) ||
198 	    ((frame_mask & FRAME_MASK_IPV6_DHCP) &&
199 	     qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))
200 		return true;
201 
202 	return false;
203 }
204 
205 /**
206  * dp_rx_deliver_special_frame() - Deliver the RX special frame to stack
207  *				   if it matches the mask
208  *
209  * @soc: Datapath soc handler
210  * @peer: pointer to DP peer
211  * @nbuf: pointer to the skb of RX frame
212  * @frame_mask: the mask of special frame types of interest
213  * @rx_tlv_hdr: start of rx tlv header
214  *
215  * note: Msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and
216  * single nbuf is expected.
217  *
218  * Return: true - nbuf has been delivered to stack, false - not.
219  */
220 bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer,
221 				 qdf_nbuf_t nbuf, uint32_t frame_mask,
222 				 uint8_t *rx_tlv_hdr);
223 #else
224 static inline
225 bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
226 {
227 	return false;
228 }
229 
230 static inline
231 bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer,
232 				 qdf_nbuf_t nbuf, uint32_t frame_mask,
233 				 uint8_t *rx_tlv_hdr)
234 {
235 	return false;
236 }
237 #endif
238 
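/*
 * Example (illustrative sketch): a caller typically builds a mask from the
 * FRAME_MASK_* bits above and uses it to filter and deliver wanted frames;
 * soc, peer, nbuf and rx_tlv_hdr are assumed caller-side variables.
 *
 *	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_EAPOL;
 *
 *	if (dp_rx_is_special_frame(nbuf, frame_mask))
 *		dp_rx_deliver_special_frame(soc, peer, nbuf, frame_mask,
 *					    rx_tlv_hdr);
 */
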
239 /* DOC: Offset to obtain LLC hdr
240  *
241  * In the case of a WiFi parse error,
242  * to reach the LLC header from the beginning
243  * of the VLAN tag we need to skip 8 bytes:
244  * VLAN tag (4) + length (2) + length added
245  * by HW (2) = 8 bytes.
246  */
247 #define DP_SKIP_VLAN		8
248 
249 /**
250  * struct dp_rx_cached_buf - rx cached buffer
251  * @node: linked list node
252  * @buf: skb buffer
253  */
254 struct dp_rx_cached_buf {
255 	qdf_list_node_t node;
256 	qdf_nbuf_t buf;
257 };
258 
259 /*
260  *dp_rx_xor_block() - xor block of data
261  *@b: destination data block
262  *@a: source data block
263  *@len: length of the data to process
264  *
265  *Returns: None
266  */
267 static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len)
268 {
269 	qdf_size_t i;
270 
271 	for (i = 0; i < len; i++)
272 		b[i] ^= a[i];
273 }
274 
275 /*
276  *dp_rx_rotl() - rotate the bits left
277  *@val: unsigned integer input value
278  *@bits: number of bits
279  *
280  *Returns: Input value rotated left by 'bits' bits
281  */
282 static inline uint32_t dp_rx_rotl(uint32_t val, int bits)
283 {
284 	return (val << bits) | (val >> (32 - bits));
285 }
286 
287 /*
288  *dp_rx_rotr() - rotate the bits right
289  *@val: unsigned integer input value
290  *@bits: number of bits
291  *
292  *Returns: Input value rotated right by 'bits' bits
293  */
294 static inline uint32_t dp_rx_rotr(uint32_t val, int bits)
295 {
296 	return (val >> bits) | (val << (32 - bits));
297 }
298 
299 /*
300  * dp_set_rx_queue() - set queue_mapping in skb
301  * @nbuf: skb
302  * @queue_id: rx queue_id
303  *
304  * Return: void
305  */
306 #ifdef QCA_OL_RX_MULTIQ_SUPPORT
307 static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
308 {
309 	qdf_nbuf_record_rx_queue(nbuf, queue_id);
310 	return;
311 }
312 #else
313 static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
314 {
315 }
316 #endif
317 
318 /*
319  *dp_rx_xswap() - swap adjacent bytes within each 16-bit half of the word
320  *@val: unsigned integer input value
321  *
322  *Returns: Integer with adjacent bytes swapped
323  */
324 static inline uint32_t dp_rx_xswap(uint32_t val)
325 {
326 	return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
327 }
328 
329 /*
330  *dp_rx_get_le32_split() - get little endian 32 bits split
331  *@b0: byte 0
332  *@b1: byte 1
333  *@b2: byte 2
334  *@b3: byte 3
335  *
336  *Returns: Integer with split little endian 32 bits
337  */
338 static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2,
339 					uint8_t b3)
340 {
341 	return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
342 }
343 
344 /*
345  *dp_rx_get_le32() - get little endian 32 bits
346  *@p: pointer to a byte array holding the 32-bit value
347  *    in little endian byte order
348  *    (p[0] is the least significant byte,
349  *    p[3] the most significant byte)
350  *
351  *Returns: Integer with little endian 32 bits
352  */
353 static inline uint32_t dp_rx_get_le32(const uint8_t *p)
354 {
355 	return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]);
356 }
357 
358 /*
359  * dp_rx_put_le32() - put little endian 32 bits
360  * @p: destination char array
361  * @v: source 32-bit integer
362  *
363  * Returns: None
364  */
365 static inline void dp_rx_put_le32(uint8_t *p, uint32_t v)
366 {
367 	p[0] = (v) & 0xff;
368 	p[1] = (v >> 8) & 0xff;
369 	p[2] = (v >> 16) & 0xff;
370 	p[3] = (v >> 24) & 0xff;
371 }
372 
373 /* One round of the Michael MIC block function */
374 #define dp_rx_michael_block(l, r)	\
375 	do {					\
376 		r ^= dp_rx_rotl(l, 17);	\
377 		l += r;				\
378 		r ^= dp_rx_xswap(l);		\
379 		l += r;				\
380 		r ^= dp_rx_rotl(l, 3);	\
381 		l += r;				\
382 		r ^= dp_rx_rotr(l, 2);	\
383 		l += r;				\
384 	} while (0)
385 
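/*
 * Illustrative sketch (not the driver's defragmentation code): the helpers
 * above are the primitives of the Michael MIC. Assuming a 0x5a-padded,
 * 4-byte-aligned buffer 'data' of 'len' bytes, key words 'k0'/'k1' and an
 * 8-byte output 'mic' (all hypothetical names), they compose roughly as:
 *
 *	uint32_t l = k0, r = k1;
 *	qdf_size_t i;
 *
 *	for (i = 0; i < len; i += 4) {
 *		l ^= dp_rx_get_le32(data + i);
 *		dp_rx_michael_block(l, r);
 *	}
 *	dp_rx_put_le32(mic, l);
 *	dp_rx_put_le32(mic + 4, r);
 */
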
386 /**
387  * union dp_rx_desc_list_elem_t
388  *
389  * @next		: Next pointer to form free list
390  * @rx_desc		: DP Rx descriptor
391  */
392 union dp_rx_desc_list_elem_t {
393 	union dp_rx_desc_list_elem_t *next;
394 	struct dp_rx_desc rx_desc;
395 };
396 
397 #ifdef RX_DESC_MULTI_PAGE_ALLOC
398 /**
399  * dp_rx_desc_find() - find dp rx descriptor from page ID and offset
400  * @page_id: Page ID
401  * @offset: Offset of the descriptor element
402  * @rx_pool: rx descriptor pool holding the multi-page descriptor array
403  * Return: RX descriptor element
404  */
405 union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
406 					      struct rx_desc_pool *rx_pool);
407 
408 static inline
409 struct dp_rx_desc *dp_get_rx_desc_from_cookie(struct dp_soc *soc,
410 					      struct rx_desc_pool *pool,
411 					      uint32_t cookie)
412 {
413 	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
414 	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
415 	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
416 	struct rx_desc_pool *rx_desc_pool;
417 	union dp_rx_desc_list_elem_t *rx_desc_elem;
418 
419 	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
420 		return NULL;
421 
422 	rx_desc_pool = &pool[pool_id];
423 	rx_desc_elem = (union dp_rx_desc_list_elem_t *)
424 		(rx_desc_pool->desc_pages.cacheable_pages[page_id] +
425 		rx_desc_pool->elem_size * offset);
426 
427 	return &rx_desc_elem->rx_desc;
428 }
429 
430 /**
431  * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
432  *			 the Rx descriptor on Rx DMA source ring buffer
433  * @soc: core txrx main context
434  * @cookie: cookie used to lookup virtual address
435  *
436  * Return: Pointer to the Rx descriptor
437  */
438 static inline
439 struct dp_rx_desc *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc,
440 					       uint32_t cookie)
441 {
442 	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_buf[0], cookie);
443 }
444 
445 /**
446  * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
447  *			 the Rx descriptor on monitor ring buffer
448  * @soc: core txrx main context
449  * @cookie: cookie used to lookup virtual address
450  *
451  * Return: Pointer to the Rx descriptor
452  */
453 static inline
454 struct dp_rx_desc *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc,
455 					     uint32_t cookie)
456 {
457 	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_mon[0], cookie);
458 }
459 
460 /**
461  * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
462  *			 the Rx descriptor on monitor status ring buffer
463  * @soc: core txrx main context
464  * @cookie: cookie used to lookup virtual address
465  *
466  * Return: Pointer to the Rx descriptor
467  */
468 static inline
469 struct dp_rx_desc *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc,
470 						uint32_t cookie)
471 {
472 	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_status[0], cookie);
473 }
474 #else
475 
476 void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
477 			  uint32_t pool_size,
478 			  struct rx_desc_pool *rx_desc_pool);
479 
480 /**
481  * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
482  *			 the Rx descriptor on Rx DMA source ring buffer
483  * @soc: core txrx main context
484  * @cookie: cookie used to lookup virtual address
485  *
486  * Return: void *: Virtual Address of the Rx descriptor
487  */
488 static inline
489 void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie)
490 {
491 	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
492 	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
493 	struct rx_desc_pool *rx_desc_pool;
494 
495 	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
496 		return NULL;
497 
498 	rx_desc_pool = &soc->rx_desc_buf[pool_id];
499 
500 	if (qdf_unlikely(index >= rx_desc_pool->pool_size))
501 		return NULL;
502 
503 	return &(soc->rx_desc_buf[pool_id].array[index].rx_desc);
504 }
505 
506 /**
507  * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
508  *			 the Rx descriptor on monitor ring buffer
509  * @soc: core txrx main context
510  * @cookie: cookie used to lookup virtual address
511  *
512  * Return: void *: Virtual Address of the Rx descriptor
513  */
514 static inline
515 void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie)
516 {
517 	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
518 	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
519 	/* TODO */
520 	/* Add sanity for pool_id & index */
521 	return &(soc->rx_desc_mon[pool_id].array[index].rx_desc);
522 }
523 
524 /**
525  * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
526  *			 the Rx descriptor on monitor status ring buffer
527  * @soc: core txrx main context
528  * @cookie: cookie used to lookup virtual address
529  *
530  * Return: void *: Virtual Address of the Rx descriptor
531  */
532 static inline
533 void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
534 {
535 	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
536 	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
537 	/* TODO */
538 	/* Add sanity for pool_id & index */
539 	return &(soc->rx_desc_status[pool_id].array[index].rx_desc);
540 }
541 #endif /* RX_DESC_MULTI_PAGE_ALLOC */
542 
543 QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool);
544 QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
545 				 uint32_t pool_size,
546 				 struct rx_desc_pool *rx_desc_pool);
547 
548 void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
549 			  uint32_t pool_size,
550 			  struct rx_desc_pool *rx_desc_pool);
551 void dp_rx_pdev_mon_buf_buffers_free(struct dp_pdev *pdev, uint32_t mac_id);
552 
553 void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
554 				union dp_rx_desc_list_elem_t **local_desc_list,
555 				union dp_rx_desc_list_elem_t **tail,
556 				uint16_t pool_id,
557 				struct rx_desc_pool *rx_desc_pool);
558 
559 uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
560 				struct rx_desc_pool *rx_desc_pool,
561 				uint16_t num_descs,
562 				union dp_rx_desc_list_elem_t **desc_list,
563 				union dp_rx_desc_list_elem_t **tail);
564 
565 
566 QDF_STATUS dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev);
567 void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev);
568 
569 QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev);
570 void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev);
571 void dp_rx_desc_pool_deinit(struct dp_soc *soc,
572 			    struct rx_desc_pool *rx_desc_pool);
573 
574 QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);
575 QDF_STATUS dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev);
576 void dp_rx_pdev_buffers_free(struct dp_pdev *pdev);
577 
578 void dp_rx_pdev_detach(struct dp_pdev *pdev);
579 
580 void dp_print_napi_stats(struct dp_soc *soc);
581 
582 /**
583  * dp_rx_vdev_detach() - detach vdev from dp rx
584  * @vdev: virtual device instance
585  *
586  * Return: QDF_STATUS_SUCCESS: success
587  *         QDF_STATUS_E_RESOURCES: Error return
588  */
589 QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev);
590 
591 uint32_t
592 dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
593 	      uint8_t reo_ring_num,
594 	      uint32_t quota);
595 
596 /**
597  * dp_rx_err_process() - Processes error frames routed to REO error ring
598  * @int_ctx: pointer to DP interrupt context
599  * @soc: core txrx main context
600  * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be serviced
601  * @quota: No. of units (packets) that can be serviced in one shot.
602  *
603  * This function implements error processing and top level demultiplexer
604  * for all the frames routed to REO error ring.
605  *
606  * Return: uint32_t: No. of elements processed
607  */
608 uint32_t dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
609 			   hal_ring_handle_t hal_ring_hdl, uint32_t quota);
610 
611 /**
612  * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
613  * @int_ctx: pointer to DP interrupt context
614  * @soc: core txrx main context
615  * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be serviced
616  * @quota: No. of units (packets) that can be serviced in one shot.
617  *
618  * This function implements error processing and top level demultiplexer
619  * for all the frames routed to WBM2HOST sw release ring.
620  *
621  * Return: uint32_t: No. of elements processed
622  */
623 uint32_t
624 dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
625 		      hal_ring_handle_t hal_ring_hdl, uint32_t quota);
626 
627 /**
628  * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
629  *		     multiple nbufs.
630  * @nbuf: pointer to the first msdu of an amsdu.
631  *
632  * This function implements the creation of RX frag_list for cases
633  * where an MSDU is spread across multiple nbufs.
634  *
635  * Return: returns the head nbuf which contains complete frag_list.
636  */
637 qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf);
638 
639 
640 /*
641  * dp_rx_desc_nbuf_and_pool_free() - free the sw rx desc pool called during
642  *				     de-initialization of wifi module.
643  *
644  * @soc: core txrx main context
645  * @pool_id: pool_id which is one of 3 mac_ids
646  * @rx_desc_pool: rx descriptor pool pointer
647  *
648  * Return: None
649  */
650 void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
651 				   struct rx_desc_pool *rx_desc_pool);
652 
653 /*
654  * dp_rx_desc_nbuf_free() - free the sw rx desc nbufs called during
655  *			    de-initialization of wifi module.
656  *
657  * @soc: core txrx main context
658  * @rx_desc_pool: rx descriptor pool pointer
660  *
661  * Return: None
662  */
663 void dp_rx_desc_nbuf_free(struct dp_soc *soc,
664 			  struct rx_desc_pool *rx_desc_pool);
665 
666 /*
667  * dp_rx_desc_pool_free() - free the sw rx desc array called during
668  *			    de-initialization of wifi module.
669  *
670  * @soc: core txrx main context
671  * @rx_desc_pool: rx descriptor pool pointer
672  *
673  * Return: None
674  */
675 void dp_rx_desc_pool_free(struct dp_soc *soc,
676 			  struct rx_desc_pool *rx_desc_pool);
677 
678 void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
679 				struct dp_peer *peer);
680 
681 #ifdef RX_DESC_DEBUG_CHECK
682 /*
683  * dp_rx_desc_alloc_dbg_info() - Alloc memory for rx descriptor debug
684  *  structure
685  * @rx_desc: rx descriptor pointer
686  *
687  * Return: None
688  */
689 static inline
690 void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc)
691 {
692 	rx_desc->dbg_info = qdf_mem_malloc(sizeof(struct dp_rx_desc_dbg_info));
693 }
694 
695 /*
696  * dp_rx_desc_free_dbg_info() - Free rx descriptor debug
697  *  structure memory
698  * @rx_desc: rx descriptor pointer
699  *
700  * Return: None
701  */
702 static inline
703 void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc)
704 {
705 	qdf_mem_free(rx_desc->dbg_info);
706 }
707 
708 /*
709  * dp_rx_desc_update_dbg_info() - Update rx descriptor debug info
710  *  structure memory
711  * @rx_desc: rx descriptor pointer
712  * @func_name: name of the caller function
713  * @flag: RX_DESC_REPLENISHED or RX_DESC_IN_FREELIST
713  * Return: None
714  */
715 static inline
716 void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc,
717 				const char *func_name, uint8_t flag)
718 {
719 	struct dp_rx_desc_dbg_info *info = rx_desc->dbg_info;
720 
721 	if (!info)
722 		return;
723 
724 	if (flag == RX_DESC_REPLENISHED) {
725 		qdf_str_lcopy(info->replenish_caller, func_name,
726 			      QDF_MEM_FUNC_NAME_SIZE);
727 		info->replenish_ts = qdf_get_log_timestamp();
728 	} else {
729 		qdf_str_lcopy(info->freelist_caller, func_name,
730 			      QDF_MEM_FUNC_NAME_SIZE);
731 		info->freelist_ts = qdf_get_log_timestamp();
732 	}
733 }
734 #else
735 
736 static inline
737 void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc)
738 {
739 }
740 
741 static inline
742 void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc)
743 {
744 }
745 
746 static inline
747 void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc,
748 				const char *func_name, uint8_t flag)
749 {
750 }
751 #endif /* RX_DESC_DEBUG_CHECK */
752 
753 /**
754  * __dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list
755  *
756  * @head: pointer to the head of local free list
757  * @tail: pointer to the tail of local free list
758  * @new: new descriptor that is added to the free list
759  * @func_name: caller func name
760  *
761  * Return: void:
762  */
763 static inline
764 void __dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head,
765 				 union dp_rx_desc_list_elem_t **tail,
766 				 struct dp_rx_desc *new, const char *func_name)
767 {
768 	qdf_assert(head && new);
769 
770 	new->nbuf = NULL;
771 	new->in_use = 0;
772 
773 	((union dp_rx_desc_list_elem_t *)new)->next = *head;
774 	*head = (union dp_rx_desc_list_elem_t *)new;
775 	/* reset tail if head->next is NULL */
776 	if (!*tail || !(*head)->next)
777 		*tail = *head;
778 
779 	dp_rx_desc_update_dbg_info(new, func_name, RX_DESC_IN_FREELIST);
780 }
781 
782 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
783 				   uint8_t mac_id);
784 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
785 		qdf_nbuf_t mpdu, bool mpdu_done, uint8_t mac_id);
786 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
787 			     uint8_t *rx_tlv_hdr, struct dp_peer *peer);
788 void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
789 		       uint16_t peer_id, uint8_t tid);
790 
791 
792 #define DP_RX_LIST_APPEND(head, tail, elem) \
793 	do {                                                          \
794 		if (!(head)) {                                        \
795 			(head) = (elem);                              \
796 			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) = 1;\
797 		} else {                                              \
798 			qdf_nbuf_set_next((tail), (elem));            \
799 			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head)++;  \
800 		}                                                     \
801 		(tail) = (elem);                                      \
802 		qdf_nbuf_set_next((tail), NULL);                      \
803 	} while (0)
804 
805 #define DP_RX_MERGE_TWO_LIST(phead, ptail, chead, ctail) \
806 	do {                                                          \
807 		if (!(phead)) {                                       \
808 			(phead) = (chead);                            \
809 		} else {                                              \
810 			qdf_nbuf_set_next((ptail), (chead));          \
811 			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(phead) += \
812 			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(chead);   \
813 		}                                                     \
814 		(ptail) = (ctail);                                    \
815 		qdf_nbuf_set_next((ptail), NULL);                     \
816 	} while (0)
817 
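/*
 * Usage sketch (illustrative only): building up a per-peer delivery list
 * while reaping the ring, then handing the whole list to the stack. The
 * local variables are hypothetical.
 *
 *	qdf_nbuf_t deliver_list_head = NULL;
 *	qdf_nbuf_t deliver_list_tail = NULL;
 *
 *	// for each reaped nbuf belonging to this peer/vdev:
 *	DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
 *
 *	// after the loop:
 *	dp_rx_deliver_to_stack(soc, vdev, peer, deliver_list_head,
 *			       deliver_list_tail);
 */
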
818 /* For qcn9000 emulation the PCIe is a complete PHY and there are no address restrictions */
819 #if !defined(BUILD_X86) || defined(QCA_WIFI_QCN9000)
820 static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
821 		qdf_dma_addr_t *paddr, struct rx_desc_pool *rx_desc_pool)
822 {
823 	return QDF_STATUS_SUCCESS;
824 }
825 #else
826 #define MAX_RETRY 100
827 static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
828 		qdf_dma_addr_t *paddr, struct rx_desc_pool *rx_desc_pool)
829 {
830 	uint32_t nbuf_retry = 0;
831 	int32_t ret;
832 	const uint32_t x86_phy_addr = 0x50000000;
833 	/*
834 	 * in M2M emulation platforms (x86) the memory below 0x50000000
835 	 * is reserved for target use, so any memory allocated in this
836 	 * region should not be used by host
837 	 */
838 	do {
839 		if (qdf_likely(*paddr > x86_phy_addr))
840 			return QDF_STATUS_SUCCESS;
841 		else {
842 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
843 					"phy addr %pK is below 0x50000000, trying again",
844 					paddr);
845 
846 			nbuf_retry++;
847 			if ((*rx_netbuf)) {
848 				qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
849 						QDF_DMA_FROM_DEVICE);
850 				/* Not freeing buffer intentionally.
851 				 * Observed that the same buffer was getting
852 				 * re-allocated, resulting in longer load time
853 				 * and WMI init timeout.
854 				 * This buffer is anyway not useful, so skip it.
855 				 **/
856 			}
857 
858 			*rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
859 						    rx_desc_pool->buf_size,
860 						    RX_BUFFER_RESERVATION,
861 						    rx_desc_pool->buf_alignment,
862 						    FALSE);
863 
864 			if (qdf_unlikely(!(*rx_netbuf)))
865 				return QDF_STATUS_E_FAILURE;
866 
867 			ret = qdf_nbuf_map_single(dp_soc->osdev, *rx_netbuf,
868 							QDF_DMA_FROM_DEVICE);
869 
870 			if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
871 				qdf_nbuf_free(*rx_netbuf);
872 				*rx_netbuf = NULL;
873 				continue;
874 			}
875 
876 			*paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0);
877 		}
878 	} while (nbuf_retry < MAX_RETRY);
879 
880 	if ((*rx_netbuf)) {
881 		qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
882 					QDF_DMA_FROM_DEVICE);
883 		qdf_nbuf_free(*rx_netbuf);
884 	}
885 
886 	return QDF_STATUS_E_FAILURE;
887 }
888 #endif
889 
890 /**
891  * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
892  *				   the MSDU Link Descriptor
893  * @soc: core txrx main context
894  * @buf_info: buf_info includes cookie that is used to lookup
895  * virtual address of link descriptor after deriving the page id
896  * and the offset or index of the desc on the associated page.
897  *
898  * This is the VA of the link descriptor, that HAL layer later uses to
899  * retrieve the list of MSDU's for a given MPDU.
900  *
901  * Return: void *: Virtual Address of the MSDU link descriptor
902  */
903 static inline
904 void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
905 				  struct hal_buf_info *buf_info)
906 {
907 	void *link_desc_va;
908 	struct qdf_mem_multi_page_t *pages;
909 	uint16_t page_id = LINK_DESC_COOKIE_PAGE_ID(buf_info->sw_cookie);
910 
911 	pages = &soc->link_desc_pages;
912 	if (!pages)
913 		return NULL;
914 	if (qdf_unlikely(page_id >= pages->num_pages))
915 		return NULL;
916 	link_desc_va = pages->dma_pages[page_id].page_v_addr_start +
917 		(buf_info->paddr - pages->dma_pages[page_id].page_p_addr);
918 	return link_desc_va;
919 }
920 
921 /**
922  * dp_rx_cookie_2_mon_link_desc_va() - Converts cookie to a virtual address of
923  *				   the MSDU Link Descriptor
924  * @pdev: core txrx pdev context
925  * @buf_info: buf_info includes cookie that is used to look up the virtual address of the
926  * link descriptor. Normally this is just an index into a per pdev array.
927  *
928  * This is the VA of the link descriptor in monitor mode destination ring,
929  * that HAL layer later uses to retrieve the list of MSDU's for a given MPDU.
930  *
931  * Return: void *: Virtual Address of the MSDU link descriptor
932  */
933 static inline
934 void *dp_rx_cookie_2_mon_link_desc_va(struct dp_pdev *pdev,
935 				  struct hal_buf_info *buf_info,
936 				  int mac_id)
937 {
938 	void *link_desc_va;
939 	struct qdf_mem_multi_page_t *pages;
940 	uint16_t page_id = LINK_DESC_COOKIE_PAGE_ID(buf_info->sw_cookie);
941 
942 	pages = &pdev->soc->mon_link_desc_pages[mac_id];
943 	if (!pages)
944 		return NULL;
945 
946 	if (qdf_unlikely(page_id >= pages->num_pages))
947 		return NULL;
948 
949 	link_desc_va = pages->dma_pages[page_id].page_v_addr_start +
950 		(buf_info->paddr - pages->dma_pages[page_id].page_p_addr);
951 
952 	return link_desc_va;
953 }
954 
955 /**
956  * dp_rx_defrag_concat() - Concatenate the fragments
957  *
958  * @dst: destination pointer to the buffer
959  * @src: source pointer from where the fragment payload is to be copied
960  *
961  * Return: QDF_STATUS
962  */
963 static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src)
964 {
965 	/*
966 	 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst
967 	 * to provide space for src, the headroom portion is copied from
968 	 * the original dst buffer to the larger new dst buffer.
969 	 * (This is needed, because the headroom of the dst buffer
970 	 * contains the rx desc.)
971 	 */
972 	if (!qdf_nbuf_cat(dst, src)) {
973 		/*
974 		 * qdf_nbuf_cat does not free the src memory.
975 		 * Free src nbuf before returning
976 		 * For the failure case the caller takes care of freeing the nbuf
977 		 */
978 		qdf_nbuf_free(src);
979 		return QDF_STATUS_SUCCESS;
980 	}
981 
982 	return QDF_STATUS_E_DEFRAG_ERROR;
983 }
984 
985 #ifndef FEATURE_WDS
986 static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc, uint16_t sa_idx, bool is_active)
987 {
988 	return QDF_STATUS_SUCCESS;
989 }
990 
991 static inline void
992 dp_rx_wds_srcport_learn(struct dp_soc *soc,
993 			uint8_t *rx_tlv_hdr,
994 			struct dp_peer *ta_peer,
995 			qdf_nbuf_t nbuf,
996 			struct hal_rx_msdu_metadata msdu_metadata)
997 {
998 }
999 #endif
1000 
1001 /*
1002  * dp_rx_desc_dump() - dump the sw rx descriptor
1003  *
1004  * @rx_desc: sw rx descriptor
1005  */
1006 static inline void dp_rx_desc_dump(struct dp_rx_desc *rx_desc)
1007 {
1008 	dp_info("rx_desc->nbuf: %pK, rx_desc->cookie: %d, rx_desc->pool_id: %d, rx_desc->in_use: %d, rx_desc->unmapped: %d",
1009 		rx_desc->nbuf, rx_desc->cookie, rx_desc->pool_id,
1010 		rx_desc->in_use, rx_desc->unmapped);
1011 }
1012 
1013 /*
1014  * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet.
1015  *					In qwrap mode, packets originated from
1016  *					any vdev should not loopback and
1017  *					should be dropped.
1018  * @vdev: vdev on which rx packet is received
1019  * @nbuf: rx pkt
1020  *
1021  */
1022 #if ATH_SUPPORT_WRAP
1023 static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
1024 						qdf_nbuf_t nbuf)
1025 {
1026 	struct dp_vdev *psta_vdev;
1027 	struct dp_pdev *pdev = vdev->pdev;
1028 	uint8_t *data = qdf_nbuf_data(nbuf);
1029 
1030 	if (qdf_unlikely(vdev->proxysta_vdev)) {
1031 		/* In qwrap isolation mode, allow loopback packets as all
1032 		 * packets go to RootAP and Loopback on the mpsta.
1033 		 */
1034 		if (vdev->isolation_vdev)
1035 			return false;
1036 		TAILQ_FOREACH(psta_vdev, &pdev->vdev_list, vdev_list_elem) {
1037 			if (qdf_unlikely(psta_vdev->proxysta_vdev &&
1038 					 !qdf_mem_cmp(psta_vdev->mac_addr.raw,
1039 						      &data[QDF_MAC_ADDR_SIZE],
1040 						      QDF_MAC_ADDR_SIZE))) {
1041 				/* Drop packet if source address is equal to
1042 				 * any of the vdev addresses.
1043 				 */
1044 				return true;
1045 			}
1046 		}
1047 	}
1048 	return false;
1049 }
1050 #else
1051 static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
1052 						qdf_nbuf_t nbuf)
1053 {
1054 	return false;
1055 }
1056 #endif
1057 
1058 #if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\
1059 	defined(WLAN_SUPPORT_RX_TAG_STATISTICS) ||\
1060 	defined(WLAN_SUPPORT_RX_FLOW_TAG)
1061 #include "dp_rx_tag.h"
1062 #endif
1063 
1064 #ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
1065 /**
1066  * dp_rx_update_protocol_tag() - Reads CCE metadata from the RX MSDU end TLV
1067  *                              and set the corresponding tag in QDF packet
1068  * @soc: core txrx main context
1069  * @vdev: vdev on which the packet is received
1070  * @nbuf: QDF pkt buffer on which the protocol tag should be set
1071  * @rx_tlv_hdr: base address where the RX TLVs start
1072  * @ring_index: REO ring number, not used for error & monitor ring
1073  * @is_reo_exception: flag to indicate if rx from REO ring or exception ring
1074  * @is_update_stats: flag to indicate whether to update stats or not
1075  * Return: void
1076  */
1077 static inline void
1078 dp_rx_update_protocol_tag(struct dp_soc *soc, struct dp_vdev *vdev,
1079 			  qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
1080 			  uint16_t ring_index,
1081 			  bool is_reo_exception, bool is_update_stats)
1082 {
1083 }
1084 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
1085 
1086 #ifndef WLAN_SUPPORT_RX_FLOW_TAG
1087 /**
1088  * dp_rx_update_flow_tag() - Reads FSE metadata from the RX MSDU end TLV
1089  *                           and set the corresponding tag in QDF packet
1090  * @soc: core txrx main context
1091  * @vdev: vdev on which the packet is received
1092  * @nbuf: QDF pkt buffer on which the protocol tag should be set
1093  * @rx_tlv_hdr: base address where the RX TLVs start
1094  * @update_stats: flag to indicate whether to update stats or not
1095  *
1096  * Return: void
1097  */
1098 static inline void
1099 dp_rx_update_flow_tag(struct dp_soc *soc, struct dp_vdev *vdev,
1100 		      qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, bool update_stats)
1101 {
1102 }
1103 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
1104 
1105 #if !defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) &&\
1106 	!defined(WLAN_SUPPORT_RX_FLOW_TAG)
1107 /**
1108  * dp_rx_mon_update_protocol_flow_tag() - Performs necessary checks for monitor
1109  *                                       mode and then tags appropriate packets
1110  * @soc: core txrx main context
1111  * @dp_pdev: pdev on which the packet is received
1112  * @msdu: QDF packet buffer on which the protocol tag should be set
1113  * @rx_desc: base address where the RX TLVs start
1114  * Return: void
1115  */
1116 static inline
1117 void dp_rx_mon_update_protocol_flow_tag(struct dp_soc *soc,
1118 					struct dp_pdev *dp_pdev,
1119 					qdf_nbuf_t msdu, void *rx_desc)
1120 {
1121 }
1122 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG || WLAN_SUPPORT_RX_FLOW_TAG */
1123 
1124 /*
1125  * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
1126  *			       called during dp rx initialization
1127  *			       and at the end of dp_rx_process.
1128  *
1129  * @soc: core txrx main context
1130  * @mac_id: mac_id which is one of 3 mac_ids
1131  * @dp_rxdma_srng: dp rxdma circular ring
1132  * @rx_desc_pool: Pointer to free Rx descriptor pool
1133  * @num_req_buffers: number of buffer to be replenished
1134  * @desc_list: list of descs if called from dp_rx_process
1135  *	       or NULL during dp rx initialization or out of buffer
1136  *	       interrupt.
1137  * @tail: tail of descs list
1138  * @func_name: name of the caller function
1139  * Return: return success or failure
1140  */
1141 QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
1142 				 struct dp_srng *dp_rxdma_srng,
1143 				 struct rx_desc_pool *rx_desc_pool,
1144 				 uint32_t num_req_buffers,
1145 				 union dp_rx_desc_list_elem_t **desc_list,
1146 				 union dp_rx_desc_list_elem_t **tail,
1147 				 const char *func_name);
1148 
1149 /*
1150  * dp_pdev_rx_buffers_attach() - replenish rxdma ring with rx nbufs
1151  *                               called during dp rx initialization
1152  *
1153  * @soc: core txrx main context
1154  * @mac_id: mac_id which is one of 3 mac_ids
1155  * @dp_rxdma_srng: dp rxdma circular ring
1156  * @rx_desc_pool: Pointer to free Rx descriptor pool
1157  * @num_req_buffers: number of buffer to be replenished
1158  *
1159  * Return: return success or failure
1160  */
1161 QDF_STATUS
1162 dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
1163 			  struct dp_srng *dp_rxdma_srng,
1164 			  struct rx_desc_pool *rx_desc_pool,
1165 			  uint32_t num_req_buffers);
1166 
1167 /**
1168  * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
1169  *			      (WBM), following error handling
1170  *
1171  * @soc: core DP main context
1172  * @ring_desc: opaque pointer to the REO error ring descriptor that
1173  *	       carries the buffer_addr_info
1174  * @bm_action: put to idle_list or release to msdu_list
1175  *
1176  * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
1177  */
1178 QDF_STATUS
1179 dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
1180 		       uint8_t bm_action);
1181 
1182 /**
1183  * dp_rx_link_desc_return_by_addr - Return a MPDU link descriptor to HW
1184  *					(WBM) by address
1185  *
1186  * @soc: core DP main context
1187  * @link_desc_addr: link descriptor addr
1188  * @bm_action: put to idle_list or release to msdu_list
1189  * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
1190  */
1191 QDF_STATUS
1192 dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
1193 			       hal_buff_addrinfo_t link_desc_addr,
1194 			       uint8_t bm_action);
1195 
1196 /**
1197  * dp_rxdma_err_process() - RxDMA error processing functionality
1198  * @int_ctx: pointer to DP interrupt context
1199  * @soc: core txrx main context
1200  * @mac_id: mac id which is one of 3 mac_ids
1201  * @quota: No. of units (packets) that can be serviced in one shot.
1202  *
1203  * Return: num of buffers processed
1204  */
1205 uint32_t
1206 dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
1207 		     uint32_t mac_id, uint32_t quota);
1208 
1209 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1210 				uint8_t *rx_tlv_hdr, struct dp_peer *peer);
1211 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1212 					uint8_t *rx_tlv_hdr);
1213 
1214 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
1215 			   struct dp_peer *peer);
1216 
1217 qdf_nbuf_t
1218 dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev);
1219 
1220 /*
1221  * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
1222  *
1223  * @soc: core txrx main context
1224  * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
1225  * @ring_desc: opaque pointer to the RX ring descriptor
1226  * @rx_desc: host rx descriptor
1227  *
1228  * Return: void
1229  */
1230 void dp_rx_dump_info_and_assert(struct dp_soc *soc,
1231 				hal_ring_handle_t hal_ring_hdl,
1232 				hal_ring_desc_t ring_desc,
1233 				struct dp_rx_desc *rx_desc);
1234 
1235 void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
1236 #ifdef RX_DESC_DEBUG_CHECK
1237 /**
1238  * dp_rx_desc_check_magic() - check the magic value in dp_rx_desc
1239  * @rx_desc: rx descriptor pointer
1240  *
1241  * Return: true, if magic is correct, else false.
1242  */
1243 static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
1244 {
1245 	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
1246 		return false;
1247 
1248 	rx_desc->magic = 0;
1249 	return true;
1250 }
1251 
1252 /**
1253  * dp_rx_desc_prep() - prepare rx desc
1254  * @rx_desc: rx descriptor pointer to be prepared
1255  * @nbuf: nbuf to be associated with rx_desc
1256  *
1257  * Note: assumption is that we are associating a nbuf which is mapped
1258  *
1259  * Return: none
1260  */
1261 static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
1262 {
1263 	rx_desc->magic = DP_RX_DESC_MAGIC;
1264 	rx_desc->nbuf = nbuf;
1265 	rx_desc->unmapped = 0;
1266 }
1267 
1268 #else
1269 
1270 static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
1271 {
1272 	return true;
1273 }
1274 
1275 static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
1276 {
1277 	rx_desc->nbuf = nbuf;
1278 	rx_desc->unmapped = 0;
1279 }
1280 #endif /* RX_DESC_DEBUG_CHECK */
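
/*
 * Usage sketch (illustrative only): dp_rx_desc_prep() is expected to be
 * called on the replenish path once a mapped nbuf is attached to the
 * descriptor, and dp_rx_desc_check_magic() on the processing path before
 * the descriptor is trusted; the surrounding variables are hypothetical.
 *
 *	// replenish path
 *	dp_rx_desc_prep(rx_desc, nbuf);
 *
 *	// processing path
 *	if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc)))
 *		dp_rx_dump_info_and_assert(soc, hal_ring_hdl, ring_desc,
 *					   rx_desc);
 */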
1281 
1282 void dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
1283 			     uint8_t *rx_tlv_hdr, struct dp_peer *peer,
1284 			     uint8_t err_code, uint8_t mac_id);
1285 
1286 #ifndef QCA_MULTIPASS_SUPPORT
1287 static inline
1288 bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf, uint8_t tid)
1289 {
1290 	return false;
1291 }
1292 #else
1293 bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf,
1294 			     uint8_t tid);
1295 #endif
1296 
1297 #ifndef WLAN_RX_PKT_CAPTURE_ENH
1298 static inline
1299 QDF_STATUS dp_peer_set_rx_capture_enabled(struct dp_pdev *pdev,
1300 					  struct dp_peer *peer_handle,
1301 					  bool value, uint8_t *mac_addr)
1302 {
1303 	return QDF_STATUS_SUCCESS;
1304 }
1305 #endif
1306 
1307 /**
1308  * dp_rx_deliver_to_stack() - deliver pkts to network stack
1309  * Caller to hold peer refcount and check for valid peer
1310  * @soc: soc
1311  * @vdev: vdev
1312  * @peer: peer
1313  * @nbuf_head: skb list head
1314  * @nbuf_tail: skb list tail
1315  *
1316  * Return: None
1317  */
1318 void dp_rx_deliver_to_stack(struct dp_soc *soc,
1319 			    struct dp_vdev *vdev,
1320 			    struct dp_peer *peer,
1321 			    qdf_nbuf_t nbuf_head,
1322 			    qdf_nbuf_t nbuf_tail);
1323 
1324 #ifdef QCA_OL_RX_LOCK_LESS_ACCESS
1325 /*
1326  * dp_rx_srng_access_start() - Wrapper function to log access start of a hal ring
1327  * @int_ctx: pointer to DP interrupt context
1328  * @dp_soc - DP soc structure pointer
1329  * @hal_ring_hdl - HAL ring handle
1330  *
1331  * Return: 0 on success; error on failure
1332  */
1333 static inline int
1334 dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc,
1335 			hal_ring_handle_t hal_ring_hdl)
1336 {
1337 	return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl);
1338 }
1339 
1340 /*
1341  * dp_rx_srng_access_end() - Wrapper function to log access end of a hal ring
1342  * @int_ctx: pointer to DP interrupt context
1343  * @dp_soc - DP soc structure pointer
1344  * @hal_ring_hdl - HAL ring handle
1345  *
1346  * Return - None
1347  */
1348 static inline void
1349 dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc,
1350 		      hal_ring_handle_t hal_ring_hdl)
1351 {
1352 	hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl);
1353 }
1354 #else
1355 static inline int
1356 dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc,
1357 			hal_ring_handle_t hal_ring_hdl)
1358 {
1359 	return dp_srng_access_start(int_ctx, soc, hal_ring_hdl);
1360 }
1361 
1362 static inline void
1363 dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc,
1364 		      hal_ring_handle_t hal_ring_hdl)
1365 {
1366 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
1367 }
1368 #endif
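
/*
 * Usage sketch (illustrative only): the wrappers above bracket ring reaping
 * in the rx paths, picking the locked or lock-less variant at compile time;
 * int_ctx, soc and hal_ring_hdl are assumed caller-side variables.
 *
 *	if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl)))
 *		return 0;
 *
 *	// reap entries from hal_ring_hdl here
 *
 *	dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);
 */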
1369 
1370 /*
1371  * dp_rx_wbm_sg_list_reset() - Initialize sg list
1372  *
1373  * This API should be called at soc init and after every sg processing.
1374  *@soc: DP SOC handle
1375  */
1376 static inline void dp_rx_wbm_sg_list_reset(struct dp_soc *soc)
1377 {
1378 	if (soc) {
1379 		soc->wbm_sg_param.wbm_is_first_msdu_in_sg = false;
1380 		soc->wbm_sg_param.wbm_sg_nbuf_head = NULL;
1381 		soc->wbm_sg_param.wbm_sg_nbuf_tail = NULL;
1382 		soc->wbm_sg_param.wbm_sg_desc_msdu_len = 0;
1383 	}
1384 }
1385 
1386 /*
1387  * dp_rx_wbm_sg_list_deinit() - De-initialize sg list
1388  *
1389  * This api should be called in down path, to avoid any leak.
1390  *@soc: DP SOC handle
1391  */
1392 static inline void dp_rx_wbm_sg_list_deinit(struct dp_soc *soc)
1393 {
1394 	if (soc) {
1395 		if (soc->wbm_sg_param.wbm_sg_nbuf_head)
1396 			qdf_nbuf_list_free(soc->wbm_sg_param.wbm_sg_nbuf_head);
1397 
1398 		dp_rx_wbm_sg_list_reset(soc);
1399 	}
1400 }
1401 #endif /* _DP_RX_H */
1402