/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_RX_H
#define _DP_RX_H

#include "hal_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"

#ifdef RXDMA_OPTIMIZATION
#ifndef RX_DATA_BUFFER_ALIGNMENT
#define RX_DATA_BUFFER_ALIGNMENT        128
#endif
#ifndef RX_MONITOR_BUFFER_ALIGNMENT
#define RX_MONITOR_BUFFER_ALIGNMENT     128
#endif
#else /* RXDMA_OPTIMIZATION */
#define RX_DATA_BUFFER_ALIGNMENT        4
#define RX_MONITOR_BUFFER_ALIGNMENT     4
#endif /* RXDMA_OPTIMIZATION */

#ifdef QCA_HOST2FW_RXBUF_RING
#define DP_WBM2SW_RBM(sw0_bm_id)	HAL_RX_BUF_RBM_SW1_BM(sw0_bm_id)
/* RBM value used for re-injecting defragmented packets into REO */
#define DP_DEFRAG_RBM(sw0_bm_id)	HAL_RX_BUF_RBM_SW3_BM(sw0_bm_id)
#else
#define DP_WBM2SW_RBM(sw0_bm_id)	HAL_RX_BUF_RBM_SW3_BM(sw0_bm_id)
#define DP_DEFRAG_RBM(sw0_bm_id)	DP_WBM2SW_RBM(sw0_bm_id)
#endif /* QCA_HOST2FW_RXBUF_RING */

#define RX_BUFFER_RESERVATION   0

#define DP_PEER_METADATA_PEER_ID_MASK	0x0000ffff
#define DP_PEER_METADATA_PEER_ID_SHIFT	0
#define DP_PEER_METADATA_VDEV_ID_MASK	0x003f0000
#define DP_PEER_METADATA_VDEV_ID_SHIFT	16
#define DP_PEER_METADATA_OFFLOAD_MASK	0x01000000
#define DP_PEER_METADATA_OFFLOAD_SHIFT	24

#define DP_DEFAULT_NOISEFLOOR	(-96)

#define DP_PEER_METADATA_PEER_ID_GET(_peer_metadata)		\
	(((_peer_metadata) & DP_PEER_METADATA_PEER_ID_MASK)	\
			>> DP_PEER_METADATA_PEER_ID_SHIFT)

#define DP_PEER_METADATA_VDEV_ID_GET(_peer_metadata)		\
	(((_peer_metadata) & DP_PEER_METADATA_VDEV_ID_MASK)	\
			>> DP_PEER_METADATA_VDEV_ID_SHIFT)

#define DP_PEER_METADATA_OFFLOAD_GET(_peer_metadata)		\
	(((_peer_metadata) & DP_PEER_METADATA_OFFLOAD_MASK)	\
			>> DP_PEER_METADATA_OFFLOAD_SHIFT)

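/*
 * Example (illustrative sketch only, not part of the driver API):
 * unpacking the fields of a peer metadata word read from the RX ring
 * descriptor:
 *
 *	uint32_t peer_metadata;	// value from the ring descriptor
 *	uint16_t peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_metadata);
 *	uint8_t vdev_id = DP_PEER_METADATA_VDEV_ID_GET(peer_metadata);
 *	bool offload = !!DP_PEER_METADATA_OFFLOAD_GET(peer_metadata);
 */
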
#define DP_RX_DESC_MAGIC 0xdec0de

#define dp_rx_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX, params)
#define dp_rx_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX, params)
#define dp_rx_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX, params)
#define dp_rx_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX, ## params)
#define dp_rx_info_rl(params...) \
	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX, ## params)
#define dp_rx_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX, params)

/**
 * enum dp_rx_desc_state
 *
 * @RX_DESC_REPLENISHED: rx desc replenished
 * @RX_DESC_IN_FREELIST: rx desc in freelist
 */
enum dp_rx_desc_state {
	RX_DESC_REPLENISHED,
	RX_DESC_IN_FREELIST,
};

#ifndef QCA_HOST_MODE_WIFI_DISABLED
/**
 * struct dp_rx_desc_dbg_info
 *
 * @freelist_caller: name of the function that put the
 *  rx desc in a freelist
 * @freelist_ts: timestamp when the rx desc was put in
 *  a freelist
 * @replenish_caller: name of the function that last
 *  replenished the rx desc
 * @replenish_ts: last replenish timestamp
 * @prev_nbuf: previous nbuf info
 * @prev_nbuf_data_addr: previous nbuf data address
 */
struct dp_rx_desc_dbg_info {
	char freelist_caller[QDF_MEM_FUNC_NAME_SIZE];
	uint64_t freelist_ts;
	char replenish_caller[QDF_MEM_FUNC_NAME_SIZE];
	uint64_t replenish_ts;
	qdf_nbuf_t prev_nbuf;
	uint8_t *prev_nbuf_data_addr;
};

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/**
 * struct dp_rx_desc
 *
 * @nbuf		: VA of the "skb" posted
 * @rx_buf_start	: VA of the original Rx buffer, before
 *			  movement of any skb->data pointer
 * @paddr_buf_start	: PA of the original Rx buffer, before
 *			  movement of any frag pointer
 * @cookie		: index into the sw array which holds
 *			  the sw Rx descriptors
 *			  Cookie space is 21 bits:
 *			  lower 18 bits -- index
 *			  upper  3 bits -- pool_id
 * @pool_id		: pool ID for which this descriptor was
 *			  allocated. Can only be used if there is
 *			  no flow steering
 * @in_use		: rx_desc is in use
 * @unmapped		: used to mark rx_desc as unmapped if the
 *			  corresponding nbuf is already unmapped
 * @in_err_state	: Nbuf sanity failed for this descriptor.
 * @nbuf_data_addr	: VA of nbuf data posted
 */
struct dp_rx_desc {
	qdf_nbuf_t nbuf;
	uint8_t *rx_buf_start;
	qdf_dma_addr_t paddr_buf_start;
	uint32_t cookie;
	uint8_t	 pool_id;
#ifdef RX_DESC_DEBUG_CHECK
	uint32_t magic;
	uint8_t *nbuf_data_addr;
	struct dp_rx_desc_dbg_info *dbg_info;
#endif
	uint8_t	in_use:1,
		unmapped:1,
		in_err_state:1;
};

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef ATH_RX_PRI_SAVE
#define DP_RX_TID_SAVE(_nbuf, _tid) \
	(qdf_nbuf_set_priority(_nbuf, _tid))
#else
#define DP_RX_TID_SAVE(_nbuf, _tid)
#endif

/* RX Descriptor Multi Page memory alloc related */
#define DP_RX_DESC_OFFSET_NUM_BITS 8
#define DP_RX_DESC_PAGE_ID_NUM_BITS 8
#define DP_RX_DESC_POOL_ID_NUM_BITS 4

#define DP_RX_DESC_PAGE_ID_SHIFT DP_RX_DESC_OFFSET_NUM_BITS
#define DP_RX_DESC_POOL_ID_SHIFT \
		(DP_RX_DESC_OFFSET_NUM_BITS + DP_RX_DESC_PAGE_ID_NUM_BITS)
#define RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK \
	(((1 << DP_RX_DESC_POOL_ID_NUM_BITS) - 1) << DP_RX_DESC_POOL_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK	\
			(((1 << DP_RX_DESC_PAGE_ID_NUM_BITS) - 1) << \
			 DP_RX_DESC_PAGE_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK \
			((1 << DP_RX_DESC_OFFSET_NUM_BITS) - 1)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(_cookie)		\
	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK) >>	\
			DP_RX_DESC_POOL_ID_SHIFT)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(_cookie)		\
	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK) >>	\
			DP_RX_DESC_PAGE_ID_SHIFT)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(_cookie)		\
	((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK)

#endif /* QCA_HOST_MODE_WIFI_DISABLED */
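
/*
 * Example (illustrative sketch only): the multi page cookie layout is
 * [pool_id:4 | page_id:8 | offset:8]. Decoding a cookie value such as
 * 0x2050A gives pool_id 2, page_id 5, offset 10:
 *
 *	uint32_t cookie = 0x2050A;
 *	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
 *	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
 *	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
 */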

#define RX_DESC_COOKIE_INDEX_SHIFT		0
#define RX_DESC_COOKIE_INDEX_MASK		0x3ffff /* 18 bits */
#define RX_DESC_COOKIE_POOL_ID_SHIFT		18
#define RX_DESC_COOKIE_POOL_ID_MASK		0x1c0000

#define DP_RX_DESC_COOKIE_MAX	\
	(RX_DESC_COOKIE_INDEX_MASK | RX_DESC_COOKIE_POOL_ID_MASK)

#define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >>	\
			RX_DESC_COOKIE_POOL_ID_SHIFT)

#define DP_RX_DESC_COOKIE_INDEX_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >>	\
			RX_DESC_COOKIE_INDEX_SHIFT)

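/*
 * Example (illustrative sketch only): the single pool cookie layout is
 * [pool_id:3 | index:18], so a cookie of ((1 << 18) | 42) decodes to
 * pool_id 1, index 42:
 *
 *	uint32_t cookie = (1 << RX_DESC_COOKIE_POOL_ID_SHIFT) | 42;
 *	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);	// 1
 *	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);		// 42
 */
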
#define dp_rx_add_to_free_desc_list(head, tail, new) \
	__dp_rx_add_to_free_desc_list(head, tail, new, __func__)

#define dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
				num_buffers, desc_list, tail) \
	__dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
				  num_buffers, desc_list, tail, __func__)

#ifdef DP_RX_SPECIAL_FRAME_NEED
/**
 * dp_rx_is_special_frame() - check whether the RX frame is a special frame
 *
 * @nbuf: RX skb pointer
 * @frame_mask: the mask for special frame needed
 *
 * Check whether the received frame matches any of the frame types
 * requested in the mask
 *
 * Return: true - special frame needed, false - no
 */
static inline
bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
{
	if (((frame_mask & FRAME_MASK_IPV4_ARP) &&
	     qdf_nbuf_is_ipv4_arp_pkt(nbuf)) ||
	    ((frame_mask & FRAME_MASK_IPV4_DHCP) &&
	     qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) ||
	    ((frame_mask & FRAME_MASK_IPV4_EAPOL) &&
	     qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) ||
	    ((frame_mask & FRAME_MASK_IPV6_DHCP) &&
	     qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))
		return true;

	return false;
}

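/*
 * Example (illustrative sketch only): checking a received frame against
 * a mask built from the frame type bits used above:
 *
 *	uint32_t mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_EAPOL;
 *
 *	if (dp_rx_is_special_frame(nbuf, mask))
 *		;	// nbuf is an ARP or EAPOL frame
 */
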
/**
 * dp_rx_deliver_special_frame() - Deliver the RX special frame to stack
 *				   if matches mask
 *
 * @soc: Datapath soc handler
 * @peer: pointer to DP peer
 * @nbuf: pointer to the skb of RX frame
 * @frame_mask: the mask for special frame needed
 * @rx_tlv_hdr: start of rx tlv header
 *
 * Note: msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and
 * a single nbuf is expected.
 *
 * Return: true - nbuf has been delivered to stack, false - not.
 */
bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer,
				 qdf_nbuf_t nbuf, uint32_t frame_mask,
				 uint8_t *rx_tlv_hdr);
#else
static inline
bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
{
	return false;
}

static inline
bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer,
				 qdf_nbuf_t nbuf, uint32_t frame_mask,
				 uint8_t *rx_tlv_hdr)
{
	return false;
}
#endif

/* DOC: Offset to obtain LLC hdr
 *
 * In the case of a Wifi parse error,
 * to reach the LLC header from the beginning
 * of the VLAN tag we need to skip 8 bytes:
 * Vlan_tag(4) + length(2) + length added
 * by HW(2) = 8 bytes.
 */
#define DP_SKIP_VLAN		8

#ifndef QCA_HOST_MODE_WIFI_DISABLED

/**
 * struct dp_rx_cached_buf - rx cached buffer
 * @node: linked list node
 * @buf: skb buffer
 */
struct dp_rx_cached_buf {
	qdf_list_node_t node;
	qdf_nbuf_t buf;
};

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/*
 * dp_rx_xor_block() - XOR a block of data
 * @b: destination data block
 * @a: source data block
 * @len: length of the data to process
 *
 * Returns: None
 */
static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len)
{
	qdf_size_t i;

	for (i = 0; i < len; i++)
		b[i] ^= a[i];
}

/*
 * dp_rx_rotl() - rotate the bits left
 * @val: unsigned integer input value
 * @bits: number of bits
 *
 * Returns: Integer rotated left by 'bits'
 */
static inline uint32_t dp_rx_rotl(uint32_t val, int bits)
{
	return (val << bits) | (val >> (32 - bits));
}

/*
 * dp_rx_rotr() - rotate the bits right
 * @val: unsigned integer input value
 * @bits: number of bits
 *
 * Returns: Integer rotated right by 'bits'
 */
static inline uint32_t dp_rx_rotr(uint32_t val, int bits)
{
	return (val >> bits) | (val << (32 - bits));
}

/*
 * dp_set_rx_queue() - set queue_mapping in skb
 * @nbuf: skb
 * @queue_id: rx queue_id
 *
 * Return: void
 */
#ifdef QCA_OL_RX_MULTIQ_SUPPORT
static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
{
	qdf_nbuf_record_rx_queue(nbuf, queue_id);
	return;
}
#else
static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
{
}
#endif

/*
 * dp_rx_xswap() - swap adjacent bytes of a 32-bit word
 * @val: unsigned integer input value
 *
 * Returns: Integer with adjacent bytes swapped
 */
static inline uint32_t dp_rx_xswap(uint32_t val)
{
	return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
}

/*
 * dp_rx_get_le32_split() - assemble 32 bits from four little endian bytes
 * @b0: byte 0
 * @b1: byte 1
 * @b2: byte 2
 * @b3: byte 3
 *
 * Returns: Integer assembled from the little endian bytes
 */
static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2,
					uint8_t b3)
{
	return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
}

/*
 * dp_rx_get_le32() - get little endian 32 bits
 * @p: source byte array
 *
 * Returns: Integer read from 'p' in little endian byte order
 */
static inline uint32_t dp_rx_get_le32(const uint8_t *p)
{
	return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]);
}

/*
 * dp_rx_put_le32() - put little endian 32 bits
 * @p: destination char array
 * @v: source 32-bit integer
 *
 * Returns: None
 */
static inline void dp_rx_put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (v) & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

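/*
 * Example (illustrative sketch only): dp_rx_put_le32() and
 * dp_rx_get_le32() round-trip a 32-bit value through a byte buffer
 * independent of host endianness:
 *
 *	uint8_t buf[4];
 *
 *	dp_rx_put_le32(buf, 0x12345678);	// buf = {0x78, 0x56, 0x34, 0x12}
 *	uint32_t v = dp_rx_get_le32(buf);	// v == 0x12345678
 */
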
/* One block (round) of the Michael MIC computation */
#define dp_rx_michael_block(l, r)	\
	do {					\
		r ^= dp_rx_rotl(l, 17);	\
		l += r;				\
		r ^= dp_rx_xswap(l);		\
		l += r;				\
		r ^= dp_rx_rotl(l, 3);	\
		l += r;				\
		r ^= dp_rx_rotr(l, 2);	\
		l += r;				\
	} while (0)

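/*
 * Example (illustrative sketch only): feeding one 32-bit message word
 * into the Michael MIC state (l, r), as done per block in the TKIP MIC
 * computation:
 *
 *	uint32_t l = key_lo, r = key_hi;	// MIC key halves
 *	uint32_t word = dp_rx_get_le32(data);	// next message word
 *
 *	l ^= word;
 *	dp_rx_michael_block(l, r);
 */
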
/**
 * union dp_rx_desc_list_elem_t
 *
 * @next		: Next pointer to form free list
 * @rx_desc		: DP Rx descriptor
 */
union dp_rx_desc_list_elem_t {
	union dp_rx_desc_list_elem_t *next;
	struct dp_rx_desc rx_desc;
};

#ifdef RX_DESC_MULTI_PAGE_ALLOC
/**
 * dp_rx_desc_find() - find dp rx descriptor from page ID and offset
 * @page_id: Page ID
 * @offset: Offset of the descriptor element
 * @rx_pool: RX descriptor pool to search in
 *
 * Return: RX descriptor element
 */
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_pool);

static inline
struct dp_rx_desc *dp_get_rx_desc_from_cookie(struct dp_soc *soc,
					      struct rx_desc_pool *pool,
					      uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
	struct rx_desc_pool *rx_desc_pool;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
		return NULL;

	rx_desc_pool = &pool[pool_id];
	rx_desc_elem = (union dp_rx_desc_list_elem_t *)
		(rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		rx_desc_pool->elem_size * offset);

	return &rx_desc_elem->rx_desc;
}

/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc,
					       uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_buf[0], cookie);
}

/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc,
					     uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_mon[0], cookie);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor status ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc,
						uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_status[0], cookie);
}
#else

void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size,
			  struct rx_desc_pool *rx_desc_pool);

/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	struct rx_desc_pool *rx_desc_pool;

	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
		return NULL;

	rx_desc_pool = &soc->rx_desc_buf[pool_id];

	if (qdf_unlikely(index >= rx_desc_pool->pool_size))
		return NULL;

	return &rx_desc_pool->array[index].rx_desc;
}

/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_mon[pool_id].array[index].rx_desc);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor status ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_status[pool_id].array[index].rx_desc);
}
#endif /* RX_DESC_MULTI_PAGE_ALLOC */

#ifndef QCA_HOST_MODE_WIFI_DISABLED

static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
{
	return vdev->ap_bridge_enabled;
}

#ifdef DP_RX_DESC_COOKIE_INVALIDATE
static inline QDF_STATUS
dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc)
{
	if (qdf_unlikely(HAL_RX_REO_BUF_COOKIE_INVALID_GET(ring_desc)))
		return QDF_STATUS_E_FAILURE;

	HAL_RX_REO_BUF_COOKIE_INVALID_SET(ring_desc);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_cookie_reset_invalid_bit() - Reset the invalid bit of the cookie
 *  field in ring descriptor
 * @ring_desc: ring descriptor
 *
 * Return: None
 */
static inline void
dp_rx_cookie_reset_invalid_bit(hal_ring_desc_t ring_desc)
{
	HAL_RX_REO_BUF_COOKIE_INVALID_RESET(ring_desc);
}
#else
static inline QDF_STATUS
dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_rx_cookie_reset_invalid_bit(hal_ring_desc_t ring_desc)
{
}
#endif

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool);
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t pool_size,
				 struct rx_desc_pool *rx_desc_pool);

void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size,
			  struct rx_desc_pool *rx_desc_pool);

void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				union dp_rx_desc_list_elem_t **local_desc_list,
				union dp_rx_desc_list_elem_t **tail,
				uint16_t pool_id,
				struct rx_desc_pool *rx_desc_pool);

uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				struct rx_desc_pool *rx_desc_pool,
				uint16_t num_descs,
				union dp_rx_desc_list_elem_t **desc_list,
				union dp_rx_desc_list_elem_t **tail);

QDF_STATUS dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev);
void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev);

QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev);
void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev);
void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool,
			    uint32_t pool_id);

QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);
QDF_STATUS dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev);
void dp_rx_pdev_buffers_free(struct dp_pdev *pdev);

void dp_rx_pdev_detach(struct dp_pdev *pdev);

void dp_print_napi_stats(struct dp_soc *soc);

/**
 * dp_rx_vdev_detach() - detach vdev from dp rx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev);

#ifndef QCA_HOST_MODE_WIFI_DISABLED

uint32_t
dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
	      uint8_t reo_ring_num,
	      uint32_t quota);

/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be
 *		  serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
			   hal_ring_handle_t hal_ring_hdl, uint32_t quota);

/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be
 *		  serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to WBM2HOST sw release ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl, uint32_t quota);

/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *		     multiple nbufs.
 * @soc: core txrx main context
 * @nbuf: pointer to the first msdu of an amsdu.
 *
 * This function implements the creation of RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: returns the head nbuf which contains complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf);

/*
 * dp_rx_desc_nbuf_and_pool_free() - free the sw rx desc pool called during
 *				     de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool);

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/*
 * dp_rx_desc_nbuf_free() - free the sw rx desc nbufs called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);

#ifdef DP_RX_MON_MEM_FRAG
/*
 * dp_rx_desc_frag_free() - free the sw rx desc frag called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_frag_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);
#else
static inline
void dp_rx_desc_frag_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
}
#endif
/*
 * dp_rx_desc_pool_free() - free the sw rx desc array called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);

void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
				struct dp_peer *peer);

#ifdef RX_DESC_LOGGING
/*
 * dp_rx_desc_alloc_dbg_info() - Alloc memory for rx descriptor debug
 *  structure
 * @rx_desc: rx descriptor pointer
 *
 * Return: None
 */
static inline
void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc)
{
	rx_desc->dbg_info = qdf_mem_malloc(sizeof(struct dp_rx_desc_dbg_info));
}

/*
 * dp_rx_desc_free_dbg_info() - Free rx descriptor debug
 *  structure memory
 * @rx_desc: rx descriptor pointer
 *
 * Return: None
 */
static inline
void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc)
{
	qdf_mem_free(rx_desc->dbg_info);
}

/*
 * dp_rx_desc_update_dbg_info() - Update rx descriptor debug info
 *  structure memory
 * @rx_desc: rx descriptor pointer
 * @func_name: name of the calling function
 * @flag: RX_DESC_REPLENISHED or RX_DESC_IN_FREELIST
 *
 * Return: None
 */
static inline
void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc,
				const char *func_name, uint8_t flag)
{
	struct dp_rx_desc_dbg_info *info = rx_desc->dbg_info;

	if (!info)
		return;

	if (flag == RX_DESC_REPLENISHED) {
		qdf_str_lcopy(info->replenish_caller, func_name,
			      QDF_MEM_FUNC_NAME_SIZE);
		info->replenish_ts = qdf_get_log_timestamp();
	} else {
		qdf_str_lcopy(info->freelist_caller, func_name,
			      QDF_MEM_FUNC_NAME_SIZE);
		info->freelist_ts = qdf_get_log_timestamp();
		info->prev_nbuf = rx_desc->nbuf;
		info->prev_nbuf_data_addr = rx_desc->nbuf_data_addr;
		rx_desc->nbuf_data_addr = NULL;
	}
}
#else

static inline
void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc)
{
}

static inline
void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc)
{
}

static inline
void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc,
				const char *func_name, uint8_t flag)
{
}
#endif /* RX_DESC_LOGGING */

/**
 * __dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list
 *
 * @head: pointer to the head of local free list
 * @tail: pointer to the tail of local free list
 * @new: new descriptor that is added to the free list
 * @func_name: caller func name
 *
 * Return: void
 */
static inline
void __dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head,
				 union dp_rx_desc_list_elem_t **tail,
				 struct dp_rx_desc *new, const char *func_name)
{
	qdf_assert(head && new);

	dp_rx_desc_update_dbg_info(new, func_name, RX_DESC_IN_FREELIST);

	new->nbuf = NULL;
	new->in_use = 0;

	((union dp_rx_desc_list_elem_t *)new)->next = *head;
	*head = (union dp_rx_desc_list_elem_t *)new;
	/* reset tail if head->next is NULL */
	if (!*tail || !(*head)->next)
		*tail = *head;
}

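/*
 * Example (illustrative sketch only): collecting consumed descriptors
 * into a local free list while reaping a ring, then bulk-returning them
 * to the pool. The dp_rx_add_to_free_desc_list() wrapper supplies
 * __func__ for the debug trail:
 *
 *	union dp_rx_desc_list_elem_t *head = NULL, *tail = NULL;
 *
 *	dp_rx_add_to_free_desc_list(&head, &tail, rx_desc);
 *	// ... add the remaining reaped descriptors ...
 *	dp_rx_add_desc_list_to_free_list(soc, &head, &tail,
 *					 pool_id, rx_desc_pool);
 */
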
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
				   uint8_t mac_id);
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
		qdf_nbuf_t mpdu, bool mpdu_done, uint8_t mac_id);
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer);
void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		       uint16_t peer_id, uint8_t tid);

#define DP_RX_HEAD_APPEND(head, elem) \
	do {                                                            \
		qdf_nbuf_set_next((elem), (head));			\
		(head) = (elem);                                        \
	} while (0)

#define DP_RX_LIST_APPEND(head, tail, elem) \
	do {                                                          \
		if (!(head)) {                                        \
			(head) = (elem);                              \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) = 1;\
		} else {                                              \
			qdf_nbuf_set_next((tail), (elem));            \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head)++;  \
		}                                                     \
		(tail) = (elem);                                      \
		qdf_nbuf_set_next((tail), NULL);                      \
	} while (0)

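/*
 * Example (illustrative sketch only): building a delivery list while
 * reaping nbufs; DP_RX_LIST_APPEND() keeps the element count in the
 * head nbuf's control block up to date:
 *
 *	qdf_nbuf_t deliver_head = NULL, deliver_tail = NULL;
 *
 *	DP_RX_LIST_APPEND(deliver_head, deliver_tail, nbuf);
 *	// ... append the remaining nbufs of this burst ...
 *	dp_rx_deliver_to_stack(soc, vdev, peer, deliver_head, deliver_tail);
 */
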
#define DP_RX_MERGE_TWO_LIST(phead, ptail, chead, ctail) \
	do {                                                          \
		if (!(phead)) {                                       \
			(phead) = (chead);                            \
		} else {                                              \
			qdf_nbuf_set_next((ptail), (chead));          \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(phead) += \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(chead);   \
		}                                                     \
		(ptail) = (ctail);                                    \
		qdf_nbuf_set_next((ptail), NULL);                     \
	} while (0)

#if defined(QCA_PADDR_CHECK_ON_3TH_PLATFORM)
/*
 * On some third-party platforms, the memory below 0x2000
 * is reserved for target use, so any memory allocated in this
 * region should not be used by the host
 */
#define MAX_RETRY 50
#define DP_PHY_ADDR_RESERVED	0x2000
#elif defined(BUILD_X86)
/*
 * In M2M emulation platforms (x86) the memory below 0x50000000
 * is reserved for target use, so any memory allocated in this
 * region should not be used by the host
 */
#define MAX_RETRY 100
#define DP_PHY_ADDR_RESERVED	0x50000000
#endif

#if defined(QCA_PADDR_CHECK_ON_3TH_PLATFORM) || defined(BUILD_X86)
/**
 * dp_check_paddr() - check if current phy address is valid or not
 * @dp_soc: core txrx main context
 * @rx_netbuf: skb buffer
 * @paddr: physical address
 * @rx_desc_pool: struct of rx descriptor pool
 *
 * If the physical address of the nbuf->data is less than
 * DP_PHY_ADDR_RESERVED, quarantine the nbuf and try allocating a new
 * nbuf, retrying up to MAX_RETRY times.
 *
 * This is a temp WAR till we fix it properly.
 *
 * Return: success or failure.
 */
static inline
int dp_check_paddr(struct dp_soc *dp_soc,
		   qdf_nbuf_t *rx_netbuf,
		   qdf_dma_addr_t *paddr,
		   struct rx_desc_pool *rx_desc_pool)
{
	uint32_t nbuf_retry = 0;
	int32_t ret;

	if (qdf_likely(*paddr > DP_PHY_ADDR_RESERVED))
		return QDF_STATUS_SUCCESS;

	do {
		dp_debug("invalid phy addr 0x%llx, trying again",
			 (uint64_t)(*paddr));
		nbuf_retry++;
		if ((*rx_netbuf)) {
			/* Not freeing buffer intentionally.
			 * Observed that same buffer is getting
			 * re-allocated resulting in longer load time
			 * WMI init timeout.
			 * This buffer is anyway not useful so skip it.
			 * Add such buffers to the invalid list and free
			 * them at driver unload.
			 */
			qdf_nbuf_unmap_nbytes_single(dp_soc->osdev,
						     *rx_netbuf,
						     QDF_DMA_FROM_DEVICE,
						     rx_desc_pool->buf_size);
			qdf_nbuf_queue_add(&dp_soc->invalid_buf_queue,
					   *rx_netbuf);
		}

		*rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
					    rx_desc_pool->buf_size,
					    RX_BUFFER_RESERVATION,
					    rx_desc_pool->buf_alignment,
					    FALSE);

		if (qdf_unlikely(!(*rx_netbuf)))
			return QDF_STATUS_E_FAILURE;

		ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev,
						 *rx_netbuf,
						 QDF_DMA_FROM_DEVICE,
						 rx_desc_pool->buf_size);

		if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
			qdf_nbuf_free(*rx_netbuf);
			*rx_netbuf = NULL;
			continue;
		}

		*paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0);

		if (qdf_likely(*paddr > DP_PHY_ADDR_RESERVED))
			return QDF_STATUS_SUCCESS;

	} while (nbuf_retry < MAX_RETRY);

	if ((*rx_netbuf)) {
		qdf_nbuf_unmap_nbytes_single(dp_soc->osdev,
					     *rx_netbuf,
					     QDF_DMA_FROM_DEVICE,
					     rx_desc_pool->buf_size);
		qdf_nbuf_queue_add(&dp_soc->invalid_buf_queue,
				   *rx_netbuf);
	}

	return QDF_STATUS_E_FAILURE;
}

#else
static inline
int dp_check_paddr(struct dp_soc *dp_soc,
		   qdf_nbuf_t *rx_netbuf,
		   qdf_dma_addr_t *paddr,
		   struct rx_desc_pool *rx_desc_pool)
{
	return QDF_STATUS_SUCCESS;
}

#endif

/**
 * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
 *				   the MSDU Link Descriptor
 * @soc: core txrx main context
 * @buf_info: buf_info includes cookie that is used to lookup
 * the virtual address of the link descriptor after deriving the page id
 * and the offset or index of the desc on the associated page.
 *
 * This is the VA of the link descriptor, that HAL layer later uses to
 * retrieve the list of MSDU's for a given MPDU.
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
				  struct hal_buf_info *buf_info)
{
	void *link_desc_va;
	struct qdf_mem_multi_page_t *pages;
	uint16_t page_id = LINK_DESC_COOKIE_PAGE_ID(buf_info->sw_cookie);

	pages = &soc->link_desc_pages;
	if (!pages)
		return NULL;
	if (qdf_unlikely(page_id >= pages->num_pages))
		return NULL;
	link_desc_va = pages->dma_pages[page_id].page_v_addr_start +
		(buf_info->paddr - pages->dma_pages[page_id].page_p_addr);
	return link_desc_va;
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED
/*
 * dp_rx_intrabss_fwd() - API for intrabss fwd. For EAPOL
 *  pkt with DA not equal to vdev mac addr, fwd is not allowed.
 * @soc: core txrx main context
 * @ta_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @msdu_metadata: msdu metadata
 *
 * Return: true if it is forwarded else false
 */
bool dp_rx_intrabss_fwd(struct dp_soc *soc,
			struct dp_peer *ta_peer,
			uint8_t *rx_tlv_hdr,
			qdf_nbuf_t nbuf,
			struct hal_rx_msdu_metadata msdu_metadata);

#ifdef DISABLE_EAPOL_INTRABSS_FWD
/*
 * dp_rx_intrabss_fwd_wrapper() - Wrapper API for intrabss fwd. For EAPOL
 *  pkt with DA not equal to vdev mac addr, fwd is not allowed.
 * @soc: core txrx main context
 * @ta_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @msdu_metadata: msdu metadata
 *
 * Return: true if it is forwarded else false
 */
static inline
bool dp_rx_intrabss_fwd_wrapper(struct dp_soc *soc, struct dp_peer *ta_peer,
				uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
				struct hal_rx_msdu_metadata msdu_metadata)
{
	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf) &&
			 qdf_mem_cmp(qdf_nbuf_data(nbuf) +
				     QDF_NBUF_DEST_MAC_OFFSET,
				     ta_peer->vdev->mac_addr.raw,
				     QDF_MAC_ADDR_SIZE))) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.intrabss_eapol_drop, 1);
		return true;
	}

	return dp_rx_intrabss_fwd(soc, ta_peer, rx_tlv_hdr, nbuf,
				  msdu_metadata);
}

#define DP_RX_INTRABSS_FWD(soc, peer, rx_tlv_hdr, nbuf, msdu_metadata) \
		dp_rx_intrabss_fwd_wrapper(soc, peer, rx_tlv_hdr, nbuf, \
					   msdu_metadata)
#else /* DISABLE_EAPOL_INTRABSS_FWD */
#define DP_RX_INTRABSS_FWD(soc, peer, rx_tlv_hdr, nbuf, msdu_metadata) \
		dp_rx_intrabss_fwd(soc, peer, rx_tlv_hdr, nbuf, msdu_metadata)
#endif /* DISABLE_EAPOL_INTRABSS_FWD */

/**
 * dp_rx_defrag_concat() - Concatenate the fragments
 *
 * @dst: destination pointer to the buffer
 * @src: source pointer from where the fragment payload is to be copied
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src)
{
	/*
	 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst
	 * to provide space for src, the headroom portion is copied from
	 * the original dst buffer to the larger new dst buffer.
	 * (This is needed, because the headroom of the dst buffer
	 * contains the rx desc.)
	 */
	if (!qdf_nbuf_cat(dst, src)) {
		/*
		 * qdf_nbuf_cat does not free the src memory.
		 * Free src nbuf before returning.
		 * In the failure case the caller takes care of
		 * freeing the nbuf.
		 */
		qdf_nbuf_free(src);
		return QDF_STATUS_SUCCESS;
	}

	return QDF_STATUS_E_DEFRAG_ERROR;
}

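/*
 * Example (illustrative sketch only): stitching fragments of an MPDU
 * back together; on success each src nbuf is consumed by
 * dp_rx_defrag_concat(), so only the head remains to be processed:
 *
 *	while (frag) {
 *		qdf_nbuf_t next = qdf_nbuf_next(frag);
 *
 *		if (dp_rx_defrag_concat(head, frag) != QDF_STATUS_SUCCESS)
 *			break;	// caller frees the remaining fragments
 *		frag = next;
 *	}
 */
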
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#ifndef FEATURE_WDS
void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
		    struct dp_peer *ta_peer, qdf_nbuf_t nbuf);

static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc,
					      uint16_t sa_idx, bool is_active)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_rx_wds_srcport_learn(struct dp_soc *soc,
			uint8_t *rx_tlv_hdr,
			struct dp_peer *ta_peer,
			qdf_nbuf_t nbuf,
			struct hal_rx_msdu_metadata msdu_metadata)
{
}
#endif

/*
 * dp_rx_desc_dump() - dump the sw rx descriptor
 *
 * @rx_desc: sw rx descriptor
 */
static inline void dp_rx_desc_dump(struct dp_rx_desc *rx_desc)
{
	dp_info("rx_desc->nbuf: %pK, rx_desc->cookie: %d, rx_desc->pool_id: %d, rx_desc->in_use: %d, rx_desc->unmapped: %d",
		rx_desc->nbuf, rx_desc->cookie, rx_desc->pool_id,
		rx_desc->in_use, rx_desc->unmapped);
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED

/*
 * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet.
 *					In qwrap mode, packets originated from
 *					any vdev should not loopback and
 *					should be dropped.
 * @vdev: vdev on which rx packet is received
 * @nbuf: rx pkt
 *
 * Return: true if the packet is a qwrap loopback packet, else false
 */
#if ATH_SUPPORT_WRAP
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						qdf_nbuf_t nbuf)
{
	struct dp_vdev *psta_vdev;
	struct dp_pdev *pdev = vdev->pdev;
	uint8_t *data = qdf_nbuf_data(nbuf);

	if (qdf_unlikely(vdev->proxysta_vdev)) {
		/* In qwrap isolation mode, allow loopback packets as all
		 * packets go to RootAP and Loopback on the mpsta.
		 */
		if (vdev->isolation_vdev)
			return false;
		TAILQ_FOREACH(psta_vdev, &pdev->vdev_list, vdev_list_elem) {
			if (qdf_unlikely(psta_vdev->proxysta_vdev &&
					 !qdf_mem_cmp(psta_vdev->mac_addr.raw,
						      &data[QDF_MAC_ADDR_SIZE],
						      QDF_MAC_ADDR_SIZE))) {
				/* Drop packet if source address is equal to
				 * any of the vdev addresses.
				 */
				return true;
			}
		}
	}
	return false;
}
#else
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						qdf_nbuf_t nbuf)
{
	return false;
}
#endif

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\
	defined(WLAN_SUPPORT_RX_TAG_STATISTICS) ||\
	defined(WLAN_SUPPORT_RX_FLOW_TAG)
#include "dp_rx_tag.h"
#endif

#if !defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) &&\
	!defined(WLAN_SUPPORT_RX_FLOW_TAG)
/**
 * dp_rx_update_protocol_tag() - Reads CCE metadata from the RX MSDU end TLV
 *                              and set the corresponding tag in QDF packet
 * @soc: core txrx main context
 * @vdev: vdev on which the packet is received
 * @nbuf: QDF pkt buffer on which the protocol tag should be set
 * @rx_tlv_hdr: base address where the RX TLVs start
 * @ring_index: REO ring number, not used for error & monitor ring
 * @is_reo_exception: flag to indicate if rx from REO ring or exception ring
 * @is_update_stats: flag to indicate whether to update stats or not
 *
 * Return: void
 */
static inline void
dp_rx_update_protocol_tag(struct dp_soc *soc, struct dp_vdev *vdev,
			  qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
			  uint16_t ring_index,
			  bool is_reo_exception, bool is_update_stats)
{
}
#endif

#ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
/**
 * dp_rx_err_cce_drop() - Reads CCE metadata from the RX MSDU end TLV
 *                        and returns whether cce metadata matches
 * @soc: core txrx main context
 * @vdev: vdev on which the packet is received
 * @nbuf: QDF pkt buffer on which the protocol tag should be set
 * @rx_tlv_hdr: base address where the RX TLVs start
 *
 * Return: bool
 */
static inline bool
dp_rx_err_cce_drop(struct dp_soc *soc, struct dp_vdev *vdev,
		   qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
	return false;
}

#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */

#ifndef WLAN_SUPPORT_RX_FLOW_TAG
/**
 * dp_rx_update_flow_tag() - Reads FSE metadata from the RX MSDU end TLV
 *                           and set the corresponding tag in QDF packet
 * @soc: core txrx main context
 * @vdev: vdev on which the packet is received
 * @nbuf: QDF pkt buffer on which the protocol tag should be set
 * @rx_tlv_hdr: base address where the RX TLVs start
 * @update_stats: flag to indicate whether to update stats or not
 *
 * Return: void
 */
static inline void
dp_rx_update_flow_tag(struct dp_soc *soc, struct dp_vdev *vdev,
		      qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, bool update_stats)
{
}
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */

/*
 * __dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *				 called during dp rx initialization
 *				 and at the end of dp_rx_process.
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 * @func_name: name of the caller function
 *
 * Return: return success or failure
 */
QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				 struct dp_srng *dp_rxdma_srng,
				 struct rx_desc_pool *rx_desc_pool,
				 uint32_t num_req_buffers,
				 union dp_rx_desc_list_elem_t **desc_list,
				 union dp_rx_desc_list_elem_t **tail,
				 const char *func_name);

/*
 * dp_pdev_rx_buffers_attach() - replenish rxdma ring with rx nbufs
 *                               called during dp rx initialization
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 *
 * Return: return success or failure
 */
QDF_STATUS
dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
			  struct dp_srng *dp_rxdma_srng,
			  struct rx_desc_pool *rx_desc_pool,
			  uint32_t num_req_buffers);

/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		       uint8_t bm_action);

/**
 * dp_rx_link_desc_return_by_addr() - Return a MPDU link descriptor to
 *					(WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
			       hal_buff_addrinfo_t link_desc_addr,
			       uint8_t bm_action);

/**
 * dp_rxdma_err_process() - RxDMA error processing functionality
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Return: num of buffers processed
 */
uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		     uint32_t mac_id, uint32_t quota);

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				uint8_t *rx_tlv_hdr, struct dp_peer *peer);
QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					uint8_t *rx_tlv_hdr);

int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
			   struct dp_peer *peer);

/*
 * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
 *
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
 * @ring_desc: opaque pointer to the RX ring descriptor
 * @rx_desc: host rx descriptor
 *
 * Return: void
 */
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
				hal_ring_handle_t hal_ring_hdl,
				hal_ring_desc_t ring_desc,
				struct dp_rx_desc *rx_desc);

void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf);

#ifdef QCA_PEER_EXT_STATS
void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
			     qdf_nbuf_t nbuf);
#endif /* QCA_PEER_EXT_STATS */

#ifdef RX_DESC_DEBUG_CHECK
/**
 * dp_rx_desc_check_magic() - check the magic value in dp_rx_desc
 * @rx_desc: rx descriptor pointer
 *
 * Return: true, if magic is correct, else false.
 */
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
		return false;

	rx_desc->magic = 0;
	return true;
}

/**
 * dp_rx_desc_prep() - prepare rx desc
 * @rx_desc: rx descriptor pointer to be prepared
 * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info *
 *
 * Note: assumption is that we are associating a nbuf which is mapped
 *
 * Return: none
 */
static inline
void dp_rx_desc_prep(struct dp_rx_desc *rx_desc,
		     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	rx_desc->magic = DP_RX_DESC_MAGIC;
	rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf;
	rx_desc->unmapped = 0;
	rx_desc->nbuf_data_addr = (uint8_t *)qdf_nbuf_data(rx_desc->nbuf);
}

/**
 * dp_rx_desc_frag_prep() - prepare rx desc
 * @rx_desc: rx descriptor pointer to be prepared
 * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info *
 *
 * Note: assumption is that the frag address is mapped
 *
 * Return: none
 */
#ifdef DP_RX_MON_MEM_FRAG
static inline
void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	rx_desc->magic = DP_RX_DESC_MAGIC;
	rx_desc->rx_buf_start =
		(uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr);
	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
	rx_desc->unmapped = 0;
}
#else
static inline
void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
}
#endif /* DP_RX_MON_MEM_FRAG */

/**
 * dp_rx_desc_paddr_sanity_check() - paddr sanity for ring desc vs rx_desc
 * @rx_desc: rx descriptor
 * @ring_paddr: paddr obtained from the ring
 *
 * Return: true if the ring paddr matches the nbuf paddr, else false
 */
static inline
bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc,
				   uint64_t ring_paddr)
{
	return (ring_paddr == qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0));
}
1512 
1513 static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
1514 {
1515 	return true;
1516 }
1517 
1518 static inline
1519 void dp_rx_desc_prep(struct dp_rx_desc *rx_desc,
1520 		     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
1521 {
1522 	rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf;
1523 	rx_desc->unmapped = 0;
1524 }
1525 
1526 #ifdef DP_RX_MON_MEM_FRAG
1527 static inline
1528 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
1529 			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
1530 {
1531 	rx_desc->rx_buf_start =
1532 		(uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr);
1533 	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
1534 	rx_desc->unmapped = 0;
1535 }
1536 #else
1537 static inline
1538 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
1539 			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
1540 {
1541 }
1542 #endif /* DP_RX_MON_MEM_FRAG */
1543 
1544 static inline
1545 bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc,
1546 				   uint64_t ring_paddr)
1547 {
1548 	return true;
1549 }
1550 #endif /* RX_DESC_DEBUG_CHECK */

void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
				bool is_mon_dest_desc);

void dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer,
			     uint8_t err_code, uint8_t mac_id);

#ifndef QCA_MULTIPASS_SUPPORT
static inline
bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf, uint8_t tid)
{
	return false;
}
#else
bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf,
			     uint8_t tid);
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED

#ifndef WLAN_RX_PKT_CAPTURE_ENH
static inline
QDF_STATUS dp_peer_set_rx_capture_enabled(struct dp_pdev *pdev,
					  struct dp_peer *peer_handle,
					  bool value, uint8_t *mac_addr)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/**
 * dp_rx_deliver_to_stack() - deliver pkts to network stack
 * Caller to hold peer refcount and check for valid peer
 * @soc: soc
 * @vdev: vdev
 * @peer: peer
 * @nbuf_head: skb list head
 * @nbuf_tail: skb list tail
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc,
				  struct dp_vdev *vdev,
				  struct dp_peer *peer,
				  qdf_nbuf_t nbuf_head,
				  qdf_nbuf_t nbuf_tail);

#ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
/**
 * dp_rx_eapol_deliver_to_stack() - deliver pkts to network stack
 * Caller to hold peer refcount and check for valid peer
 * @soc: soc
 * @vdev: vdev
 * @peer: peer
 * @nbuf_head: skb list head
 * @nbuf_tail: skb list tail
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc,
					struct dp_vdev *vdev,
					struct dp_peer *peer,
					qdf_nbuf_t nbuf_head,
					qdf_nbuf_t nbuf_tail);
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED

#ifdef QCA_OL_RX_LOCK_LESS_ACCESS
/*
 * dp_rx_srng_access_start() - Wrapper function to log access start of a hal
 *			       ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: 0 on success; error on failure
 */
static inline int
dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc,
			hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl);
}

/*
 * dp_rx_srng_access_end() - Wrapper function to log access end of a hal ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: None
 */
static inline void
dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl);
}
#else
static inline int
dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc,
			hal_ring_handle_t hal_ring_hdl)
{
	return dp_srng_access_start(int_ctx, soc, hal_ring_hdl);
}

static inline void
dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl)
{
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
}
#endif

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/*
 * dp_rx_wbm_sg_list_reset() - Initialize sg list
 *
 * This api should be called at soc init and after every sg processing.
 *
 * @soc: DP SOC handle
 */
static inline void dp_rx_wbm_sg_list_reset(struct dp_soc *soc)
{
	if (soc) {
		soc->wbm_sg_param.wbm_is_first_msdu_in_sg = false;
		soc->wbm_sg_param.wbm_sg_nbuf_head = NULL;
		soc->wbm_sg_param.wbm_sg_nbuf_tail = NULL;
		soc->wbm_sg_param.wbm_sg_desc_msdu_len = 0;
	}
}

/*
 * dp_rx_wbm_sg_list_deinit() - De-initialize sg list
 *
 * This api should be called in the down path, to avoid any leak.
 *
 * @soc: DP SOC handle
 */
static inline void dp_rx_wbm_sg_list_deinit(struct dp_soc *soc)
{
	if (soc) {
		if (soc->wbm_sg_param.wbm_sg_nbuf_head)
			qdf_nbuf_list_free(soc->wbm_sg_param.wbm_sg_nbuf_head);

		dp_rx_wbm_sg_list_reset(soc);
	}
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED

#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
#define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \
	do {								   \
		if (!soc->rx_buff_pool[rx_desc->pool_id].is_initialized) { \
			DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf);	   \
			break;						   \
		}							   \
		DP_RX_LIST_APPEND(ebuf_head, ebuf_tail, rx_desc->nbuf);	   \
		if (!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)) {	   \
			if (!dp_rx_buffer_pool_refill(soc, ebuf_head,	   \
						      rx_desc->pool_id))   \
				DP_RX_MERGE_TWO_LIST(head, tail,	   \
						     ebuf_head, ebuf_tail);\
			ebuf_head = NULL;				   \
			ebuf_tail = NULL;				   \
		}							   \
	} while (0)
#else
#define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \
	DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf)
#endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */

#endif /* QCA_HOST_MODE_WIFI_DISABLED */
1727 
1728 /**
1729  * dp_rx_link_desc_refill_duplicate_check() - check if link desc is a
1730  *					       duplicate before refill
1731  * @soc: DP SOC handle
1732  * @buf_info: the last link desc buf info
1733  * @ring_buf_info: current buf address pointer including link desc
1734  *
1735  * Return: None
1736  */
1737 void dp_rx_link_desc_refill_duplicate_check(
1738 				struct dp_soc *soc,
1739 				struct hal_buf_info *buf_info,
1740 				hal_buff_addrinfo_t ring_buf_info);
1741 
1742 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
1743 /**
1744  * dp_rx_deliver_to_pkt_capture() - deliver rx packet to packet capture
1745  * @soc: dp_soc handle
1746  * @pdev: dp_pdev handle
1747  * @peer_id: peer_id of the peer for which completion came
1748  * @is_offload: flag indicating an offload-path packet
1749  * @netbuf: Buffer pointer
1750  *
1751  * This function is used to deliver rx packet to packet capture
1752  */
1753 void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc,  struct dp_pdev *pdev,
1754 				  uint16_t peer_id, uint32_t is_offload,
1755 				  qdf_nbuf_t netbuf);
1756 void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
1757 					  uint32_t is_offload);
1758 #else
1759 static inline void
1760 dp_rx_deliver_to_pkt_capture(struct dp_soc *soc,  struct dp_pdev *pdev,
1761 			     uint16_t peer_id, uint32_t is_offload,
1762 			     qdf_nbuf_t netbuf)
1763 {
1764 }
1765 
1766 static inline void
1767 dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
1768 				     uint32_t is_offload)
1769 {
1770 }
1771 #endif
1772 
1773 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1774 #ifdef FEATURE_MEC
1775 /**
1776  * dp_rx_mcast_echo_check() - check if the mcast pkt is looped
1777  *			      back on the same vap or a different vap
1778  * @soc: core DP main context
1779  * @peer: dp peer handle
1780  * @rx_tlv_hdr: start of the rx TLV header
1781  * @nbuf: pkt buffer
1782  *
1783  * Return: bool (true if it is a looped back pkt else false)
1784  *
1785  */
1786 bool dp_rx_mcast_echo_check(struct dp_soc *soc,
1787 			    struct dp_peer *peer,
1788 			    uint8_t *rx_tlv_hdr,
1789 			    qdf_nbuf_t nbuf);
1790 #else
1791 static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
1792 					  struct dp_peer *peer,
1793 					  uint8_t *rx_tlv_hdr,
1794 					  qdf_nbuf_t nbuf)
1795 {
1796 	return false;
1797 }
1798 #endif /* FEATURE_MEC */
1799 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
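
/*
 * Usage note (hypothetical error-path sketch): frames flagged by the MEC
 * check are the device's own transmissions echoed back and are typically
 * dropped:
 *
 *	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
 *		qdf_nbuf_free(nbuf);
 *		return;
 *	}
 */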
1800 
1801 #ifdef RECEIVE_OFFLOAD
1802 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
1803 			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt);
1804 #else
1805 static inline
1806 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
1807 			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
1808 {
1809 }
1810 #endif
1811 
1812 void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
1813 			     uint8_t *rx_tlv_hdr, struct dp_peer *peer,
1814 			     uint8_t ring_id,
1815 			     struct cdp_tid_rx_stats *tid_stats);
1816 
1817 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf);
1818 
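/**
 * dp_rx_srng_get_num_pending() - get the number of pending ring entries
 * @hal_soc: opaque HAL SOC handle
 * @hal_ring_hdl: HAL ring handle
 * @num_entries: total number of entries in the ring
 * @near_full: output flag, set when the ring is near full
 *
 * Return: number of pending entries
 */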
1819 uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
1820 				    hal_ring_handle_t hal_ring_hdl,
1821 				    uint32_t num_entries,
1822 				    bool *near_full);
1823 
1824 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
1825 void dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
1826 			     hal_ring_desc_t ring_desc);
1827 #else
1828 static inline void
1829 dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
1830 			hal_ring_desc_t ring_desc)
1831 {
1832 }
1833 #endif
1834 
1835 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1836 #ifdef RX_DESC_SANITY_WAR
1837 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
1838 			     hal_ring_handle_t hal_ring_hdl,
1839 			     hal_ring_desc_t ring_desc,
1840 			     struct dp_rx_desc *rx_desc);
1841 #else
1842 static inline
1843 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
1844 			     hal_ring_handle_t hal_ring_hdl,
1845 			     hal_ring_desc_t ring_desc,
1846 			     struct dp_rx_desc *rx_desc)
1847 {
1848 	return QDF_STATUS_SUCCESS;
1849 }
1850 #endif
1851 
1852 #ifdef DP_RX_DROP_RAW_FRM
1853 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf);
1854 #else
1855 static inline
1856 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)
1857 {
1858 	return false;
1859 }
1860 #endif
1861 
1862 #ifdef RX_DESC_DEBUG_CHECK
1863 QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
1864 					hal_ring_desc_t ring_desc,
1865 					struct dp_rx_desc *rx_desc);
1866 #else
1867 static inline
1868 QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
1869 					hal_ring_desc_t ring_desc,
1870 					struct dp_rx_desc *rx_desc)
1871 {
1872 	return QDF_STATUS_SUCCESS;
1873 }
1874 #endif
1875 
1876 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
1877 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf);
1878 #else
1879 static inline
1880 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf)
1881 {
1882 }
1883 #endif
1884 
1885 /**
1886  * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
1887  * @pdev: pdev handle
1888  * @nbuf: pointer to the first msdu of an amsdu.
1889  * @rx_tlv_hdr: pointer to the start of RX TLV headers.
1890  *
1891  * The skb ip_summed field is set to reflect HW IP/TCP/UDP checksum validation.
1892  *
1893  * Return: void
1894  */
1895 static inline
1896 void dp_rx_cksum_offload(struct dp_pdev *pdev,
1897 			 qdf_nbuf_t nbuf,
1898 			 uint8_t *rx_tlv_hdr)
1899 {
1900 	qdf_nbuf_rx_cksum_t cksum = {0};
1901 	//TODO - Move this to ring desc api
1902 	//HAL_RX_MSDU_DESC_IP_CHKSUM_FAIL_GET
1903 	//HAL_RX_MSDU_DESC_TCP_UDP_CHKSUM_FAIL_GET
1904 	uint32_t ip_csum_err, tcp_udp_csum_err;
1905 
1906 	hal_rx_tlv_csum_err_get(pdev->soc->hal_soc, rx_tlv_hdr, &ip_csum_err,
1907 				&tcp_udp_csum_err);
1908 
1909 	if (qdf_likely(!ip_csum_err && !tcp_udp_csum_err)) {
1910 		cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
1911 		qdf_nbuf_set_rx_cksum(nbuf, &cksum);
1912 	} else {
1913 		DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err);
1914 		DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_err);
1915 	}
1915 	}
1916 }
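
/*
 * Usage note (hypothetical per-MSDU sketch, where vdev is the receiving
 * vdev in the caller's context): invoked once per MSDU before stack
 * delivery so the stack can skip software checksum validation when HW
 * already verified it:
 *
 *	dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);
 */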
1917 
1918 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1919 
1920 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
1921 static inline
1922 bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
1923 				   int max_reap_limit)
1924 {
1925 	bool limit_hit = num_reaped >= max_reap_limit;
1929 
1930 	if (limit_hit)
1931 		DP_STATS_INC(soc, rx.reap_loop_pkt_limit_hit, 1);
1932 
1933 	return limit_hit;
1934 }
1935 
1936 static inline
1937 bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
1938 {
1939 	return soc->wlan_cfg_ctx->rx_enable_eol_data_check;
1940 }
1941 
1942 static inline int dp_rx_get_loop_pkt_limit(struct dp_soc *soc)
1943 {
1944 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
1945 
1946 	return cfg->rx_reap_loop_pkt_limit;
1947 }
1948 #else
1949 static inline
1950 bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
1951 				   int max_reap_limit)
1952 {
1953 	return false;
1954 }
1955 
1956 static inline
1957 bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
1958 {
1959 	return false;
1960 }
1961 
1962 static inline int dp_rx_get_loop_pkt_limit(struct dp_soc *soc)
1963 {
1964 	return 0;
1965 }
1966 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
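
/*
 * Usage note: a hypothetical reap-loop sketch combining the helpers above;
 * the HAL iteration call is illustrative only:
 *
 *	max_reap = dp_rx_get_loop_pkt_limit(soc);
 *	while ((ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl))) {
 *		... process one entry ...
 *		if (dp_rx_reap_loop_pkt_limit_hit(soc, ++num_reaped, max_reap))
 *			break;
 *	}
 */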
1967 
1970 #ifdef QCA_SUPPORT_WDS_EXTENDED
1971 /**
1972  * dp_rx_is_list_ready() - Make different lists for 4-address
1973  *			   and 3-address frames
1974  * @nbuf_head: skb list head
1975  * @vdev: vdev
1976  * @peer: peer
1977  * @peer_id: peer id of new received frame
1978  * @vdev_id: vdev_id of new received frame
1979  *
1980  * Return: true if peer_ids are different.
1981  */
1982 static inline bool
1983 dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
1984 		    struct dp_vdev *vdev,
1985 		    struct dp_peer *peer,
1986 		    uint16_t peer_id,
1987 		    uint8_t vdev_id)
1988 {
1989 	if (nbuf_head && peer && (peer->peer_id != peer_id))
1990 		return true;
1991 
1992 	return false;
1993 }
1994 #else
1995 static inline bool
1996 dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
1997 		    struct dp_vdev *vdev,
1998 		    struct dp_peer *peer,
1999 		    uint16_t peer_id,
2000 		    uint8_t vdev_id)
2001 {
2002 	if (nbuf_head && vdev && (vdev->vdev_id != vdev_id))
2003 		return true;
2004 
2005 	return false;
2006 }
2007 #endif
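
/*
 * Usage note (hypothetical delivery-loop sketch): flush the accumulated
 * list whenever the peer (or, without QCA_SUPPORT_WDS_EXTENDED, the vdev)
 * changes, before starting a new list for the new frame:
 *
 *	if (dp_rx_is_list_ready(nbuf_head, vdev, peer, peer_id, vdev_id)) {
 *		dp_rx_deliver_to_stack(soc, vdev, peer, nbuf_head, nbuf_tail);
 *		nbuf_head = NULL;
 *		nbuf_tail = NULL;
 *	}
 */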
2008 
2009 /**
2010  * dp_rx_desc_pool_init_generic() - Generic Rx descriptors initialization
2011  * @soc: SOC handle
2012  * @rx_desc_pool: pointer to RX descriptor pool
2013  * @pool_id: pool ID
2014  *
2015  * Return: QDF_STATUS
2016  */
2017 QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
2018 				  struct rx_desc_pool *rx_desc_pool,
2019 				  uint32_t pool_id);
2020 
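/**
 * dp_rx_desc_pool_deinit_generic() - Generic Rx descriptors de-initialization
 * @soc: SOC handle
 * @rx_desc_pool: pointer to RX descriptor pool
 * @pool_id: pool ID
 *
 * Return: None
 */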
2021 void dp_rx_desc_pool_deinit_generic(struct dp_soc *soc,
2022 				  struct rx_desc_pool *rx_desc_pool,
2023 				  uint32_t pool_id);
2024 
2025 #endif /* _DP_RX_H */
2026