1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _DP_RX_H
20 #define _DP_RX_H
21 
22 #include "hal_rx.h"
23 #include "dp_peer.h"
24 #include "dp_internal.h"
25 
26 #ifdef RXDMA_OPTIMIZATION
27 #ifndef RX_DATA_BUFFER_ALIGNMENT
28 #define RX_DATA_BUFFER_ALIGNMENT        128
29 #endif
30 #ifndef RX_MONITOR_BUFFER_ALIGNMENT
31 #define RX_MONITOR_BUFFER_ALIGNMENT     128
32 #endif
33 #else /* RXDMA_OPTIMIZATION */
34 #define RX_DATA_BUFFER_ALIGNMENT        4
35 #define RX_MONITOR_BUFFER_ALIGNMENT     4
36 #endif /* RXDMA_OPTIMIZATION */
37 
38 #ifdef QCA_HOST2FW_RXBUF_RING
39 #define DP_WBM2SW_RBM(sw0_bm_id)	HAL_RX_BUF_RBM_SW1_BM(sw0_bm_id)
40 /* RBM value used for re-injecting defragmented packets into REO */
41 #define DP_DEFRAG_RBM(sw0_bm_id)	HAL_RX_BUF_RBM_SW3_BM(sw0_bm_id)
42 #else
43 #define DP_WBM2SW_RBM(sw0_bm_id)	HAL_RX_BUF_RBM_SW3_BM(sw0_bm_id)
44 #define DP_DEFRAG_RBM(sw0_bm_id)	DP_WBM2SW_RBM(sw0_bm_id)
45 #endif /* QCA_HOST2FW_RXBUF_RING */
46 
47 #define RX_BUFFER_RESERVATION   0
48 
49 #define DP_PEER_METADATA_PEER_ID_MASK	0x0000ffff
50 #define DP_PEER_METADATA_PEER_ID_SHIFT	0
51 #define DP_PEER_METADATA_VDEV_ID_MASK	0x003f0000
52 #define DP_PEER_METADATA_VDEV_ID_SHIFT	16
53 #define DP_PEER_METADATA_OFFLOAD_MASK	0x01000000
54 #define DP_PEER_METADATA_OFFLOAD_SHIFT	24
55 
56 
57 #define DP_DEFAULT_NOISEFLOOR	(-96)
58 
59 #define DP_PEER_METADATA_PEER_ID_GET(_peer_metadata)		\
60 	(((_peer_metadata) & DP_PEER_METADATA_PEER_ID_MASK)	\
61 			>> DP_PEER_METADATA_PEER_ID_SHIFT)
62 
63 #define DP_PEER_METADATA_VDEV_ID_GET(_peer_metadata)		\
64 	(((_peer_metadata) & DP_PEER_METADATA_VDEV_ID_MASK)	\
65 			>> DP_PEER_METADATA_VDEV_ID_SHIFT)
66 
67 #define DP_PEER_METADATA_OFFLOAD_GET(_peer_metadata)		\
68 	(((_peer_metadata) & DP_PEER_METADATA_OFFLOAD_MASK)	\
69 			>> DP_PEER_METADATA_OFFLOAD_SHIFT)
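
/*
 * Example (illustrative, values are made up, not from the source):
 * decoding a sample peer metadata word with the GET macros above.
 *
 *	uint32_t peer_metadata = 0x01230007;
 *
 *	DP_PEER_METADATA_PEER_ID_GET(peer_metadata)  == 0x0007
 *	DP_PEER_METADATA_VDEV_ID_GET(peer_metadata)  == 0x23
 *	DP_PEER_METADATA_OFFLOAD_GET(peer_metadata)  == 0x1
 */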
70 
71 #define DP_RX_DESC_MAGIC 0xdec0de
72 
73 #define dp_rx_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX, params)
74 #define dp_rx_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX, params)
75 #define dp_rx_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX, params)
76 #define dp_rx_info(params...) \
77 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX, ## params)
78 #define dp_rx_info_rl(params...) \
79 	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX, ## params)
80 #define dp_rx_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX, params)
81 
82 /**
83  * enum dp_rx_desc_state
84  *
85  * @RX_DESC_REPLENISHED: rx desc has been replenished
86  * @RX_DESC_IN_FREELIST: rx desc is in the freelist
87  */
88 enum dp_rx_desc_state {
89 	RX_DESC_REPLENISHED,
90 	RX_DESC_IN_FREELIST,
91 };
92 
93 #ifndef QCA_HOST_MODE_WIFI_DISABLED
94 /**
95  * struct dp_rx_desc_dbg_info
96  *
97  * @freelist_caller: name of the function that put the
98  *  rx desc in the freelist
99  * @freelist_ts: timestamp when the rx desc is put in
100  *  a freelist
101  * @replenish_caller: name of the function that last
102  *  replenished the rx desc
103  * @replenish_ts: last replenish timestamp
104  * @prev_nbuf: previous nbuf info
105  * @prev_nbuf_data_addr: previous nbuf data address
106  */
107 struct dp_rx_desc_dbg_info {
108 	char freelist_caller[QDF_MEM_FUNC_NAME_SIZE];
109 	uint64_t freelist_ts;
110 	char replenish_caller[QDF_MEM_FUNC_NAME_SIZE];
111 	uint64_t replenish_ts;
112 	qdf_nbuf_t prev_nbuf;
113 	uint8_t *prev_nbuf_data_addr;
114 };
115 
116 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
117 
118 /**
119  * struct dp_rx_desc
120  *
121  * @nbuf		: VA of the "skb" posted
122  * @rx_buf_start	: VA of the original Rx buffer, before
123  *			  movement of any skb->data pointer
124  * @paddr_buf_start     : PA of the original Rx buffer, before
125  *                        movement of any frag pointer
126  * @cookie		: index into the sw array which holds
127  *			  the sw Rx descriptors
128  *			  Cookie space is 21 bits:
129  *			  lower 18 bits -- index
130  *			  upper  3 bits -- pool_id
131  * @pool_id		: pool ID for which this descriptor was allocated.
132  *			  Can only be used if there is no flow
133  *			  steering
134  * @in_use		: rx_desc is in use
135  * @unmapped		: used to mark rx_desc as unmapped if the corresponding
136  *			  nbuf is already unmapped
137  * @in_err_state	: Nbuf sanity failed for this descriptor.
138  * @nbuf_data_addr	: VA of nbuf data posted
139  */
140 struct dp_rx_desc {
141 	qdf_nbuf_t nbuf;
142 	uint8_t *rx_buf_start;
143 	qdf_dma_addr_t paddr_buf_start;
144 	uint32_t cookie;
145 	uint8_t	 pool_id;
146 #ifdef RX_DESC_DEBUG_CHECK
147 	uint32_t magic;
148 	uint8_t *nbuf_data_addr;
149 	struct dp_rx_desc_dbg_info *dbg_info;
150 #endif
151 	uint8_t	in_use:1,
152 		unmapped:1,
153 		in_err_state:1;
154 };
155 
156 #ifndef QCA_HOST_MODE_WIFI_DISABLED
157 #ifdef ATH_RX_PRI_SAVE
158 #define DP_RX_TID_SAVE(_nbuf, _tid) \
159 	(qdf_nbuf_set_priority(_nbuf, _tid))
160 #else
161 #define DP_RX_TID_SAVE(_nbuf, _tid)
162 #endif
163 
164 /* RX Descriptor Multi Page memory alloc related */
165 #define DP_RX_DESC_OFFSET_NUM_BITS 8
166 #define DP_RX_DESC_PAGE_ID_NUM_BITS 8
167 #define DP_RX_DESC_POOL_ID_NUM_BITS 4
168 
169 #define DP_RX_DESC_PAGE_ID_SHIFT DP_RX_DESC_OFFSET_NUM_BITS
170 #define DP_RX_DESC_POOL_ID_SHIFT \
171 		(DP_RX_DESC_OFFSET_NUM_BITS + DP_RX_DESC_PAGE_ID_NUM_BITS)
172 #define RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK \
173 	(((1 << DP_RX_DESC_POOL_ID_NUM_BITS) - 1) << DP_RX_DESC_POOL_ID_SHIFT)
174 #define RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK	\
175 			(((1 << DP_RX_DESC_PAGE_ID_NUM_BITS) - 1) << \
176 			 DP_RX_DESC_PAGE_ID_SHIFT)
177 #define RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK \
178 			((1 << DP_RX_DESC_OFFSET_NUM_BITS) - 1)
179 #define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(_cookie)		\
180 	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK) >>	\
181 			DP_RX_DESC_POOL_ID_SHIFT)
182 #define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(_cookie)		\
183 	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK) >>	\
184 			DP_RX_DESC_PAGE_ID_SHIFT)
185 #define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(_cookie)		\
186 	((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK)
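
/*
 * Example (illustrative, values are made up, not from the source): with
 * the 8/8/4-bit layout above, a multi-page cookie packs as
 * (pool_id << 16) | (page_id << 8) | offset.
 *
 *	cookie = 0x10320;	pool_id 1, page_id 3, offset 0x20
 *
 *	DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(0x10320) == 1
 *	DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(0x10320) == 3
 *	DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(0x10320)  == 0x20
 */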
187 
188 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
189 
190 #define RX_DESC_COOKIE_INDEX_SHIFT		0
191 #define RX_DESC_COOKIE_INDEX_MASK		0x3ffff /* 18 bits */
192 #define RX_DESC_COOKIE_POOL_ID_SHIFT		18
193 #define RX_DESC_COOKIE_POOL_ID_MASK		0x1c0000
194 
195 #define DP_RX_DESC_COOKIE_MAX	\
196 	(RX_DESC_COOKIE_INDEX_MASK | RX_DESC_COOKIE_POOL_ID_MASK)
197 
198 #define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie)		\
199 	(((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >>	\
200 			RX_DESC_COOKIE_POOL_ID_SHIFT)
201 
202 #define DP_RX_DESC_COOKIE_INDEX_GET(_cookie)		\
203 	(((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >>	\
204 			RX_DESC_COOKIE_INDEX_SHIFT)
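
/*
 * Example (illustrative, values are made up, not from the source): the
 * 21-bit cookie packs as (pool_id << 18) | index.
 *
 *	cookie = 0x80005;	pool_id 2, index 5
 *
 *	DP_RX_DESC_COOKIE_POOL_ID_GET(0x80005) == 2
 *	DP_RX_DESC_COOKIE_INDEX_GET(0x80005)   == 5
 */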
205 
206 #define dp_rx_add_to_free_desc_list(head, tail, new) \
207 	__dp_rx_add_to_free_desc_list(head, tail, new, __func__)
208 
209 #define dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
210 				num_buffers, desc_list, tail) \
211 	__dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
212 				  num_buffers, desc_list, tail, __func__)
213 
214 #ifdef DP_RX_SPECIAL_FRAME_NEED
215 /**
216  * dp_rx_is_special_frame() - check whether the RX frame is a special frame
217  *
218  * @nbuf: RX skb pointer
219  * @frame_mask: the mask of special frame types to check for
220  *
221  * Check whether the received frame matches any of the special frame
222  * types requested in frame_mask
223  *
224  * Return: true - frame is a wanted special frame, false - not
224  */
225 static inline
226 bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
227 {
228 	if (((frame_mask & FRAME_MASK_IPV4_ARP) &&
229 	     qdf_nbuf_is_ipv4_arp_pkt(nbuf)) ||
230 	    ((frame_mask & FRAME_MASK_IPV4_DHCP) &&
231 	     qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) ||
232 	    ((frame_mask & FRAME_MASK_IPV4_EAPOL) &&
233 	     qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) ||
234 	    ((frame_mask & FRAME_MASK_IPV6_DHCP) &&
235 	     qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))
236 		return true;
237 
238 	return false;
239 }
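
/*
 * Illustrative usage sketch (not from the source): request ARP and EAPOL
 * frames only, then test each received nbuf against that mask.
 *
 *	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_EAPOL;
 *
 *	if (dp_rx_is_special_frame(nbuf, frame_mask))
 *		... deliver via the special-frame path ...
 */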
240 
241 /**
242  * dp_rx_deliver_special_frame() - Deliver the RX special frame to stack
243  *				   if matches mask
244  *
245  * @soc: Datapath soc handler
246  * @peer: pointer to DP peer
247  * @nbuf: pointer to the skb of RX frame
248  * @frame_mask: the mask of special frame types to check for
249  * @rx_tlv_hdr: start of rx tlv header
250  *
251  * Note: msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and
252  * a single nbuf is expected.
253  *
254  * Return: true - nbuf has been delivered to the stack, false - not.
255  */
256 bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer,
257 				 qdf_nbuf_t nbuf, uint32_t frame_mask,
258 				 uint8_t *rx_tlv_hdr);
259 #else
260 static inline
261 bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
262 {
263 	return false;
264 }
265 
266 static inline
267 bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer,
268 				 qdf_nbuf_t nbuf, uint32_t frame_mask,
269 				 uint8_t *rx_tlv_hdr)
270 {
271 	return false;
272 }
273 #endif
274 
275 #ifndef QCA_HOST_MODE_WIFI_DISABLED
276 #ifdef DP_RX_DISABLE_NDI_MDNS_FORWARDING
277 static inline
278 bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
279 {
280 	if (ta_peer->vdev->opmode == wlan_op_mode_ndi &&
281 	    qdf_nbuf_is_ipv6_mdns_pkt(nbuf)) {
282 		DP_STATS_INC(ta_peer, rx.intra_bss.mdns_no_fwd, 1);
283 		return false;
284 	}
285 	return true;
286 }
287 #else
288 static inline
289 bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
290 {
291 	return true;
292 }
293 #endif
294 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
295 
296 /* DOC: Offset to obtain LLC hdr
297  *
298  * In the case of a Wifi parse error, to reach the LLC header from
299  * the beginning of the VLAN tag we need to skip 8 bytes:
300  * VLAN tag(4) + length(2) + length added by HW(2) = 8 bytes.
303  */
304 #define DP_SKIP_VLAN		8
305 
306 #ifndef QCA_HOST_MODE_WIFI_DISABLED
307 
308 /**
309  * struct dp_rx_cached_buf - rx cached buffer
310  * @node: linked list node
311  * @buf: skb buffer
312  */
313 struct dp_rx_cached_buf {
314 	qdf_list_node_t node;
315 	qdf_nbuf_t buf;
316 };
317 
318 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
319 
320 /*
321  * dp_rx_xor_block() - xor block of data
322  * @b: destination data block
323  * @a: source data block
324  * @len: length of the data to process
325  *
326  * Return: None
327  */
328 static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len)
329 {
330 	qdf_size_t i;
331 
332 	for (i = 0; i < len; i++)
333 		b[i] ^= a[i];
334 }
335 
336 /*
337  * dp_rx_rotl() - rotate the bits left
338  * @val: unsigned integer input value
339  * @bits: number of bits
340  *
341  * Return: input value rotated left by 'bits' bits
342  */
343 static inline uint32_t dp_rx_rotl(uint32_t val, int bits)
344 {
345 	return (val << bits) | (val >> (32 - bits));
346 }
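
/*
 * Illustrative example (not from the source): bits rotated out of one
 * end re-enter at the other, so dp_rx_rotr() below undoes dp_rx_rotl().
 *
 *	dp_rx_rotl(0x80000001, 1) == 0x00000003
 *	dp_rx_rotr(0x00000003, 1) == 0x80000001
 */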
347 
348 /*
349  * dp_rx_rotr() - rotate the bits right
350  * @val: unsigned integer input value
351  * @bits: number of bits
352  *
353  * Return: input value rotated right by 'bits' bits
354  */
355 static inline uint32_t dp_rx_rotr(uint32_t val, int bits)
356 {
357 	return (val >> bits) | (val << (32 - bits));
358 }
359 
360 /*
361  * dp_set_rx_queue() - set queue_mapping in skb
362  * @nbuf: skb
363  * @queue_id: rx queue_id
364  *
365  * Return: void
366  */
367 #ifdef QCA_OL_RX_MULTIQ_SUPPORT
368 static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
369 {
370 	qdf_nbuf_record_rx_queue(nbuf, queue_id);
372 }
373 #else
374 static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
375 {
376 }
377 #endif
378 
379 /*
380  * dp_rx_xswap() - swap adjacent bytes within each 16-bit half
381  * @val: unsigned integer input value
382  *
383  * Return: value with adjacent bytes swapped
384  */
385 static inline uint32_t dp_rx_xswap(uint32_t val)
386 {
387 	return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
388 }
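
/*
 * Illustrative example (not from the source):
 *
 *	dp_rx_xswap(0x11223344) == 0x22114433
 */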
389 
390 /*
391  * dp_rx_get_le32_split() - assemble four bytes into a little endian 32-bit value
392  * @b0: byte 0
393  * @b1: byte 1
394  * @b2: byte 2
395  * @b3: byte 3
396  *
397  * Return: 32-bit value assembled from the four bytes (little endian)
398  */
399 static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2,
400 					uint8_t b3)
401 {
402 	return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
403 }
404 
405 /*
406  * dp_rx_get_le32() - get little endian 32 bits
407  * @p: source byte array
408  *
409  * Return: 32-bit value read from p in little endian byte order
413  */
414 static inline uint32_t dp_rx_get_le32(const uint8_t *p)
415 {
416 	return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]);
417 }
418 
419 /*
420  * dp_rx_put_le32() - put little endian 32 bits
421  * @p: destination char array
422  * @v: source 32-bit integer
423  *
424  * Returns: None
425  */
426 static inline void dp_rx_put_le32(uint8_t *p, uint32_t v)
427 {
428 	p[0] = (v) & 0xff;
429 	p[1] = (v >> 8) & 0xff;
430 	p[2] = (v >> 16) & 0xff;
431 	p[3] = (v >> 24) & 0xff;
432 }
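
/*
 * Illustrative round-trip example (not from the source):
 *
 *	uint8_t buf[4];
 *
 *	dp_rx_put_le32(buf, 0x11223344);	buf = { 0x44, 0x33, 0x22, 0x11 }
 *	dp_rx_get_le32(buf)			== 0x11223344
 */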
433 
434 /* One round of the Michael MIC block function */
435 #define dp_rx_michael_block(l, r)	\
436 	do {					\
437 		r ^= dp_rx_rotl(l, 17);	\
438 		l += r;				\
439 		r ^= dp_rx_xswap(l);		\
440 		l += r;				\
441 		r ^= dp_rx_rotl(l, 3);	\
442 		l += r;				\
443 		r ^= dp_rx_rotr(l, 2);	\
444 		l += r;				\
445 	} while (0)
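
/*
 * Illustrative sketch of how the helpers above combine (assumes the
 * standard Michael MIC construction; this loop is not code from this
 * driver): the (l, r) state is seeded from the 64-bit Michael key and
 * each little endian 32-bit word of the message is mixed in with one
 * call to dp_rx_michael_block().
 *
 *	uint32_t l = dp_rx_get_le32(key);	key bytes 0..3
 *	uint32_t r = dp_rx_get_le32(key + 4);	key bytes 4..7
 *
 *	for (i = 0; i < nwords; i++) {
 *		l ^= dp_rx_get_le32(data + 4 * i);
 *		dp_rx_michael_block(l, r);
 *	}
 */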
446 
447 /**
448  * union dp_rx_desc_list_elem_t
449  *
450  * @next		: Next pointer to form free list
451  * @rx_desc		: DP Rx descriptor
452  */
453 union dp_rx_desc_list_elem_t {
454 	union dp_rx_desc_list_elem_t *next;
455 	struct dp_rx_desc rx_desc;
456 };
457 
458 #ifdef RX_DESC_MULTI_PAGE_ALLOC
459 /**
460  * dp_rx_desc_find() - find dp rx descriptor from page ID and offset
461  * @page_id: Page ID
462  * @offset: Offset of the descriptor element
463  * @rx_pool: rx descriptor pool in which to find the element
463  *
464  * Return: RX descriptor element
465  */
466 union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
467 					      struct rx_desc_pool *rx_pool);
468 
469 static inline
470 struct dp_rx_desc *dp_get_rx_desc_from_cookie(struct dp_soc *soc,
471 					      struct rx_desc_pool *pool,
472 					      uint32_t cookie)
473 {
474 	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
475 	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
476 	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
477 	struct rx_desc_pool *rx_desc_pool;
478 	union dp_rx_desc_list_elem_t *rx_desc_elem;
479 
480 	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
481 		return NULL;
482 
483 	rx_desc_pool = &pool[pool_id];
484 	rx_desc_elem = (union dp_rx_desc_list_elem_t *)
485 		(rx_desc_pool->desc_pages.cacheable_pages[page_id] +
486 		rx_desc_pool->elem_size * offset);
487 
488 	return &rx_desc_elem->rx_desc;
489 }
490 
491 /**
492  * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
493  *			 the Rx descriptor on Rx DMA source ring buffer
494  * @soc: core txrx main context
495  * @cookie: cookie used to lookup virtual address
496  *
497  * Return: Pointer to the Rx descriptor
498  */
499 static inline
500 struct dp_rx_desc *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc,
501 					       uint32_t cookie)
502 {
503 	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_buf[0], cookie);
504 }
505 
506 /**
507  * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
508  *			 the Rx descriptor on monitor ring buffer
509  * @soc: core txrx main context
510  * @cookie: cookie used to lookup virtual address
511  *
512  * Return: Pointer to the Rx descriptor
513  */
514 static inline
515 struct dp_rx_desc *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc,
516 					     uint32_t cookie)
517 {
518 	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_mon[0], cookie);
519 }
520 
521 /**
522  * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
523  *			 the Rx descriptor on monitor status ring buffer
524  * @soc: core txrx main context
525  * @cookie: cookie used to lookup virtual address
526  *
527  * Return: Pointer to the Rx descriptor
528  */
529 static inline
530 struct dp_rx_desc *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc,
531 						uint32_t cookie)
532 {
533 	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_status[0], cookie);
534 }
535 #else
536 
537 void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
538 			  uint32_t pool_size,
539 			  struct rx_desc_pool *rx_desc_pool);
540 
541 /**
542  * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
543  *			 the Rx descriptor on Rx DMA source ring buffer
544  * @soc: core txrx main context
545  * @cookie: cookie used to lookup virtual address
546  *
547  * Return: void *: Virtual Address of the Rx descriptor
548  */
549 static inline
550 void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie)
551 {
552 	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
553 	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
554 	struct rx_desc_pool *rx_desc_pool;
555 
556 	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
557 		return NULL;
558 
559 	rx_desc_pool = &soc->rx_desc_buf[pool_id];
560 
561 	if (qdf_unlikely(index >= rx_desc_pool->pool_size))
562 		return NULL;
563 
564 	return &rx_desc_pool->array[index].rx_desc;
565 }
566 
567 /**
568  * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
569  *			 the Rx descriptor on monitor ring buffer
570  * @soc: core txrx main context
571  * @cookie: cookie used to lookup virtual address
572  *
573  * Return: void *: Virtual Address of the Rx descriptor
574  */
575 static inline
576 void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie)
577 {
578 	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
579 	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
580 	/* TODO */
581 	/* Add sanity for pool_id & index */
582 	return &(soc->rx_desc_mon[pool_id].array[index].rx_desc);
583 }
584 
585 /**
586  * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
587  *			 the Rx descriptor on monitor status ring buffer
588  * @soc: core txrx main context
589  * @cookie: cookie used to lookup virtual address
590  *
591  * Return: void *: Virtual Address of the Rx descriptor
592  */
593 static inline
594 void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
595 {
596 	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
597 	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
598 	/* TODO */
599 	/* Add sanity for pool_id & index */
600 	return &(soc->rx_desc_status[pool_id].array[index].rx_desc);
601 }
602 #endif /* RX_DESC_MULTI_PAGE_ALLOC */
603 
604 #ifndef QCA_HOST_MODE_WIFI_DISABLED
605 
606 static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
607 {
608 	return vdev->ap_bridge_enabled;
609 }
610 
611 #ifdef DP_RX_DESC_COOKIE_INVALIDATE
612 static inline QDF_STATUS
613 dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc)
614 {
615 	if (qdf_unlikely(HAL_RX_REO_BUF_COOKIE_INVALID_GET(ring_desc)))
616 		return QDF_STATUS_E_FAILURE;
617 
618 	HAL_RX_REO_BUF_COOKIE_INVALID_SET(ring_desc);
619 	return QDF_STATUS_SUCCESS;
620 }
621 
622 /**
623  * dp_rx_cookie_reset_invalid_bit() - Reset the invalid bit of the cookie
624  *  field in ring descriptor
625  * @ring_desc: ring descriptor
626  *
627  * Return: None
628  */
629 static inline void
630 dp_rx_cookie_reset_invalid_bit(hal_ring_desc_t ring_desc)
631 {
632 	HAL_RX_REO_BUF_COOKIE_INVALID_RESET(ring_desc);
633 }
634 #else
635 static inline QDF_STATUS
636 dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc)
637 {
638 	return QDF_STATUS_SUCCESS;
639 }
640 
641 static inline void
642 dp_rx_cookie_reset_invalid_bit(hal_ring_desc_t ring_desc)
643 {
644 }
645 #endif
646 
647 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
648 
649 QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool);
650 QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
651 				 uint32_t pool_size,
652 				 struct rx_desc_pool *rx_desc_pool);
653 
654 void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
655 			  uint32_t pool_size,
656 			  struct rx_desc_pool *rx_desc_pool);
657 
658 void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
659 				union dp_rx_desc_list_elem_t **local_desc_list,
660 				union dp_rx_desc_list_elem_t **tail,
661 				uint16_t pool_id,
662 				struct rx_desc_pool *rx_desc_pool);
663 
664 uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
665 				struct rx_desc_pool *rx_desc_pool,
666 				uint16_t num_descs,
667 				union dp_rx_desc_list_elem_t **desc_list,
668 				union dp_rx_desc_list_elem_t **tail);
669 
670 QDF_STATUS dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev);
671 void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev);
672 
673 QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev);
674 void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev);
675 void dp_rx_desc_pool_deinit(struct dp_soc *soc,
676 			    struct rx_desc_pool *rx_desc_pool,
677 			    uint32_t pool_id);
678 
679 QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);
680 QDF_STATUS dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev);
681 void dp_rx_pdev_buffers_free(struct dp_pdev *pdev);
682 
683 void dp_rx_pdev_detach(struct dp_pdev *pdev);
684 
685 void dp_print_napi_stats(struct dp_soc *soc);
686 
687 /**
688  * dp_rx_vdev_detach() - detach vdev from dp rx
689  * @vdev: virtual device instance
690  *
691  * Return: QDF_STATUS_SUCCESS: success
692  *         QDF_STATUS_E_RESOURCES: Error return
693  */
694 QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev);
695 
696 #ifndef QCA_HOST_MODE_WIFI_DISABLED
697 
698 uint32_t
699 dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
700 	      uint8_t reo_ring_num,
701 	      uint32_t quota);
702 
703 /**
704  * dp_rx_err_process() - Processes error frames routed to REO error ring
705  * @int_ctx: pointer to DP interrupt context
706  * @soc: core txrx main context
707  * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be serviced
708  * @quota: No. of units (packets) that can be serviced in one shot.
709  *
710  * This function implements error processing and top level demultiplexer
711  * for all the frames routed to REO error ring.
712  *
713  * Return: uint32_t: No. of elements processed
714  */
715 uint32_t dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
716 			   hal_ring_handle_t hal_ring_hdl, uint32_t quota);
717 
718 /**
719  * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
720  * @int_ctx: pointer to DP interrupt context
721  * @soc: core txrx main context
722  * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be serviced
723  * @quota: No. of units (packets) that can be serviced in one shot.
724  *
725  * This function implements error processing and top level demultiplexer
726  * for all the frames routed to WBM2HOST sw release ring.
727  *
728  * Return: uint32_t: No. of elements processed
729  */
730 uint32_t
731 dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
732 		      hal_ring_handle_t hal_ring_hdl, uint32_t quota);
733 
734 /**
735  * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
736  *		     multiple nbufs.
737  * @soc: core txrx main context
738  * @nbuf: pointer to the first msdu of an amsdu.
739  *
740  * This function implements the creation of RX frag_list for cases
741  * where an MSDU is spread across multiple nbufs.
742  *
743  * Return: returns the head nbuf which contains complete frag_list.
744  */
745 qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf);
746 
747 
748 /*
749  * dp_rx_desc_nbuf_and_pool_free() - free the sw rx desc pool called during
750  *				     de-initialization of wifi module.
751  *
752  * @soc: core txrx main context
753  * @pool_id: pool_id which is one of 3 mac_ids
754  * @rx_desc_pool: rx descriptor pool pointer
755  *
756  * Return: None
757  */
758 void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
759 				   struct rx_desc_pool *rx_desc_pool);
760 
761 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
762 
763 /*
764  * dp_rx_desc_nbuf_free() - free the sw rx desc nbufs called during
765  *			    de-initialization of wifi module.
766  *
767  * @soc: core txrx main context
769  * @rx_desc_pool: rx descriptor pool pointer
770  *
771  * Return: None
772  */
773 void dp_rx_desc_nbuf_free(struct dp_soc *soc,
774 			  struct rx_desc_pool *rx_desc_pool);
775 
776 #ifdef DP_RX_MON_MEM_FRAG
777 /*
778  * dp_rx_desc_frag_free() - free the sw rx desc frag called during
779  *			    de-initialization of wifi module.
780  *
781  * @soc: core txrx main context
782  * @rx_desc_pool: rx descriptor pool pointer
783  *
784  * Return: None
785  */
786 void dp_rx_desc_frag_free(struct dp_soc *soc,
787 			  struct rx_desc_pool *rx_desc_pool);
788 #else
789 static inline
790 void dp_rx_desc_frag_free(struct dp_soc *soc,
791 			  struct rx_desc_pool *rx_desc_pool)
792 {
793 }
794 #endif
795 /*
796  * dp_rx_desc_pool_free() - free the sw rx desc array called during
797  *			    de-initialization of wifi module.
798  *
799  * @soc: core txrx main context
800  * @rx_desc_pool: rx descriptor pool pointer
801  *
802  * Return: None
803  */
804 void dp_rx_desc_pool_free(struct dp_soc *soc,
805 			  struct rx_desc_pool *rx_desc_pool);
806 
807 void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
808 				struct dp_peer *peer);
809 
810 #ifdef RX_DESC_LOGGING
811 /*
812  * dp_rx_desc_alloc_dbg_info() - Alloc memory for rx descriptor debug
813  *  structure
814  * @rx_desc: rx descriptor pointer
815  *
816  * Return: None
817  */
818 static inline
819 void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc)
820 {
821 	rx_desc->dbg_info = qdf_mem_malloc(sizeof(struct dp_rx_desc_dbg_info));
822 }
823 
824 /*
825  * dp_rx_desc_free_dbg_info() - Free rx descriptor debug
826  *  structure memory
827  * @rx_desc: rx descriptor pointer
828  *
829  * Return: None
830  */
831 static inline
832 void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc)
833 {
834 	qdf_mem_free(rx_desc->dbg_info);
835 }
836 
837 /*
838  * dp_rx_desc_update_dbg_info() - Update rx descriptor debug info
839  *  structure memory
840  * @rx_desc: rx descriptor pointer
841  *
842  * Return: None
843  */
844 static
845 void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc,
846 				const char *func_name, uint8_t flag)
847 {
848 	struct dp_rx_desc_dbg_info *info = rx_desc->dbg_info;
849 
850 	if (!info)
851 		return;
852 
853 	if (flag == RX_DESC_REPLENISHED) {
854 		qdf_str_lcopy(info->replenish_caller, func_name,
855 			      QDF_MEM_FUNC_NAME_SIZE);
856 		info->replenish_ts = qdf_get_log_timestamp();
857 	} else {
858 		qdf_str_lcopy(info->freelist_caller, func_name,
859 			      QDF_MEM_FUNC_NAME_SIZE);
860 		info->freelist_ts = qdf_get_log_timestamp();
861 		info->prev_nbuf = rx_desc->nbuf;
862 		info->prev_nbuf_data_addr = rx_desc->nbuf_data_addr;
863 		rx_desc->nbuf_data_addr = NULL;
864 	}
865 }
866 #else
867 
868 static inline
869 void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc)
870 {
871 }
872 
873 static inline
874 void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc)
875 {
876 }
877 
878 static inline
879 void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc,
880 				const char *func_name, uint8_t flag)
881 {
882 }
883 #endif /* RX_DESC_LOGGING */
884 
885 /**
886  * __dp_rx_add_to_free_desc_list() - Adds a descriptor to a local free list
887  *
888  * @head: pointer to the head of local free list
889  * @tail: pointer to the tail of local free list
890  * @new: new descriptor that is added to the free list
891  * @func_name: caller func name
892  *
893  * Return: void
894  */
895 static inline
896 void __dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head,
897 				 union dp_rx_desc_list_elem_t **tail,
898 				 struct dp_rx_desc *new, const char *func_name)
899 {
900 	qdf_assert(head && new);
901 
902 	dp_rx_desc_update_dbg_info(new, func_name, RX_DESC_IN_FREELIST);
903 
904 	new->nbuf = NULL;
905 	new->in_use = 0;
906 
907 	((union dp_rx_desc_list_elem_t *)new)->next = *head;
908 	*head = (union dp_rx_desc_list_elem_t *)new;
909 	/* reset tail if head->next is NULL */
910 	if (!*tail || !(*head)->next)
911 		*tail = *head;
912 }
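
/*
 * Illustrative usage sketch (not from the source): descriptors reaped in
 * a processing loop are chained onto a local free list via the
 * dp_rx_add_to_free_desc_list() wrapper, then handed back in one shot.
 *
 *	union dp_rx_desc_list_elem_t *head = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *
 *	for each consumed rx_desc:
 *		dp_rx_add_to_free_desc_list(&head, &tail, rx_desc);
 *
 *	then return the list to the pool, e.g. through
 *	dp_rx_buffers_replenish(..., &head, &tail).
 */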
913 
914 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
915 				   uint8_t mac_id);
916 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
917 		qdf_nbuf_t mpdu, bool mpdu_done, uint8_t mac_id);
918 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
919 			     uint8_t *rx_tlv_hdr, struct dp_peer *peer);
920 void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
921 		       uint16_t peer_id, uint8_t tid);
922 
923 #define DP_RX_HEAD_APPEND(head, elem) \
924 	do {                                                            \
925 		qdf_nbuf_set_next((elem), (head));			\
926 		(head) = (elem);                                        \
927 	} while (0)
928 
929 
930 #define DP_RX_LIST_APPEND(head, tail, elem) \
931 	do {                                                          \
932 		if (!(head)) {                                        \
933 			(head) = (elem);                              \
934 			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) = 1;\
935 		} else {                                              \
936 			qdf_nbuf_set_next((tail), (elem));            \
937 			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head)++;  \
938 		}                                                     \
939 		(tail) = (elem);                                      \
940 		qdf_nbuf_set_next((tail), NULL);                      \
941 	} while (0)
942 
943 #define DP_RX_MERGE_TWO_LIST(phead, ptail, chead, ctail) \
944 	do {                                                          \
945 		if (!(phead)) {                                       \
946 			(phead) = (chead);                            \
947 		} else {                                              \
948 			qdf_nbuf_set_next((ptail), (chead));          \
949 			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(phead) += \
950 			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(chead);   \
951 		}                                                     \
952 		(ptail) = (ctail);                                    \
953 		qdf_nbuf_set_next((ptail), NULL);                     \
954 	} while (0)
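
/*
 * Illustrative usage sketch (not from the source): building an nbuf
 * delivery list in the reap loop; head and tail start as NULL and
 * QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST is maintained on the head nbuf.
 *
 *	qdf_nbuf_t head = NULL;
 *	qdf_nbuf_t tail = NULL;
 *
 *	for each reaped nbuf:
 *		DP_RX_LIST_APPEND(head, tail, nbuf);
 *
 *	then deliver, e.g.
 *	dp_rx_deliver_to_stack(soc, vdev, peer, head, tail);
 */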
955 
956 #if defined(QCA_PADDR_CHECK_ON_3TH_PLATFORM)
957 /*
958  * on some third-party platforms, the memory below 0x2000
959  * is reserved for target use, so any memory allocated in this
960  * region should not be used by the host
961  */
962 #define MAX_RETRY 50
963 #define DP_PHY_ADDR_RESERVED	0x2000
964 #elif defined(BUILD_X86)
965 /*
966  * in M2M emulation platforms (x86) the memory below 0x50000000
967  * is reserved for target use, so any memory allocated in this
968  * region should not be used by host
969  */
970 #define MAX_RETRY 100
971 #define DP_PHY_ADDR_RESERVED	0x50000000
972 #endif
973 
974 #if defined(QCA_PADDR_CHECK_ON_3TH_PLATFORM) || defined(BUILD_X86)
975 /**
976  * dp_check_paddr() - check if current phy address is valid or not
977  * @dp_soc: core txrx main context
978  * @rx_netbuf: skb buffer
979  * @paddr: physical address
980  * @rx_desc_pool: struct of rx descriptor pool
981  * If the physical address of nbuf->data is not above DP_PHY_ADDR_RESERVED,
982  * add the nbuf to the invalid queue and try allocating a new one,
983  * retrying up to MAX_RETRY times.
984  *
985  * This is a temp WAR till we fix it properly.
986  *
987  * Return: success or failure.
988  */
989 static inline
990 int dp_check_paddr(struct dp_soc *dp_soc,
991 		   qdf_nbuf_t *rx_netbuf,
992 		   qdf_dma_addr_t *paddr,
993 		   struct rx_desc_pool *rx_desc_pool)
994 {
995 	uint32_t nbuf_retry = 0;
996 	int32_t ret;
997 
998 	if (qdf_likely(*paddr > DP_PHY_ADDR_RESERVED))
999 		return QDF_STATUS_SUCCESS;
1000 
1001 	do {
1002 		dp_debug("invalid phy addr 0x%llx, trying again",
1003 			 (uint64_t)(*paddr));
1004 		nbuf_retry++;
1005 		if ((*rx_netbuf)) {
1006 			/* Not freeing the buffer intentionally.
1007 			 * Observed that the same buffer was getting
1008 			 * re-allocated, resulting in longer load time and
1009 			 * WMI init timeout.
1010 			 * This buffer is anyway not useful, so skip it:
1011 			 * add such buffers to the invalid list and free
1012 			 * them at driver unload.
1013 			 */
1014 			qdf_nbuf_unmap_nbytes_single(dp_soc->osdev,
1015 						     *rx_netbuf,
1016 						     QDF_DMA_FROM_DEVICE,
1017 						     rx_desc_pool->buf_size);
1018 			qdf_nbuf_queue_add(&dp_soc->invalid_buf_queue,
1019 					   *rx_netbuf);
1020 		}
1021 
1022 		*rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
1023 					    rx_desc_pool->buf_size,
1024 					    RX_BUFFER_RESERVATION,
1025 					    rx_desc_pool->buf_alignment,
1026 					    FALSE);
1027 
1028 		if (qdf_unlikely(!(*rx_netbuf)))
1029 			return QDF_STATUS_E_FAILURE;
1030 
1031 		ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev,
1032 						 *rx_netbuf,
1033 						 QDF_DMA_FROM_DEVICE,
1034 						 rx_desc_pool->buf_size);
1035 
1036 		if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
1037 			qdf_nbuf_free(*rx_netbuf);
1038 			*rx_netbuf = NULL;
1039 			continue;
1040 		}
1041 
1042 		*paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0);
1043 
1044 		if (qdf_likely(*paddr > DP_PHY_ADDR_RESERVED))
1045 			return QDF_STATUS_SUCCESS;
1046 
1047 	} while (nbuf_retry < MAX_RETRY);
1048 
1049 	if ((*rx_netbuf)) {
1050 		qdf_nbuf_unmap_nbytes_single(dp_soc->osdev,
1051 					     *rx_netbuf,
1052 					     QDF_DMA_FROM_DEVICE,
1053 					     rx_desc_pool->buf_size);
1054 		qdf_nbuf_queue_add(&dp_soc->invalid_buf_queue,
1055 				   *rx_netbuf);
1056 	}
1057 
1058 	return QDF_STATUS_E_FAILURE;
1059 }
1060 
1061 #else
1062 static inline
1063 int dp_check_paddr(struct dp_soc *dp_soc,
1064 		   qdf_nbuf_t *rx_netbuf,
1065 		   qdf_dma_addr_t *paddr,
1066 		   struct rx_desc_pool *rx_desc_pool)
1067 {
1068 	return QDF_STATUS_SUCCESS;
1069 }
1070 
1071 #endif
1072 
1073 /**
1074  * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
1075  *				   the MSDU Link Descriptor
1076  * @soc: core txrx main context
1077  * @buf_info: buf_info includes cookie that is used to lookup
1078  * virtual address of link descriptor after deriving the page id
1079  * and the offset or index of the desc on the associated page.
1080  *
1081  * This is the VA of the link descriptor, that HAL layer later uses to
1082  * retrieve the list of MSDU's for a given MPDU.
1083  *
1084  * Return: void *: Virtual Address of the Rx descriptor
1085  */
1086 static inline
1087 void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
1088 				  struct hal_buf_info *buf_info)
1089 {
1090 	void *link_desc_va;
1091 	struct qdf_mem_multi_page_t *pages;
1092 	uint16_t page_id = LINK_DESC_COOKIE_PAGE_ID(buf_info->sw_cookie);
1093 
1094 	pages = &soc->link_desc_pages;
1095 	if (!pages)
1096 		return NULL;
1097 	if (qdf_unlikely(page_id >= pages->num_pages))
1098 		return NULL;
1099 	link_desc_va = pages->dma_pages[page_id].page_v_addr_start +
1100 		(buf_info->paddr - pages->dma_pages[page_id].page_p_addr);
1101 	return link_desc_va;
1102 }
1103 
1104 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1105 /*
1106  * dp_rx_intrabss_fwd() - API for intra-BSS forwarding of a received pkt
1108  * @soc: core txrx main context
1109  * @ta_peer: source peer entry
1110  * @rx_tlv_hdr: start address of rx tlvs
1111  * @nbuf: nbuf that has to be intrabss forwarded
1112  * @msdu_metadata: msdu metadata
1113  *
1114  * Return: true if it is forwarded else false
1115  */
1116 
1117 bool dp_rx_intrabss_fwd(struct dp_soc *soc,
1118 			struct dp_peer *ta_peer,
1119 			uint8_t *rx_tlv_hdr,
1120 			qdf_nbuf_t nbuf,
1121 			struct hal_rx_msdu_metadata msdu_metadata);
1122 
1123 #ifdef DISABLE_EAPOL_INTRABSS_FWD
1124 /*
1125  * dp_rx_intrabss_eapol_drop_check() - drop an intrabss EAPOL pkt if its
1126  *  DA is not the vdev mac addr, since fwd is not allowed for such pkts.
1127  * @soc: core txrx main context
1128  * @ta_peer: source peer entry
1129  * @rx_tlv_hdr: start address of rx tlvs
1130  * @nbuf: nbuf that has to be intrabss forwarded
1131  *
1132  * Return: true if the nbuf was dropped, else false
1133  */
1134 static inline
1135 bool dp_rx_intrabss_eapol_drop_check(struct dp_soc *soc,
1136 				     struct dp_peer *ta_peer,
1137 				     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf)
1138 {
1139 	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf) &&
1140 			 qdf_mem_cmp(qdf_nbuf_data(nbuf) +
1141 				     QDF_NBUF_DEST_MAC_OFFSET,
1142 				     ta_peer->vdev->mac_addr.raw,
1143 				     QDF_MAC_ADDR_SIZE))) {
1144 		qdf_nbuf_free(nbuf);
1145 		DP_STATS_INC(soc, rx.err.intrabss_eapol_drop, 1);
1146 		return true;
1147 	}
1148 
1149 	return false;
1150 }
1151 #else /* DISABLE_EAPOL_INTRABSS_FWD */
1152 
1153 static inline
1154 bool dp_rx_intrabss_eapol_drop_check(struct dp_soc *soc,
1155 				     struct dp_peer *ta_peer,
1156 				     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf)
1157 {
1158 	return false;
1159 }
1160 #endif /* DISABLE_EAPOL_INTRABSS_FWD */
1161 /**
1162  * dp_rx_defrag_concat() - Concatenate the fragments
1163  *
1164  * @dst: destination pointer to the buffer
1165  * @src: source pointer from where the fragment payload is to be copied
1166  *
1167  * Return: QDF_STATUS
1168  */
1169 static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src)
1170 {
1171 	/*
1172 	 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst
1173 	 * to provide space for src, the headroom portion is copied from
1174 	 * the original dst buffer to the larger new dst buffer.
1175 	 * (This is needed, because the headroom of the dst buffer
1176 	 * contains the rx desc.)
1177 	 */
1178 	if (!qdf_nbuf_cat(dst, src)) {
1179 		/*
1180 		 * qdf_nbuf_cat does not free the src memory.
1181 		 * Free src nbuf before returning
1182 		 * For failure case the caller takes of freeing the nbuf
1183 		 */
1184 		qdf_nbuf_free(src);
1185 		return QDF_STATUS_SUCCESS;
1186 	}
1187 
1188 	return QDF_STATUS_E_DEFRAG_ERROR;
1189 }
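
/*
 * Illustrative usage sketch (not from the source; next_fragment() is a
 * hypothetical helper): gluing a chain of fragment nbufs onto the head
 * fragment. On success dp_rx_defrag_concat() has already freed src; on
 * failure the caller keeps ownership and must free it.
 *
 *	while ((frag = next_fragment()) != NULL) {
 *		if (dp_rx_defrag_concat(head, frag) != QDF_STATUS_SUCCESS) {
 *			qdf_nbuf_free(frag);	caller frees on failure
 *			break;
 *		}
 *	}
 */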
1190 
1191 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1192 
1193 #ifndef FEATURE_WDS
1194 void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
1195 		    struct dp_peer *ta_peer, qdf_nbuf_t nbuf);
1196 
1197 static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc, uint16_t sa_idx, bool is_active)
1198 {
1199 	return QDF_STATUS_SUCCESS;
1200 }
1201 
1202 static inline void
1203 dp_rx_wds_srcport_learn(struct dp_soc *soc,
1204 			uint8_t *rx_tlv_hdr,
1205 			struct dp_peer *ta_peer,
1206 			qdf_nbuf_t nbuf,
1207 			struct hal_rx_msdu_metadata msdu_metadata)
1208 {
1209 }
1210 #endif
1211 
1212 /*
1213  * dp_rx_desc_dump() - dump the sw rx descriptor
1214  *
1215  * @rx_desc: sw rx descriptor
1216  */
1217 static inline void dp_rx_desc_dump(struct dp_rx_desc *rx_desc)
1218 {
1219 	dp_info("rx_desc->nbuf: %pK, rx_desc->cookie: %d, rx_desc->pool_id: %d, rx_desc->in_use: %d, rx_desc->unmapped: %d",
1220 		rx_desc->nbuf, rx_desc->cookie, rx_desc->pool_id,
1221 		rx_desc->in_use, rx_desc->unmapped);
1222 }
1223 
1224 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1225 
1226 /*
1227  * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet.
1228  *					In qwrap mode, packets originated from
1229  *					any vdev should not loopback and
1230  *					should be dropped.
1231  * @vdev: vdev on which rx packet is received
1232  * @nbuf: rx pkt
1233  *
1234  * Return: true if the rx pkt is a loopback pkt that should be dropped
1235  */
1235 #if ATH_SUPPORT_WRAP
1236 static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
1237 						qdf_nbuf_t nbuf)
1238 {
1239 	struct dp_vdev *psta_vdev;
1240 	struct dp_pdev *pdev = vdev->pdev;
1241 	uint8_t *data = qdf_nbuf_data(nbuf);
1242 
1243 	if (qdf_unlikely(vdev->proxysta_vdev)) {
1244 		/* In qwrap isolation mode, allow loopback packets as all
1245 		 * packets go to RootAP and Loopback on the mpsta.
1246 		 */
1247 		if (vdev->isolation_vdev)
1248 			return false;
1249 		TAILQ_FOREACH(psta_vdev, &pdev->vdev_list, vdev_list_elem) {
1250 			if (qdf_unlikely(psta_vdev->proxysta_vdev &&
1251 					 !qdf_mem_cmp(psta_vdev->mac_addr.raw,
1252 						      &data[QDF_MAC_ADDR_SIZE],
1253 						      QDF_MAC_ADDR_SIZE))) {
1254 				/* Drop packet if source address is equal to
1255 				 * any of the vdev addresses.
1256 				 */
1257 				return true;
1258 			}
1259 		}
1260 	}
1261 	return false;
1262 }
1263 #else
1264 static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
1265 						qdf_nbuf_t nbuf)
1266 {
1267 	return false;
1268 }
1269 #endif
1270 
1271 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1272 
1273 #if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\
1274 	defined(WLAN_SUPPORT_RX_TAG_STATISTICS) ||\
1275 	defined(WLAN_SUPPORT_RX_FLOW_TAG)
1276 #include "dp_rx_tag.h"
1277 #endif
1278 
1279 #if !defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) &&\
1280 	!defined(WLAN_SUPPORT_RX_FLOW_TAG)
1281 /**
1282  * dp_rx_update_protocol_tag() - Reads CCE metadata from the RX MSDU end TLV
1283  *                              and set the corresponding tag in QDF packet
1284  * @soc: core txrx main context
1285  * @vdev: vdev on which the packet is received
1286  * @nbuf: QDF pkt buffer on which the protocol tag should be set
1287  * @rx_tlv_hdr: base address where the RX TLVs start
1288  * @ring_index: REO ring number, not used for error & monitor ring
1289  * @is_reo_exception: flag to indicate if rx from REO ring or exception ring
1290  * @is_update_stats: flag to indicate whether to update stats or not
1291  * Return: void
1292  */
1293 static inline void
1294 dp_rx_update_protocol_tag(struct dp_soc *soc, struct dp_vdev *vdev,
1295 			  qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
1296 			  uint16_t ring_index,
1297 			  bool is_reo_exception, bool is_update_stats)
1298 {
1299 }
1300 #endif
1301 
1302 #ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
1303 /**
1304  * dp_rx_err_cce_drop() - Reads CCE metadata from the RX MSDU end TLV
1305  *                        and returns whether cce metadata matches
1306  * @soc: core txrx main context
1307  * @vdev: vdev on which the packet is received
1308  * @nbuf: QDF pkt buffer on which the protocol tag should be set
1309  * @rx_tlv_hdr: base address where the RX TLVs start
1310  * Return: bool
1311  */
1312 static inline bool
1313 dp_rx_err_cce_drop(struct dp_soc *soc, struct dp_vdev *vdev,
1314 		   qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
1315 {
1316 	return false;
1317 }
1318 
1319 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
1320 
1321 #ifndef WLAN_SUPPORT_RX_FLOW_TAG
1322 /**
1323  * dp_rx_update_flow_tag() - Reads FSE metadata from the RX MSDU end TLV
1324  *                           and set the corresponding tag in QDF packet
1325  * @soc: core txrx main context
1326  * @vdev: vdev on which the packet is received
1327  * @nbuf: QDF pkt buffer on which the protocol tag should be set
1328  * @rx_tlv_hdr: base address where the RX TLVs starts
1329  * @update_stats: flag to indicate whether to update stats or not
1330  *
1331  * Return: void
1332  */
1333 static inline void
1334 dp_rx_update_flow_tag(struct dp_soc *soc, struct dp_vdev *vdev,
1335 		      qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, bool update_stats)
1336 {
1337 }
1338 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
1339 
1340 /*
1341  * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
1342  *			       called during dp rx initialization
1343  *			       and at the end of dp_rx_process.
1344  *
1345  * @soc: core txrx main context
1346  * @mac_id: mac_id which is one of 3 mac_ids
1347  * @dp_rxdma_srng: dp rxdma circular ring
1348  * @rx_desc_pool: Pointer to free Rx descriptor pool
1349  * @num_req_buffers: number of buffers to be replenished
1350  * @desc_list: list of descs if called from dp_rx_process
1351  *	       or NULL during dp rx initialization or out of buffer
1352  *	       interrupt.
1353  * @tail: tail of descs list
1354  * @func_name: name of the caller function
1355  * Return: return success or failure
1356  */
1357 QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
1358 				 struct dp_srng *dp_rxdma_srng,
1359 				 struct rx_desc_pool *rx_desc_pool,
1360 				 uint32_t num_req_buffers,
1361 				 union dp_rx_desc_list_elem_t **desc_list,
1362 				 union dp_rx_desc_list_elem_t **tail,
1363 				 const char *func_name);
1364 
1365 /*
1366  * dp_pdev_rx_buffers_attach() - replenish rxdma ring with rx nbufs
1367  *                               called during dp rx initialization
1368  *
1369  * @soc: core txrx main context
1370  * @mac_id: mac_id which is one of 3 mac_ids
1371  * @dp_rxdma_srng: dp rxdma circular ring
1372  * @rx_desc_pool: Pointer to free Rx descriptor pool
1373  * @num_req_buffers: number of buffers to be replenished
1374  *
1375  * Return: return success or failure
1376  */
1377 QDF_STATUS
1378 dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
1379 			  struct dp_srng *dp_rxdma_srng,
1380 			  struct rx_desc_pool *rx_desc_pool,
1381 			  uint32_t num_req_buffers);
1382 
1383 /**
1384  * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
1385  *			      (WBM), following error handling
1386  *
1387  * @soc: core DP main context
1388  * @ring_desc: opaque pointer to the REO error ring descriptor
1390  * @bm_action: put to idle_list or release to msdu_list
1391  *
1392  * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
1393  */
1394 QDF_STATUS
1395 dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
1396 		       uint8_t bm_action);
1397 
1398 /**
1399  * dp_rx_link_desc_return_by_addr - Return a MPDU link descriptor to
1400  *					(WBM) by address
1401  *
1402  * @soc: core DP main context
1403  * @link_desc_addr: link descriptor addr
1404  *
1405  * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
1406  */
1407 QDF_STATUS
1408 dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
1409 			       hal_buff_addrinfo_t link_desc_addr,
1410 			       uint8_t bm_action);
1411 
1412 /**
1413  * dp_rxdma_err_process() - RxDMA error processing functionality
1414  * @int_ctx: pointer to DP interrupt context
1415  * @soc: core txrx main context
1416  * @mac_id: mac id which is one of 3 mac_ids
1417  * @quota: No. of units (packets) that can be serviced in one shot.
1418  *
1419  * Return: num of buffers processed
1420  */
1421 uint32_t
1422 dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
1423 		     uint32_t mac_id, uint32_t quota);
1424 
1425 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1426 				uint8_t *rx_tlv_hdr, struct dp_peer *peer);
1427 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1428 					uint8_t *rx_tlv_hdr);
1429 
1430 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
1431 			   struct dp_peer *peer);
1432 
1433 /*
1434  * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
1435  *
1436  * @soc: core txrx main context
1437  * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
1438  * @ring_desc: opaque pointer to the RX ring descriptor
1439  * @rx_desc: host rx descriptor
1440  *
1441  * Return: void
1442  */
1443 void dp_rx_dump_info_and_assert(struct dp_soc *soc,
1444 				hal_ring_handle_t hal_ring_hdl,
1445 				hal_ring_desc_t ring_desc,
1446 				struct dp_rx_desc *rx_desc);
1447 
1448 void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
1449 
1450 #ifdef QCA_PEER_EXT_STATS
1451 void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
1452 			     qdf_nbuf_t nbuf);
1453 #endif /* QCA_PEER_EXT_STATS */
1454 
1455 #ifdef RX_DESC_DEBUG_CHECK
1456 /**
1457  * dp_rx_desc_check_magic() - check the magic value in dp_rx_desc
1458  * @rx_desc: rx descriptor pointer
1459  *
1460  * Return: true, if magic is correct, else false.
1461  */
1462 static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
1463 {
1464 	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
1465 		return false;
1466 
1467 	rx_desc->magic = 0;
1468 	return true;
1469 }
1470 
1471 /**
1472  * dp_rx_desc_prep() - prepare rx desc
1473  * @rx_desc: rx descriptor pointer to be prepared
1474  * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info *
1475  *
1476  * Note: assumption is that we are associating a nbuf which is mapped
1477  *
1478  * Return: none
1479  */
1480 static inline
1481 void dp_rx_desc_prep(struct dp_rx_desc *rx_desc,
1482 		     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
1483 {
1484 	rx_desc->magic = DP_RX_DESC_MAGIC;
1485 	rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf;
1486 	rx_desc->unmapped = 0;
1487 	rx_desc->nbuf_data_addr = (uint8_t *)qdf_nbuf_data(rx_desc->nbuf);
1488 }
1489 
1490 /**
1491  * dp_rx_desc_frag_prep() - prepare rx desc
1492  * @rx_desc: rx descriptor pointer to be prepared
1493  * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info *
1494  *
1495  * Note: assumption is that the frag address is mapped
1496  *
1497  * Return: none
1498  */
1499 #ifdef DP_RX_MON_MEM_FRAG
1500 static inline
1501 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
1502 			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
1503 {
1504 	rx_desc->magic = DP_RX_DESC_MAGIC;
1505 	rx_desc->rx_buf_start =
1506 		(uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr);
1507 	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
1508 	rx_desc->unmapped = 0;
1509 }
1510 #else
1511 static inline
1512 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
1513 			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
1514 {
1515 }
1516 #endif /* DP_RX_MON_MEM_FRAG */
1517 
1518 /**
1519  * dp_rx_desc_paddr_sanity_check() - paddr sanity for ring desc vs rx_desc
1520  * @rx_desc: rx descriptor
1521  * @ring_paddr: paddr obtained from the ring
1522  *
1523  * Return: true if ring_paddr matches the paddr of rx_desc->nbuf, else false
1524  */
1525 static inline
1526 bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc,
1527 				   uint64_t ring_paddr)
1528 {
1529 	return (ring_paddr == qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0));
1530 }
1531 #else
1532 
1533 static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
1534 {
1535 	return true;
1536 }
1537 
1538 static inline
1539 void dp_rx_desc_prep(struct dp_rx_desc *rx_desc,
1540 		     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
1541 {
1542 	rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf;
1543 	rx_desc->unmapped = 0;
1544 }
1545 
1546 #ifdef DP_RX_MON_MEM_FRAG
1547 static inline
1548 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
1549 			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
1550 {
1551 	rx_desc->rx_buf_start =
1552 		(uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr);
1553 	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
1554 	rx_desc->unmapped = 0;
1555 }
1556 #else
1557 static inline
1558 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
1559 			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
1560 {
1561 }
1562 #endif /* DP_RX_MON_MEM_FRAG */
1563 
1564 static inline
1565 bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc,
1566 				   uint64_t ring_paddr)
1567 {
1568 	return true;
1569 }
1570 #endif /* RX_DESC_DEBUG_CHECK */
1571 
1572 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
1573 				bool is_mon_dest_desc);
1574 
1575 void dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
1576 			     uint8_t *rx_tlv_hdr, struct dp_peer *peer,
1577 			     uint8_t err_code, uint8_t mac_id);
1578 
1579 #ifndef QCA_MULTIPASS_SUPPORT
1580 static inline
1581 bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf, uint8_t tid)
1582 {
1583 	return false;
1584 }
1585 #else
1586 bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf,
1587 			     uint8_t tid);
1588 #endif
1589 
1590 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1591 
1592 #ifndef WLAN_RX_PKT_CAPTURE_ENH
1593 static inline
1594 QDF_STATUS dp_peer_set_rx_capture_enabled(struct dp_pdev *pdev,
1595 					  struct dp_peer *peer_handle,
1596 					  bool value, uint8_t *mac_addr)
1597 {
1598 	return QDF_STATUS_SUCCESS;
1599 }
1600 #endif
1601 
1602 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1603 
1604 /**
1605  * dp_rx_deliver_to_stack() - deliver pkts to network stack
1606  * Caller to hold peer refcount and check for valid peer
1607  * @soc: soc
1608  * @vdev: vdev
1609  * @peer: peer
1610  * @nbuf_head: skb list head
1611  * @nbuf_tail: skb list tail
1612  *
1613  * Return: QDF_STATUS
1614  */
1615 QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc,
1616 				  struct dp_vdev *vdev,
1617 				  struct dp_peer *peer,
1618 				  qdf_nbuf_t nbuf_head,
1619 				  qdf_nbuf_t nbuf_tail);
1620 
1621 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
1622 /**
1623  * dp_rx_eapol_deliver_to_stack() - deliver pkts to network stack
1624  * caller to hold peer refcount and check for valid peer
1625  * @soc: soc
1626  * @vdev: vdev
1627  * @peer: peer
1628  * @nbuf_head: skb list head
1629  * @nbuf_tail: skb list tail
1630  *
1631  * return: QDF_STATUS
1632  */
1633 QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc,
1634 					struct dp_vdev *vdev,
1635 					struct dp_peer *peer,
1636 					qdf_nbuf_t nbuf_head,
1637 					qdf_nbuf_t nbuf_tail);
1638 #endif
1639 
1640 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1641 
1642 #ifdef QCA_OL_RX_LOCK_LESS_ACCESS
1643 /*
1644  * dp_rx_srng_access_start() - Wrapper function to log access start of a hal ring
1645  * @int_ctx: pointer to DP interrupt context
1646  * @soc: DP soc structure pointer
1647  * @hal_ring_hdl: HAL ring handle
1648  *
1649  * Return: 0 on success; error on failure
1650  */
1651 static inline int
1652 dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc,
1653 			hal_ring_handle_t hal_ring_hdl)
1654 {
1655 	return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl);
1656 }
1657 
1658 /*
1659  * dp_rx_srng_access_end() - Wrapper function to log access end of a hal ring
1660  * @int_ctx: pointer to DP interrupt context
1661  * @soc: DP soc structure pointer
1662  * @hal_ring_hdl: HAL ring handle
1663  *
1664  * Return: None
1665  */
1666 static inline void
1667 dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc,
1668 		      hal_ring_handle_t hal_ring_hdl)
1669 {
1670 	hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl);
1671 }
1672 #else
1673 static inline int
1674 dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc,
1675 			hal_ring_handle_t hal_ring_hdl)
1676 {
1677 	return dp_srng_access_start(int_ctx, soc, hal_ring_hdl);
1678 }
1679 
1680 static inline void
1681 dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc,
1682 		      hal_ring_handle_t hal_ring_hdl)
1683 {
1684 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
1685 }
1686 #endif
1687 
1688 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1689 
1690 /*
1691  * dp_rx_wbm_sg_list_reset() - Initialize sg list
1692  *
1693  * This API should be called at soc init and after every sg processing.
1694  * @soc: DP SOC handle
1695  */
1696 static inline void dp_rx_wbm_sg_list_reset(struct dp_soc *soc)
1697 {
1698 	if (soc) {
1699 		soc->wbm_sg_param.wbm_is_first_msdu_in_sg = false;
1700 		soc->wbm_sg_param.wbm_sg_nbuf_head = NULL;
1701 		soc->wbm_sg_param.wbm_sg_nbuf_tail = NULL;
1702 		soc->wbm_sg_param.wbm_sg_desc_msdu_len = 0;
1703 	}
1704 }
1705 
1706 /*
1707  * dp_rx_wbm_sg_list_deinit() - De-initialize sg list
1708  *
1709  * This API should be called in the down path, to avoid any leak.
1710  * @soc: DP SOC handle
1711  */
1712 static inline void dp_rx_wbm_sg_list_deinit(struct dp_soc *soc)
1713 {
1714 	if (soc) {
1715 		if (soc->wbm_sg_param.wbm_sg_nbuf_head)
1716 			qdf_nbuf_list_free(soc->wbm_sg_param.wbm_sg_nbuf_head);
1717 
1718 		dp_rx_wbm_sg_list_reset(soc);
1719 	}
1720 }
1721 
1722 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1723 
1724 #ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
1725 #define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \
1726 	do {								   \
1727 		if (!soc->rx_buff_pool[rx_desc->pool_id].is_initialized) { \
1728 			DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf);	   \
1729 			break;						   \
1730 		}							   \
1731 		DP_RX_LIST_APPEND(ebuf_head, ebuf_tail, rx_desc->nbuf);	   \
1732 		if (!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)) {	   \
1733 			if (!dp_rx_buffer_pool_refill(soc, ebuf_head,	   \
1734 						      rx_desc->pool_id))   \
1735 				DP_RX_MERGE_TWO_LIST(head, tail,	   \
1736 						     ebuf_head, ebuf_tail);\
1737 			ebuf_head = NULL;				   \
1738 			ebuf_tail = NULL;				   \
1739 		}							   \
1740 	} while (0)
1741 #else
1742 #define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \
1743 	DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf)
1744 #endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */
1745 
1746 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
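/*
 * Illustrative sketch (not part of this header): the reap loop invokes the
 * macro above once per reaped descriptor to build the delivery list; the
 * list head/tail variable names are placeholders, e.g.
 *
 *	DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail,
 *			   ebuf_head, ebuf_tail, rx_desc);
 */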
1747 
1748 /**
1749  * dp_rx_link_desc_refill_duplicate_check() - check if the link desc to be
1750  *					       refilled is a duplicate
1751  * @soc: DP SOC handle
1752  * @buf_info: the last link desc buf info
1753  * @ring_buf_info: current buf address pointer including link desc
1754  *
1755  * Return: None
1756  */
1757 void dp_rx_link_desc_refill_duplicate_check(
1758 				struct dp_soc *soc,
1759 				struct hal_buf_info *buf_info,
1760 				hal_buff_addrinfo_t ring_buf_info);
1761 
1762 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
1763 /**
1764  * dp_rx_deliver_to_pkt_capture() - deliver rx packet to packet capture
1765  * @soc: dp_soc handle
1766  * @pdev: dp_pdev handle
1767  * @peer_id: peer_id of the peer for which completion came
1768  * @is_offload: flag indicating whether the pkt arrived via the offload path
1769  * @netbuf: Buffer pointer
1770  *
1771  * This function is used to deliver an rx packet to the packet capture module
1772  */
1773 void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc,  struct dp_pdev *pdev,
1774 				  uint16_t peer_id, uint32_t is_offload,
1775 				  qdf_nbuf_t netbuf);
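/**
 * dp_rx_deliver_to_pkt_capture_no_peer() - deliver rx packet to packet
 *					    capture when no peer is found
 * @soc: dp_soc handle
 * @nbuf: Buffer pointer
 * @is_offload: flag indicating whether the pkt arrived via the offload path
 */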
1776 void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
1777 					  uint32_t is_offload);
1778 #else
1779 static inline void
1780 dp_rx_deliver_to_pkt_capture(struct dp_soc *soc,  struct dp_pdev *pdev,
1781 			     uint16_t peer_id, uint32_t is_offload,
1782 			     qdf_nbuf_t netbuf)
1783 {
1784 }
1785 
1786 static inline void
1787 dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
1788 				     uint32_t is_offload)
1789 {
1790 }
1791 #endif
1792 
1793 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1794 #ifdef FEATURE_MEC
1795 /**
1796  * dp_rx_mcast_echo_check() - check if the mcast pkt is looped back
1797  *			       on the same vap or a different vap
1798  * @soc: core DP main context
1799  * @peer: dp peer handle
1800  * @rx_tlv_hdr: start of the rx TLV header
1801  * @nbuf: pkt buffer
1802  *
1803  * Return: bool (true if it is a looped back pkt else false)
1804  *
1805  */
1806 bool dp_rx_mcast_echo_check(struct dp_soc *soc,
1807 			    struct dp_peer *peer,
1808 			    uint8_t *rx_tlv_hdr,
1809 			    qdf_nbuf_t nbuf);
1810 #else
1811 static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
1812 					  struct dp_peer *peer,
1813 					  uint8_t *rx_tlv_hdr,
1814 					  qdf_nbuf_t nbuf)
1815 {
1816 	return false;
1817 }
1818 #endif /* FEATURE_MEC */
1819 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
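/*
 * Illustrative sketch (not part of this header): an rx path may drop frames
 * flagged as multicast echoes, e.g.
 *
 *	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
 *		qdf_nbuf_free(nbuf);
 *		continue;
 *	}
 */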
1820 
1821 #ifdef RECEIVE_OFFLOAD
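/**
 * dp_rx_fill_gro_info() - fill GRO related info in the nbuf from rx TLVs
 * @soc: DP SOC handle
 * @rx_tlv: pointer to the start of RX TLV headers
 * @msdu: msdu for which GRO info needs to be filled
 * @rx_ol_pkt_cnt: counter of rx offload packets
 */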
1822 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
1823 			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt);
1824 #else
1825 static inline
1826 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
1827 			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
1828 {
1829 }
1830 #endif
1831 
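/**
 * dp_rx_msdu_stats_update() - update per-peer and per-tid rx stats for an msdu
 * @soc: core DP main context
 * @nbuf: pkt buffer
 * @rx_tlv_hdr: start of the rx TLV header
 * @peer: dp peer handle
 * @ring_id: REO dest ring number on which pkt is reaped
 * @tid_stats: per-tid rx stats
 */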
1832 void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
1833 			     uint8_t *rx_tlv_hdr, struct dp_peer *peer,
1834 			     uint8_t ring_id,
1835 			     struct cdp_tid_rx_stats *tid_stats);
1836 
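/**
 * dp_rx_deliver_to_stack_no_peer() - deliver an rx frame to the stack in the
 *				      absence of a peer entry
 * @soc: core DP main context
 * @nbuf: pkt buffer
 */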
1837 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf);
1838 
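/**
 * dp_rx_srng_get_num_pending() - get number of pending entries in the ring
 * @hal_soc: HAL soc handle
 * @hal_ring_hdl: HAL ring handle
 * @num_entries: number of entries in the ring
 * @near_full: set to true when the number of pending entries is close to
 *	       the ring capacity
 *
 * Return: number of pending entries
 */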
1839 uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
1840 				    hal_ring_handle_t hal_ring_hdl,
1841 				    uint32_t num_entries,
1842 				    bool *near_full);
1843 
1844 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
1845 void dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
1846 			     hal_ring_desc_t ring_desc);
1847 #else
1848 static inline void
1849 dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
1850 			hal_ring_desc_t ring_desc)
1851 {
1852 }
1853 #endif
1854 
1855 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1856 #ifdef RX_DESC_SANITY_WAR
1857 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
1858 			     hal_ring_handle_t hal_ring_hdl,
1859 			     hal_ring_desc_t ring_desc,
1860 			     struct dp_rx_desc *rx_desc);
1861 #else
1862 static inline
1863 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
1864 			     hal_ring_handle_t hal_ring_hdl,
1865 			     hal_ring_desc_t ring_desc,
1866 			     struct dp_rx_desc *rx_desc)
1867 {
1868 	return QDF_STATUS_SUCCESS;
1869 }
1870 #endif
1871 
1872 #ifdef DP_RX_DROP_RAW_FRM
1873 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf);
1874 #else
1875 static inline
1876 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)
1877 {
1878 	return false;
1879 }
1880 #endif
1881 
1882 #ifdef RX_DESC_DEBUG_CHECK
1883 QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
1884 					hal_ring_desc_t ring_desc,
1885 					struct dp_rx_desc *rx_desc);
1886 #else
1887 static inline
1888 QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
1889 					hal_ring_desc_t ring_desc,
1890 					struct dp_rx_desc *rx_desc)
1891 {
1892 	return QDF_STATUS_SUCCESS;
1893 }
1894 #endif
1895 
1896 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
1897 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf);
1898 #else
1899 static inline
1900 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf)
1901 {
1902 }
1903 #endif
1904 
1905 /**
1906  * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
1907  * @pdev: pdev handle
1908  * @nbuf: pointer to the first msdu of an amsdu.
1909  * @rx_tlv_hdr: pointer to the start of RX TLV headers.
1910  *
1911  * The ip_summed field of the skb is set based on whether HW validated the
1912  * IP/TCP/UDP checksum.
1913  * Return: void
1914  */
1915 static inline
1916 void dp_rx_cksum_offload(struct dp_pdev *pdev,
1917 			 qdf_nbuf_t nbuf,
1918 			 uint8_t *rx_tlv_hdr)
1919 {
1920 	qdf_nbuf_rx_cksum_t cksum = {0};
1921 	/* TODO: Move this to a ring desc API - use
1922 	 * HAL_RX_MSDU_DESC_IP_CHKSUM_FAIL_GET and
1923 	 * HAL_RX_MSDU_DESC_TCP_UDP_CHKSUM_FAIL_GET */
1924 	uint32_t ip_csum_err, tcp_udp_csum_err;
1925 
1926 	hal_rx_tlv_csum_err_get(pdev->soc->hal_soc, rx_tlv_hdr, &ip_csum_err,
1927 				&tcp_udp_csum_err);
1928 
1929 	if (qdf_likely(!ip_csum_err && !tcp_udp_csum_err)) {
1930 		cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
1931 		qdf_nbuf_set_rx_cksum(nbuf, &cksum);
1932 	} else {
1933 		DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err);
1934 		DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_err);
1935 	}
1936 }
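/*
 * Illustrative sketch (not part of this header): the rx path calls the
 * helper above once per msdu before stack delivery, e.g.
 *
 *	dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);
 */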
1937 
1938 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1939 
1940 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
1941 static inline
1942 bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
1943 				   int max_reap_limit)
1944 {
1945 	bool limit_hit = (num_reaped >= max_reap_limit);
1946 
1950 	if (limit_hit)
1951 		DP_STATS_INC(soc, rx.reap_loop_pkt_limit_hit, 1)
1952 
1953 	return limit_hit;
1954 }
1955 
1956 static inline
1957 bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
1958 {
1959 	return soc->wlan_cfg_ctx->rx_enable_eol_data_check;
1960 }
1961 
1962 static inline int dp_rx_get_loop_pkt_limit(struct dp_soc *soc)
1963 {
1964 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
1965 
1966 	return cfg->rx_reap_loop_pkt_limit;
1967 }
1968 #else
1969 static inline
1970 bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
1971 				   int max_reap_limit)
1972 {
1973 	return false;
1974 }
1975 
1976 static inline
1977 bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
1978 {
1979 	return false;
1980 }
1981 
1982 static inline int dp_rx_get_loop_pkt_limit(struct dp_soc *soc)
1983 {
1984 	return 0;
1985 }
1986 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
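/*
 * Illustrative sketch (not part of this header): a reap loop combines the
 * helpers above to bound the work done per softirq pass; num_rx_bufs_reaped
 * is a placeholder name, e.g.
 *
 *	max_reap_limit = dp_rx_get_loop_pkt_limit(soc);
 *	while (...) {
 *		...
 *		if (dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped,
 *						  max_reap_limit))
 *			break;
 *	}
 */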
1987 
1990 #ifdef QCA_SUPPORT_WDS_EXTENDED
1991 /**
1992  * dp_rx_is_list_ready() - Make different lists for 4-address
1993  *			    and 3-address frames
1994  * @nbuf_head: skb list head
1995  * @vdev: vdev
1996  * @peer: peer
1997  * @peer_id: peer id of new received frame
1998  * @vdev_id: vdev_id of new received frame
1999  *
2000  * Return: true if peer_ids are different.
2001  */
2002 static inline bool
2003 dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
2004 		    struct dp_vdev *vdev,
2005 		    struct dp_peer *peer,
2006 		    uint16_t peer_id,
2007 		    uint8_t vdev_id)
2008 {
2009 	if (nbuf_head && peer && (peer->peer_id != peer_id))
2010 		return true;
2011 
2012 	return false;
2013 }
2014 #else
2015 static inline bool
2016 dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
2017 		    struct dp_vdev *vdev,
2018 		    struct dp_peer *peer,
2019 		    uint16_t peer_id,
2020 		    uint8_t vdev_id)
2021 {
2022 	if (nbuf_head && vdev && (vdev->vdev_id != vdev_id))
2023 		return true;
2024 
2025 	return false;
2026 }
2027 #endif
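/*
 * Illustrative sketch (not part of this header): the rx loop flushes the
 * accumulated skb list before switching to a new peer/vdev; the
 * deliver_list_head/tail names are placeholders, e.g.
 *
 *	if (dp_rx_is_list_ready(deliver_list_head, vdev, peer,
 *				peer_id, vdev_id)) {
 *		dp_rx_deliver_to_stack(soc, vdev, peer,
 *				       deliver_list_head, deliver_list_tail);
 *		deliver_list_head = NULL;
 *		deliver_list_tail = NULL;
 *	}
 */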
2028 
2029 /**
2030  * dp_rx_desc_pool_init_generic() - Generic Rx descriptors initialization
2031  * @soc: SOC handle
2032  * @rx_desc_pool: pointer to RX descriptor pool
2033  * @pool_id: pool ID
2034  *
2035  * Return: QDF_STATUS
2036  */
2037 QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
2038 				  struct rx_desc_pool *rx_desc_pool,
2039 				  uint32_t pool_id);
2040 
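/**
 * dp_rx_desc_pool_deinit_generic() - Generic Rx descriptors de-initialization
 * @soc: SOC handle
 * @rx_desc_pool: pointer to RX descriptor pool
 * @pool_id: pool ID
 *
 * Return: None
 */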
2041 void dp_rx_desc_pool_deinit_generic(struct dp_soc *soc,
2042 				  struct rx_desc_pool *rx_desc_pool,
2043 				  uint32_t pool_id);
2044 
2045 #endif /* _DP_RX_H */
2046