xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.h (revision c7eaf5ac989ac229214b8317faa3e981d261e7db)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef _DP_RX_H
21 #define _DP_RX_H
22 
23 #include "hal_rx.h"
24 #include "dp_peer.h"
25 #include "dp_internal.h"
26 #include <qdf_tracepoint.h>
27 #include "dp_ipa.h"
28 
29 #ifdef RXDMA_OPTIMIZATION
30 #ifndef RX_DATA_BUFFER_ALIGNMENT
31 #define RX_DATA_BUFFER_ALIGNMENT        128
32 #endif
33 #ifndef RX_MONITOR_BUFFER_ALIGNMENT
34 #define RX_MONITOR_BUFFER_ALIGNMENT     128
35 #endif
36 #else /* RXDMA_OPTIMIZATION */
37 #define RX_DATA_BUFFER_ALIGNMENT        4
38 #define RX_MONITOR_BUFFER_ALIGNMENT     4
39 #endif /* RXDMA_OPTIMIZATION */
40 
41 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
42 #define DP_WBM2SW_RBM(sw0_bm_id)	HAL_RX_BUF_RBM_SW1_BM(sw0_bm_id)
43 /* RBM value used for re-injecting defragmented packets into REO */
44 #define DP_DEFRAG_RBM(sw0_bm_id)	HAL_RX_BUF_RBM_SW3_BM(sw0_bm_id)
45 #endif
46 
47 /* Max buffers in invalid peer SG list */
48 #define DP_MAX_INVALID_BUFFERS 10
49 #ifdef DP_INVALID_PEER_ASSERT
50 #define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \
51 		do {                                \
52 			qdf_assert_always(!(head)); \
53 			qdf_assert_always(!(tail)); \
54 		} while (0)
55 #else
56 #define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */
57 #endif
58 
59 #define RX_BUFFER_RESERVATION   0
60 #ifdef BE_PKTLOG_SUPPORT
61 #define BUFFER_RESIDUE 1
62 #define RX_MON_MIN_HEAD_ROOM   64
63 #endif
64 
65 #define DP_DEFAULT_NOISEFLOOR	(-96)
66 
67 #define DP_RX_DESC_MAGIC 0xdec0de
68 
69 #define dp_rx_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX, params)
70 #define dp_rx_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX, params)
71 #define dp_rx_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX, params)
72 #define dp_rx_info(params...) \
73 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX, ## params)
74 #define dp_rx_info_rl(params...) \
75 	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX, ## params)
76 #define dp_rx_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX, params)
77 #define dp_rx_err_err(params...) \
78 	QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX_ERROR, params)
79 
80 /**
81  * enum dp_rx_desc_state
82  *
83  * @RX_DESC_REPLENISHED: rx desc replenished
84  * @RX_DESC_IN_FREELIST: rx desc in freelist
85  */
86 enum dp_rx_desc_state {
87 	RX_DESC_REPLENISHED,
88 	RX_DESC_IN_FREELIST,
89 };
90 
91 #ifndef QCA_HOST_MODE_WIFI_DISABLED
92 /**
93  * struct dp_rx_desc_dbg_info
94  *
95  * @freelist_caller: name of the function that put the
96  *  rx desc in the freelist
97  * @freelist_ts: timestamp when the rx desc was put in
98  *  the freelist
99  * @replenish_caller: name of the function that last
100  *  replenished the rx desc
101  * @replenish_ts: last replenish timestamp
102  * @prev_nbuf: previous nbuf info
103  * @prev_nbuf_data_addr: previous nbuf data address
104  */
105 struct dp_rx_desc_dbg_info {
106 	char freelist_caller[QDF_MEM_FUNC_NAME_SIZE];
107 	uint64_t freelist_ts;
108 	char replenish_caller[QDF_MEM_FUNC_NAME_SIZE];
109 	uint64_t replenish_ts;
110 	qdf_nbuf_t prev_nbuf;
111 	uint8_t *prev_nbuf_data_addr;
112 };
113 
114 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
115 
116 /**
117  * struct dp_rx_desc
118  *
119  * @nbuf:		VA of the "skb" posted
120  * @rx_buf_start:	VA of the original Rx buffer, before
121  *			movement of any skb->data pointer
122  * @paddr_buf_start:	PA of the original Rx buffer, before
123  *                      movement of any frag pointer
124  * @cookie:		index into the sw array which holds
125  *			the sw Rx descriptors
126  *			Cookie space is 21 bits:
127  *			lower 18 bits -- index
128  *			upper  3 bits -- pool_id
129  * @pool_id:		pool ID for which this descriptor was allocated.
130  *			Can only be used if there is no flow
131  *			steering
132  * @chip_id:		chip_id indicating the MLO chip id;
133  *			valid/used only in case of multi-chip MLO
134  * @reuse_nbuf:		VA of the "skb" which is being reused
135  * @magic:		magic value used for rx_desc sanity checks
136  * @nbuf_data_addr:	VA of nbuf data posted
137  * @dbg_info:		debug info recording the freelist/replenish history
138  * @in_use:		rx_desc is in use
139  * @unmapped:		used to mark rx_desc as unmapped if the corresponding
140  *			nbuf is already unmapped
141  * @in_err_state:	Nbuf sanity failed for this descriptor.
142  * @has_reuse_nbuf:	the nbuf associated with this desc is also saved in
143  *			reuse_nbuf field
144  */
145 struct dp_rx_desc {
146 	qdf_nbuf_t nbuf;
147 #ifdef WLAN_SUPPORT_PPEDS
148 	qdf_nbuf_t reuse_nbuf;
149 #endif
150 	uint8_t *rx_buf_start;
151 	qdf_dma_addr_t paddr_buf_start;
152 	uint32_t cookie;
153 	uint8_t	 pool_id;
154 	uint8_t chip_id;
155 #ifdef RX_DESC_DEBUG_CHECK
156 	uint32_t magic;
157 	uint8_t *nbuf_data_addr;
158 	struct dp_rx_desc_dbg_info *dbg_info;
159 #endif
160 	uint8_t	in_use:1,
161 		unmapped:1,
162 		in_err_state:1,
163 		has_reuse_nbuf:1;
164 };
165 
166 #ifndef QCA_HOST_MODE_WIFI_DISABLED
167 #ifdef ATH_RX_PRI_SAVE
168 #define DP_RX_TID_SAVE(_nbuf, _tid) \
169 	(qdf_nbuf_set_priority(_nbuf, _tid))
170 #else
171 #define DP_RX_TID_SAVE(_nbuf, _tid)
172 #endif
173 
174 /* RX Descriptor Multi Page memory alloc related */
175 #define DP_RX_DESC_OFFSET_NUM_BITS 8
176 #define DP_RX_DESC_PAGE_ID_NUM_BITS 8
177 #define DP_RX_DESC_POOL_ID_NUM_BITS 4
178 
179 #define DP_RX_DESC_PAGE_ID_SHIFT DP_RX_DESC_OFFSET_NUM_BITS
180 #define DP_RX_DESC_POOL_ID_SHIFT \
181 		(DP_RX_DESC_OFFSET_NUM_BITS + DP_RX_DESC_PAGE_ID_NUM_BITS)
182 #define RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK \
183 	(((1 << DP_RX_DESC_POOL_ID_NUM_BITS) - 1) << DP_RX_DESC_POOL_ID_SHIFT)
184 #define RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK	\
185 			(((1 << DP_RX_DESC_PAGE_ID_NUM_BITS) - 1) << \
186 			 DP_RX_DESC_PAGE_ID_SHIFT)
187 #define RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK \
188 			((1 << DP_RX_DESC_OFFSET_NUM_BITS) - 1)
189 #define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(_cookie)		\
190 	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK) >>	\
191 			DP_RX_DESC_POOL_ID_SHIFT)
192 #define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(_cookie)		\
193 	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK) >>	\
194 			DP_RX_DESC_PAGE_ID_SHIFT)
195 #define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(_cookie)		\
196 	((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK)
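 
/*
 * Worked example of the multi-page cookie layout above (illustrative
 * sketch only; the cookie value is arbitrary and not taken from any
 * real capture):
 *
 *	uint32_t cookie = 0x00021405;
 *	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
 *		// (0x00021405 & 0x000F0000) >> 16 = 2
 *	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
 *		// (0x00021405 & 0x0000FF00) >> 8  = 0x14 (page 20)
 *	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
 *		// 0x00021405 & 0x000000FF         = 0x05 (element 5)
 */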
197 
198 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
199 
200 #define RX_DESC_COOKIE_INDEX_SHIFT		0
201 #define RX_DESC_COOKIE_INDEX_MASK		0x3ffff /* 18 bits */
202 #define RX_DESC_COOKIE_POOL_ID_SHIFT		18
203 #define RX_DESC_COOKIE_POOL_ID_MASK		0x1c0000
204 
205 #define DP_RX_DESC_COOKIE_MAX	\
206 	(RX_DESC_COOKIE_INDEX_MASK | RX_DESC_COOKIE_POOL_ID_MASK)
207 
208 #define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie)		\
209 	(((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >>	\
210 			RX_DESC_COOKIE_POOL_ID_SHIFT)
211 
212 #define DP_RX_DESC_COOKIE_INDEX_GET(_cookie)		\
213 	(((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >>	\
214 			RX_DESC_COOKIE_INDEX_SHIFT)
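 
/*
 * Worked example of the 21-bit cookie layout above (illustrative sketch
 * only; the cookie value is arbitrary):
 *
 *	uint32_t cookie = 0x000C0005;
 *	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
 *		// (0x000C0005 & 0x1C0000) >> 18 = 3
 *	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
 *		// 0x000C0005 & 0x3FFFF          = 5
 */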
215 
216 #define dp_rx_add_to_free_desc_list(head, tail, new) \
217 	__dp_rx_add_to_free_desc_list(head, tail, new, __func__)
218 
219 #define dp_rx_add_to_free_desc_list_reuse(head, tail, new) \
220 	__dp_rx_add_to_free_desc_list_reuse(head, tail, new, __func__)
221 
222 #define dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
223 				num_buffers, desc_list, tail, req_only) \
224 	__dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
225 				  num_buffers, desc_list, tail, req_only, \
226 				  __func__)
227 
228 #ifdef WLAN_SUPPORT_RX_FISA
229 /**
230  * dp_rx_set_hdr_pad() - set l3 padding in nbuf cb
231  * @nbuf: pkt skb pointer
232  * @l3_padding: l3 padding
233  *
234  * Return: None
235  */
236 static inline
237 void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
238 {
239 	QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
240 }
241 #else
242 static inline
243 void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
244 {
245 }
246 #endif
247 
248 #ifdef DP_RX_SPECIAL_FRAME_NEED
249 /**
250  * dp_rx_is_special_frame() - check whether the RX frame is a special frame
251  *
252  * @nbuf: RX skb pointer
253  * @frame_mask: the mask of special frame types of interest
254  *
255  * Check whether the RX frame type matches the given mask
256  *
257  * Return: true - special frame matched, false - not
258  */
259 static inline
260 bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
261 {
262 	if (((frame_mask & FRAME_MASK_IPV4_ARP) &&
263 	     qdf_nbuf_is_ipv4_arp_pkt(nbuf)) ||
264 	    ((frame_mask & FRAME_MASK_IPV4_DHCP) &&
265 	     qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) ||
266 	    ((frame_mask & FRAME_MASK_IPV4_EAPOL) &&
267 	     qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) ||
268 	    ((frame_mask & FRAME_MASK_IPV6_DHCP) &&
269 	     qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))
270 		return true;
271 
272 	return false;
273 }
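 
/*
 * Usage sketch for dp_rx_is_special_frame() (illustrative only, not part
 * of the rx fast path): build a mask of the frame types of interest and
 * test an nbuf against it. 'nbuf' is assumed to come from the caller.
 *
 *	uint32_t mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_EAPOL;
 *
 *	if (dp_rx_is_special_frame(nbuf, mask)) {
 *		// e.g. deliver to the stack even when the peer/vdev state
 *		// would otherwise cause the frame to be dropped
 *	}
 */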
274 
275 /**
276  * dp_rx_deliver_special_frame() - Deliver the RX special frame to stack
277  *				   if it matches the mask
278  *
279  * @soc: Datapath soc handler
280  * @peer: pointer to DP peer
281  * @nbuf: pointer to the skb of RX frame
282  * @frame_mask: the mask for special frame needed
283  * @rx_tlv_hdr: start of rx tlv header
284  *
285  * note: Msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and
286  * single nbuf is expected.
287  *
288  * Return: true - nbuf has been delivered to stack, false - not.
289  */
290 bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_txrx_peer *peer,
291 				 qdf_nbuf_t nbuf, uint32_t frame_mask,
292 				 uint8_t *rx_tlv_hdr);
293 #else
294 static inline
295 bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
296 {
297 	return false;
298 }
299 
300 static inline
301 bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_txrx_peer *peer,
302 				 qdf_nbuf_t nbuf, uint32_t frame_mask,
303 				 uint8_t *rx_tlv_hdr)
304 {
305 	return false;
306 }
307 #endif
308 
309 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
310 /**
311  * dp_rx_data_is_specific() - Used to exclude specific frames
312  *                            that are not suitable for getting rx
313  *                            stats like rate, mcs, nss, etc.
314  *
315  * @hal_soc_hdl: soc handler
316  * @rx_tlv_hdr: rx tlv header
317  * @nbuf: RX skb pointer
318  *
319  * Return: true - a specific frame not suitable
320  *                for getting rx stats from it.
321  *         false - a common frame suitable for
322  *                 getting rx stats from it.
323  */
324 static inline
325 bool dp_rx_data_is_specific(hal_soc_handle_t hal_soc_hdl,
326 			    uint8_t *rx_tlv_hdr,
327 			    qdf_nbuf_t nbuf)
328 {
329 	if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf)))
330 		return true;
331 
332 	if (!hal_rx_tlv_first_mpdu_get(hal_soc_hdl, rx_tlv_hdr))
333 		return true;
334 
335 	if (!hal_rx_msdu_end_first_msdu_get(hal_soc_hdl, rx_tlv_hdr))
336 		return true;
337 
338 	/* ARP and EAPOL are neither IPV6 ETH nor IPV4 ETH at the L3 level */
339 	if (qdf_likely(hal_rx_tlv_l3_type_get(hal_soc_hdl, rx_tlv_hdr) ==
340 	    QDF_NBUF_TRAC_IPV4_ETH_TYPE)) {
341 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
342 			return true;
343 	} else if (qdf_likely(hal_rx_tlv_l3_type_get(hal_soc_hdl, rx_tlv_hdr) ==
344 		   QDF_NBUF_TRAC_IPV6_ETH_TYPE)) {
345 		if (qdf_nbuf_is_ipv6_dhcp_pkt(nbuf))
346 			return true;
347 	} else {
348 		return true;
349 	}
350 	return false;
351 }
352 #else
353 static inline
354 bool dp_rx_data_is_specific(hal_soc_handle_t hal_soc_hdl,
355 			    uint8_t *rx_tlv_hdr,
356 			    qdf_nbuf_t nbuf)
357 
358 {
359 	/*
360 	 * default return is true to make sure that rx stats
361 	 * will not be handled when this feature is disabled
362 	 */
363 	return true;
364 }
365 #endif /* FEATURE_RX_LINKSPEED_ROAM_TRIGGER */
366 
367 #ifndef QCA_HOST_MODE_WIFI_DISABLED
368 #ifdef DP_RX_DISABLE_NDI_MDNS_FORWARDING
369 static inline
370 bool dp_rx_check_ndi_mdns_fwding(struct dp_txrx_peer *ta_txrx_peer,
371 				 qdf_nbuf_t nbuf, uint8_t link_id)
372 {
373 	if (ta_txrx_peer->vdev->opmode == wlan_op_mode_ndi &&
374 	    qdf_nbuf_is_ipv6_mdns_pkt(nbuf)) {
375 		DP_PEER_PER_PKT_STATS_INC(ta_txrx_peer,
376 					  rx.intra_bss.mdns_no_fwd,
377 					  1, link_id);
378 		return false;
379 	}
380 	return true;
381 }
382 #else
383 static inline
384 bool dp_rx_check_ndi_mdns_fwding(struct dp_txrx_peer *ta_txrx_peer,
385 				 qdf_nbuf_t nbuf, uint8_t link_id)
386 {
387 	return true;
388 }
389 #endif
390 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
391 
392 /* DOC: Offset to obtain LLC hdr
393  *
394  * In the case of a WiFi parse error,
395  * to reach the LLC header from the beginning
396  * of the VLAN tag we need to skip 8 bytes:
397  * VLAN tag (4) + length (2) + length added
398  * by HW (2) = 8 bytes.
399  */
400 #define DP_SKIP_VLAN		8
401 
402 #ifndef QCA_HOST_MODE_WIFI_DISABLED
403 
404 /**
405  * struct dp_rx_cached_buf - rx cached buffer
406  * @node: linked list node
407  * @buf: skb buffer
408  */
409 struct dp_rx_cached_buf {
410 	qdf_list_node_t node;
411 	qdf_nbuf_t buf;
412 };
413 
414 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
415 
416 /**
417  * dp_rx_xor_block() - xor block of data
418  * @b: destination data block
419  * @a: source data block
420  * @len: length of the data to process
421  *
422  * Return: None
423  */
424 static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len)
425 {
426 	qdf_size_t i;
427 
428 	for (i = 0; i < len; i++)
429 		b[i] ^= a[i];
430 }
431 
432 /**
433  * dp_rx_rotl() - rotate the bits left
434  * @val: unsigned integer input value
435  * @bits: number of bits
436  *
437  * Return: Integer rotated left by 'bits' bits
438  */
439 static inline uint32_t dp_rx_rotl(uint32_t val, int bits)
440 {
441 	return (val << bits) | (val >> (32 - bits));
442 }
443 
444 /**
445  * dp_rx_rotr() - rotate the bits right
446  * @val: unsigned integer input value
447  * @bits: number of bits
448  *
449  * Return: Integer rotated right by 'bits' bits
450  */
451 static inline uint32_t dp_rx_rotr(uint32_t val, int bits)
452 {
453 	return (val >> bits) | (val << (32 - bits));
454 }
455 
456 /**
457  * dp_set_rx_queue() - set queue_mapping in skb
458  * @nbuf: skb
459  * @queue_id: rx queue_id
460  *
461  * Return: void
462  */
463 #ifdef QCA_OL_RX_MULTIQ_SUPPORT
464 static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
465 {
466 	qdf_nbuf_record_rx_queue(nbuf, queue_id);
467 	return;
468 }
469 #else
470 static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
471 {
472 }
473 #endif
474 
475 /**
476  * dp_rx_xswap() - swap the adjacent bytes in each 16-bit half of the value
477  * @val: unsigned integer input value
478  *
479  * Return: Integer with the bytes swapped
480  */
481 static inline uint32_t dp_rx_xswap(uint32_t val)
482 {
483 	return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
484 }
485 
486 /**
487  * dp_rx_get_le32_split() - get little endian 32 bits split
488  * @b0: byte 0
489  * @b1: byte 1
490  * @b2: byte 2
491  * @b3: byte 3
492  *
493  * Return: Integer with split little endian 32 bits
494  */
495 static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2,
496 					uint8_t b3)
497 {
498 	return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
499 }
500 
501 /**
502  * dp_rx_get_le32() - get little endian 32 bits
503  * @p: pointer to the source bytes holding a little-endian 32-bit value
504  *
505  * Return: Integer with little endian 32 bits
506  */
507 static inline uint32_t dp_rx_get_le32(const uint8_t *p)
508 {
509 	return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]);
510 }
511 
512 /**
513  * dp_rx_put_le32() - put little endian 32 bits
514  * @p: destination char array
515  * @v: source 32-bit integer
516  *
517  * Return: None
518  */
519 static inline void dp_rx_put_le32(uint8_t *p, uint32_t v)
520 {
521 	p[0] = (v) & 0xff;
522 	p[1] = (v >> 8) & 0xff;
523 	p[2] = (v >> 16) & 0xff;
524 	p[3] = (v >> 24) & 0xff;
525 }
526 
527 /* One round of the Michael MIC block function */
528 #define dp_rx_michael_block(l, r)	\
529 	do {					\
530 		r ^= dp_rx_rotl(l, 17);	\
531 		l += r;				\
532 		r ^= dp_rx_xswap(l);		\
533 		l += r;				\
534 		r ^= dp_rx_rotl(l, 3);	\
535 		l += r;				\
536 		r ^= dp_rx_rotr(l, 2);	\
537 		l += r;				\
538 	} while (0)
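 
/*
 * Simplified sketch of how the helpers above combine into a Michael MIC
 * style computation over 4-byte aligned data (illustrative only; key
 * setup inputs 'key', 'data', 'data_len' and 'mic' are assumed from the
 * caller, and the final padding rules are omitted here):
 *
 *	uint32_t l = dp_rx_get_le32(key);       // left half of the MIC key
 *	uint32_t r = dp_rx_get_le32(key + 4);   // right half of the MIC key
 *	qdf_size_t i;
 *
 *	for (i = 0; i + 4 <= data_len; i += 4) {
 *		l ^= dp_rx_get_le32(data + i);  // mix in the next 32-bit word
 *		dp_rx_michael_block(l, r);      // one block round
 *	}
 *	dp_rx_put_le32(mic, l);                 // serialize the result
 *	dp_rx_put_le32(mic + 4, r);
 */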
539 
540 /**
541  * union dp_rx_desc_list_elem_t - element of the rx descriptor free list
542  *
543  * @next: Next pointer to form free list
544  * @rx_desc: DP Rx descriptor
545  */
546 union dp_rx_desc_list_elem_t {
547 	union dp_rx_desc_list_elem_t *next;
548 	struct dp_rx_desc rx_desc;
549 };
550 
551 #ifdef RX_DESC_MULTI_PAGE_ALLOC
552 /**
553  * dp_rx_desc_find() - find dp rx descriptor from page ID and offset
554  * @page_id: Page ID
555  * @offset: Offset of the descriptor element
556  * @rx_pool: RX pool
557  *
558  * Return: RX descriptor element
559  */
560 union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
561 					      struct rx_desc_pool *rx_pool);
562 
563 static inline
564 struct dp_rx_desc *dp_get_rx_desc_from_cookie(struct dp_soc *soc,
565 					      struct rx_desc_pool *pool,
566 					      uint32_t cookie)
567 {
568 	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
569 	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
570 	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
571 	struct rx_desc_pool *rx_desc_pool;
572 	union dp_rx_desc_list_elem_t *rx_desc_elem;
573 
574 	if (qdf_unlikely(pool_id >= MAX_PDEV_CNT))
575 		return NULL;
576 
577 	rx_desc_pool = &pool[pool_id];
578 	rx_desc_elem = (union dp_rx_desc_list_elem_t *)
579 		(rx_desc_pool->desc_pages.cacheable_pages[page_id] +
580 		rx_desc_pool->elem_size * offset);
581 
582 	return &rx_desc_elem->rx_desc;
583 }
584 
585 static inline
586 struct dp_rx_desc *dp_get_rx_mon_status_desc_from_cookie(struct dp_soc *soc,
587 							 struct rx_desc_pool *pool,
588 							 uint32_t cookie)
589 {
590 	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
591 	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
592 	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
593 	struct rx_desc_pool *rx_desc_pool;
594 	union dp_rx_desc_list_elem_t *rx_desc_elem;
595 
596 	if (qdf_unlikely(pool_id >= NUM_RXDMA_RINGS_PER_PDEV))
597 		return NULL;
598 
599 	rx_desc_pool = &pool[pool_id];
600 	rx_desc_elem = (union dp_rx_desc_list_elem_t *)
601 		(rx_desc_pool->desc_pages.cacheable_pages[page_id] +
602 		rx_desc_pool->elem_size * offset);
603 
604 	return &rx_desc_elem->rx_desc;
605 }
606 
607 /**
608  * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
609  *			 the Rx descriptor on Rx DMA source ring buffer
610  * @soc: core txrx main context
611  * @cookie: cookie used to lookup virtual address
612  *
613  * Return: Pointer to the Rx descriptor
614  */
615 static inline
616 struct dp_rx_desc *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc,
617 					       uint32_t cookie)
618 {
619 	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_buf[0], cookie);
620 }
621 
622 /**
623  * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
624  *			 the Rx descriptor on monitor ring buffer
625  * @soc: core txrx main context
626  * @cookie: cookie used to lookup virtual address
627  *
628  * Return: Pointer to the Rx descriptor
629  */
630 static inline
631 struct dp_rx_desc *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc,
632 					     uint32_t cookie)
633 {
634 	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_mon[0], cookie);
635 }
636 
637 /**
638  * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
639  *			 the Rx descriptor on monitor status ring buffer
640  * @soc: core txrx main context
641  * @cookie: cookie used to lookup virtual address
642  *
643  * Return: Pointer to the Rx descriptor
644  */
645 static inline
646 struct dp_rx_desc *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc,
647 						uint32_t cookie)
648 {
649 	return dp_get_rx_mon_status_desc_from_cookie(soc,
650 						     &soc->rx_desc_status[0],
651 						     cookie);
652 }
653 #else
654 
655 void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
656 			  uint32_t pool_size,
657 			  struct rx_desc_pool *rx_desc_pool);
658 
659 /**
660  * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
661  *			 the Rx descriptor on Rx DMA source ring buffer
662  * @soc: core txrx main context
663  * @cookie: cookie used to lookup virtual address
664  *
665  * Return: void *: Virtual Address of the Rx descriptor
666  */
667 static inline
668 void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie)
669 {
670 	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
671 	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
672 	struct rx_desc_pool *rx_desc_pool;
673 
674 	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
675 		return NULL;
676 
677 	rx_desc_pool = &soc->rx_desc_buf[pool_id];
678 
679 	if (qdf_unlikely(index >= rx_desc_pool->pool_size))
680 		return NULL;
681 
682 	return &rx_desc_pool->array[index].rx_desc;
683 }
684 
685 /**
686  * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
687  *			 the Rx descriptor on monitor ring buffer
688  * @soc: core txrx main context
689  * @cookie: cookie used to lookup virtual address
690  *
691  * Return: void *: Virtual Address of the Rx descriptor
692  */
693 static inline
694 void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie)
695 {
696 	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
697 	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
698 	/* TODO */
699 	/* Add sanity for pool_id & index */
700 	return &(soc->rx_desc_mon[pool_id].array[index].rx_desc);
701 }
702 
703 /**
704  * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
705  *			 the Rx descriptor on monitor status ring buffer
706  * @soc: core txrx main context
707  * @cookie: cookie used to lookup virtual address
708  *
709  * Return: void *: Virtual Address of the Rx descriptor
710  */
711 static inline
712 void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
713 {
714 	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
715 	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
716 	/* TODO */
717 	/* Add sanity for pool_id & index */
718 	return &(soc->rx_desc_status[pool_id].array[index].rx_desc);
719 }
720 #endif /* RX_DESC_MULTI_PAGE_ALLOC */
721 
722 #ifndef QCA_HOST_MODE_WIFI_DISABLED
723 
724 static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
725 {
726 	return vdev->ap_bridge_enabled;
727 }
728 
729 #ifdef DP_RX_DESC_COOKIE_INVALIDATE
730 static inline QDF_STATUS
731 dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc)
732 {
733 	if (qdf_unlikely(HAL_RX_REO_BUF_COOKIE_INVALID_GET(ring_desc)))
734 		return QDF_STATUS_E_FAILURE;
735 
736 	HAL_RX_REO_BUF_COOKIE_INVALID_SET(ring_desc);
737 	return QDF_STATUS_SUCCESS;
738 }
739 
740 /**
741  * dp_rx_cookie_reset_invalid_bit() - Reset the invalid bit of the cookie
742  *  field in ring descriptor
743  * @ring_desc: ring descriptor
744  *
745  * Return: None
746  */
747 static inline void
748 dp_rx_cookie_reset_invalid_bit(hal_ring_desc_t ring_desc)
749 {
750 	HAL_RX_REO_BUF_COOKIE_INVALID_RESET(ring_desc);
751 }
752 #else
753 static inline QDF_STATUS
754 dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc)
755 {
756 	return QDF_STATUS_SUCCESS;
757 }
758 
759 static inline void
760 dp_rx_cookie_reset_invalid_bit(hal_ring_desc_t ring_desc)
761 {
762 }
763 #endif
764 
765 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
766 
767 #if defined(RX_DESC_MULTI_PAGE_ALLOC) && \
768 	defined(DP_WAR_VALIDATE_RX_ERR_MSDU_COOKIE)
769 /**
770  * dp_rx_is_sw_cookie_valid() - check whether the SW cookie is valid
771  * @soc: dp soc ref
772  * @cookie: Rx buf SW cookie value
773  *
774  * Return: true if cookie is valid else false
775  */
776 static inline bool dp_rx_is_sw_cookie_valid(struct dp_soc *soc,
777 					    uint32_t cookie)
778 {
779 	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
780 	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
781 	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
782 	struct rx_desc_pool *rx_desc_pool;
783 
784 	if (qdf_unlikely(pool_id >= MAX_PDEV_CNT))
785 		goto fail;
786 
787 	rx_desc_pool = &soc->rx_desc_buf[pool_id];
788 
789 	if (page_id >= rx_desc_pool->desc_pages.num_pages ||
790 	    offset >= rx_desc_pool->desc_pages.num_element_per_page)
791 		goto fail;
792 
793 	return true;
794 
795 fail:
796 	DP_STATS_INC(soc, rx.err.invalid_cookie, 1);
797 	return false;
798 }
799 #else
800 /**
801  * dp_rx_is_sw_cookie_valid() - check whether the SW cookie is valid
802  * @soc: dp soc ref
803  * @cookie: Rx buf SW cookie value
804  *
805  * When multi page alloc is disabled, SW cookie validity is
806  * checked while fetching the Rx descriptor, so there is no need to check here
807  *
808  * Return: true if cookie is valid else false
809  */
810 static inline bool dp_rx_is_sw_cookie_valid(struct dp_soc *soc,
811 					    uint32_t cookie)
812 {
813 	return true;
814 }
815 #endif
816 
817 /**
818  * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
819  *					rx descriptor pool
820  * @rx_desc_pool: rx descriptor pool pointer
821  *
822  * Return: QDF_STATUS  QDF_STATUS_SUCCESS
823  *		       QDF_STATUS_E_NOMEM
824  */
825 QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool);
826 
827 /**
828  * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
829  *			     descriptors
830  * @soc: core txrx main context
831  * @pool_size: number of rx descriptors (size of the pool)
832  * @rx_desc_pool: rx descriptor pool pointer
833  *
834  * Return: QDF_STATUS  QDF_STATUS_SUCCESS
835  *		       QDF_STATUS_E_NOMEM
836  *		       QDF_STATUS_E_FAULT
837  */
838 QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
839 				 uint32_t pool_size,
840 				 struct rx_desc_pool *rx_desc_pool);
841 
842 /**
843  * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool
844  * @soc: core txrx main context
845  * @pool_id: pool_id which is one of 3 mac_ids
846  * @pool_size: size of the rx descriptor pool
847  * @rx_desc_pool: rx descriptor pool pointer
848  *
849  * Convert the pool of memory into a list of rx descriptors and create
850  * locks to access this list of rx descriptors.
851  *
852  */
853 void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
854 			  uint32_t pool_size,
855 			  struct rx_desc_pool *rx_desc_pool);
856 
857 /**
858  * dp_rx_add_desc_list_to_free_list() - append unused desc_list back to
859  *					freelist.
860  * @soc: core txrx main context
861  * @local_desc_list: local desc list provided by the caller
862  * @tail: attach the point to last desc of local desc list
863  * @pool_id: pool_id which is one of 3 mac_ids
864  * @rx_desc_pool: rx descriptor pool pointer
865  */
866 void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
867 				union dp_rx_desc_list_elem_t **local_desc_list,
868 				union dp_rx_desc_list_elem_t **tail,
869 				uint16_t pool_id,
870 				struct rx_desc_pool *rx_desc_pool);
871 
872 /**
873  * dp_rx_get_free_desc_list() - provide a list of descriptors from
874  *				the free rx desc pool.
875  * @soc: core txrx main context
876  * @pool_id: pool_id which is one of 3 mac_ids
877  * @rx_desc_pool: rx descriptor pool pointer
878  * @num_descs: number of descs requested from freelist
879  * @desc_list: attach the descs to this list (output parameter)
880  * @tail: attach the point to last desc of free list (output parameter)
881  *
882  * Return: number of descs allocated from free list.
883  */
884 uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
885 				struct rx_desc_pool *rx_desc_pool,
886 				uint16_t num_descs,
887 				union dp_rx_desc_list_elem_t **desc_list,
888 				union dp_rx_desc_list_elem_t **tail);
889 
890 /**
891  * dp_rx_pdev_desc_pool_alloc() -  allocate memory for software rx descriptor
892  *				   pool
893  * @pdev: core txrx pdev context
894  *
895  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
896  *			QDF_STATUS_E_NOMEM
897  */
898 QDF_STATUS dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev);
899 
900 /**
901  * dp_rx_pdev_desc_pool_free() - free software rx descriptor pool
902  * @pdev: core txrx pdev context
903  */
904 void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev);
905 
906 /**
907  * dp_rx_pdev_desc_pool_init() - initialize software rx descriptors
908  * @pdev: core txrx pdev context
909  *
910  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
911  *			QDF_STATUS_E_NOMEM
912  */
913 QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev);
914 
915 /**
916  * dp_rx_pdev_desc_pool_deinit() - de-initialize software rx descriptor pools
917  * @pdev: core txrx pdev context
918  *
919  * This function resets the freelist of rx descriptors and destroys locks
920  * associated with this list of descriptors.
921  */
922 void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev);
923 
924 void dp_rx_desc_pool_deinit(struct dp_soc *soc,
925 			    struct rx_desc_pool *rx_desc_pool,
926 			    uint32_t pool_id);
927 
928 QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);
929 
930 /**
931  * dp_rx_pdev_buffers_alloc() - Allocate nbufs (skbs) and replenish RxDMA ring
932  * @pdev: core txrx pdev context
933  *
934  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
935  *			QDF_STATUS_E_NOMEM
936  */
937 QDF_STATUS dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev);
938 
939 /**
940  * dp_rx_pdev_buffers_free() - Free nbufs (skbs)
941  * @pdev: core txrx pdev context
942  */
943 void dp_rx_pdev_buffers_free(struct dp_pdev *pdev);
944 
945 void dp_rx_pdev_detach(struct dp_pdev *pdev);
946 
947 /**
948  * dp_print_napi_stats() - NAPI stats
949  * @soc: soc handle
950  */
951 void dp_print_napi_stats(struct dp_soc *soc);
952 
953 /**
954  * dp_rx_vdev_detach() - detach vdev from dp rx
955  * @vdev: virtual device instance
956  *
957  * Return: QDF_STATUS_SUCCESS: success
958  *         QDF_STATUS_E_RESOURCES: Error return
959  */
960 QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev);
961 
962 #ifndef QCA_HOST_MODE_WIFI_DISABLED
963 
964 uint32_t
965 dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
966 	      uint8_t reo_ring_num,
967 	      uint32_t quota);
968 
969 /**
970  * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
971  *		     multiple nbufs.
972  * @soc: core txrx main context
973  * @nbuf: pointer to the first msdu of an amsdu.
974  *
975  * This function implements the creation of RX frag_list for cases
976  * where an MSDU is spread across multiple nbufs.
977  *
978  * Return: returns the head nbuf which contains complete frag_list.
979  */
980 qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf);
981 
982 /**
983  * dp_rx_is_sg_supported() - SG packets processing supported or not.
984  *
985  * Return: returns true when processing is supported else false.
986  */
987 bool dp_rx_is_sg_supported(void);
988 
989 /**
990  * dp_rx_desc_nbuf_and_pool_free() - free the sw rx desc pool called during
991  *				     de-initialization of wifi module.
992  *
993  * @soc: core txrx main context
994  * @pool_id: pool_id which is one of 3 mac_ids
995  * @rx_desc_pool: rx descriptor pool pointer
996  *
997  * Return: None
998  */
999 void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
1000 				   struct rx_desc_pool *rx_desc_pool);
1001 
1002 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1003 
1004 /**
1005  * dp_rx_desc_nbuf_free() - free the sw rx desc nbufs called during
1006  *			    de-initialization of wifi module.
1007  *
1008  * @soc: core txrx main context
1009  * @rx_desc_pool: rx descriptor pool pointer
1010  * @is_mon_pool: true if this is a monitor pool
1011  *
1012  * Return: None
1013  */
1014 void dp_rx_desc_nbuf_free(struct dp_soc *soc,
1015 			  struct rx_desc_pool *rx_desc_pool,
1016 			  bool is_mon_pool);
1017 
1018 #ifdef DP_RX_MON_MEM_FRAG
1019 /**
1020  * dp_rx_desc_frag_free() - free the sw rx desc frag called during
1021  *			    de-initialization of wifi module.
1022  *
1023  * @soc: core txrx main context
1024  * @rx_desc_pool: rx descriptor pool pointer
1025  *
1026  * Return: None
1027  */
1028 void dp_rx_desc_frag_free(struct dp_soc *soc,
1029 			  struct rx_desc_pool *rx_desc_pool);
1030 #else
1031 static inline
1032 void dp_rx_desc_frag_free(struct dp_soc *soc,
1033 			  struct rx_desc_pool *rx_desc_pool)
1034 {
1035 }
1036 #endif
1037 /**
1038  * dp_rx_desc_pool_free() - free the sw rx desc array called during
1039  *			    de-initialization of wifi module.
1040  *
1041  * @soc: core txrx main context
1042  * @rx_desc_pool: rx descriptor pool pointer
1043  *
1044  * Return: None
1045  */
1046 void dp_rx_desc_pool_free(struct dp_soc *soc,
1047 			  struct rx_desc_pool *rx_desc_pool);
1048 
1049 /**
1050  * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
1051  *				pkts to RAW mode simulation to
1052  *				decapsulate the pkt.
1053  * @vdev: vdev on which RAW mode is enabled
1054  * @nbuf_list: list of RAW pkts to process
1055  * @peer: peer object from which the pkt is rx
1056  * @link_id: link Id on which the packet is received
1057  *
1058  * Return: void
1059  */
1060 void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
1061 		       struct dp_txrx_peer *peer, uint8_t link_id);
1062 
1063 #ifdef RX_DESC_LOGGING
1064 /**
1065  * dp_rx_desc_alloc_dbg_info() - Alloc memory for rx descriptor debug
1066  *  structure
1067  * @rx_desc: rx descriptor pointer
1068  *
1069  * Return: None
1070  */
1071 static inline
1072 void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc)
1073 {
1074 	rx_desc->dbg_info = qdf_mem_malloc(sizeof(struct dp_rx_desc_dbg_info));
1075 }
1076 
1077 /**
1078  * dp_rx_desc_free_dbg_info() - Free rx descriptor debug
1079  *  structure memory
1080  * @rx_desc: rx descriptor pointer
1081  *
1082  * Return: None
1083  */
1084 static inline
1085 void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc)
1086 {
1087 	qdf_mem_free(rx_desc->dbg_info);
1088 }
1089 
1090 /**
1091  * dp_rx_desc_update_dbg_info() - Update rx descriptor debug info
1092  *  structure memory
1093  * @rx_desc: rx descriptor pointer
1094  * @func_name: name of calling function
1095  * @flag:
1096  *
1097  * Return: None
1098  */
1099 static
1100 void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc,
1101 				const char *func_name, uint8_t flag)
1102 {
1103 	struct dp_rx_desc_dbg_info *info = rx_desc->dbg_info;
1104 
1105 	if (!info)
1106 		return;
1107 
1108 	if (flag == RX_DESC_REPLENISHED) {
1109 		qdf_str_lcopy(info->replenish_caller, func_name,
1110 			      QDF_MEM_FUNC_NAME_SIZE);
1111 		info->replenish_ts = qdf_get_log_timestamp();
1112 	} else {
1113 		qdf_str_lcopy(info->freelist_caller, func_name,
1114 			      QDF_MEM_FUNC_NAME_SIZE);
1115 		info->freelist_ts = qdf_get_log_timestamp();
1116 		info->prev_nbuf = rx_desc->nbuf;
1117 		info->prev_nbuf_data_addr = rx_desc->nbuf_data_addr;
1118 		rx_desc->nbuf_data_addr = NULL;
1119 	}
1120 }
1121 #else
1122 
1123 static inline
1124 void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc)
1125 {
1126 }
1127 
1128 static inline
1129 void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc)
1130 {
1131 }
1132 
1133 static inline
1134 void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc,
1135 				const char *func_name, uint8_t flag)
1136 {
1137 }
1138 #endif /* RX_DESC_LOGGING */
1139 
1140 /**
1141  * __dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list
1142  *
1143  * @head: pointer to the head of local free list
1144  * @tail: pointer to the tail of local free list
1145  * @new: new descriptor that is added to the free list
1146  * @func_name: caller func name
1147  *
1148  * Return: void:
1149  */
1150 static inline
1151 void __dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head,
1152 				 union dp_rx_desc_list_elem_t **tail,
1153 				 struct dp_rx_desc *new, const char *func_name)
1154 {
1155 	qdf_assert(head && new);
1156 
1157 	dp_rx_desc_update_dbg_info(new, func_name, RX_DESC_IN_FREELIST);
1158 
1159 	new->nbuf = NULL;
1160 	new->in_use = 0;
1161 
1162 	((union dp_rx_desc_list_elem_t *)new)->next = *head;
1163 	*head = (union dp_rx_desc_list_elem_t *)new;
1164 	/* reset tail if head->next is NULL */
1165 	if (!*tail || !(*head)->next)
1166 		*tail = *head;
1167 }
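 
/*
 * Usage sketch (illustrative only): collect rx descriptors reaped from a
 * ring onto a local free list and return them to the pool in one call.
 * 'soc', 'pool_id', 'rx_desc_pool' and 'rx_desc' are assumed from the
 * caller's context.
 *
 *	union dp_rx_desc_list_elem_t *head = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *
 *	// for each reaped descriptor 'rx_desc':
 *	dp_rx_add_to_free_desc_list(&head, &tail, rx_desc);
 *
 *	// hand the whole local list back to the pool for 'pool_id'
 *	dp_rx_add_desc_list_to_free_list(soc, &head, &tail,
 *					 pool_id, rx_desc_pool);
 */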
1168 
1169 /**
1170  * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
1171  * @soc: DP SOC handle
1172  * @nbuf: network buffer
1173  * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
1174  * pool_id have the same mapping)
1175  *
1176  * Return: integer type
1177  */
1178 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
1179 				   uint8_t mac_id);
1180 
1181 /**
1182  * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
1183  * @soc: DP SOC handle
1184  * @mpdu: mpdu for which peer is invalid
1185  * @mpdu_done: if an mpdu is completed
1186  * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
1187  * pool_id have the same mapping)
1188  *
1189  * Return: void
1190  */
1191 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
1192 		qdf_nbuf_t mpdu, bool mpdu_done, uint8_t mac_id);
1193 
1194 #define DP_RX_HEAD_APPEND(head, elem) \
1195 	do {                                                            \
1196 		qdf_nbuf_set_next((elem), (head));			\
1197 		(head) = (elem);                                        \
1198 	} while (0)
1199 
1200 
1201 #define DP_RX_LIST_APPEND(head, tail, elem) \
1202 	do {                                                          \
1203 		if (!(head)) {                                        \
1204 			(head) = (elem);                              \
1205 			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) = 1;\
1206 		} else {                                              \
1207 			qdf_nbuf_set_next((tail), (elem));            \
1208 			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head)++;  \
1209 		}                                                     \
1210 		(tail) = (elem);                                      \
1211 		qdf_nbuf_set_next((tail), NULL);                      \
1212 	} while (0)
1213 
1214 #define DP_RX_MERGE_TWO_LIST(phead, ptail, chead, ctail) \
1215 	do {                                                          \
1216 		if (!(phead)) {                                       \
1217 			(phead) = (chead);                            \
1218 		} else {                                              \
1219 			qdf_nbuf_set_next((ptail), (chead));          \
1220 			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(phead) += \
1221 			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(chead);   \
1222 		}                                                     \
1223 		(ptail) = (ctail);                                    \
1224 		qdf_nbuf_set_next((ptail), NULL);                     \
1225 	} while (0)
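 
/*
 * Usage sketch for the list helpers above (illustrative only): build up a
 * singly linked nbuf list while walking a ring, then merge it into a
 * pending list. 'deliver_*', 'pending_*' and get_next_rx_nbuf() are
 * hypothetical locals/helpers, not part of this API.
 *
 *	qdf_nbuf_t deliver_head = NULL;
 *	qdf_nbuf_t deliver_tail = NULL;
 *
 *	while ((nbuf = get_next_rx_nbuf()))
 *		DP_RX_LIST_APPEND(deliver_head, deliver_tail, nbuf);
 *
 *	DP_RX_MERGE_TWO_LIST(pending_head, pending_tail,
 *			     deliver_head, deliver_tail);
 */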
1226 
1227 #if defined(QCA_PADDR_CHECK_ON_3TH_PLATFORM)
1228 /*
1229  * on some third-party platform, the memory below 0x2000
1230  * is reserved for target use, so any memory allocated in this
1231  * region should not be used by host
1232  */
1233 #define MAX_RETRY 50
1234 #define DP_PHY_ADDR_RESERVED	0x2000
1235 #elif defined(BUILD_X86)
1236 /*
1237  * in M2M emulation platforms (x86) the memory below 0x50000000
1238  * is reserved for target use, so any memory allocated in this
1239  * region should not be used by host
1240  */
1241 #define MAX_RETRY 100
1242 #define DP_PHY_ADDR_RESERVED	0x50000000
1243 #endif
1244 
1245 #if defined(QCA_PADDR_CHECK_ON_3TH_PLATFORM) || defined(BUILD_X86)
1246 /**
1247  * dp_check_paddr() - check if current phy address is valid or not
1248  * @dp_soc: core txrx main context
1249  * @rx_netbuf: skb buffer
1250  * @paddr: physical address
1251  * @rx_desc_pool: struct of rx descriptor pool
1252  * Check if the physical address of nbuf->data is less
1253  * than DP_PHY_ADDR_RESERVED; if so, queue the nbuf to the invalid list and
1254  * try allocating a new nbuf, retrying up to MAX_RETRY times.
1255  *
1256  * This is a temp WAR till we fix it properly.
1257  *
1258  * Return: success or failure.
1259  */
1260 static inline
1261 int dp_check_paddr(struct dp_soc *dp_soc,
1262 		   qdf_nbuf_t *rx_netbuf,
1263 		   qdf_dma_addr_t *paddr,
1264 		   struct rx_desc_pool *rx_desc_pool)
1265 {
1266 	uint32_t nbuf_retry = 0;
1267 	int32_t ret;
1268 
1269 	if (qdf_likely(*paddr > DP_PHY_ADDR_RESERVED))
1270 		return QDF_STATUS_SUCCESS;
1271 
1272 	do {
1273 		dp_debug("invalid phy addr 0x%llx, trying again",
1274 			 (uint64_t)(*paddr));
1275 		nbuf_retry++;
1276 		if ((*rx_netbuf)) {
1277 			/* Not freeing buffer intentionally.
1278 			 * Observed that same buffer is getting
1279 			 * re-allocated resulting in longer load time
1280 			 * WMI init timeout.
1281 			 * This buffer is anyway not useful so skip it.
1282 			 * Add such buffers to the invalid list and free
1283 			 * them at driver unload.
1284 			 */
1285 			qdf_nbuf_unmap_nbytes_single(dp_soc->osdev,
1286 						     *rx_netbuf,
1287 						     QDF_DMA_FROM_DEVICE,
1288 						     rx_desc_pool->buf_size);
1289 			qdf_nbuf_queue_add(&dp_soc->invalid_buf_queue,
1290 					   *rx_netbuf);
1291 		}
1292 
1293 		*rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
1294 					    rx_desc_pool->buf_size,
1295 					    RX_BUFFER_RESERVATION,
1296 					    rx_desc_pool->buf_alignment,
1297 					    FALSE);
1298 
1299 		if (qdf_unlikely(!(*rx_netbuf)))
1300 			return QDF_STATUS_E_FAILURE;
1301 
1302 		ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev,
1303 						 *rx_netbuf,
1304 						 QDF_DMA_FROM_DEVICE,
1305 						 rx_desc_pool->buf_size);
1306 
1307 		if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
1308 			qdf_nbuf_free(*rx_netbuf);
1309 			*rx_netbuf = NULL;
1310 			continue;
1311 		}
1312 
1313 		*paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0);
1314 
1315 		if (qdf_likely(*paddr > DP_PHY_ADDR_RESERVED))
1316 			return QDF_STATUS_SUCCESS;
1317 
1318 	} while (nbuf_retry < MAX_RETRY);
1319 
1320 	if ((*rx_netbuf)) {
1321 		qdf_nbuf_unmap_nbytes_single(dp_soc->osdev,
1322 					     *rx_netbuf,
1323 					     QDF_DMA_FROM_DEVICE,
1324 					     rx_desc_pool->buf_size);
1325 		qdf_nbuf_queue_add(&dp_soc->invalid_buf_queue,
1326 				   *rx_netbuf);
1327 	}
1328 
1329 	return QDF_STATUS_E_FAILURE;
1330 }
1331 
1332 #else
1333 static inline
1334 int dp_check_paddr(struct dp_soc *dp_soc,
1335 		   qdf_nbuf_t *rx_netbuf,
1336 		   qdf_dma_addr_t *paddr,
1337 		   struct rx_desc_pool *rx_desc_pool)
1338 {
1339 	return QDF_STATUS_SUCCESS;
1340 }
1341 
1342 #endif
1343 
1344 /**
1345  * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
1346  *				   the MSDU Link Descriptor
1347  * @soc: core txrx main context
1348  * @buf_info: buf_info includes the cookie that is used to look up the
1349  * virtual address of the link descriptor after deriving the page id
1350  * and the offset or index of the desc on the associated page.
1351  *
1352  * This is the VA of the link descriptor, that HAL layer later uses to
1353  * retrieve the list of MSDU's for a given MPDU.
1354  *
1355  * Return: void *: Virtual Address of the Rx descriptor
1356  */
1357 static inline
1358 void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
1359 				  struct hal_buf_info *buf_info)
1360 {
1361 	void *link_desc_va;
1362 	struct qdf_mem_multi_page_t *pages;
1363 	uint16_t page_id = LINK_DESC_COOKIE_PAGE_ID(buf_info->sw_cookie);
1364 
1365 	pages = &soc->link_desc_pages;
1366 	if (!pages)
1367 		return NULL;
1368 	if (qdf_unlikely(page_id >= pages->num_pages))
1369 		return NULL;
1370 	link_desc_va = pages->dma_pages[page_id].page_v_addr_start +
1371 		(buf_info->paddr - pages->dma_pages[page_id].page_p_addr);
1372 	return link_desc_va;
1373 }
1374 
1375 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1376 #ifdef DISABLE_EAPOL_INTRABSS_FWD
1377 #ifdef WLAN_FEATURE_11BE_MLO
1378 static inline bool dp_nbuf_dst_addr_is_mld_addr(struct dp_vdev *vdev,
1379 						qdf_nbuf_t nbuf)
1380 {
1381 	struct qdf_mac_addr *self_mld_mac_addr =
1382 				(struct qdf_mac_addr *)vdev->mld_mac_addr.raw;
1383 	return qdf_is_macaddr_equal(self_mld_mac_addr,
1384 				    (struct qdf_mac_addr *)qdf_nbuf_data(nbuf) +
1385 				    QDF_NBUF_DEST_MAC_OFFSET);
1386 }
1387 #else
1388 static inline bool dp_nbuf_dst_addr_is_mld_addr(struct dp_vdev *vdev,
1389 						qdf_nbuf_t nbuf)
1390 {
1391 	return false;
1392 }
1393 #endif
1394 
1395 static inline bool dp_nbuf_dst_addr_is_self_addr(struct dp_vdev *vdev,
1396 						 qdf_nbuf_t nbuf)
1397 {
1398 	return qdf_is_macaddr_equal((struct qdf_mac_addr *)vdev->mac_addr.raw,
1399 				    (struct qdf_mac_addr *)qdf_nbuf_data(nbuf) +
1400 				    QDF_NBUF_DEST_MAC_OFFSET);
1401 }
1402 
1403 /**
1404  * dp_rx_intrabss_eapol_drop_check() - Drop check for EAPOL pkts; an EAPOL
1405  *  pkt with DA not equal to the vdev MAC addr must not be forwarded.
1406  * @soc: core txrx main context
1407  * @ta_txrx_peer: source peer entry
1408  * @rx_tlv_hdr: start address of rx tlvs
1409  * @nbuf: nbuf that has to be intrabss forwarded
1410  *
1411  * Return: true if the pkt is dropped, else false
1412  */
1413 static inline
1414 bool dp_rx_intrabss_eapol_drop_check(struct dp_soc *soc,
1415 				     struct dp_txrx_peer *ta_txrx_peer,
1416 				     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf)
1417 {
1418 	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf) &&
1419 			 !(dp_nbuf_dst_addr_is_self_addr(ta_txrx_peer->vdev,
1420 							 nbuf) ||
1421 			   dp_nbuf_dst_addr_is_mld_addr(ta_txrx_peer->vdev,
1422 							nbuf)))) {
1423 		qdf_nbuf_free(nbuf);
1424 		DP_STATS_INC(soc, rx.err.intrabss_eapol_drop, 1);
1425 		return true;
1426 	}
1427 
1428 	return false;
1429 }
1430 #else /* DISABLE_EAPOL_INTRABSS_FWD */
1431 
1432 static inline
1433 bool dp_rx_intrabss_eapol_drop_check(struct dp_soc *soc,
1434 				     struct dp_txrx_peer *ta_txrx_peer,
1435 				     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf)
1436 {
1437 	return false;
1438 }
1439 #endif /* DISABLE_EAPOL_INTRABSS_FWD */
1440 
1441 /**
1442  * dp_rx_intrabss_mcbc_fwd() - Does intrabss forward for mcast packets
1443  * @soc: core txrx main context
1444  * @ta_peer: source peer entry
1445  * @rx_tlv_hdr: start address of rx tlvs
1446  * @nbuf: nbuf that has to be intrabss forwarded
1447  * @tid_stats: tid stats pointer
1448  * @link_id: link Id on which packet is received
1449  *
1450  * Return: bool: true if it is forwarded else false
1451  */
1452 bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc,
1453 			     struct dp_txrx_peer *ta_peer,
1454 			     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
1455 			     struct cdp_tid_rx_stats *tid_stats,
1456 			     uint8_t link_id);
1457 
1458 /**
1459  * dp_rx_intrabss_ucast_fwd() - Does intrabss forward for unicast packets
1460  * @soc: core txrx main context
1461  * @ta_peer: source peer entry
1462  * @tx_vdev_id: VDEV ID for Intra-BSS TX
1463  * @rx_tlv_hdr: start address of rx tlvs
1464  * @nbuf: nbuf that has to be intrabss forwarded
1465  * @tid_stats: tid stats pointer
1466  * @link_id: link Id on which packet is received
1467  *
1468  * Return: bool: true if it is forwarded else false
1469  */
1470 bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc,
1471 			      struct dp_txrx_peer *ta_peer,
1472 			      uint8_t tx_vdev_id,
1473 			      uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
1474 			      struct cdp_tid_rx_stats *tid_stats,
1475 			      uint8_t link_id);
1476 
1477 /**
1478  * dp_rx_defrag_concat() - Concatenate the fragments
1479  *
1480  * @dst: destination pointer to the buffer
1481  * @src: source pointer from where the fragment payload is to be copied
1482  *
1483  * Return: QDF_STATUS
1484  */
1485 static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src)
1486 {
1487 	/*
1488 	 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst
1489 	 * to provide space for src, the headroom portion is copied from
1490 	 * the original dst buffer to the larger new dst buffer.
1491 	 * (This is needed, because the headroom of the dst buffer
1492 	 * contains the rx desc.)
1493 	 */
1494 	if (!qdf_nbuf_cat(dst, src)) {
1495 		/*
1496 		 * qdf_nbuf_cat does not free the src memory.
1497 		 * Free src nbuf before returning
1498 		 * For the failure case, the caller takes care of freeing the nbuf
1499 		 */
1500 		qdf_nbuf_free(src);
1501 		return QDF_STATUS_SUCCESS;
1502 	}
1503 
1504 	return QDF_STATUS_E_DEFRAG_ERROR;
1505 }
1506 
1507 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1508 
1509 #ifndef FEATURE_WDS
1510 void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
1511 		    struct dp_txrx_peer *ta_txrx_peer, qdf_nbuf_t nbuf);
1512 
1513 static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc, uint16_t sa_idx, bool is_active)
1514 {
1515 	return QDF_STATUS_SUCCESS;
1516 }
1517 
1518 static inline void
1519 dp_rx_wds_srcport_learn(struct dp_soc *soc,
1520 			uint8_t *rx_tlv_hdr,
1521 			struct dp_txrx_peer *txrx_peer,
1522 			qdf_nbuf_t nbuf,
1523 			struct hal_rx_msdu_metadata msdu_metadata)
1524 {
1525 }
1526 
1527 static inline void
1528 dp_rx_ipa_wds_srcport_learn(struct dp_soc *soc,
1529 			    struct dp_peer *ta_peer, qdf_nbuf_t nbuf,
1530 			    struct hal_rx_msdu_metadata msdu_end_info,
1531 			    bool ad4_valid, bool chfrag_start)
1532 {
1533 }
1534 #endif
1535 
1536 /**
1537  * dp_rx_desc_dump() - dump the sw rx descriptor
1538  *
1539  * @rx_desc: sw rx descriptor
1540  */
1541 static inline void dp_rx_desc_dump(struct dp_rx_desc *rx_desc)
1542 {
1543 	dp_info("rx_desc->nbuf: %pK, rx_desc->cookie: %d, rx_desc->pool_id: %d, rx_desc->in_use: %d, rx_desc->unmapped: %d",
1544 		rx_desc->nbuf, rx_desc->cookie, rx_desc->pool_id,
1545 		rx_desc->in_use, rx_desc->unmapped);
1546 }
1547 
1548 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1549 
1550 /**
1551  * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet.
1552  *					In qwrap mode, packets originating from
1553  *					any vdev should not be looped back and
1554  *					should be dropped.
1555  * @vdev: vdev on which rx packet is received
1556  * @nbuf: rx pkt
1557  *
1558  */
1559 #if ATH_SUPPORT_WRAP
1560 static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
1561 						qdf_nbuf_t nbuf)
1562 {
1563 	struct dp_vdev *psta_vdev;
1564 	struct dp_pdev *pdev = vdev->pdev;
1565 	uint8_t *data = qdf_nbuf_data(nbuf);
1566 
1567 	if (qdf_unlikely(vdev->proxysta_vdev)) {
1568 		/* In qwrap isolation mode, allow loopback packets as all
1569 		 * packets go to RootAP and Loopback on the mpsta.
1570 		 */
1571 		if (vdev->isolation_vdev)
1572 			return false;
1573 		TAILQ_FOREACH(psta_vdev, &pdev->vdev_list, vdev_list_elem) {
1574 			if (qdf_unlikely(psta_vdev->proxysta_vdev &&
1575 					 !qdf_mem_cmp(psta_vdev->mac_addr.raw,
1576 						      &data[QDF_MAC_ADDR_SIZE],
1577 						      QDF_MAC_ADDR_SIZE))) {
1578 				/* Drop packet if source address is equal to
1579 				 * any of the vdev addresses.
1580 				 */
1581 				return true;
1582 			}
1583 		}
1584 	}
1585 	return false;
1586 }
1587 #else
1588 static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
1589 						qdf_nbuf_t nbuf)
1590 {
1591 	return false;
1592 }
1593 #endif
1594 
1595 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1596 
1597 #if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\
1598 	defined(WLAN_SUPPORT_RX_TAG_STATISTICS) ||\
1599 	defined(WLAN_SUPPORT_RX_FLOW_TAG)
1600 #include "dp_rx_tag.h"
1601 #endif
1602 
1603 #if !defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) &&\
1604 	!defined(WLAN_SUPPORT_RX_FLOW_TAG)
1605 /**
1606  * dp_rx_update_protocol_tag() - Reads CCE metadata from the RX MSDU end TLV
1607  *                              and sets the corresponding tag in the QDF packet
1608  * @soc: core txrx main context
1609  * @vdev: vdev on which the packet is received
1610  * @nbuf: QDF pkt buffer on which the protocol tag should be set
1611  * @rx_tlv_hdr: base address where the RX TLVs start
1612  * @ring_index: REO ring number, not used for error & monitor ring
1613  * @is_reo_exception: flag to indicate if rx from REO ring or exception ring
1614  * @is_update_stats: flag to indicate whether to update stats or not
1615  *
1616  * Return: void
1617  */
1618 static inline void
1619 dp_rx_update_protocol_tag(struct dp_soc *soc, struct dp_vdev *vdev,
1620 			  qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
1621 			  uint16_t ring_index,
1622 			  bool is_reo_exception, bool is_update_stats)
1623 {
1624 }
1625 #endif
1626 
1627 #ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
1628 /**
1629  * dp_rx_err_cce_drop() - Reads CCE metadata from the RX MSDU end TLV
1630  *                        and returns whether cce metadata matches
1631  * @soc: core txrx main context
1632  * @vdev: vdev on which the packet is received
1633  * @nbuf: QDF pkt buffer on which the protocol tag should be set
1634  * @rx_tlv_hdr: base address where the RX TLVs start
1635  *
1636  * Return: bool
1637  */
1638 static inline bool
1639 dp_rx_err_cce_drop(struct dp_soc *soc, struct dp_vdev *vdev,
1640 		   qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
1641 {
1642 	return false;
1643 }
1644 
1645 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
1646 
1647 #ifndef WLAN_SUPPORT_RX_FLOW_TAG
1648 /**
1649  * dp_rx_update_flow_tag() - Reads FSE metadata from the RX MSDU end TLV
1650  *                           and sets the corresponding tag in the QDF packet
1651  * @soc: core txrx main context
1652  * @vdev: vdev on which the packet is received
1653  * @nbuf: QDF pkt buffer on which the protocol tag should be set
1654  * @rx_tlv_hdr: base address where the RX TLVs start
1655  * @update_stats: flag to indicate whether to update stats or not
1656  *
1657  * Return: void
1658  */
1659 static inline void
1660 dp_rx_update_flow_tag(struct dp_soc *soc, struct dp_vdev *vdev,
1661 		      qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, bool update_stats)
1662 {
1663 }
1664 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
1665 
1666 #define CRITICAL_BUFFER_THRESHOLD	64
1667 /**
1668  * __dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
1669  *			       called during dp rx initialization
1670  *			       and at the end of dp_rx_process.
1671  *
1672  * @dp_soc: core txrx main context
1673  * @mac_id: mac_id which is one of 3 mac_ids
1674  * @dp_rxdma_srng: dp rxdma circular ring
1675  * @rx_desc_pool: Pointer to free Rx descriptor pool
1676  * @num_req_buffers: number of buffer to be replenished
1677  * @desc_list: list of descs if called from dp_rx_process
1678  *	       or NULL during dp rx initialization or out of buffer
1679  *	       interrupt.
1680  * @tail: tail of descs list
1681  * @req_only: If true don't replenish more than req buffers
1682  * @func_name: name of the caller function
1683  *
1684  * Return: return success or failure
1685  */
1686 QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
1687 				 struct dp_srng *dp_rxdma_srng,
1688 				 struct rx_desc_pool *rx_desc_pool,
1689 				 uint32_t num_req_buffers,
1690 				 union dp_rx_desc_list_elem_t **desc_list,
1691 				 union dp_rx_desc_list_elem_t **tail,
1692 				 bool req_only,
1693 				 const char *func_name);
1694 
1695 /**
1696  * __dp_rx_buffers_no_map_replenish() - replenish rxdma ring with rx nbufs
1697  *					using direct APIs to invalidate the cache
1698  *					and get the physical address of the
1699  *					nbuf instead of the map API; called during
1700  *					dp rx initialization and at the end
1701  *					of dp_rx_process.
1702  *
1703  * @dp_soc: core txrx main context
1704  * @mac_id: mac_id which is one of 3 mac_ids
1705  * @dp_rxdma_srng: dp rxdma circular ring
1706  * @rx_desc_pool: Pointer to free Rx descriptor pool
1707  * @num_req_buffers: number of buffer to be replenished
1708  * @desc_list: list of descs if called from dp_rx_process
1709  *	       or NULL during dp rx initialization or out of buffer
1710  *	       interrupt.
1711  * @tail: tail of descs list
1712  *
1713  * Return: return success or failure
1714  */
1715 QDF_STATUS
1716 __dp_rx_buffers_no_map_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
1717 				 struct dp_srng *dp_rxdma_srng,
1718 				 struct rx_desc_pool *rx_desc_pool,
1719 				 uint32_t num_req_buffers,
1720 				 union dp_rx_desc_list_elem_t **desc_list,
1721 				 union dp_rx_desc_list_elem_t **tail);
1722 
1723 /**
1724  * __dp_rx_comp2refill_replenish() - replenish rxdma ring with rx nbufs
1725  *					use direct APIs to invalidate the nbuf
1726  *					and get its physical address instead
1727  *					of the map API; called during
1728  *					dp rx initialization and at the end
1729  *					of dp_rx_process.
1730  *
1731  * @dp_soc: core txrx main context
1732  * @mac_id: mac_id which is one of 3 mac_ids
1733  * @dp_rxdma_srng: dp rxdma circular ring
1734  * @rx_desc_pool: Pointer to free Rx descriptor pool
1735  * @num_req_buffers: number of buffer to be replenished
1736  * @desc_list: list of descs if called from dp_rx_process
1737  *	       or NULL during dp rx initialization or out of buffer
1738  *	       interrupt.
1739  * @tail: tail of descs list
1740  * Return: return success or failure
1741  */
1742 QDF_STATUS
1743 __dp_rx_comp2refill_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
1744 			      struct dp_srng *dp_rxdma_srng,
1745 			      struct rx_desc_pool *rx_desc_pool,
1746 			      uint32_t num_req_buffers,
1747 			      union dp_rx_desc_list_elem_t **desc_list,
1748 			      union dp_rx_desc_list_elem_t **tail);
1749 
1750 /**
1751  * __dp_rx_buffers_no_map_lt_replenish() - replenish rxdma ring with rx nbufs
1752  *					use direct APIs to invalidate the nbuf
1753  *					and get its physical address instead
1754  *					of the map API; called when the
1755  *					low threshold interrupt is triggered
1756  *
1757  * @dp_soc: core txrx main context
1758  * @mac_id: mac_id which is one of 3 mac_ids
1759  * @dp_rxdma_srng: dp rxdma circular ring
1760  * @rx_desc_pool: Pointer to free Rx descriptor pool
1761  *
1762  * Return: return success or failure
1763  */
1764 QDF_STATUS
1765 __dp_rx_buffers_no_map_lt_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
1766 				    struct dp_srng *dp_rxdma_srng,
1767 				    struct rx_desc_pool *rx_desc_pool);
1768 
1769 /**
1770  * __dp_pdev_rx_buffers_no_map_attach() - replenish rxdma ring with rx nbufs
1771  *					use direct APIs to invalidate the nbuf
1772  *					and get its physical address instead
1773  *					of the map API; called during
1774  *					dp rx initialization.
1775  *
1776  * @dp_soc: core txrx main context
1777  * @mac_id: mac_id which is one of 3 mac_ids
1778  * @dp_rxdma_srng: dp rxdma circular ring
1779  * @rx_desc_pool: Pointer to free Rx descriptor pool
1780  * @num_req_buffers: number of buffer to be replenished
1781  *
1782  * Return: return success or failure
1783  */
1784 QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *dp_soc,
1785 					      uint32_t mac_id,
1786 					      struct dp_srng *dp_rxdma_srng,
1787 					      struct rx_desc_pool *rx_desc_pool,
1788 					      uint32_t num_req_buffers);
1789 
1790 /**
1791  * dp_pdev_rx_buffers_attach() - replenish rxdma ring with rx nbufs
1792  *                               called during dp rx initialization
1793  *
1794  * @dp_soc: core txrx main context
1795  * @mac_id: mac_id which is one of 3 mac_ids
1796  * @dp_rxdma_srng: dp rxdma circular ring
1797  * @rx_desc_pool: Pointer to free Rx descriptor pool
1798  * @num_req_buffers: number of buffer to be replenished
1799  *
1800  * Return: return success or failure
1801  */
1802 QDF_STATUS
1803 dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
1804 			  struct dp_srng *dp_rxdma_srng,
1805 			  struct rx_desc_pool *rx_desc_pool,
1806 			  uint32_t num_req_buffers);
1807 
1808 /**
1809  * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
1810  * @vdev: DP Virtual device handle
1811  * @nbuf: Buffer pointer
1812  * @rx_tlv_hdr: start of rx tlv header
1813  * @txrx_peer: pointer to peer
1814  *
1815  * This function allocates memory for mesh receive stats and fills the
1816  * required stats. Stores the memory address in skb cb.
1817  *
1818  * Return: void
1819  */
1820 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1821 			   uint8_t *rx_tlv_hdr,
1822 			   struct dp_txrx_peer *txrx_peer);
1823 
1824 /**
1825  * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
1826  * @vdev: DP Virtual device handle
1827  * @nbuf: Buffer pointer
1828  * @rx_tlv_hdr: start of rx tlv header
1829  *
1830  * This checks if the received packet matches any filter-out
1831  * category and drops the packet if it matches.
1832  *
1833  * Return: QDF_STATUS_SUCCESS indicates drop,
1834  *         QDF_STATUS_E_FAILURE indicate to not drop
1835  */
1836 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1837 					uint8_t *rx_tlv_hdr);
1838 
1839 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
1840 			   struct dp_txrx_peer *peer);
1841 
1842 /**
1843  * dp_rx_compute_delay() - Compute and fill in all timestamps
1844  *				in the correct fields
1845  * @vdev: vdev handle
1846  * @nbuf: network buffer
1847  *
1848  * Return: none
1849  */
1850 void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
1851 
1852 #ifdef QCA_PEER_EXT_STATS
1853 
1854 /**
1855  * dp_rx_compute_tid_delay - Compute per TID delay stats
1856  * @stats: TID delay stats to update
1857  * @nbuf: NBuffer
1858  *
1859  * Return: Void
1860  */
1861 void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
1862 			     qdf_nbuf_t nbuf);
1863 #endif /* QCA_PEER_EXT_STATS */
1864 
1865 #ifdef WLAN_SUPPORT_PPEDS
1866 static inline
1867 void dp_rx_set_reuse_nbuf(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
1868 {
1869 	rx_desc->reuse_nbuf = nbuf;
1870 	rx_desc->has_reuse_nbuf = true;
1871 }
1872 
1873 /**
1874  * __dp_rx_add_to_free_desc_list_reuse() - Adds to a local free descriptor list
1875  *					   this list will be reused
1876  *
1877  * @head: pointer to the head of local free list
1878  * @tail: pointer to the tail of local free list
1879  * @new: new descriptor that is added to the free list
1880  * @func_name: caller func name
1881  *
1882  * Return: void:
1883  */
1884 static inline
1885 void __dp_rx_add_to_free_desc_list_reuse(union dp_rx_desc_list_elem_t **head,
1886 					 union dp_rx_desc_list_elem_t **tail,
1887 					 struct dp_rx_desc *new,
1888 					 const char *func_name)
1889 {
1890 	qdf_assert(head && new);
1891 
1892 	dp_rx_desc_update_dbg_info(new, func_name, RX_DESC_IN_FREELIST);
1893 
1894 	new->nbuf = NULL;
1895 
1896 	((union dp_rx_desc_list_elem_t *)new)->next = *head;
1897 	*head = (union dp_rx_desc_list_elem_t *)new;
1898 	/* reset tail if head->next is NULL */
1899 	if (!*tail || !(*head)->next)
1900 		*tail = *head;
1901 }
1902 #else
1903 static inline
1904 void dp_rx_set_reuse_nbuf(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
1905 {
1906 }
1907 
1908 static inline
1909 void __dp_rx_add_to_free_desc_list_reuse(union dp_rx_desc_list_elem_t **head,
1910 					 union dp_rx_desc_list_elem_t **tail,
1911 					 struct dp_rx_desc *new,
1912 					 const char *func_name)
1913 {
1914 }
1915 #endif
1916 
1917 #ifdef RX_DESC_DEBUG_CHECK
1918 /**
1919  * dp_rx_desc_check_magic() - check the magic value in dp_rx_desc
1920  * @rx_desc: rx descriptor pointer
1921  *
1922  * Return: true, if magic is correct, else false.
1923  */
1924 static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
1925 {
1926 	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
1927 		return false;
1928 
1929 	rx_desc->magic = 0;
1930 	return true;
1931 }
1932 
1933 /**
1934  * dp_rx_desc_prep() - prepare rx desc
1935  * @rx_desc: rx descriptor pointer to be prepared
1936  * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info *
1937  *
1938  * Note: assumption is that we are associating a nbuf which is mapped
1939  *
1940  * Return: none
1941  */
1942 static inline
1943 void dp_rx_desc_prep(struct dp_rx_desc *rx_desc,
1944 		     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
1945 {
1946 	rx_desc->magic = DP_RX_DESC_MAGIC;
1947 	rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf;
1948 	rx_desc->unmapped = 0;
1949 	rx_desc->nbuf_data_addr = (uint8_t *)qdf_nbuf_data(rx_desc->nbuf);
1950 	dp_rx_set_reuse_nbuf(rx_desc, rx_desc->nbuf);
1951 	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
1952 }
1953 
1954 /**
1955  * dp_rx_desc_frag_prep() - prepare rx desc
1956  * @rx_desc: rx descriptor pointer to be prepared
1957  * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info *
1958  *
1959  * Note: assumption is that the frag address is mapped
1960  *
1961  * Return: none
1962  */
1963 #ifdef DP_RX_MON_MEM_FRAG
1964 static inline
1965 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
1966 			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
1967 {
1968 	rx_desc->magic = DP_RX_DESC_MAGIC;
1969 	rx_desc->rx_buf_start =
1970 		(uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr);
1971 	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
1972 	rx_desc->unmapped = 0;
1973 }
1974 #else
1975 static inline
1976 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
1977 			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
1978 {
1979 }
1980 #endif /* DP_RX_MON_MEM_FRAG */
1981 
1982 /**
1983  * dp_rx_desc_paddr_sanity_check() - paddr sanity for ring desc vs rx_desc
1984  * @rx_desc: rx descriptor
1985  * @ring_paddr: paddr obtained from the ring
1986  *
1987  * Return: true if the ring paddr matches the rx_desc nbuf paddr, else false
1988  */
1989 static inline
1990 bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc,
1991 				   uint64_t ring_paddr)
1992 {
1993 	return (ring_paddr == qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0));
1994 }
1995 #else
1996 
1997 static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
1998 {
1999 	return true;
2000 }
2001 
2002 static inline
2003 void dp_rx_desc_prep(struct dp_rx_desc *rx_desc,
2004 		     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
2005 {
2006 	rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf;
2007 	dp_rx_set_reuse_nbuf(rx_desc, rx_desc->nbuf);
2008 	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
2009 	rx_desc->unmapped = 0;
2010 }
2011 
2012 #ifdef DP_RX_MON_MEM_FRAG
2013 static inline
2014 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
2015 			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
2016 {
2017 	rx_desc->rx_buf_start =
2018 		(uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr);
2019 	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
2020 	rx_desc->unmapped = 0;
2021 }
2022 #else
2023 static inline
2024 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
2025 			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
2026 {
2027 }
2028 #endif /* DP_RX_MON_MEM_FRAG */
2029 
2030 static inline
2031 bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc,
2032 				   uint64_t ring_paddr)
2033 {
2034 	return true;
2035 }
2036 #endif /* RX_DESC_DEBUG_CHECK */
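
/*
 * Example (illustrative sketch, not part of the header): with
 * RX_DESC_DEBUG_CHECK enabled the reap path can vet a descriptor before
 * using it; with the feature disabled both helpers compile to cheap
 * "always ok" stubs, so the calls cost nothing.
 *
 *	if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
 *		... log error and skip this ring entry ...
 *	}
 *
 *	if (qdf_unlikely(!dp_rx_desc_paddr_sanity_check(rx_desc, ring_paddr))) {
 *		... paddr mismatch, drop the entry and continue ...
 *	}
 */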
2037 
2038 /**
2039  * dp_rx_enable_mon_dest_frag() - Enable frag processing for
2040  *              monitor destination ring via frag.
2041  * @rx_desc_pool: Rx desc pool
2042  * @is_mon_dest_desc: Is it for monitor dest buffer
2043  *
2044  * Enable this flag only for monitor destination buffer processing
2045  * if DP_RX_MON_MEM_FRAG feature is enabled.
2046  * If flag is set then frag based function will be called for alloc,
2047  * map, prep desc and free ops for desc buffer else normal nbuf based
2048  * function will be called.
2049  *
2050  * Return: None
2051  */
2052 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
2053 				bool is_mon_dest_desc);
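
/*
 * Example (illustrative sketch, not part of the header): enable frag based
 * processing on the monitor destination descriptor pool during its init,
 * assuming DP_RX_MON_MEM_FRAG is compiled in. mon_dest_desc_pool is a
 * placeholder for the caller's pointer to that pool; other pools keep the
 * default nbuf based path.
 *
 *	struct rx_desc_pool *mon_dest_desc_pool = ...;
 *
 *	dp_rx_enable_mon_dest_frag(mon_dest_desc_pool, true);
 */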
2054 
2055 #ifndef QCA_MULTIPASS_SUPPORT
2056 static inline
2057 bool dp_rx_multipass_process(struct dp_txrx_peer *peer, qdf_nbuf_t nbuf,
2058 			     uint8_t tid)
2059 {
2060 	return false;
2061 }
2062 #else
2063 /**
2064  * dp_rx_multipass_process - insert vlan tag on frames for traffic separation
2065  * @txrx_peer: DP txrx peer handle
2066  * @nbuf: skb
2067  * @tid: traffic priority
2068  *
2069  * Return: bool: true in case of success else false
2070  * Success is considered if:
2071  *  i. If frame has vlan header
2072  *  ii. If the frame comes from different peer and dont need multipass processing
2073  * Failure is considered if:
2074  *  i. Frame comes from multipass peer but doesn't contain vlan header.
2075  *  In failure case, drop such frames.
2076  */
2077 bool dp_rx_multipass_process(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf,
2078 			     uint8_t tid);
2079 #endif
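
/*
 * Example (illustrative sketch, not part of the header): per the contract
 * above, an rx path caller drops the frame when multipass processing fails,
 * i.e. the frame came from a multipass peer without a vlan header.
 *
 *	if (!dp_rx_multipass_process(txrx_peer, nbuf, tid)) {
 *		qdf_nbuf_free(nbuf);
 *		continue;
 *	}
 */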
2080 
2081 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2082 
2083 #ifndef WLAN_RX_PKT_CAPTURE_ENH
2084 static inline
2085 QDF_STATUS dp_peer_set_rx_capture_enabled(struct dp_pdev *pdev,
2086 					  struct dp_peer *peer_handle,
2087 					  bool value, uint8_t *mac_addr)
2088 {
2089 	return QDF_STATUS_SUCCESS;
2090 }
2091 #endif
2092 
2093 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2094 
2095 /**
2096  * dp_rx_deliver_to_stack() - deliver pkts to network stack
2097  * Caller to hold peer refcount and check for valid peer
2098  * @soc: soc
2099  * @vdev: vdev
2100  * @peer: txrx peer
2101  * @nbuf_head: skb list head
2102  * @nbuf_tail: skb list tail
2103  *
2104  * Return: QDF_STATUS
2105  */
2106 QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc,
2107 				  struct dp_vdev *vdev,
2108 				  struct dp_txrx_peer *peer,
2109 				  qdf_nbuf_t nbuf_head,
2110 				  qdf_nbuf_t nbuf_tail);
2111 
2112 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
2113 /**
2114  * dp_rx_eapol_deliver_to_stack() - deliver pkts to network stack
2115  * caller to hold peer refcount and check for valid peer
2116  * @soc: soc
2117  * @vdev: vdev
2118  * @peer: peer
2119  * @nbuf_head: skb list head
2120  * @nbuf_tail: skb list tail
2121  *
2122  * Return: QDF_STATUS
2123  */
2124 QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc,
2125 					struct dp_vdev *vdev,
2126 					struct dp_txrx_peer *peer,
2127 					qdf_nbuf_t nbuf_head,
2128 					qdf_nbuf_t nbuf_tail);
2129 #endif
2130 
2131 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2132 
2133 #ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
2134 #define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \
2135 	do {								   \
2136 		if (!soc->rx_buff_pool[rx_desc->pool_id].is_initialized) { \
2137 			DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf);	   \
2138 			break;						   \
2139 		}							   \
2140 		DP_RX_LIST_APPEND(ebuf_head, ebuf_tail, rx_desc->nbuf);	   \
2141 		if (!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)) {	   \
2142 			if (!dp_rx_buffer_pool_refill(soc, ebuf_head,	   \
2143 						      rx_desc->pool_id))   \
2144 				DP_RX_MERGE_TWO_LIST(head, tail,	   \
2145 						     ebuf_head, ebuf_tail);\
2146 			ebuf_head = NULL;				   \
2147 			ebuf_tail = NULL;				   \
2148 		}							   \
2149 	} while (0)
2150 #else
2151 #define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \
2152 	DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf)
2153 #endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */
2154 
2155 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2156 
2157 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
2158 /**
2159  * dp_rx_deliver_to_pkt_capture() - deliver rx packet to packet capture
2160  * @soc : dp_soc handle
2161  * @pdev: dp_pdev handle
2162  * @peer_id: peer_id of the peer for which completion came
2163  * @is_offload:
2164  * @netbuf: Buffer pointer
2165  *
2166  * This function is used to deliver rx packet to packet capture
2167  */
2168 void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc,  struct dp_pdev *pdev,
2169 				  uint16_t peer_id, uint32_t is_offload,
2170 				  qdf_nbuf_t netbuf);
2171 void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
2172 					  uint32_t is_offload);
2173 #else
2174 static inline void
2175 dp_rx_deliver_to_pkt_capture(struct dp_soc *soc,  struct dp_pdev *pdev,
2176 			     uint16_t peer_id, uint32_t is_offload,
2177 			     qdf_nbuf_t netbuf)
2178 {
2179 }
2180 
2181 static inline void
2182 dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
2183 				     uint32_t is_offload)
2184 {
2185 }
2186 #endif
2187 
2188 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2189 #ifdef FEATURE_MEC
2190 /**
2191  * dp_rx_mcast_echo_check() - check if the mcast pkt is a loop
2192  *			      back on same vap or a different vap.
2193  * @soc: core DP main context
2194  * @peer: dp peer handler
2195  * @rx_tlv_hdr: start of the rx TLV header
2196  * @nbuf: pkt buffer
2197  *
2198  * Return: bool (true if it is a looped back pkt else false)
2199  *
2200  */
2201 bool dp_rx_mcast_echo_check(struct dp_soc *soc,
2202 			    struct dp_txrx_peer *peer,
2203 			    uint8_t *rx_tlv_hdr,
2204 			    qdf_nbuf_t nbuf);
2205 #else
2206 static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
2207 					  struct dp_txrx_peer *peer,
2208 					  uint8_t *rx_tlv_hdr,
2209 					  qdf_nbuf_t nbuf)
2210 {
2211 	return false;
2212 }
2213 #endif /* FEATURE_MEC */
2214 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2215 
2216 #ifdef RECEIVE_OFFLOAD
2217 /**
2218  * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb
2219  * @soc: DP SOC handle
2220  * @rx_tlv: RX TLV received for the msdu
2221  * @msdu: msdu for which GRO info needs to be filled
2222  * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets
2223  *
2224  * Return: None
2225  */
2226 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
2227 			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt);
2228 #else
2229 static inline
2230 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
2231 			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
2232 {
2233 }
2234 #endif
2235 
2236 /**
2237  * dp_rx_msdu_stats_update() - update per msdu stats.
2238  * @soc: core txrx main context
2239  * @nbuf: pointer to the first msdu of an amsdu.
2240  * @rx_tlv_hdr: pointer to the start of RX TLV headers.
2241  * @txrx_peer: pointer to the txrx peer object.
2242  * @ring_id: reo dest ring number on which pkt is reaped.
2243  * @tid_stats: per tid rx stats.
2244  * @link_id: link Id on which packet is received
2245  *
2246  * update all the per msdu stats for that nbuf.
2247  *
2248  * Return: void
2249  */
2250 void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
2251 			     uint8_t *rx_tlv_hdr,
2252 			     struct dp_txrx_peer *txrx_peer,
2253 			     uint8_t ring_id,
2254 			     struct cdp_tid_rx_stats *tid_stats,
2255 			     uint8_t link_id);
2256 
2257 /**
2258  * dp_rx_deliver_to_stack_no_peer() - try to deliver rx data even if
2259  *				      no corresponding peer is found
2260  * @soc: core txrx main context
2261  * @nbuf: pkt skb pointer
2262  *
2263  * This function will try to deliver some RX special frames to stack
2264  * even when no matching peer is found. For instance, in the LFR case, some
2265  * eapol data will be sent to the host before peer_map is done.
2266  *
2267  * Return: None
2268  */
2269 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf);
2270 
2271 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2272 #ifdef DP_RX_DROP_RAW_FRM
2273 /**
2274  * dp_rx_is_raw_frame_dropped() - if raw frame nbuf, free and drop
2275  * @nbuf: pkt skb pointer
2276  *
2277  * Return: true - raw frame, dropped
2278  *	   false - not raw frame, do nothing
2279  */
2280 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf);
2281 #else
2282 static inline
2283 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)
2284 {
2285 	return false;
2286 }
2287 #endif
2288 
2289 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
2290 /**
2291  * dp_rx_update_stats() - Update soc level rx packet count
2292  * @soc: DP soc handle
2293  * @nbuf: nbuf received
2294  *
2295  * Return: none
2296  */
2297 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf);
2298 #else
2299 static inline
2300 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf)
2301 {
2302 }
2303 #endif
2304 
2305 /**
2306  * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
2307  * @pdev: dp_pdev handle
2308  * @nbuf: pointer to the first msdu of an amsdu.
2309  * @rx_tlv_hdr: pointer to the start of RX TLV headers.
2310  *
2311  * The ip_summed field of the skb is set based on whether HW validated the
2312  * IP/TCP/UDP checksum.
2313  *
2314  * Return: void
2315  */
2316 #if defined(MAX_PDEV_CNT) && (MAX_PDEV_CNT == 1)
2317 static inline
2318 void dp_rx_cksum_offload(struct dp_pdev *pdev,
2319 			 qdf_nbuf_t nbuf,
2320 			 uint8_t *rx_tlv_hdr)
2321 {
2322 	qdf_nbuf_rx_cksum_t cksum = {0};
2323 	//TODO - Move this to ring desc api
2324 	//HAL_RX_MSDU_DESC_IP_CHKSUM_FAIL_GET
2325 	//HAL_RX_MSDU_DESC_TCP_UDP_CHKSUM_FAIL_GET
2326 	uint32_t ip_csum_err, tcp_udp_csum_er;
2327 
2328 	hal_rx_tlv_csum_err_get(pdev->soc->hal_soc, rx_tlv_hdr, &ip_csum_err,
2329 				&tcp_udp_csum_er);
2330 
2331 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
2332 		if (qdf_likely(!ip_csum_err)) {
2333 			cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
2334 			if (qdf_nbuf_is_ipv4_udp_pkt(nbuf) ||
2335 			    qdf_nbuf_is_ipv4_tcp_pkt(nbuf)) {
2336 				if (qdf_likely(!tcp_udp_csum_er))
2337 					cksum.csum_level = 1;
2338 				else
2339 					DP_STATS_INCC(pdev,
2340 						      err.tcp_udp_csum_err, 1,
2341 						      tcp_udp_csum_er);
2342 			}
2343 		} else {
2344 			DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err);
2345 		}
2346 	} else if (qdf_nbuf_is_ipv6_udp_pkt(nbuf) ||
2347 		   qdf_nbuf_is_ipv6_tcp_pkt(nbuf)) {
2348 		if (qdf_likely(!tcp_udp_csum_er))
2349 			cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
2350 		else
2351 			DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1,
2352 				      tcp_udp_csum_er);
2353 	} else {
2354 		cksum.l4_result = QDF_NBUF_RX_CKSUM_NONE;
2355 	}
2356 
2357 	qdf_nbuf_set_rx_cksum(nbuf, &cksum);
2358 }
2359 #else
2360 static inline
2361 void dp_rx_cksum_offload(struct dp_pdev *pdev,
2362 			 qdf_nbuf_t nbuf,
2363 			 uint8_t *rx_tlv_hdr)
2364 {
2365 }
2366 #endif
2367 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2368 
2369 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
2370 static inline
2371 bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
2372 				   int max_reap_limit)
2373 {
2374 	bool limit_hit = false;
2375 
2376 	limit_hit =
2377 		(num_reaped >= max_reap_limit) ? true : false;
2378 
2379 	if (limit_hit)
2380 		DP_STATS_INC(soc, rx.reap_loop_pkt_limit_hit, 1)
2381 
2382 	return limit_hit;
2383 }
2384 
2385 static inline
2386 bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
2387 {
2388 	return soc->wlan_cfg_ctx->rx_enable_eol_data_check;
2389 }
2390 
2391 static inline int dp_rx_get_loop_pkt_limit(struct dp_soc *soc)
2392 {
2393 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
2394 
2395 	return cfg->rx_reap_loop_pkt_limit;
2396 }
2397 #else
2398 static inline
2399 bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
2400 				   int max_reap_limit)
2401 {
2402 	return false;
2403 }
2404 
2405 static inline
2406 bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
2407 {
2408 	return false;
2409 }
2410 
2411 static inline int dp_rx_get_loop_pkt_limit(struct dp_soc *soc)
2412 {
2413 	return 0;
2414 }
2415 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
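
/*
 * Example (illustrative sketch, not part of the header): how a reap loop can
 * bound its work using the helpers above. The limit is read once outside the
 * loop and the loop breaks when the number of reaped entries reaches it; with
 * WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT disabled the limit check is a no-op.
 *
 *	int max_reap_limit = dp_rx_get_loop_pkt_limit(soc);
 *	uint32_t num_reaped = 0;
 *
 *	while (... fetch next entry from the destination ring ...) {
 *		... process the entry ...
 *		num_reaped++;
 *		if (dp_rx_reap_loop_pkt_limit_hit(soc, num_reaped,
 *						  max_reap_limit))
 *			break;
 *	}
 */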
2416 
2417 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf);
2418 
2419 static inline uint16_t
2420 dp_rx_peer_metadata_peer_id_get(struct dp_soc *soc, uint32_t peer_metadata)
2421 {
2422 	return soc->arch_ops.dp_rx_peer_metadata_peer_id_get(soc,
2423 							     peer_metadata);
2424 }
2425 
2426 /**
2427  * dp_rx_desc_pool_init_generic() - Generic Rx descriptors initialization
2428  * @soc: SOC handle
2429  * @rx_desc_pool: pointer to RX descriptor pool
2430  * @pool_id: pool ID
2431  *
2432  * Return: None
2433  */
2434 QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
2435 				  struct rx_desc_pool *rx_desc_pool,
2436 				  uint32_t pool_id);
2437 
2438 void dp_rx_desc_pool_deinit_generic(struct dp_soc *soc,
2439 				  struct rx_desc_pool *rx_desc_pool,
2440 				  uint32_t pool_id);
2441 
2442 /**
2443  * dp_rx_pkt_tracepoints_enabled() - Get the state of rx pkt tracepoint
2444  *
2445  * Return: True if any rx pkt tracepoint is enabled else false
2446  */
2447 static inline
2448 bool dp_rx_pkt_tracepoints_enabled(void)
2449 {
2450 	return (qdf_trace_dp_rx_tcp_pkt_enabled() ||
2451 		qdf_trace_dp_rx_udp_pkt_enabled() ||
2452 		qdf_trace_dp_rx_pkt_enabled());
2453 }
2454 
2455 #ifdef FEATURE_DIRECT_LINK
2456 /**
2457  * dp_audio_smmu_map()- Map memory region into Audio SMMU CB
2458  * @qdf_dev: pointer to QDF device structure
2459  * @paddr: physical address
2460  * @iova: DMA address
2461  * @size: memory region size
2462  *
2463  * Return: 0 on success else failure code
2464  */
2465 static inline
2466 int dp_audio_smmu_map(qdf_device_t qdf_dev, qdf_dma_addr_t paddr,
2467 		      qdf_dma_addr_t iova, qdf_size_t size)
2468 {
2469 	return pld_audio_smmu_map(qdf_dev->dev, paddr, iova, size);
2470 }
2471 
2472 /**
2473  * dp_audio_smmu_unmap()- Remove memory region mapping from Audio SMMU CB
2474  * @qdf_dev: pointer to QDF device structure
2475  * @iova: DMA address
2476  * @size: memory region size
2477  *
2478  * Return: None
2479  */
2480 static inline
2481 void dp_audio_smmu_unmap(qdf_device_t qdf_dev, qdf_dma_addr_t iova,
2482 			 qdf_size_t size)
2483 {
2484 	pld_audio_smmu_unmap(qdf_dev->dev, iova, size);
2485 }
2486 #else
2487 static inline
2488 int dp_audio_smmu_map(qdf_device_t qdf_dev, qdf_dma_addr_t paddr,
2489 		      qdf_dma_addr_t iova, qdf_size_t size)
2490 {
2491 	return 0;
2492 }
2493 
2494 static inline
2495 void dp_audio_smmu_unmap(qdf_device_t qdf_dev, qdf_dma_addr_t iova,
2496 			 qdf_size_t size)
2497 {
2498 }
2499 #endif
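
/*
 * Example (illustrative sketch, not part of the header): the audio SMMU
 * helpers above are used as a pair around the lifetime of a direct-link
 * buffer mapping. paddr, iova and size are placeholders supplied by the
 * caller; with FEATURE_DIRECT_LINK disabled both calls are stubs.
 *
 *	if (dp_audio_smmu_map(soc->osdev, paddr, iova, size))
 *		return QDF_STATUS_E_FAILURE;
 *
 *	... use the mapping ...
 *
 *	dp_audio_smmu_unmap(soc->osdev, iova, size);
 */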
2500 
2501 #if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
2502 static inline
2503 QDF_STATUS dp_pdev_rx_buffers_attach_simple(struct dp_soc *soc, uint32_t mac_id,
2504 					    struct dp_srng *rxdma_srng,
2505 					    struct rx_desc_pool *rx_desc_pool,
2506 					    uint32_t num_req_buffers)
2507 {
2508 	return __dp_pdev_rx_buffers_no_map_attach(soc, mac_id,
2509 						  rxdma_srng,
2510 						  rx_desc_pool,
2511 						  num_req_buffers);
2512 }
2513 
2514 static inline
2515 void dp_rx_buffers_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
2516 				    struct dp_srng *rxdma_srng,
2517 				    struct rx_desc_pool *rx_desc_pool,
2518 				    uint32_t num_req_buffers,
2519 				    union dp_rx_desc_list_elem_t **desc_list,
2520 				    union dp_rx_desc_list_elem_t **tail)
2521 {
2522 	__dp_rx_buffers_no_map_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
2523 					 num_req_buffers, desc_list, tail);
2524 }
2525 
2526 static inline
2527 void dp_rx_comp2refill_replenish(struct dp_soc *soc, uint32_t mac_id,
2528 				 struct dp_srng *rxdma_srng,
2529 				 struct rx_desc_pool *rx_desc_pool,
2530 				 uint32_t num_req_buffers,
2531 				 union dp_rx_desc_list_elem_t **desc_list,
2532 				 union dp_rx_desc_list_elem_t **tail)
2533 {
2534 	__dp_rx_comp2refill_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
2535 				      num_req_buffers, desc_list, tail);
2536 }
2537 
2538 static inline
2539 void dp_rx_buffers_lt_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
2540 				       struct dp_srng *rxdma_srng,
2541 				       struct rx_desc_pool *rx_desc_pool,
2542 				       uint32_t num_req_buffers,
2543 				       union dp_rx_desc_list_elem_t **desc_list,
2544 				       union dp_rx_desc_list_elem_t **tail)
2545 {
2546 	__dp_rx_buffers_no_map_lt_replenish(soc, mac_id, rxdma_srng,
2547 					    rx_desc_pool);
2548 }
2549 
2550 #ifndef QCA_DP_NBUF_FAST_RECYCLE_CHECK
2551 static inline
2552 qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc,
2553 				      qdf_nbuf_t nbuf,
2554 				      uint32_t buf_size)
2555 {
2556 	qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
2557 				      (void *)(nbuf->data + buf_size));
2558 
2559 	return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
2560 }
2561 #else
2562 #define L3_HEADER_PAD 2
2563 static inline
2564 qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc,
2565 				      qdf_nbuf_t nbuf,
2566 				      uint32_t buf_size)
2567 {
2568 	if (nbuf->recycled_for_ds)
2569 		return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
2570 
2571 	if (unlikely(!nbuf->fast_recycled)) {
2572 		qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
2573 					      (void *)(nbuf->data + buf_size));
2574 	}
2575 
2576 	DP_STATS_INC(dp_soc, rx.fast_recycled, 1);
2577 	nbuf->fast_recycled = 0;
2578 
2579 	return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
2580 }
2581 #endif
2582 
2583 static inline
2584 qdf_dma_addr_t dp_rx_nbuf_sync(struct dp_soc *dp_soc,
2585 			       qdf_nbuf_t nbuf,
2586 			       uint32_t buf_size)
2587 {
2588 	qdf_nbuf_dma_inv_range((void *)nbuf->data,
2589 			       (void *)(nbuf->data + buf_size));
2590 
2591 	return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
2592 }
2593 
2594 #if !defined(SPECULATIVE_READ_DISABLED)
2595 static inline
2596 void dp_rx_nbuf_unmap(struct dp_soc *soc,
2597 		      struct dp_rx_desc *rx_desc,
2598 		      uint8_t reo_ring_num)
2599 {
2600 	struct rx_desc_pool *rx_desc_pool;
2601 	qdf_nbuf_t nbuf;
2602 
2603 	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
2604 	nbuf = rx_desc->nbuf;
2605 
2606 	qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
2607 			       (void *)(nbuf->data + rx_desc_pool->buf_size));
2608 }
2609 
2610 static inline
2611 void dp_rx_nbuf_unmap_pool(struct dp_soc *soc,
2612 			   struct rx_desc_pool *rx_desc_pool,
2613 			   qdf_nbuf_t nbuf)
2614 {
2615 	qdf_nbuf_dma_inv_range((void *)nbuf->data,
2616 			       (void *)(nbuf->data + rx_desc_pool->buf_size));
2617 }
2618 
2619 #else
2620 static inline
2621 void dp_rx_nbuf_unmap(struct dp_soc *soc,
2622 		      struct dp_rx_desc *rx_desc,
2623 		      uint8_t reo_ring_num)
2624 {
2625 }
2626 
2627 static inline
2628 void dp_rx_nbuf_unmap_pool(struct dp_soc *soc,
2629 			   struct rx_desc_pool *rx_desc_pool,
2630 			   qdf_nbuf_t nbuf)
2631 {
2632 }
2633 #endif
2634 
2635 static inline
2636 void dp_rx_per_core_stats_update(struct dp_soc *soc, uint8_t ring_id,
2637 				 uint32_t bufs_reaped)
2638 {
2639 }
2640 
2641 static inline
2642 qdf_nbuf_t dp_rx_nbuf_alloc(struct dp_soc *soc,
2643 			    struct rx_desc_pool *rx_desc_pool)
2644 {
2645 	return qdf_nbuf_alloc_simple(soc->osdev, rx_desc_pool->buf_size,
2646 				     RX_BUFFER_RESERVATION,
2647 				     rx_desc_pool->buf_alignment, FALSE);
2648 }
2649 
2650 static inline
2651 void  dp_rx_nbuf_free(qdf_nbuf_t nbuf)
2652 {
2653 	qdf_nbuf_free_simple(nbuf);
2654 }
2655 #else
2656 static inline
2657 QDF_STATUS dp_pdev_rx_buffers_attach_simple(struct dp_soc *soc, uint32_t mac_id,
2658 					    struct dp_srng *rxdma_srng,
2659 					    struct rx_desc_pool *rx_desc_pool,
2660 					    uint32_t num_req_buffers)
2661 {
2662 	return dp_pdev_rx_buffers_attach(soc, mac_id,
2663 					 rxdma_srng,
2664 					 rx_desc_pool,
2665 					 num_req_buffers);
2666 }
2667 
2668 static inline
2669 void dp_rx_buffers_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
2670 				    struct dp_srng *rxdma_srng,
2671 				    struct rx_desc_pool *rx_desc_pool,
2672 				    uint32_t num_req_buffers,
2673 				    union dp_rx_desc_list_elem_t **desc_list,
2674 				    union dp_rx_desc_list_elem_t **tail)
2675 {
2676 	dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
2677 				num_req_buffers, desc_list, tail, false);
2678 }
2679 
2680 static inline
2681 void dp_rx_buffers_lt_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
2682 				       struct dp_srng *rxdma_srng,
2683 				       struct rx_desc_pool *rx_desc_pool,
2684 				       uint32_t num_req_buffers,
2685 				       union dp_rx_desc_list_elem_t **desc_list,
2686 				       union dp_rx_desc_list_elem_t **tail)
2687 {
2688 	dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
2689 				num_req_buffers, desc_list, tail, false);
2690 }
2691 
2692 static inline
2693 qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc,
2694 				      qdf_nbuf_t nbuf,
2695 				      uint32_t buf_size)
2696 {
2697 	return (qdf_dma_addr_t)NULL;
2698 }
2699 
2700 static inline
2701 qdf_dma_addr_t dp_rx_nbuf_sync(struct dp_soc *dp_soc,
2702 			       qdf_nbuf_t nbuf,
2703 			       uint32_t buf_size)
2704 {
2705 	return (qdf_dma_addr_t)NULL;
2706 }
2707 
2708 static inline
2709 void dp_rx_nbuf_unmap(struct dp_soc *soc,
2710 		      struct dp_rx_desc *rx_desc,
2711 		      uint8_t reo_ring_num)
2712 {
2713 	struct rx_desc_pool *rx_desc_pool;
2714 
2715 	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
2716 	dp_ipa_reo_ctx_buf_mapping_lock(soc, reo_ring_num);
2717 
2718 	dp_audio_smmu_unmap(soc->osdev,
2719 			    QDF_NBUF_CB_PADDR(rx_desc->nbuf),
2720 			    rx_desc_pool->buf_size);
2721 
2722 	dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf,
2723 					  rx_desc_pool->buf_size,
2724 					  false, __func__, __LINE__);
2725 
2726 	qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
2727 				     QDF_DMA_FROM_DEVICE,
2728 				     rx_desc_pool->buf_size);
2729 
2730 	dp_ipa_reo_ctx_buf_mapping_unlock(soc, reo_ring_num);
2731 }
2732 
2733 static inline
2734 void dp_rx_nbuf_unmap_pool(struct dp_soc *soc,
2735 			   struct rx_desc_pool *rx_desc_pool,
2736 			   qdf_nbuf_t nbuf)
2737 {
2738 	dp_audio_smmu_unmap(soc->osdev, QDF_NBUF_CB_PADDR(nbuf),
2739 			    rx_desc_pool->buf_size);
2740 	dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, rx_desc_pool->buf_size,
2741 					  false, __func__, __LINE__);
2742 	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_FROM_DEVICE,
2743 				     rx_desc_pool->buf_size);
2744 }
2745 
2746 static inline
2747 void dp_rx_per_core_stats_update(struct dp_soc *soc, uint8_t ring_id,
2748 				 uint32_t bufs_reaped)
2749 {
2750 	int cpu_id = qdf_get_cpu();
2751 
2752 	DP_STATS_INC(soc, rx.ring_packets[cpu_id][ring_id], bufs_reaped);
2753 }
2754 
2755 static inline
2756 qdf_nbuf_t dp_rx_nbuf_alloc(struct dp_soc *soc,
2757 			    struct rx_desc_pool *rx_desc_pool)
2758 {
2759 	return qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
2760 			      RX_BUFFER_RESERVATION,
2761 			      rx_desc_pool->buf_alignment, FALSE);
2762 }
2763 
2764 static inline
2765 void dp_rx_nbuf_free(qdf_nbuf_t nbuf)
2766 {
2767 	qdf_nbuf_free(nbuf);
2768 }
2769 #endif
2770 
2771 #ifdef DP_UMAC_HW_RESET_SUPPORT
2772 /**
2773  * dp_rx_desc_reuse() - Reuse the rx descriptors to fill the rx buf ring
2774  * @soc: core txrx main context
2775  * @nbuf_list: nbuf list for delayed free
2776  *
2777  * Return: void
2778  */
2779 void dp_rx_desc_reuse(struct dp_soc *soc, qdf_nbuf_t *nbuf_list);
2780 
2781 /**
2782  * dp_rx_desc_delayed_free() - Delayed free of the rx descs
2783  *
2784  * @soc: core txrx main context
2785  *
2786  * Return: void
2787  */
2788 void dp_rx_desc_delayed_free(struct dp_soc *soc);
2789 #endif
2790 
2791 /**
2792  * dp_rx_get_txrx_peer_and_vdev() - Get txrx peer and vdev from peer id
2793  * @soc: core txrx main context
2794  * @nbuf : pointer to the first msdu of an amsdu.
2795  * @peer_id : Peer id of the peer
2796  * @txrx_ref_handle : Buffer to save the handle for txrx peer's reference
2797  * @pkt_capture_offload : Flag indicating if pkt capture offload is needed
2798  * @vdev : Buffer to hold pointer to vdev
2799  * @rx_pdev : Buffer to hold pointer to rx pdev
2800  * @dsf : delay stats flag
2801  * @old_tid : Old tid
2802  *
2803  * Get txrx peer and vdev from peer id
2804  *
2805  * Return: Pointer to txrx peer
2806  */
2807 static inline struct dp_txrx_peer *
2808 dp_rx_get_txrx_peer_and_vdev(struct dp_soc *soc,
2809 			     qdf_nbuf_t nbuf,
2810 			     uint16_t peer_id,
2811 			     dp_txrx_ref_handle *txrx_ref_handle,
2812 			     bool pkt_capture_offload,
2813 			     struct dp_vdev **vdev,
2814 			     struct dp_pdev **rx_pdev,
2815 			     uint32_t *dsf,
2816 			     uint32_t *old_tid)
2817 {
2818 	struct dp_txrx_peer *txrx_peer = NULL;
2819 
2820 	txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id, txrx_ref_handle,
2821 					       DP_MOD_ID_RX);
2822 
2823 	if (qdf_likely(txrx_peer)) {
2824 		*vdev = txrx_peer->vdev;
2825 	} else {
2826 		nbuf->next = NULL;
2827 		dp_rx_deliver_to_pkt_capture_no_peer(soc, nbuf,
2828 						     pkt_capture_offload);
2829 		if (!pkt_capture_offload)
2830 			dp_rx_deliver_to_stack_no_peer(soc, nbuf);
2831 
2832 		goto end;
2833 	}
2834 
2835 	if (qdf_unlikely(!(*vdev))) {
2836 		qdf_nbuf_free(nbuf);
2837 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
2838 		goto end;
2839 	}
2840 
2841 	*rx_pdev = (*vdev)->pdev;
2842 	*dsf = (*rx_pdev)->delay_stats_flag;
2843 	*old_tid = 0xff;
2844 
2845 end:
2846 	return txrx_peer;
2847 }
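
/*
 * Example (illustrative sketch, not part of the header): typical use in the
 * per-msdu rx loop. peer_id, nbuf and pkt_capture_offload come from the ring
 * entry being processed. A NULL peer or vdev means the helper has already
 * consumed the nbuf (delivered without a peer or freed), so the caller moves
 * on; reference release and further error handling are omitted for brevity.
 *
 *	struct dp_txrx_peer *txrx_peer;
 *	dp_txrx_ref_handle txrx_ref_handle = NULL;
 *	struct dp_vdev *vdev = NULL;
 *	struct dp_pdev *rx_pdev = NULL;
 *	uint32_t dsf, old_tid;
 *
 *	txrx_peer = dp_rx_get_txrx_peer_and_vdev(soc, nbuf, peer_id,
 *						 &txrx_ref_handle,
 *						 pkt_capture_offload,
 *						 &vdev, &rx_pdev,
 *						 &dsf, &old_tid);
 *	if (!txrx_peer || !vdev)
 *		continue;
 */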
2848 
2849 static inline QDF_STATUS
2850 dp_peer_rx_reorder_queue_setup(struct dp_soc *soc, struct dp_peer *peer,
2851 			       int tid, uint32_t ba_window_size)
2852 {
2853 	return soc->arch_ops.dp_peer_rx_reorder_queue_setup(soc,
2854 							    peer, tid,
2855 							    ba_window_size);
2856 }
2857 
2858 static inline
2859 void dp_rx_nbuf_list_deliver(struct dp_soc *soc,
2860 			     struct dp_vdev *vdev,
2861 			     struct dp_txrx_peer *txrx_peer,
2862 			     uint16_t peer_id,
2863 			     uint8_t pkt_capture_offload,
2864 			     qdf_nbuf_t deliver_list_head,
2865 			     qdf_nbuf_t deliver_list_tail)
2866 {
2867 	qdf_nbuf_t nbuf, next;
2868 
2869 	if (qdf_likely(deliver_list_head)) {
2870 		if (qdf_likely(txrx_peer)) {
2871 			dp_rx_deliver_to_pkt_capture(soc, vdev->pdev, peer_id,
2872 						     pkt_capture_offload,
2873 						     deliver_list_head);
2874 			if (!pkt_capture_offload)
2875 				dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
2876 						       deliver_list_head,
2877 						       deliver_list_tail);
2878 		} else {
2879 			nbuf = deliver_list_head;
2880 			while (nbuf) {
2881 				next = nbuf->next;
2882 				nbuf->next = NULL;
2883 				dp_rx_deliver_to_stack_no_peer(soc, nbuf);
2884 				nbuf = next;
2885 			}
2886 		}
2887 	}
2888 }
2889 
2890 #ifdef DP_TX_RX_TPUT_SIMULATE
2891 /*
2892  * Change this macro value to simulate a different RX T-put. For example,
2893  * if OTA is 100 Mbps and 200 Mbps is to be simulated, the multiplication
2894  * factor is 2, so set the macro value to 1 (multiplication factor - 1).
2895  */
2896 #define DP_RX_PKTS_DUPLICATE_CNT 0
2897 static inline
2898 void dp_rx_nbuf_list_dup_deliver(struct dp_soc *soc,
2899 				 struct dp_vdev *vdev,
2900 				 struct dp_txrx_peer *txrx_peer,
2901 				 uint16_t peer_id,
2902 				 uint8_t pkt_capture_offload,
2903 				 qdf_nbuf_t ori_list_head,
2904 				 qdf_nbuf_t ori_list_tail)
2905 {
2906 	qdf_nbuf_t new_skb = NULL;
2907 	qdf_nbuf_t new_list_head = NULL;
2908 	qdf_nbuf_t new_list_tail = NULL;
2909 	qdf_nbuf_t nbuf = NULL;
2910 	int i;
2911 
2912 	for (i = 0; i < DP_RX_PKTS_DUPLICATE_CNT; i++) {
2913 		nbuf = ori_list_head;
2914 		new_list_head = NULL;
2915 		new_list_tail = NULL;
2916 
2917 		while (nbuf) {
2918 			new_skb = qdf_nbuf_copy(nbuf);
2919 			if (qdf_likely(new_skb))
2920 				DP_RX_LIST_APPEND(new_list_head,
2921 						  new_list_tail,
2922 						  new_skb);
2923 			else
2924 				dp_err("copy skb failed");
2925 
2926 			nbuf = qdf_nbuf_next(nbuf);
2927 		}
2928 
2929 		/* deliver the copied nbuf list */
2930 		dp_rx_nbuf_list_deliver(soc, vdev, txrx_peer, peer_id,
2931 					pkt_capture_offload,
2932 					new_list_head,
2933 					new_list_tail);
2934 	}
2935 
2936 	/* deliver the original skb_list */
2937 	dp_rx_nbuf_list_deliver(soc, vdev, txrx_peer, peer_id,
2938 				pkt_capture_offload,
2939 				ori_list_head,
2940 				ori_list_tail);
2941 }
2942 
2943 #define DP_RX_DELIVER_TO_STACK dp_rx_nbuf_list_dup_deliver
2944 
2945 #else /* !DP_TX_RX_TPUT_SIMULATE */
2946 
2947 #define DP_RX_DELIVER_TO_STACK dp_rx_nbuf_list_deliver
2948 
2949 #endif /* DP_TX_RX_TPUT_SIMULATE */
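
/*
 * Example (illustrative sketch, not part of the header): the rx processing
 * path hands its accumulated per-peer nbuf list to the stack through
 * DP_RX_DELIVER_TO_STACK, which resolves to the duplicating variant only
 * when DP_TX_RX_TPUT_SIMULATE is defined. The list head/tail names are
 * placeholders for the caller's delivery list.
 *
 *	DP_RX_DELIVER_TO_STACK(soc, vdev, txrx_peer, peer_id,
 *			       pkt_capture_offload,
 *			       deliver_list_head, deliver_list_tail);
 */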
2950 
2951 /**
2952  * dp_rx_wbm_desc_nbuf_sanity_check() - Add sanity check to catch WBM rx_desc
2953  *                                      paddr corruption
2954  * @soc: core txrx main context
2955  * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring
2956  * @ring_desc: REO ring descriptor
2957  * @rx_desc: Rx descriptor
2958  *
2959  * Return: NONE
2960  */
2961 QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
2962 					    hal_ring_handle_t hal_ring_hdl,
2963 					    hal_ring_desc_t ring_desc,
2964 					    struct dp_rx_desc *rx_desc);
2965 /**
2966  * dp_rx_is_sg_formation_required() - Check if sg formation is required
2967  * @info: WBM desc info
2968  *
2969  * Return: True if sg is required else false
2970  */
2971 bool dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info);
2972 
2973 /**
2974  * dp_rx_err_tlv_invalidate() - Invalidate network buffer
2975  * @soc: core txrx main context
2976  * @nbuf: Network buffer to invalidate
2977  *
2978  * Return: NONE
2979  */
2980 void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
2981 			      qdf_nbuf_t nbuf);
2982 
2983 /**
2984  * dp_rx_wbm_sg_list_last_msdu_war() - war for HW issue
2985  * @soc: DP SOC handle
2986  *
2987  * This is a war for HW issue where length is only valid in last msdu
2988  *
2989  * Return: NONE
2990  */
2991 void dp_rx_wbm_sg_list_last_msdu_war(struct dp_soc *soc);
2992 
2993 /**
2994  * dp_rx_check_pkt_len() - Check for pktlen validity
2995  * @soc: DP SOC context
2996  * @pkt_len: computed length of the pkt from caller in bytes
2997  *
2998  * Return: true if pktlen > RX_BUFFER_SIZE, else return false
2999  *
3000  */
3001 bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len);
3002 
3003 /**
3004  * dp_rx_null_q_handle_invalid_peer_id_exception() - to find exception
3005  * @soc: pointer to dp_soc struct
3006  * @pool_id: Pool id to find dp_pdev
3007  * @rx_tlv_hdr: TLV header of received packet
3008  * @nbuf: SKB
3009  *
3010  * In certain types of packets, if the peer_id is not correct, the
3011  * driver may not be able to find the peer. Try finding the peer by
3012  * addr_2 of the received MPDU. If the peer is found, then most likely
3013  * sw_peer_id & ast_idx are corrupted.
3014  *
3015  * Return: True if the peer is found by addr_2 of the received MPDU, else false
3016  */
3017 bool dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
3018 						   uint8_t pool_id,
3019 						   uint8_t *rx_tlv_hdr,
3020 						   qdf_nbuf_t nbuf);
3021 
3022 /**
3023  * dp_rx_err_drop_3addr_mcast() - Check if feature drop_3addr_mcast is enabled
3024  *                                If so, drop the multicast frame.
3025  * @vdev: datapath vdev
3026  * @rx_tlv_hdr: TLV header
3027  *
3028  * Return: true if packet is to be dropped,
3029  *         false, if packet is not dropped.
3030  */
3031 bool dp_rx_err_drop_3addr_mcast(struct dp_vdev *vdev, uint8_t *rx_tlv_hdr);
3032 
3033 /**
3034  * dp_rx_deliver_to_osif_stack() - function to deliver rx pkts to stack
3035  * @soc: DP soc
3036  * @vdev: DP vdev handle
3037  * @txrx_peer: pointer to the txrx_peer object
3038  * @nbuf: skb list head
3039  * @tail: skb list tail
3040  * @is_eapol: eapol pkt check
3041  *
3042  * Return: None
3043  */
3044 void
3045 dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
3046 			    struct dp_vdev *vdev,
3047 			    struct dp_txrx_peer *txrx_peer,
3048 			    qdf_nbuf_t nbuf,
3049 			    qdf_nbuf_t tail,
3050 			    bool is_eapol);
3051 
3052 /**
3053  * dp_rx_set_wbm_err_info_in_nbuf() - function to set wbm err info in nbuf
3054  * @soc: DP soc
3055  * @nbuf: skb list head
3056  * @wbm_err: wbm error info details
3057  *
3058  * Return: None
3059  */
3060 void
3061 dp_rx_set_wbm_err_info_in_nbuf(struct dp_soc *soc,
3062 			       qdf_nbuf_t nbuf,
3063 			       union hal_wbm_err_info_u wbm_err);
3064 
3065 #ifndef WLAN_SOFTUMAC_SUPPORT /* WLAN_SOFTUMAC_SUPPORT */
3066 /**
3067  * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
3068  *
3069  * @soc: core txrx main context
3070  * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
3071  * @ring_desc: opaque pointer to the RX ring descriptor
3072  * @rx_desc: host rx descriptor
3073  *
3074  * Return: void
3075  */
3076 void dp_rx_dump_info_and_assert(struct dp_soc *soc,
3077 				hal_ring_handle_t hal_ring_hdl,
3078 				hal_ring_desc_t ring_desc,
3079 				struct dp_rx_desc *rx_desc);
3080 
3081 /**
3082  * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
3083  *			      (WBM), following error handling
3084  *
3085  * @soc: core DP main context
3086  * @ring_desc: opaque pointer to the REO error ring descriptor
3087  * @bm_action: put to idle_list or release to msdu_list
3088  *
3089  * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
3090  */
3091 QDF_STATUS
3092 dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
3093 		       uint8_t bm_action);
3094 
3095 /**
3096  * dp_rx_link_desc_return_by_addr - Return a MPDU link descriptor to
3097  *					(WBM) by address
3098  *
3099  * @soc: core DP main context
3100  * @link_desc_addr: link descriptor addr
3101  * @bm_action: put to idle_list or release to msdu_list
3102  *
3103  * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
3104  */
3105 QDF_STATUS
3106 dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
3107 			       hal_buff_addrinfo_t link_desc_addr,
3108 			       uint8_t bm_action);
3109 
3110 /**
3111  * dp_rxdma_err_process() - RxDMA error processing functionality
3112  * @int_ctx: pointer to DP interrupt context
3113  * @soc: core txrx main context
3114  * @mac_id: mac id which is one of 3 mac_ids
3115  * @quota: No. of units (packets) that can be serviced in one shot.
3116  *
3117  * Return: num of buffers processed
3118  */
3119 uint32_t
3120 dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
3121 		     uint32_t mac_id, uint32_t quota);
3122 
3123 /**
3124  * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err
3125  *			       frames to OS or wifi parse errors.
3126  * @soc: core DP main context
3127  * @nbuf: buffer pointer
3128  * @rx_tlv_hdr: start of rx tlv header
3129  * @txrx_peer: peer reference
3130  * @err_code: rxdma err code
3131  * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
3132  * pool_id have the same mapping)
3133  * @link_id: link Id on which the packet is received
3134  *
3135  * Return: None
3136  */
3137 void
3138 dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
3139 			uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
3140 			uint8_t err_code, uint8_t mac_id, uint8_t link_id);
3141 
3142 /**
3143  * dp_rx_process_mic_error(): Function to pass mic error indication to umac
3144  * @soc: core DP main context
3145  * @nbuf: buffer pointer
3146  * @rx_tlv_hdr: start of rx tlv header
3147  * @txrx_peer: txrx peer handle
3148  *
3149  * Return: void
3150  */
3151 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
3152 			     uint8_t *rx_tlv_hdr,
3153 			     struct dp_txrx_peer *txrx_peer);
3154 
3155 /**
3156  * dp_2k_jump_handle() - Function to handle 2k jump exception
3157  *                        on WBM ring
3158  * @soc: core DP main context
3159  * @nbuf: buffer pointer
3160  * @rx_tlv_hdr: start of rx tlv header
3161  * @peer_id: peer id of first msdu
3162  * @tid: Tid for which exception occurred
3163  *
3164  * This function handles 2k jump violations arising out
3165  * of receiving aggregates in non BA case. This typically
3166  * may happen if aggregates are received on a QOS enabled TID
3167  * while Rx window size is still initialized to value of 2. Or
3168  * it may also happen if negotiated window size is 1 but peer
3169  * sends aggregates.
3170  */
3171 void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
3172 		       uint16_t peer_id, uint8_t tid);
3173 
3174 #ifndef QCA_HOST_MODE_WIFI_DISABLED
3175 
3176 /**
3177  * dp_rx_err_process() - Processes error frames routed to REO error ring
3178  * @int_ctx: pointer to DP interrupt context
3179  * @soc: core txrx main context
3180  * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be serviced
3181  * @quota: No. of units (packets) that can be serviced in one shot.
3182  *
3183  * This function implements error processing and top level demultiplexer
3184  * for all the frames routed to REO error ring.
3185  *
3186  * Return: uint32_t: No. of elements processed
3187  */
3188 uint32_t dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
3189 			   hal_ring_handle_t hal_ring_hdl, uint32_t quota);
3190 
3191 /**
3192  * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
3193  * @int_ctx: pointer to DP interrupt context
3194  * @soc: core txrx main context
3195  * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be
3196  *                serviced
3197  * @quota: No. of units (packets) that can be serviced in one shot.
3198  *
3199  * This function implements error processing and top level demultiplexer
3200  * for all the frames routed to WBM2HOST sw release ring.
3201  *
3202  * Return: uint32_t: No. of elements processed
3203  */
3204 uint32_t
3205 dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
3206 		      hal_ring_handle_t hal_ring_hdl, uint32_t quota);
3207 
3208 #ifdef QCA_OL_RX_LOCK_LESS_ACCESS
3209 /**
3210  * dp_rx_srng_access_start() - Wrapper function to log access start of a hal ring
3211  * @int_ctx: pointer to DP interrupt context
3212  * @soc: DP soc structure pointer
3213  * @hal_ring_hdl: HAL ring handle
3214  *
3215  * Return: 0 on success; error on failure
3216  */
3217 static inline int
3218 dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc,
3219 			hal_ring_handle_t hal_ring_hdl)
3220 {
3221 	return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl);
3222 }
3223 
3224 /**
3225  * dp_rx_srng_access_end()- Wrapper function to log access end of a hal ring
3226  * @int_ctx: pointer to DP interrupt context
3227  * @soc: DP soc structure pointer
3228  * @hal_ring_hdl: HAL ring handle
3229  *
3230  * Return: None
3231  */
3232 static inline void
3233 dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc,
3234 		      hal_ring_handle_t hal_ring_hdl)
3235 {
3236 	hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl);
3237 }
3238 #else
3239 static inline int
3240 dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc,
3241 			hal_ring_handle_t hal_ring_hdl)
3242 {
3243 	return dp_srng_access_start(int_ctx, soc, hal_ring_hdl);
3244 }
3245 
3246 static inline void
3247 dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc,
3248 		      hal_ring_handle_t hal_ring_hdl)
3249 {
3250 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
3251 }
3252 #endif
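
/*
 * Example (illustrative sketch, not part of the header): error-ring
 * processing brackets its ring walk with the wrappers above so that the
 * lock-less variant is picked up transparently when
 * QCA_OL_RX_LOCK_LESS_ACCESS is enabled.
 *
 *	if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl)))
 *		return 0;
 *
 *	... reap and process error ring entries ...
 *
 *	dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);
 */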
3253 
3254 #ifdef RX_DESC_SANITY_WAR
3255 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
3256 			     hal_ring_handle_t hal_ring_hdl,
3257 			     hal_ring_desc_t ring_desc,
3258 			     struct dp_rx_desc *rx_desc);
3259 #else
3260 static inline
3261 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
3262 			     hal_ring_handle_t hal_ring_hdl,
3263 			     hal_ring_desc_t ring_desc,
3264 			     struct dp_rx_desc *rx_desc)
3265 {
3266 	return QDF_STATUS_SUCCESS;
3267 }
3268 #endif
3269 
3270 #ifdef RX_DESC_DEBUG_CHECK
3271 /**
3272  * dp_rx_desc_nbuf_sanity_check - Add sanity check to catch REO rx_desc paddr
3273  *				  corruption
3274  * @soc: DP SoC context
3275  * @ring_desc: REO ring descriptor
3276  * @rx_desc: Rx descriptor
3277  *
3278  * Return: NONE
3279  */
3280 QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
3281 					hal_ring_desc_t ring_desc,
3282 					struct dp_rx_desc *rx_desc);
3283 #else
3284 static inline
3285 QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
3286 					hal_ring_desc_t ring_desc,
3287 					struct dp_rx_desc *rx_desc)
3288 {
3289 	return QDF_STATUS_SUCCESS;
3290 }
3291 #endif
3292 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
3293 
3294 /**
3295  * dp_rx_wbm_sg_list_reset() - Initialize sg list
3296  *
3297  * This api should be called at soc init and after every sg processing.
3298  * @soc: DP SOC handle
3299  */
3300 static inline void dp_rx_wbm_sg_list_reset(struct dp_soc *soc)
3301 {
3302 	if (soc) {
3303 		soc->wbm_sg_param.wbm_is_first_msdu_in_sg = false;
3304 		soc->wbm_sg_param.wbm_sg_nbuf_head = NULL;
3305 		soc->wbm_sg_param.wbm_sg_nbuf_tail = NULL;
3306 		soc->wbm_sg_param.wbm_sg_desc_msdu_len = 0;
3307 	}
3308 }
3309 
3310 /**
3311  * dp_rx_wbm_sg_list_deinit() - De-initialize sg list
3312  *
3313  * This api should be called in the down path, to avoid any leak.
3314  * @soc: DP SOC handle
3315  */
3316 static inline void dp_rx_wbm_sg_list_deinit(struct dp_soc *soc)
3317 {
3318 	if (soc) {
3319 		if (soc->wbm_sg_param.wbm_sg_nbuf_head)
3320 			qdf_nbuf_list_free(soc->wbm_sg_param.wbm_sg_nbuf_head);
3321 
3322 		dp_rx_wbm_sg_list_reset(soc);
3323 	}
3324 }
3325 
3326 /**
3327  * dp_rx_link_desc_refill_duplicate_check() - check if link desc duplicate
3328  *					      to refill
3329  * @soc: DP SOC handle
3330  * @buf_info: the last link desc buf info
3331  * @ring_buf_info: current buf address pointer including link desc
3332  *
3333  * Return: none.
3334  */
3335 void dp_rx_link_desc_refill_duplicate_check(
3336 				struct dp_soc *soc,
3337 				struct hal_buf_info *buf_info,
3338 				hal_buff_addrinfo_t ring_buf_info);
3339 /**
3340  * dp_rx_srng_get_num_pending() - get number of pending entries
3341  * @hal_soc: hal soc opaque pointer
3342  * @hal_ring_hdl: opaque pointer to the HAL Rx Ring
3343  * @num_entries: number of entries in the hal_ring.
3344  * @near_full: pointer to a boolean. This is set if ring is near full.
3345  *
3346  * The function returns the number of entries in a destination ring which are
3347  * yet to be reaped. The function also checks if the ring is near full.
3348  * If more than half of the ring needs to be reaped, the ring is considered
3349  * approaching full.
3350  * The function uses hal_srng_dst_num_valid_locked to get the number of valid
3351  * entries. It should not be called within a SRNG lock. HW pointer value is
3352  * synced into cached_hp.
3353  *
3354  * Return: Number of pending entries if any
3355  */
3356 uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
3357 				    hal_ring_handle_t hal_ring_hdl,
3358 				    uint32_t num_entries,
3359 				    bool *near_full);
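
/*
 * Example (illustrative sketch, not part of the header): a caller outside the
 * SRNG lock can use the helper above to decide whether to keep servicing a
 * ring; near_full can then be used to request more aggressive scheduling.
 *
 *	bool near_full = false;
 *	uint32_t pending;
 *
 *	pending = dp_rx_srng_get_num_pending(hal_soc, hal_ring_hdl,
 *					     num_entries, &near_full);
 *	if (pending && near_full)
 *		... reschedule rx processing at higher priority ...
 */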
3360 
3361 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
3362 /**
3363  * dp_rx_ring_record_entry() - Record an entry into the rx ring history.
3364  * @soc: Datapath soc structure
3365  * @ring_num: REO ring number
3366  * @ring_desc: REO ring descriptor
3367  *
3368  * Return: None
3369  */
3370 void dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
3371 			     hal_ring_desc_t ring_desc);
3372 #else
3373 static inline void
3374 dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
3375 			hal_ring_desc_t ring_desc)
3376 {
3377 }
3378 #endif
3379 
3380 #ifdef QCA_SUPPORT_WDS_EXTENDED
3381 /**
3382  * dp_rx_is_list_ready() - Make different lists for 4-address
3383  *			   and 3-address frames
3384  * @nbuf_head: skb list head
3385  * @vdev: vdev
3386  * @txrx_peer : txrx_peer
3387  * @peer_id: peer id of new received frame
3388  * @vdev_id: vdev_id of new received frame
3389  *
3390  * Return: true if peer_ids are different.
3391  */
3392 static inline bool
3393 dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
3394 		    struct dp_vdev *vdev,
3395 		    struct dp_txrx_peer *txrx_peer,
3396 		    uint16_t peer_id,
3397 		    uint8_t vdev_id)
3398 {
3399 	if (nbuf_head && txrx_peer && txrx_peer->peer_id != peer_id)
3400 		return true;
3401 
3402 	return false;
3403 }
3404 #else
3405 static inline bool
3406 dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
3407 		    struct dp_vdev *vdev,
3408 		    struct dp_txrx_peer *txrx_peer,
3409 		    uint16_t peer_id,
3410 		    uint8_t vdev_id)
3411 {
3412 	if (nbuf_head && vdev && (vdev->vdev_id != vdev_id))
3413 		return true;
3414 
3415 	return false;
3416 }
3417 #endif
3418 
3419 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
3420 /**
3421  * dp_rx_mark_first_packet_after_wow_wakeup - get first packet after wow wakeup
3422  * @pdev: pointer to dp_pdev structure
3423  * @rx_tlv: pointer to rx_pkt_tlvs structure
3424  * @nbuf: pointer to skb buffer
3425  *
3426  * Return: None
3427  */
3428 void dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev,
3429 					      uint8_t *rx_tlv,
3430 					      qdf_nbuf_t nbuf);
3431 #else
3432 static inline void
3433 dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev,
3434 					 uint8_t *rx_tlv,
3435 					 qdf_nbuf_t nbuf)
3436 {
3437 }
3438 #endif
3439 
3440 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
3441 static inline uint8_t
3442 dp_rx_get_defrag_bm_id(struct dp_soc *soc)
3443 {
3444 	return DP_DEFRAG_RBM(soc->wbm_sw0_bm_id);
3445 }
3446 
3447 static inline uint8_t
3448 dp_rx_get_rx_bm_id(struct dp_soc *soc)
3449 {
3450 	return DP_WBM2SW_RBM(soc->wbm_sw0_bm_id);
3451 }
3452 #else
3453 static inline uint8_t
3454 dp_rx_get_rx_bm_id(struct dp_soc *soc)
3455 {
3456 	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
3457 	uint8_t wbm2_sw_rx_rel_ring_id;
3458 
3459 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
3460 
3461 	return HAL_RX_BUF_RBM_SW_BM(soc->wbm_sw0_bm_id,
3462 				    wbm2_sw_rx_rel_ring_id);
3463 }
3464 
3465 static inline uint8_t
3466 dp_rx_get_defrag_bm_id(struct dp_soc *soc)
3467 {
3468 	return dp_rx_get_rx_bm_id(soc);
3469 }
3470 #endif
3471 
3472 #else
3473 static inline QDF_STATUS
3474 dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
3475 			       hal_buff_addrinfo_t link_desc_addr,
3476 			       uint8_t bm_action)
3477 {
3478 	return QDF_STATUS_SUCCESS;
3479 }
3480 
3481 static inline void dp_rx_wbm_sg_list_reset(struct dp_soc *soc)
3482 {
3483 }
3484 
3485 static inline void dp_rx_wbm_sg_list_deinit(struct dp_soc *soc)
3486 {
3487 }
3488 
3489 static inline uint8_t
3490 dp_rx_get_defrag_bm_id(struct dp_soc *soc)
3491 {
3492 	return 0;
3493 }
3494 
3495 static inline uint8_t
3496 dp_rx_get_rx_bm_id(struct dp_soc *soc)
3497 {
3498 	return 0;
3499 }
3500 #endif /* WLAN_SOFTUMAC_SUPPORT */
3501 
3502 #ifndef CONFIG_NBUF_AP_PLATFORM
3503 static inline uint8_t
3504 dp_rx_get_stats_arr_idx_from_link_id(qdf_nbuf_t nbuf,
3505 				     struct dp_txrx_peer *txrx_peer)
3506 {
3507 	return 0;
3508 }
3509 #else
3510 static inline uint8_t
3511 dp_rx_get_stats_arr_idx_from_link_id(qdf_nbuf_t nbuf,
3512 				     struct dp_txrx_peer *txrx_peer)
3513 {
3514 	uint8_t link_id = 0;
3515 
3516 	link_id = (QDF_NBUF_CB_RX_HW_LINK_ID(nbuf) + 1);
3517 	if (link_id > DP_MAX_MLO_LINKS) {
3518 		link_id = 0;
3519 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
3520 					  rx.inval_link_id_pkt_cnt,
3521 					  1, link_id);
3522 	}
3523 
3524 	return link_id;
3525 }
3526 #endif /* CONFIG_NBUF_AP_PLATFORM */
3527 
3528 #endif /* _DP_RX_H */
3529