xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.h (revision 8cfe6b10058a04cafb17eed051f2ddf11bee8931)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef _DP_RX_H
21 #define _DP_RX_H
22 
23 #include "hal_rx.h"
24 #include "dp_peer.h"
25 #include "dp_internal.h"
26 #include <qdf_tracepoint.h>
27 #include "dp_ipa.h"
28 
29 #ifdef RXDMA_OPTIMIZATION
30 #ifndef RX_DATA_BUFFER_ALIGNMENT
31 #define RX_DATA_BUFFER_ALIGNMENT        128
32 #endif
33 #ifndef RX_MONITOR_BUFFER_ALIGNMENT
34 #define RX_MONITOR_BUFFER_ALIGNMENT     128
35 #endif
36 #else /* RXDMA_OPTIMIZATION */
37 #define RX_DATA_BUFFER_ALIGNMENT        4
38 #define RX_MONITOR_BUFFER_ALIGNMENT     4
39 #endif /* RXDMA_OPTIMIZATION */
40 
41 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
42 #define DP_WBM2SW_RBM(sw0_bm_id)	HAL_RX_BUF_RBM_SW1_BM(sw0_bm_id)
43 /* RBM value used for re-injecting defragmented packets into REO */
44 #define DP_DEFRAG_RBM(sw0_bm_id)	HAL_RX_BUF_RBM_SW3_BM(sw0_bm_id)
45 #endif
46 
47 /* Max buffers in invalid peer SG list */
48 #define DP_MAX_INVALID_BUFFERS 10
49 #ifdef DP_INVALID_PEER_ASSERT
50 #define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \
51 		do {                                \
52 			qdf_assert_always(!(head)); \
53 			qdf_assert_always(!(tail)); \
54 		} while (0)
55 #else
56 #define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */
57 #endif
58 
59 #define RX_BUFFER_RESERVATION   0
60 #ifdef BE_PKTLOG_SUPPORT
61 #define BUFFER_RESIDUE 1
62 #define RX_MON_MIN_HEAD_ROOM   64
63 #endif
64 
65 #define DP_DEFAULT_NOISEFLOOR	(-96)
66 
67 #define DP_RX_DESC_MAGIC 0xdec0de
68 
69 #define dp_rx_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX, params)
70 #define dp_rx_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX, params)
71 #define dp_rx_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX, params)
72 #define dp_rx_info(params...) \
73 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX, ## params)
74 #define dp_rx_info_rl(params...) \
75 	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX, ## params)
76 #define dp_rx_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX, params)
77 #define dp_rx_err_err(params...) \
78 	QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX_ERROR, params)
79 
80 /**
81  * enum dp_rx_desc_state
82  *
83  * @RX_DESC_REPLENISHED: rx desc replenished
84  * @RX_DESC_IN_FREELIST: rx desc in freelist
85  */
86 enum dp_rx_desc_state {
87 	RX_DESC_REPLENISHED,
88 	RX_DESC_IN_FREELIST,
89 };
90 
91 #ifndef QCA_HOST_MODE_WIFI_DISABLED
92 /**
93  * struct dp_rx_desc_dbg_info
94  *
95  * @freelist_caller: name of the function that put the
96  *  rx desc in the freelist
97  * @freelist_ts: timestamp when the rx desc is put in
98  *  a freelist
99  * @replenish_caller: name of the function that last
100  *  replenished the rx desc
101  * @replenish_ts: last replenish timestamp
102  * @prev_nbuf: previous nbuf info
103  * @prev_nbuf_data_addr: previous nbuf data address
104  */
105 struct dp_rx_desc_dbg_info {
106 	char freelist_caller[QDF_MEM_FUNC_NAME_SIZE];
107 	uint64_t freelist_ts;
108 	char replenish_caller[QDF_MEM_FUNC_NAME_SIZE];
109 	uint64_t replenish_ts;
110 	qdf_nbuf_t prev_nbuf;
111 	uint8_t *prev_nbuf_data_addr;
112 };
113 
114 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
115 
116 /**
117  * struct dp_rx_desc
118  *
119  * @nbuf:		VA of the "skb" posted
120  * @rx_buf_start:	VA of the original Rx buffer, before
121  *			movement of any skb->data pointer
122  * @paddr_buf_start:	PA of the original Rx buffer, before
123  *                      movement of any frag pointer
124  * @cookie:		index into the sw array which holds
125  *			the sw Rx descriptors
126  *			Cookie space is 21 bits:
127  *			lower 18 bits -- index
128  *			upper  3 bits -- pool_id
129  * @pool_id:		pool Id for which this is allocated.
130  *			Can only be used if there is no flow
131  *			steering
132  * @chip_id:		chip_id indicating the MLO chip id;
133  *			valid/used only in case of multi-chip MLO
134  * @magic:		magic value used to validate the rx descriptor
135  * @nbuf_data_addr:	VA of nbuf data posted
136  * @dbg_info:		pointer to rx descriptor debug info
137  * @in_use:		rx_desc is in use
138  * @unmapped:		used to mark rx_desc as unmapped if the corresponding
139  *			nbuf is already unmapped
140  * @in_err_state:	Nbuf sanity failed for this descriptor.
141  */
142 struct dp_rx_desc {
143 	qdf_nbuf_t nbuf;
144 	uint8_t *rx_buf_start;
145 	qdf_dma_addr_t paddr_buf_start;
146 	uint32_t cookie;
147 	uint8_t	 pool_id;
148 	uint8_t chip_id;
149 #ifdef RX_DESC_DEBUG_CHECK
150 	uint32_t magic;
151 	uint8_t *nbuf_data_addr;
152 	struct dp_rx_desc_dbg_info *dbg_info;
153 #endif
154 	uint8_t	in_use:1,
155 		unmapped:1,
156 		in_err_state:1;
157 };
158 
159 #ifndef QCA_HOST_MODE_WIFI_DISABLED
160 #ifdef ATH_RX_PRI_SAVE
161 #define DP_RX_TID_SAVE(_nbuf, _tid) \
162 	(qdf_nbuf_set_priority(_nbuf, _tid))
163 #else
164 #define DP_RX_TID_SAVE(_nbuf, _tid)
165 #endif
166 
167 /* RX Descriptor Multi Page memory alloc related */
168 #define DP_RX_DESC_OFFSET_NUM_BITS 8
169 #define DP_RX_DESC_PAGE_ID_NUM_BITS 8
170 #define DP_RX_DESC_POOL_ID_NUM_BITS 4
171 
172 #define DP_RX_DESC_PAGE_ID_SHIFT DP_RX_DESC_OFFSET_NUM_BITS
173 #define DP_RX_DESC_POOL_ID_SHIFT \
174 		(DP_RX_DESC_OFFSET_NUM_BITS + DP_RX_DESC_PAGE_ID_NUM_BITS)
175 #define RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK \
176 	(((1 << DP_RX_DESC_POOL_ID_NUM_BITS) - 1) << DP_RX_DESC_POOL_ID_SHIFT)
177 #define RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK	\
178 			(((1 << DP_RX_DESC_PAGE_ID_NUM_BITS) - 1) << \
179 			 DP_RX_DESC_PAGE_ID_SHIFT)
180 #define RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK \
181 			((1 << DP_RX_DESC_OFFSET_NUM_BITS) - 1)
182 #define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(_cookie)		\
183 	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK) >>	\
184 			DP_RX_DESC_POOL_ID_SHIFT)
185 #define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(_cookie)		\
186 	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK) >>	\
187 			DP_RX_DESC_PAGE_ID_SHIFT)
188 #define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(_cookie)		\
189 	((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK)
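/*
 * Illustrative example (not driver code, hypothetical values): a multi-page
 * cookie packs pool_id, page_id and offset into one 20-bit value. For
 * cookie = 0x21005 (pool_id 2, page_id 16, offset 5):
 *
 *	uint32_t cookie = (2 << DP_RX_DESC_POOL_ID_SHIFT) |
 *			  (16 << DP_RX_DESC_PAGE_ID_SHIFT) | 5;
 *	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
 *	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
 *	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
 *
 * yields pool_id = 2, page_id = 16 and offset = 5.
 */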
190 
191 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
192 
193 #define RX_DESC_COOKIE_INDEX_SHIFT		0
194 #define RX_DESC_COOKIE_INDEX_MASK		0x3ffff /* 18 bits */
195 #define RX_DESC_COOKIE_POOL_ID_SHIFT		18
196 #define RX_DESC_COOKIE_POOL_ID_MASK		0x1c0000
197 
198 #define DP_RX_DESC_COOKIE_MAX	\
199 	(RX_DESC_COOKIE_INDEX_MASK | RX_DESC_COOKIE_POOL_ID_MASK)
200 
201 #define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie)		\
202 	(((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >>	\
203 			RX_DESC_COOKIE_POOL_ID_SHIFT)
204 
205 #define DP_RX_DESC_COOKIE_INDEX_GET(_cookie)		\
206 	(((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >>	\
207 			RX_DESC_COOKIE_INDEX_SHIFT)
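/*
 * Illustrative example (not driver code, hypothetical value): in this
 * legacy cookie format the lower 18 bits are the index and the next
 * 3 bits are the pool id. For cookie = 0x4002a:
 *
 *	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(0x4002a);
 *	uint32_t index = DP_RX_DESC_COOKIE_INDEX_GET(0x4002a);
 *
 * yields pool_id = 1 and index = 0x2a.
 */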
208 
209 #define dp_rx_add_to_free_desc_list(head, tail, new) \
210 	__dp_rx_add_to_free_desc_list(head, tail, new, __func__)
211 
212 #define dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
213 				num_buffers, desc_list, tail, req_only) \
214 	__dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
215 				  num_buffers, desc_list, tail, req_only, \
216 				  __func__)
217 
218 #ifdef WLAN_SUPPORT_RX_FISA
219 /**
220  * dp_rx_set_hdr_pad() - set l3 padding in nbuf cb
221  * @nbuf: pkt skb pointer
222  * @l3_padding: l3 padding
223  *
224  * Return: None
225  */
226 static inline
227 void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
228 {
229 	QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
230 }
231 #else
232 static inline
233 void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
234 {
235 }
236 #endif
237 
238 #ifdef DP_RX_SPECIAL_FRAME_NEED
239 /**
240  * dp_rx_is_special_frame() - check whether the RX frame is a wanted special frame
241  *
242  * @nbuf: RX skb pointer
243  * @frame_mask: the mask for special frame needed
244  *
245  * Check whether the RX frame type matches any type set in the mask
246  *
247  * Return: true - special frame needed, false - not
248  */
249 static inline
250 bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
251 {
252 	if (((frame_mask & FRAME_MASK_IPV4_ARP) &&
253 	     qdf_nbuf_is_ipv4_arp_pkt(nbuf)) ||
254 	    ((frame_mask & FRAME_MASK_IPV4_DHCP) &&
255 	     qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) ||
256 	    ((frame_mask & FRAME_MASK_IPV4_EAPOL) &&
257 	     qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) ||
258 	    ((frame_mask & FRAME_MASK_IPV6_DHCP) &&
259 	     qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))
260 		return true;
261 
262 	return false;
263 }
264 
265 /**
266  * dp_rx_deliver_special_frame() - Deliver the RX special frame to stack
267  *				   if it matches the mask
268  *
269  * @soc: Datapath soc handler
270  * @peer: pointer to DP peer
271  * @nbuf: pointer to the skb of RX frame
272  * @frame_mask: the mask for special frame needed
273  * @rx_tlv_hdr: start of rx tlv header
274  *
275  * note: Msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and
276  * single nbuf is expected.
277  *
278  * Return: true - nbuf has been delivered to stack, false - not.
279  */
280 bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_txrx_peer *peer,
281 				 qdf_nbuf_t nbuf, uint32_t frame_mask,
282 				 uint8_t *rx_tlv_hdr);
283 #else
284 static inline
285 bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
286 {
287 	return false;
288 }
289 
290 static inline
291 bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_txrx_peer *peer,
292 				 qdf_nbuf_t nbuf, uint32_t frame_mask,
293 				 uint8_t *rx_tlv_hdr)
294 {
295 	return false;
296 }
297 #endif
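/*
 * Illustrative usage sketch (hypothetical caller context, not a verbatim
 * copy of driver code): a caller that has already stored the msdu length
 * in QDF_NBUF_CB_RX_PKT_LEN(nbuf) can try to hand special frames to the
 * stack before regular processing:
 *
 *	uint32_t mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_EAPOL;
 *
 *	if (dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, mask,
 *					rx_tlv_hdr))
 *		return;		(nbuf was consumed by the stack)
 *	(otherwise continue with regular rx processing)
 */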
298 
299 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
300 /**
301  * dp_rx_data_is_specific() - Used to exclude specific frames
302  *                            that are not suitable for deriving rx
303  *                            stats like rate, mcs, nss, etc.
304  *
305  * @hal_soc_hdl: soc handler
306  * @rx_tlv_hdr: rx tlv header
307  * @nbuf: RX skb pointer
308  *
309  * Return: true - a specific frame  not suitable
310  *                for getting rx stats from it.
311  *         false - a common frame suitable for
312  *                 getting rx stats from it.
313  */
314 static inline
315 bool dp_rx_data_is_specific(hal_soc_handle_t hal_soc_hdl,
316 			    uint8_t *rx_tlv_hdr,
317 			    qdf_nbuf_t nbuf)
318 {
319 	if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf)))
320 		return true;
321 
322 	if (!hal_rx_tlv_first_mpdu_get(hal_soc_hdl, rx_tlv_hdr))
323 		return true;
324 
325 	if (!hal_rx_msdu_end_first_msdu_get(hal_soc_hdl, rx_tlv_hdr))
326 		return true;
327 
328 	/* ARP and EAPOL are neither IPV4 ETH nor IPV6 ETH type at the L3 level */
329 	if (qdf_likely(hal_rx_tlv_l3_type_get(hal_soc_hdl, rx_tlv_hdr) ==
330 	    QDF_NBUF_TRAC_IPV4_ETH_TYPE)) {
331 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
332 			return true;
333 	} else if (qdf_likely(hal_rx_tlv_l3_type_get(hal_soc_hdl, rx_tlv_hdr) ==
334 		   QDF_NBUF_TRAC_IPV6_ETH_TYPE)) {
335 		if (qdf_nbuf_is_ipv6_dhcp_pkt(nbuf))
336 			return true;
337 	} else {
338 		return true;
339 	}
340 	return false;
341 }
342 #else
343 static inline
344 bool dp_rx_data_is_specific(hal_soc_handle_t hal_soc_hdl,
345 			    uint8_t *rx_tlv_hdr,
346 			    qdf_nbuf_t nbuf)
347 
348 {
349 	/*
350 	 * default return is true to make sure that rx stats
351 	 * will not be handled when this feature is disabled
352 	 */
353 	return true;
354 }
355 #endif /* FEATURE_RX_LINKSPEED_ROAM_TRIGGER */
356 
357 #ifndef QCA_HOST_MODE_WIFI_DISABLED
358 #ifdef DP_RX_DISABLE_NDI_MDNS_FORWARDING
359 static inline
360 bool dp_rx_check_ndi_mdns_fwding(struct dp_txrx_peer *ta_txrx_peer,
361 				 qdf_nbuf_t nbuf)
362 {
363 	if (ta_txrx_peer->vdev->opmode == wlan_op_mode_ndi &&
364 	    qdf_nbuf_is_ipv6_mdns_pkt(nbuf)) {
365 		DP_PEER_PER_PKT_STATS_INC(ta_txrx_peer,
366 					  rx.intra_bss.mdns_no_fwd, 1);
367 		return false;
368 	}
369 	return true;
370 }
371 #else
372 static inline
373 bool dp_rx_check_ndi_mdns_fwding(struct dp_txrx_peer *ta_txrx_peer,
374 				 qdf_nbuf_t nbuf)
375 {
376 	return true;
377 }
378 #endif
379 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
380 
381 /* DOC: Offset to obtain LLC hdr
382  *
383  * In the case of a Wifi parse error,
384  * to reach the LLC header from the beginning
385  * of the VLAN tag we need to skip 8 bytes:
386  * VLAN tag(4) + length(2) + length added
387  * by HW(2) = 8 bytes.
388  */
389 #define DP_SKIP_VLAN		8
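/*
 * Illustrative use (hypothetical local variable): with the data pointer
 * positioned at the start of the VLAN tag, the LLC header would be reached as
 *
 *	llc_hdr = qdf_nbuf_data(nbuf) + DP_SKIP_VLAN;
 */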
390 
391 #ifndef QCA_HOST_MODE_WIFI_DISABLED
392 
393 /**
394  * struct dp_rx_cached_buf - rx cached buffer
395  * @node: linked list node
396  * @buf: skb buffer
397  */
398 struct dp_rx_cached_buf {
399 	qdf_list_node_t node;
400 	qdf_nbuf_t buf;
401 };
402 
403 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
404 
405 /**
406  * dp_rx_xor_block() - xor block of data
407  * @b: destination data block
408  * @a: source data block
409  * @len: length of the data to process
410  *
411  * Return: None
412  */
413 static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len)
414 {
415 	qdf_size_t i;
416 
417 	for (i = 0; i < len; i++)
418 		b[i] ^= a[i];
419 }
420 
421 /**
422  * dp_rx_rotl() - rotate the bits left
423  * @val: unsigned integer input value
424  * @bits: number of bits
425  *
426  * Return: Integer with left rotated by number of 'bits'
427  */
428 static inline uint32_t dp_rx_rotl(uint32_t val, int bits)
429 {
430 	return (val << bits) | (val >> (32 - bits));
431 }
432 
433 /**
434  * dp_rx_rotr() - rotate the bits right
435  * @val: unsigned integer input value
436  * @bits: number of bits
437  *
438  * Return: Integer with right rotated by number of 'bits'
439  */
440 static inline uint32_t dp_rx_rotr(uint32_t val, int bits)
441 {
442 	return (val >> bits) | (val << (32 - bits));
443 }
444 
445 /**
446  * dp_set_rx_queue() - set queue_mapping in skb
447  * @nbuf: skb
448  * @queue_id: rx queue_id
449  *
450  * Return: void
451  */
452 #ifdef QCA_OL_RX_MULTIQ_SUPPORT
453 static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
454 {
455 	qdf_nbuf_record_rx_queue(nbuf, queue_id);
456 	return;
457 }
458 #else
459 static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
460 {
461 }
462 #endif
463 
464 /**
465  * dp_rx_xswap() - swap the adjacent bytes within each 16-bit half
466  * @val: unsigned integer input value
467  *
468  * Return: Integer with adjacent bytes swapped
469  */
470 static inline uint32_t dp_rx_xswap(uint32_t val)
471 {
472 	return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
473 }
474 
475 /**
476  * dp_rx_get_le32_split() - get little endian 32 bits split
477  * @b0: byte 0
478  * @b1: byte 1
479  * @b2: byte 2
480  * @b3: byte 3
481  *
482  * Return: Integer with split little endian 32 bits
483  */
484 static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2,
485 					uint8_t b3)
486 {
487 	return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
488 }
489 
490 /**
491  * dp_rx_get_le32() - get little endian 32 bits
492  * @p: source byte array (4 bytes, little endian)
493  *
494  * Return: Integer with little endian 32 bits
495  */
496 static inline uint32_t dp_rx_get_le32(const uint8_t *p)
497 {
498 	return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]);
499 }
500 
501 /**
502  * dp_rx_put_le32() - put little endian 32 bits
503  * @p: destination char array
504  * @v: source 32-bit integer
505  *
506  * Return: None
507  */
508 static inline void dp_rx_put_le32(uint8_t *p, uint32_t v)
509 {
510 	p[0] = (v) & 0xff;
511 	p[1] = (v >> 8) & 0xff;
512 	p[2] = (v >> 16) & 0xff;
513 	p[3] = (v >> 24) & 0xff;
514 }
515 
516 /* Michael MIC block function: mix one block of data into (l, r) */
517 #define dp_rx_michael_block(l, r)	\
518 	do {					\
519 		r ^= dp_rx_rotl(l, 17);	\
520 		l += r;				\
521 		r ^= dp_rx_xswap(l);		\
522 		l += r;				\
523 		r ^= dp_rx_rotl(l, 3);	\
524 		l += r;				\
525 		r ^= dp_rx_rotr(l, 2);	\
526 		l += r;				\
527 	} while (0)
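/*
 * Illustrative example (hypothetical buffers, not driver code): the helpers
 * above implement one mixing round of the Michael MIC. A typical round
 * loads the key into the (l, r) state, xors in a 4-byte data block and
 * then runs the block function:
 *
 *	uint32_t l = dp_rx_get_le32(&key[0]);
 *	uint32_t r = dp_rx_get_le32(&key[4]);
 *
 *	l ^= dp_rx_get_le32(data);
 *	dp_rx_michael_block(l, r);
 */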
528 
529 /**
530  * union dp_rx_desc_list_elem_t
531  *
532  * @next: Next pointer to form free list
533  * @rx_desc: DP Rx descriptor
534  */
535 union dp_rx_desc_list_elem_t {
536 	union dp_rx_desc_list_elem_t *next;
537 	struct dp_rx_desc rx_desc;
538 };
539 
540 #ifdef RX_DESC_MULTI_PAGE_ALLOC
541 /**
542  * dp_rx_desc_find() - find dp rx descriptor from page ID and offset
543  * @page_id: Page ID
544  * @offset: Offset of the descriptor element
545  * @rx_pool: RX pool
546  *
547  * Return: RX descriptor element
548  */
549 union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
550 					      struct rx_desc_pool *rx_pool);
551 
552 static inline
553 struct dp_rx_desc *dp_get_rx_desc_from_cookie(struct dp_soc *soc,
554 					      struct rx_desc_pool *pool,
555 					      uint32_t cookie)
556 {
557 	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
558 	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
559 	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
560 	struct rx_desc_pool *rx_desc_pool;
561 	union dp_rx_desc_list_elem_t *rx_desc_elem;
562 
563 	if (qdf_unlikely(pool_id >= MAX_PDEV_CNT))
564 		return NULL;
565 
566 	rx_desc_pool = &pool[pool_id];
567 	rx_desc_elem = (union dp_rx_desc_list_elem_t *)
568 		(rx_desc_pool->desc_pages.cacheable_pages[page_id] +
569 		rx_desc_pool->elem_size * offset);
570 
571 	return &rx_desc_elem->rx_desc;
572 }
573 
574 static inline
575 struct dp_rx_desc *dp_get_rx_mon_status_desc_from_cookie(struct dp_soc *soc,
576 							 struct rx_desc_pool *pool,
577 							 uint32_t cookie)
578 {
579 	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
580 	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
581 	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
582 	struct rx_desc_pool *rx_desc_pool;
583 	union dp_rx_desc_list_elem_t *rx_desc_elem;
584 
585 	if (qdf_unlikely(pool_id >= NUM_RXDMA_RINGS_PER_PDEV))
586 		return NULL;
587 
588 	rx_desc_pool = &pool[pool_id];
589 	rx_desc_elem = (union dp_rx_desc_list_elem_t *)
590 		(rx_desc_pool->desc_pages.cacheable_pages[page_id] +
591 		rx_desc_pool->elem_size * offset);
592 
593 	return &rx_desc_elem->rx_desc;
594 }
595 
596 /**
597  * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
598  *			 the Rx descriptor on Rx DMA source ring buffer
599  * @soc: core txrx main context
600  * @cookie: cookie used to lookup virtual address
601  *
602  * Return: Pointer to the Rx descriptor
603  */
604 static inline
605 struct dp_rx_desc *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc,
606 					       uint32_t cookie)
607 {
608 	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_buf[0], cookie);
609 }
610 
611 /**
612  * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
613  *			 the Rx descriptor on monitor ring buffer
614  * @soc: core txrx main context
615  * @cookie: cookie used to lookup virtual address
616  *
617  * Return: Pointer to the Rx descriptor
618  */
619 static inline
620 struct dp_rx_desc *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc,
621 					     uint32_t cookie)
622 {
623 	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_mon[0], cookie);
624 }
625 
626 /**
627  * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
628  *			 the Rx descriptor on monitor status ring buffer
629  * @soc: core txrx main context
630  * @cookie: cookie used to lookup virtual address
631  *
632  * Return: Pointer to the Rx descriptor
633  */
634 static inline
635 struct dp_rx_desc *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc,
636 						uint32_t cookie)
637 {
638 	return dp_get_rx_mon_status_desc_from_cookie(soc,
639 						     &soc->rx_desc_status[0],
640 						     cookie);
641 }
642 #else
643 
644 void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
645 			  uint32_t pool_size,
646 			  struct rx_desc_pool *rx_desc_pool);
647 
648 /**
649  * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
650  *			 the Rx descriptor on Rx DMA source ring buffer
651  * @soc: core txrx main context
652  * @cookie: cookie used to lookup virtual address
653  *
654  * Return: void *: Virtual Address of the Rx descriptor
655  */
656 static inline
657 void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie)
658 {
659 	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
660 	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
661 	struct rx_desc_pool *rx_desc_pool;
662 
663 	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
664 		return NULL;
665 
666 	rx_desc_pool = &soc->rx_desc_buf[pool_id];
667 
668 	if (qdf_unlikely(index >= rx_desc_pool->pool_size))
669 		return NULL;
670 
671 	return &rx_desc_pool->array[index].rx_desc;
672 }
673 
674 /**
675  * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
676  *			 the Rx descriptor on monitor ring buffer
677  * @soc: core txrx main context
678  * @cookie: cookie used to lookup virtual address
679  *
680  * Return: void *: Virtual Address of the Rx descriptor
681  */
682 static inline
683 void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie)
684 {
685 	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
686 	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
687 	/* TODO */
688 	/* Add sanity for pool_id & index */
689 	return &(soc->rx_desc_mon[pool_id].array[index].rx_desc);
690 }
691 
692 /**
693  * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
694  *			 the Rx descriptor on monitor status ring buffer
695  * @soc: core txrx main context
696  * @cookie: cookie used to lookup virtual address
697  *
698  * Return: void *: Virtual Address of the Rx descriptor
699  */
700 static inline
701 void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
702 {
703 	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
704 	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
705 	/* TODO */
706 	/* Add sanity for pool_id & index */
707 	return &(soc->rx_desc_status[pool_id].array[index].rx_desc);
708 }
709 #endif /* RX_DESC_MULTI_PAGE_ALLOC */
710 
711 #ifndef QCA_HOST_MODE_WIFI_DISABLED
712 
713 static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
714 {
715 	return vdev->ap_bridge_enabled;
716 }
717 
718 #ifdef DP_RX_DESC_COOKIE_INVALIDATE
719 static inline QDF_STATUS
720 dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc)
721 {
722 	if (qdf_unlikely(HAL_RX_REO_BUF_COOKIE_INVALID_GET(ring_desc)))
723 		return QDF_STATUS_E_FAILURE;
724 
725 	HAL_RX_REO_BUF_COOKIE_INVALID_SET(ring_desc);
726 	return QDF_STATUS_SUCCESS;
727 }
728 
729 /**
730  * dp_rx_cookie_reset_invalid_bit() - Reset the invalid bit of the cookie
731  *  field in ring descriptor
732  * @ring_desc: ring descriptor
733  *
734  * Return: None
735  */
736 static inline void
737 dp_rx_cookie_reset_invalid_bit(hal_ring_desc_t ring_desc)
738 {
739 	HAL_RX_REO_BUF_COOKIE_INVALID_RESET(ring_desc);
740 }
741 #else
742 static inline QDF_STATUS
743 dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc)
744 {
745 	return QDF_STATUS_SUCCESS;
746 }
747 
748 static inline void
749 dp_rx_cookie_reset_invalid_bit(hal_ring_desc_t ring_desc)
750 {
751 }
752 #endif
753 
754 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
755 
756 #if defined(RX_DESC_MULTI_PAGE_ALLOC) && \
757 	defined(DP_WAR_VALIDATE_RX_ERR_MSDU_COOKIE)
758 /**
759  * dp_rx_is_sw_cookie_valid() - check whether the SW cookie is valid
760  * @soc: dp soc ref
761  * @cookie: Rx buf SW cookie value
762  *
763  * Return: true if cookie is valid else false
764  */
765 static inline bool dp_rx_is_sw_cookie_valid(struct dp_soc *soc,
766 					    uint32_t cookie)
767 {
768 	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
769 	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
770 	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
771 	struct rx_desc_pool *rx_desc_pool;
772 
773 	if (qdf_unlikely(pool_id >= MAX_PDEV_CNT))
774 		goto fail;
775 
776 	rx_desc_pool = &soc->rx_desc_buf[pool_id];
777 
778 	if (page_id >= rx_desc_pool->desc_pages.num_pages ||
779 	    offset >= rx_desc_pool->desc_pages.num_element_per_page)
780 		goto fail;
781 
782 	return true;
783 
784 fail:
785 	DP_STATS_INC(soc, rx.err.invalid_cookie, 1);
786 	return false;
787 }
788 #else
789 /**
790  * dp_rx_is_sw_cookie_valid() - check whether the SW cookie is valid
791  * @soc: dp soc ref
792  * @cookie: Rx buf SW cookie value
793  *
794  * When multi page alloc is disabled, SW cookie validity is
795  * checked while fetching the Rx descriptor, so there is no need to check here
796  *
797  * Return: true if cookie is valid else false
798  */
799 static inline bool dp_rx_is_sw_cookie_valid(struct dp_soc *soc,
800 					    uint32_t cookie)
801 {
802 	return true;
803 }
804 #endif
805 
806 /**
807  * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
808  *					rx descriptor pool
809  * @rx_desc_pool: rx descriptor pool pointer
810  *
811  * Return: QDF_STATUS  QDF_STATUS_SUCCESS
812  *		       QDF_STATUS_E_NOMEM
813  */
814 QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool);
815 
816 /**
817  * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
818  *			     descriptors
819  * @soc: core txrx main context
820  * @pool_size: number of rx descriptors (size of the pool)
821  * @rx_desc_pool: rx descriptor pool pointer
822  *
823  * Return: QDF_STATUS  QDF_STATUS_SUCCESS
824  *		       QDF_STATUS_E_NOMEM
825  *		       QDF_STATUS_E_FAULT
826  */
827 QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
828 				 uint32_t pool_size,
829 				 struct rx_desc_pool *rx_desc_pool);
830 
831 /**
832  * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool
833  * @soc: core txrx main context
834  * @pool_id: pool_id which is one of 3 mac_ids
835  * @pool_size: size of the rx descriptor pool
836  * @rx_desc_pool: rx descriptor pool pointer
837  *
838  * Convert the pool of memory into a list of rx descriptors and create
839  * locks to access this list of rx descriptors.
840  *
841  */
842 void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
843 			  uint32_t pool_size,
844 			  struct rx_desc_pool *rx_desc_pool);
845 
846 /**
847  * dp_rx_add_desc_list_to_free_list() - append unused desc_list back to
848  *					freelist.
849  * @soc: core txrx main context
850  * @local_desc_list: local desc list provided by the caller
851  * @tail: pointer to the last desc of the local desc list
852  * @pool_id: pool_id which is one of 3 mac_ids
853  * @rx_desc_pool: rx descriptor pool pointer
854  */
855 void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
856 				union dp_rx_desc_list_elem_t **local_desc_list,
857 				union dp_rx_desc_list_elem_t **tail,
858 				uint16_t pool_id,
859 				struct rx_desc_pool *rx_desc_pool);
860 
861 /**
862  * dp_rx_get_free_desc_list() - provide a list of descriptors from
863  *				the free rx desc pool.
864  * @soc: core txrx main context
865  * @pool_id: pool_id which is one of 3 mac_ids
866  * @rx_desc_pool: rx descriptor pool pointer
867  * @num_descs: number of descs requested from freelist
868  * @desc_list: attach the descs to this list (output parameter)
869  * @tail: pointer to the last desc of the free list (output parameter)
870  *
871  * Return: number of descs allocated from free list.
872  */
873 uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
874 				struct rx_desc_pool *rx_desc_pool,
875 				uint16_t num_descs,
876 				union dp_rx_desc_list_elem_t **desc_list,
877 				union dp_rx_desc_list_elem_t **tail);
878 
879 /**
880  * dp_rx_pdev_desc_pool_alloc() -  allocate memory for software rx descriptor
881  *				   pool
882  * @pdev: core txrx pdev context
883  *
884  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
885  *			QDF_STATUS_E_NOMEM
886  */
887 QDF_STATUS dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev);
888 
889 /**
890  * dp_rx_pdev_desc_pool_free() - free software rx descriptor pool
891  * @pdev: core txrx pdev context
892  */
893 void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev);
894 
895 /**
896  * dp_rx_pdev_desc_pool_init() - initialize software rx descriptors
897  * @pdev: core txrx pdev context
898  *
899  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
900  *			QDF_STATUS_E_NOMEM
901  */
902 QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev);
903 
904 /**
905  * dp_rx_pdev_desc_pool_deinit() - de-initialize software rx descriptor pools
906  * @pdev: core txrx pdev context
907  *
908  * This function resets the freelist of rx descriptors and destroys locks
909  * associated with this list of descriptors.
910  */
911 void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev);
912 
913 void dp_rx_desc_pool_deinit(struct dp_soc *soc,
914 			    struct rx_desc_pool *rx_desc_pool,
915 			    uint32_t pool_id);
916 
917 QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);
918 
919 /**
920  * dp_rx_pdev_buffers_alloc() - Allocate nbufs (skbs) and replenish RxDMA ring
921  * @pdev: core txrx pdev context
922  *
923  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
924  *			QDF_STATUS_E_NOMEM
925  */
926 QDF_STATUS dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev);
927 
928 /**
929  * dp_rx_pdev_buffers_free() - Free nbufs (skbs)
930  * @pdev: core txrx pdev context
931  */
932 void dp_rx_pdev_buffers_free(struct dp_pdev *pdev);
933 
934 void dp_rx_pdev_detach(struct dp_pdev *pdev);
935 
936 /**
937  * dp_print_napi_stats() - NAPI stats
938  * @soc: soc handle
939  */
940 void dp_print_napi_stats(struct dp_soc *soc);
941 
942 /**
943  * dp_rx_vdev_detach() - detach vdev from dp rx
944  * @vdev: virtual device instance
945  *
946  * Return: QDF_STATUS_SUCCESS: success
947  *         QDF_STATUS_E_RESOURCES: Error return
948  */
949 QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev);
950 
951 #ifndef QCA_HOST_MODE_WIFI_DISABLED
952 
953 uint32_t
954 dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
955 	      uint8_t reo_ring_num,
956 	      uint32_t quota);
957 
958 /**
959  * dp_rx_err_process() - Processes error frames routed to REO error ring
960  * @int_ctx: pointer to DP interrupt context
961  * @soc: core txrx main context
962  * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be serviced
963  * @quota: No. of units (packets) that can be serviced in one shot.
964  *
965  * This function implements error processing and top level demultiplexer
966  * for all the frames routed to REO error ring.
967  *
968  * Return: uint32_t: No. of elements processed
969  */
970 uint32_t dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
971 			   hal_ring_handle_t hal_ring_hdl, uint32_t quota);
972 
973 /**
974  * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
975  * @int_ctx: pointer to DP interrupt context
976  * @soc: core txrx main context
977  * @hal_ring_hdl: opaque pointer to the HAL WBM release ring, which will be
978  *                serviced
979  * @quota: No. of units (packets) that can be serviced in one shot.
980  *
981  * This function implements error processing and top level demultiplexer
982  * for all the frames routed to WBM2HOST sw release ring.
983  *
984  * Return: uint32_t: No. of elements processed
985  */
986 uint32_t
987 dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
988 		      hal_ring_handle_t hal_ring_hdl, uint32_t quota);
989 
990 /**
991  * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
992  *		     multiple nbufs.
993  * @soc: core txrx main context
994  * @nbuf: pointer to the first msdu of an amsdu.
995  *
996  * This function implements the creation of RX frag_list for cases
997  * where an MSDU is spread across multiple nbufs.
998  *
999  * Return: returns the head nbuf which contains complete frag_list.
1000  */
1001 qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf);
1002 
1003 /**
1004  * dp_rx_is_sg_supported() - SG packets processing supported or not.
1005  *
1006  * Return: returns true when processing is supported else false.
1007  */
1008 bool dp_rx_is_sg_supported(void);
1009 
1010 /**
1011  * dp_rx_desc_nbuf_and_pool_free() - free the sw rx desc pool called during
1012  *				     de-initialization of wifi module.
1013  *
1014  * @soc: core txrx main context
1015  * @pool_id: pool_id which is one of 3 mac_ids
1016  * @rx_desc_pool: rx descriptor pool pointer
1017  *
1018  * Return: None
1019  */
1020 void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
1021 				   struct rx_desc_pool *rx_desc_pool);
1022 
1023 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1024 
1025 /**
1026  * dp_rx_desc_nbuf_free() - free the sw rx desc nbufs called during
1027  *			    de-initialization of wifi module.
1028  *
1029  * @soc: core txrx main context
1030  * @rx_desc_pool: rx descriptor pool pointer
1031  * @is_mon_pool: true if this is a monitor pool
1032  *
1033  * Return: None
1034  */
1035 void dp_rx_desc_nbuf_free(struct dp_soc *soc,
1036 			  struct rx_desc_pool *rx_desc_pool,
1037 			  bool is_mon_pool);
1038 
1039 #ifdef DP_RX_MON_MEM_FRAG
1040 /**
1041  * dp_rx_desc_frag_free() - free the sw rx desc frag called during
1042  *			    de-initialization of wifi module.
1043  *
1044  * @soc: core txrx main context
1045  * @rx_desc_pool: rx descriptor pool pointer
1046  *
1047  * Return: None
1048  */
1049 void dp_rx_desc_frag_free(struct dp_soc *soc,
1050 			  struct rx_desc_pool *rx_desc_pool);
1051 #else
1052 static inline
1053 void dp_rx_desc_frag_free(struct dp_soc *soc,
1054 			  struct rx_desc_pool *rx_desc_pool)
1055 {
1056 }
1057 #endif
1058 /**
1059  * dp_rx_desc_pool_free() - free the sw rx desc array called during
1060  *			    de-initialization of wifi module.
1061  *
1062  * @soc: core txrx main context
1063  * @rx_desc_pool: rx descriptor pool pointer
1064  *
1065  * Return: None
1066  */
1067 void dp_rx_desc_pool_free(struct dp_soc *soc,
1068 			  struct rx_desc_pool *rx_desc_pool);
1069 
1070 /**
1071  * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
1072  *				pkts to RAW mode simulation to
1073  *				decapsulate the pkt.
1074  * @vdev: vdev on which RAW mode is enabled
1075  * @nbuf_list: list of RAW pkts to process
1076  * @txrx_peer: peer object from which the pkt is rx
1077  *
1078  * Return: void
1079  */
1080 void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
1081 				struct dp_txrx_peer *txrx_peer);
1082 
1083 #ifdef RX_DESC_LOGGING
1084 /**
1085  * dp_rx_desc_alloc_dbg_info() - Alloc memory for rx descriptor debug
1086  *  structure
1087  * @rx_desc: rx descriptor pointer
1088  *
1089  * Return: None
1090  */
1091 static inline
1092 void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc)
1093 {
1094 	rx_desc->dbg_info = qdf_mem_malloc(sizeof(struct dp_rx_desc_dbg_info));
1095 }
1096 
1097 /**
1098  * dp_rx_desc_free_dbg_info() - Free rx descriptor debug
1099  *  structure memory
1100  * @rx_desc: rx descriptor pointer
1101  *
1102  * Return: None
1103  */
1104 static inline
1105 void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc)
1106 {
1107 	qdf_mem_free(rx_desc->dbg_info);
1108 }
1109 
1110 /**
1111  * dp_rx_desc_update_dbg_info() - Update rx descriptor debug info
1112  *  structure memory
1113  * @rx_desc: rx descriptor pointer
1114  * @func_name: name of calling function
1115  * @flag:
1116  *
1117  * Return: None
1118  */
1119 static
1120 void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc,
1121 				const char *func_name, uint8_t flag)
1122 {
1123 	struct dp_rx_desc_dbg_info *info = rx_desc->dbg_info;
1124 
1125 	if (!info)
1126 		return;
1127 
1128 	if (flag == RX_DESC_REPLENISHED) {
1129 		qdf_str_lcopy(info->replenish_caller, func_name,
1130 			      QDF_MEM_FUNC_NAME_SIZE);
1131 		info->replenish_ts = qdf_get_log_timestamp();
1132 	} else {
1133 		qdf_str_lcopy(info->freelist_caller, func_name,
1134 			      QDF_MEM_FUNC_NAME_SIZE);
1135 		info->freelist_ts = qdf_get_log_timestamp();
1136 		info->prev_nbuf = rx_desc->nbuf;
1137 		info->prev_nbuf_data_addr = rx_desc->nbuf_data_addr;
1138 		rx_desc->nbuf_data_addr = NULL;
1139 	}
1140 }
1141 #else
1142 
1143 static inline
1144 void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc)
1145 {
1146 }
1147 
1148 static inline
1149 void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc)
1150 {
1151 }
1152 
1153 static inline
1154 void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc,
1155 				const char *func_name, uint8_t flag)
1156 {
1157 }
1158 #endif /* RX_DESC_LOGGING */
1159 
1160 /**
1161  * __dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list
1162  *
1163  * @head: pointer to the head of local free list
1164  * @tail: pointer to the tail of local free list
1165  * @new: new descriptor that is added to the free list
1166  * @func_name: caller func name
1167  *
1168  * Return: void:
1169  */
1170 static inline
1171 void __dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head,
1172 				 union dp_rx_desc_list_elem_t **tail,
1173 				 struct dp_rx_desc *new, const char *func_name)
1174 {
1175 	qdf_assert(head && new);
1176 
1177 	dp_rx_desc_update_dbg_info(new, func_name, RX_DESC_IN_FREELIST);
1178 
1179 	new->nbuf = NULL;
1180 	new->in_use = 0;
1181 
1182 	((union dp_rx_desc_list_elem_t *)new)->next = *head;
1183 	*head = (union dp_rx_desc_list_elem_t *)new;
1184 	/* reset tail if head->next is NULL */
1185 	if (!*tail || !(*head)->next)
1186 		*tail = *head;
1187 }
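/*
 * Illustrative usage sketch (hypothetical locals): descriptors are first
 * collected on a local free list and later returned to the pool freelist
 * in one shot:
 *
 *	union dp_rx_desc_list_elem_t *head = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *
 *	dp_rx_add_to_free_desc_list(&head, &tail, rx_desc);
 *	dp_rx_add_desc_list_to_free_list(soc, &head, &tail, pool_id,
 *					 rx_desc_pool);
 */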
1188 
1189 /**
1190  * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
1191  * @soc: DP SOC handle
1192  * @nbuf: network buffer
1193  * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
1194  * pool_id have the same mapping)
1195  *
1196  * Return: integer type
1197  */
1198 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
1199 				   uint8_t mac_id);
1200 
1201 /**
1202  * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
1203  * @soc: DP SOC handle
1204  * @mpdu: mpdu for which peer is invalid
1205  * @mpdu_done: if an mpdu is completed
1206  * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
1207  * pool_id have the same mapping)
1208  *
1209  * Return: integer type
1210  */
1211 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
1212 		qdf_nbuf_t mpdu, bool mpdu_done, uint8_t mac_id);
1213 
1214 /**
1215  * dp_rx_process_mic_error(): Function to pass mic error indication to umac
1216  * @soc: core DP main context
1217  * @nbuf: buffer pointer
1218  * @rx_tlv_hdr: start of rx tlv header
1219  * @txrx_peer: txrx peer handle
1220  *
1221  * Return: void
1222  */
1223 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
1224 			     uint8_t *rx_tlv_hdr,
1225 			     struct dp_txrx_peer *txrx_peer);
1226 
1227 /**
1228  * dp_2k_jump_handle() - Function to handle 2k jump exception
1229  *                        on WBM ring
1230  * @soc: core DP main context
1231  * @nbuf: buffer pointer
1232  * @rx_tlv_hdr: start of rx tlv header
1233  * @peer_id: peer id of first msdu
1234  * @tid: Tid for which exception occurred
1235  *
1236  * This function handles 2k jump violations arising out
1237  * of receiving aggregates in non BA case. This typically
1238  * may happen if aggregates are received on a QOS enabled TID
1239  * while Rx window size is still initialized to value of 2. Or
1240  * it may also happen if negotiated window size is 1 but peer
1241  * sends aggregates.
1242  */
1243 void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
1244 		       uint16_t peer_id, uint8_t tid);
1245 
1246 #define DP_RX_HEAD_APPEND(head, elem) \
1247 	do {                                                            \
1248 		qdf_nbuf_set_next((elem), (head));			\
1249 		(head) = (elem);                                        \
1250 	} while (0)
1251 
1252 
1253 #define DP_RX_LIST_APPEND(head, tail, elem) \
1254 	do {                                                          \
1255 		if (!(head)) {                                        \
1256 			(head) = (elem);                              \
1257 			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) = 1;\
1258 		} else {                                              \
1259 			qdf_nbuf_set_next((tail), (elem));            \
1260 			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head)++;  \
1261 		}                                                     \
1262 		(tail) = (elem);                                      \
1263 		qdf_nbuf_set_next((tail), NULL);                      \
1264 	} while (0)
1265 
1266 #define DP_RX_MERGE_TWO_LIST(phead, ptail, chead, ctail) \
1267 	do {                                                          \
1268 		if (!(phead)) {                                       \
1269 			(phead) = (chead);                            \
1270 		} else {                                              \
1271 			qdf_nbuf_set_next((ptail), (chead));          \
1272 			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(phead) += \
1273 			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(chead);   \
1274 		}                                                     \
1275 		(ptail) = (ctail);                                    \
1276 		qdf_nbuf_set_next((ptail), NULL);                     \
1277 	} while (0)
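/*
 * Illustrative usage sketch (next_rx_nbuf() is a hypothetical helper):
 * building a delivery list in an rx loop with DP_RX_LIST_APPEND; the head
 * nbuf carries the element count in QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST():
 *
 *	qdf_nbuf_t deliver_list_head = NULL;
 *	qdf_nbuf_t deliver_list_tail = NULL;
 *
 *	while ((nbuf = next_rx_nbuf()) != NULL)
 *		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
 */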
1278 
1279 #if defined(QCA_PADDR_CHECK_ON_3TH_PLATFORM)
1280 /*
1281  * on some third-party platforms, the memory below 0x2000
1282  * is reserved for target use, so any memory allocated in this
1283  * region should not be used by host
1284  */
1285 #define MAX_RETRY 50
1286 #define DP_PHY_ADDR_RESERVED	0x2000
1287 #elif defined(BUILD_X86)
1288 /*
1289  * in M2M emulation platforms (x86) the memory below 0x50000000
1290  * is reserved for target use, so any memory allocated in this
1291  * region should not be used by host
1292  */
1293 #define MAX_RETRY 100
1294 #define DP_PHY_ADDR_RESERVED	0x50000000
1295 #endif
1296 
1297 #if defined(QCA_PADDR_CHECK_ON_3TH_PLATFORM) || defined(BUILD_X86)
1298 /**
1299  * dp_check_paddr() - check if current phy address is valid or not
1300  * @dp_soc: core txrx main context
1301  * @rx_netbuf: skb buffer
1302  * @paddr: physical address
1303  * @rx_desc_pool: struct of rx descriptor pool
1304  * Check if the physical address of the nbuf->data is less
1305  * than DP_PHY_ADDR_RESERVED; if so, free the nbuf and try
1306  * allocating a new nbuf. We retry up to MAX_RETRY times.
1307  *
1308  * This is a temp WAR till we fix it properly.
1309  *
1310  * Return: success or failure.
1311  */
1312 static inline
1313 int dp_check_paddr(struct dp_soc *dp_soc,
1314 		   qdf_nbuf_t *rx_netbuf,
1315 		   qdf_dma_addr_t *paddr,
1316 		   struct rx_desc_pool *rx_desc_pool)
1317 {
1318 	uint32_t nbuf_retry = 0;
1319 	int32_t ret;
1320 
1321 	if (qdf_likely(*paddr > DP_PHY_ADDR_RESERVED))
1322 		return QDF_STATUS_SUCCESS;
1323 
1324 	do {
1325 		dp_debug("invalid phy addr 0x%llx, trying again",
1326 			 (uint64_t)(*paddr));
1327 		nbuf_retry++;
1328 		if ((*rx_netbuf)) {
1329 			/* Not freeing buffer intentionally.
1330 			 * Observed that the same buffer gets
1331 			 * re-allocated, resulting in a longer load time and
1332 			 * WMI init timeout.
1333 			 * This buffer is anyway not useful, so skip it.
1334 			 * Add such buffers to the invalid list and free
1335 			 * them at driver unload.
1336 			 */
1337 			qdf_nbuf_unmap_nbytes_single(dp_soc->osdev,
1338 						     *rx_netbuf,
1339 						     QDF_DMA_FROM_DEVICE,
1340 						     rx_desc_pool->buf_size);
1341 			qdf_nbuf_queue_add(&dp_soc->invalid_buf_queue,
1342 					   *rx_netbuf);
1343 		}
1344 
1345 		*rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
1346 					    rx_desc_pool->buf_size,
1347 					    RX_BUFFER_RESERVATION,
1348 					    rx_desc_pool->buf_alignment,
1349 					    FALSE);
1350 
1351 		if (qdf_unlikely(!(*rx_netbuf)))
1352 			return QDF_STATUS_E_FAILURE;
1353 
1354 		ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev,
1355 						 *rx_netbuf,
1356 						 QDF_DMA_FROM_DEVICE,
1357 						 rx_desc_pool->buf_size);
1358 
1359 		if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
1360 			qdf_nbuf_free(*rx_netbuf);
1361 			*rx_netbuf = NULL;
1362 			continue;
1363 		}
1364 
1365 		*paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0);
1366 
1367 		if (qdf_likely(*paddr > DP_PHY_ADDR_RESERVED))
1368 			return QDF_STATUS_SUCCESS;
1369 
1370 	} while (nbuf_retry < MAX_RETRY);
1371 
1372 	if ((*rx_netbuf)) {
1373 		qdf_nbuf_unmap_nbytes_single(dp_soc->osdev,
1374 					     *rx_netbuf,
1375 					     QDF_DMA_FROM_DEVICE,
1376 					     rx_desc_pool->buf_size);
1377 		qdf_nbuf_queue_add(&dp_soc->invalid_buf_queue,
1378 				   *rx_netbuf);
1379 	}
1380 
1381 	return QDF_STATUS_E_FAILURE;
1382 }
1383 
1384 #else
1385 static inline
1386 int dp_check_paddr(struct dp_soc *dp_soc,
1387 		   qdf_nbuf_t *rx_netbuf,
1388 		   qdf_dma_addr_t *paddr,
1389 		   struct rx_desc_pool *rx_desc_pool)
1390 {
1391 	return QDF_STATUS_SUCCESS;
1392 }
1393 
1394 #endif
1395 
1396 /**
1397  * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
1398  *				   the MSDU Link Descriptor
1399  * @soc: core txrx main context
1400  * @buf_info: buf_info includes cookie that is used to lookup
1401  * virtual address of link descriptor after deriving the page id
1402  * and the offset or index of the desc on the associated page.
1403  *
1404  * This is the VA of the link descriptor, that HAL layer later uses to
1405  * retrieve the list of MSDU's for a given MPDU.
1406  *
1407  * Return: void *: Virtual Address of the MSDU link descriptor
1408  */
1409 static inline
1410 void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
1411 				  struct hal_buf_info *buf_info)
1412 {
1413 	void *link_desc_va;
1414 	struct qdf_mem_multi_page_t *pages;
1415 	uint16_t page_id = LINK_DESC_COOKIE_PAGE_ID(buf_info->sw_cookie);
1416 
1417 	pages = &soc->link_desc_pages;
1418 	if (!pages)
1419 		return NULL;
1420 	if (qdf_unlikely(page_id >= pages->num_pages))
1421 		return NULL;
1422 	link_desc_va = pages->dma_pages[page_id].page_v_addr_start +
1423 		(buf_info->paddr - pages->dma_pages[page_id].page_p_addr);
1424 	return link_desc_va;
1425 }
1426 
1427 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1428 #ifdef DISABLE_EAPOL_INTRABSS_FWD
1429 #ifdef WLAN_FEATURE_11BE_MLO
1430 static inline bool dp_nbuf_dst_addr_is_mld_addr(struct dp_vdev *vdev,
1431 						qdf_nbuf_t nbuf)
1432 {
1433 	struct qdf_mac_addr *self_mld_mac_addr =
1434 				(struct qdf_mac_addr *)vdev->mld_mac_addr.raw;
1435 	return qdf_is_macaddr_equal(self_mld_mac_addr,
1436 				    (struct qdf_mac_addr *)qdf_nbuf_data(nbuf) +
1437 				    QDF_NBUF_DEST_MAC_OFFSET);
1438 }
1439 #else
1440 static inline bool dp_nbuf_dst_addr_is_mld_addr(struct dp_vdev *vdev,
1441 						qdf_nbuf_t nbuf)
1442 {
1443 	return false;
1444 }
1445 #endif
1446 
1447 static inline bool dp_nbuf_dst_addr_is_self_addr(struct dp_vdev *vdev,
1448 						 qdf_nbuf_t nbuf)
1449 {
1450 	return qdf_is_macaddr_equal((struct qdf_mac_addr *)vdev->mac_addr.raw,
1451 				    (struct qdf_mac_addr *)qdf_nbuf_data(nbuf) +
1452 				    QDF_NBUF_DEST_MAC_OFFSET);
1453 }
1454 
1455 /**
1456  * dp_rx_intrabss_eapol_drop_check() - API to drop an EAPOL
1457  *  pkt whose DA is not equal to the vdev mac addr; fwd is not allowed.
1458  * @soc: core txrx main context
1459  * @ta_txrx_peer: source peer entry
1460  * @rx_tlv_hdr: start address of rx tlvs
1461  * @nbuf: nbuf that has to be intrabss forwarded
1462  *
1463  * Return: true if the pkt is dropped (not forwarded), else false
1464  */
1465 static inline
1466 bool dp_rx_intrabss_eapol_drop_check(struct dp_soc *soc,
1467 				     struct dp_txrx_peer *ta_txrx_peer,
1468 				     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf)
1469 {
1470 	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf) &&
1471 			 !(dp_nbuf_dst_addr_is_self_addr(ta_txrx_peer->vdev,
1472 							 nbuf) ||
1473 			   dp_nbuf_dst_addr_is_mld_addr(ta_txrx_peer->vdev,
1474 							nbuf)))) {
1475 		qdf_nbuf_free(nbuf);
1476 		DP_STATS_INC(soc, rx.err.intrabss_eapol_drop, 1);
1477 		return true;
1478 	}
1479 
1480 	return false;
1481 }
1482 #else /* DISABLE_EAPOL_INTRABSS_FWD */
1483 
1484 static inline
1485 bool dp_rx_intrabss_eapol_drop_check(struct dp_soc *soc,
1486 				     struct dp_txrx_peer *ta_txrx_peer,
1487 				     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf)
1488 {
1489 	return false;
1490 }
1491 #endif /* DISABLE_EAPOL_INTRABSS_FWD */
1492 
1493 /**
1494  * dp_rx_intrabss_mcbc_fwd() - Does intrabss forward for mcast packets
1495  * @soc: core txrx main context
1496  * @ta_peer: source peer entry
1497  * @rx_tlv_hdr: start address of rx tlvs
1498  * @nbuf: nbuf that has to be intrabss forwarded
1499  * @tid_stats: tid stats pointer
1500  *
1501  * Return: bool: true if it is forwarded else false
1502  */
1503 bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc,
1504 			     struct dp_txrx_peer *ta_peer,
1505 			     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
1506 			     struct cdp_tid_rx_stats *tid_stats);
1507 
1508 /**
1509  * dp_rx_intrabss_ucast_fwd() - Does intrabss forward for unicast packets
1510  * @soc: core txrx main context
1511  * @ta_peer: source peer entry
1512  * @tx_vdev_id: VDEV ID for Intra-BSS TX
1513  * @rx_tlv_hdr: start address of rx tlvs
1514  * @nbuf: nbuf that has to be intrabss forwarded
1515  * @tid_stats: tid stats pointer
1516  *
1517  * Return: bool: true if it is forwarded else false
1518  */
1519 bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc,
1520 			      struct dp_txrx_peer *ta_peer,
1521 			      uint8_t tx_vdev_id,
1522 			      uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
1523 			      struct cdp_tid_rx_stats *tid_stats);
1524 
1525 /**
1526  * dp_rx_defrag_concat() - Concatenate the fragments
1527  *
1528  * @dst: destination pointer to the buffer
1529  * @src: source pointer from where the fragment payload is to be copied
1530  *
1531  * Return: QDF_STATUS
1532  */
1533 static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src)
1534 {
1535 	/*
1536 	 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst
1537 	 * to provide space for src, the headroom portion is copied from
1538 	 * the original dst buffer to the larger new dst buffer.
1539 	 * (This is needed, because the headroom of the dst buffer
1540 	 * contains the rx desc.)
1541 	 */
1542 	if (!qdf_nbuf_cat(dst, src)) {
1543 		/*
1544 		 * qdf_nbuf_cat does not free the src memory.
1545 		 * Free src nbuf before returning
1546 		 * For the failure case, the caller takes care of freeing the nbuf
1547 		 */
1548 		qdf_nbuf_free(src);
1549 		return QDF_STATUS_SUCCESS;
1550 	}
1551 
1552 	return QDF_STATUS_E_DEFRAG_ERROR;
1553 }
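/*
 * Illustrative usage sketch (hypothetical fragment walk): chaining a list
 * of fragments onto the head nbuf. On success dp_rx_defrag_concat() frees
 * the source nbuf; on failure the caller keeps ownership of the remaining
 * fragments:
 *
 *	while (frag) {
 *		qdf_nbuf_t next = qdf_nbuf_next(frag);
 *
 *		if (dp_rx_defrag_concat(head, frag) != QDF_STATUS_SUCCESS)
 *			break;
 *		frag = next;
 *	}
 */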
1554 
1555 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1556 
1557 #ifndef FEATURE_WDS
1558 void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
1559 		    struct dp_txrx_peer *ta_txrx_peer, qdf_nbuf_t nbuf);
1560 
1561 static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc, uint16_t sa_idx, bool is_active)
1562 {
1563 	return QDF_STATUS_SUCCESS;
1564 }
1565 
1566 static inline void
1567 dp_rx_wds_srcport_learn(struct dp_soc *soc,
1568 			uint8_t *rx_tlv_hdr,
1569 			struct dp_txrx_peer *txrx_peer,
1570 			qdf_nbuf_t nbuf,
1571 			struct hal_rx_msdu_metadata msdu_metadata)
1572 {
1573 }
1574 
1575 static inline void
1576 dp_rx_ipa_wds_srcport_learn(struct dp_soc *soc,
1577 			    struct dp_peer *ta_peer, qdf_nbuf_t nbuf,
1578 			    struct hal_rx_msdu_metadata msdu_end_info,
1579 			    bool ad4_valid, bool chfrag_start)
1580 {
1581 }
1582 #endif
1583 
1584 /**
1585  * dp_rx_desc_dump() - dump the sw rx descriptor
1586  *
1587  * @rx_desc: sw rx descriptor
1588  */
1589 static inline void dp_rx_desc_dump(struct dp_rx_desc *rx_desc)
1590 {
1591 	dp_info("rx_desc->nbuf: %pK, rx_desc->cookie: %d, rx_desc->pool_id: %d, rx_desc->in_use: %d, rx_desc->unmapped: %d",
1592 		rx_desc->nbuf, rx_desc->cookie, rx_desc->pool_id,
1593 		rx_desc->in_use, rx_desc->unmapped);
1594 }
1595 
1596 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1597 
1598 /**
1599  * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet.
1600  *					In qwrap mode, packets originating from
1601  *					any vdev should not loop back and
1602  *					should be dropped.
1603  * @vdev: vdev on which rx packet is received
1604  * @nbuf: rx pkt
1605  *
1606  */
1607 #if ATH_SUPPORT_WRAP
1608 static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
1609 						qdf_nbuf_t nbuf)
1610 {
1611 	struct dp_vdev *psta_vdev;
1612 	struct dp_pdev *pdev = vdev->pdev;
1613 	uint8_t *data = qdf_nbuf_data(nbuf);
1614 
1615 	if (qdf_unlikely(vdev->proxysta_vdev)) {
1616 		/* In qwrap isolation mode, allow loopback packets as all
1617 		 * packets go to RootAP and Loopback on the mpsta.
1618 		 */
1619 		if (vdev->isolation_vdev)
1620 			return false;
1621 		TAILQ_FOREACH(psta_vdev, &pdev->vdev_list, vdev_list_elem) {
1622 			if (qdf_unlikely(psta_vdev->proxysta_vdev &&
1623 					 !qdf_mem_cmp(psta_vdev->mac_addr.raw,
1624 						      &data[QDF_MAC_ADDR_SIZE],
1625 						      QDF_MAC_ADDR_SIZE))) {
1626 				/* Drop packet if source address is equal to
1627 				 * any of the vdev addresses.
1628 				 */
1629 				return true;
1630 			}
1631 		}
1632 	}
1633 	return false;
1634 }
1635 #else
1636 static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
1637 						qdf_nbuf_t nbuf)
1638 {
1639 	return false;
1640 }
1641 #endif
1642 
1643 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1644 
1645 #if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\
1646 	defined(WLAN_SUPPORT_RX_TAG_STATISTICS) ||\
1647 	defined(WLAN_SUPPORT_RX_FLOW_TAG)
1648 #include "dp_rx_tag.h"
1649 #endif
1650 
1651 #if !defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) &&\
1652 	!defined(WLAN_SUPPORT_RX_FLOW_TAG)
1653 /**
1654  * dp_rx_update_protocol_tag() - Reads CCE metadata from the RX MSDU end TLV
1655  *                              and set the corresponding tag in QDF packet
1656  * @soc: core txrx main context
1657  * @vdev: vdev on which the packet is received
1658  * @nbuf: QDF pkt buffer on which the protocol tag should be set
1659  * @rx_tlv_hdr: base address where the RX TLVs start
1660  * @ring_index: REO ring number, not used for error & monitor ring
1661  * @is_reo_exception: flag to indicate if rx from REO ring or exception ring
1662  * @is_update_stats: flag to indicate whether to update stats or not
1663  *
1664  * Return: void
1665  */
1666 static inline void
1667 dp_rx_update_protocol_tag(struct dp_soc *soc, struct dp_vdev *vdev,
1668 			  qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
1669 			  uint16_t ring_index,
1670 			  bool is_reo_exception, bool is_update_stats)
1671 {
1672 }
1673 #endif
1674 
1675 #ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
1676 /**
1677  * dp_rx_err_cce_drop() - Reads CCE metadata from the RX MSDU end TLV
1678  *                        and returns whether cce metadata matches
1679  * @soc: core txrx main context
1680  * @vdev: vdev on which the packet is received
1681  * @nbuf: QDF pkt buffer on which the protocol tag should be set
1682  * @rx_tlv_hdr: base address where the RX TLVs start
1683  *
1684  * Return: bool
1685  */
1686 static inline bool
1687 dp_rx_err_cce_drop(struct dp_soc *soc, struct dp_vdev *vdev,
1688 		   qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
1689 {
1690 	return false;
1691 }
1692 
1693 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
1694 
1695 #ifndef WLAN_SUPPORT_RX_FLOW_TAG
1696 /**
1697  * dp_rx_update_flow_tag() - Reads FSE metadata from the RX MSDU end TLV
1698  *                           and set the corresponding tag in QDF packet
1699  * @soc: core txrx main context
1700  * @vdev: vdev on which the packet is received
1701  * @nbuf: QDF pkt buffer on which the protocol tag should be set
1702  * @rx_tlv_hdr: base address where the RX TLVs start
1703  * @update_stats: flag to indicate whether to update stats or not
1704  *
1705  * Return: void
1706  */
1707 static inline void
1708 dp_rx_update_flow_tag(struct dp_soc *soc, struct dp_vdev *vdev,
1709 		      qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, bool update_stats)
1710 {
1711 }
1712 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
1713 
1714 #define CRITICAL_BUFFER_THRESHOLD	64
1715 /**
1716  * __dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
1717  *			       called during dp rx initialization
1718  *			       and at the end of dp_rx_process.
1719  *
1720  * @dp_soc: core txrx main context
1721  * @mac_id: mac_id which is one of 3 mac_ids
1722  * @dp_rxdma_srng: dp rxdma circular ring
1723  * @rx_desc_pool: Pointer to free Rx descriptor pool
1724  * @num_req_buffers: number of buffer to be replenished
1725  * @desc_list: list of descs if called from dp_rx_process
1726  *	       or NULL during dp rx initialization or out of buffer
1727  *	       interrupt.
1728  * @tail: tail of descs list
1729  * @req_only: If true don't replenish more than req buffers
1730  * @func_name: name of the caller function
1731  *
1732  * Return: return success or failure
1733  */
1734 QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
1735 				 struct dp_srng *dp_rxdma_srng,
1736 				 struct rx_desc_pool *rx_desc_pool,
1737 				 uint32_t num_req_buffers,
1738 				 union dp_rx_desc_list_elem_t **desc_list,
1739 				 union dp_rx_desc_list_elem_t **tail,
1740 				 bool req_only,
1741 				 const char *func_name);
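
/*
 * Illustrative usage sketch (not part of this header): a typical replenish
 * call after reaping descriptors from the REO ring. The local variable
 * names and the reap bookkeeping are assumptions for illustration only;
 * callers normally go through a wrapper that supplies __func__ as the
 * last argument.
 *
 *	union dp_rx_desc_list_elem_t *desc_list = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *	uint32_t rx_bufs_reaped = 0;
 *
 *	// ... reap ring entries and add freed descriptors to desc_list ...
 *
 *	if (rx_bufs_reaped)
 *		__dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
 *					  rx_desc_pool, rx_bufs_reaped,
 *					  &desc_list, &tail, false, __func__);
 */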
1742 
1743 /**
1744  * __dp_rx_buffers_no_map_replenish() - replenish rxdma ring with rx nbufs
1745  *					use direct APIs to invalidate the
1746  *					nbuf and get its physical address
1747  *					instead of the map API; called during
1748  *					dp rx initialization and at the end
1749  *					of dp_rx_process.
1750  *
1751  * @dp_soc: core txrx main context
1752  * @mac_id: mac_id which is one of 3 mac_ids
1753  * @dp_rxdma_srng: dp rxdma circular ring
1754  * @rx_desc_pool: Pointer to free Rx descriptor pool
1755  * @num_req_buffers: number of buffer to be replenished
1756  * @desc_list: list of descs if called from dp_rx_process
1757  *	       or NULL during dp rx initialization or out of buffer
1758  *	       interrupt.
1759  * @tail: tail of descs list
1760  *
1761  * Return: return success or failure
1762  */
1763 QDF_STATUS
1764 __dp_rx_buffers_no_map_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
1765 				 struct dp_srng *dp_rxdma_srng,
1766 				 struct rx_desc_pool *rx_desc_pool,
1767 				 uint32_t num_req_buffers,
1768 				 union dp_rx_desc_list_elem_t **desc_list,
1769 				 union dp_rx_desc_list_elem_t **tail);
1770 
1771 /**
1772  * __dp_rx_buffers_no_map_lt_replenish() - replenish rxdma ring with rx nbufs
1773  *					use direct APIs to invalidate the
1774  *					nbuf and get its physical address
1775  *					instead of the map API; called when
1776  *					the low threshold interrupt is triggered
1777  *
1778  * @dp_soc: core txrx main context
1779  * @mac_id: mac_id which is one of 3 mac_ids
1780  * @dp_rxdma_srng: dp rxdma circular ring
1781  * @rx_desc_pool: Pointer to free Rx descriptor pool
1782  *
1783  * Return: return success or failure
1784  */
1785 QDF_STATUS
1786 __dp_rx_buffers_no_map_lt_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
1787 				    struct dp_srng *dp_rxdma_srng,
1788 				    struct rx_desc_pool *rx_desc_pool);
1789 
1790 /**
1791  * __dp_pdev_rx_buffers_no_map_attach() - replenish rxdma ring with rx nbufs
1792  *					use direct APIs to invalidate the
1793  *					nbuf and get its physical address
1794  *					instead of the map API; called during
1795  *					dp rx initialization.
1796  *
1797  * @dp_soc: core txrx main context
1798  * @mac_id: mac_id which is one of 3 mac_ids
1799  * @dp_rxdma_srng: dp rxdma circular ring
1800  * @rx_desc_pool: Pointer to free Rx descriptor pool
1801  * @num_req_buffers: number of buffer to be replenished
1802  *
1803  * Return: return success or failure
1804  */
1805 QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *dp_soc,
1806 					      uint32_t mac_id,
1807 					      struct dp_srng *dp_rxdma_srng,
1808 					      struct rx_desc_pool *rx_desc_pool,
1809 					      uint32_t num_req_buffers);
1810 
1811 /**
1812  * dp_pdev_rx_buffers_attach() - replenish rxdma ring with rx nbufs
1813  *                               called during dp rx initialization
1814  *
1815  * @dp_soc: core txrx main context
1816  * @mac_id: mac_id which is one of 3 mac_ids
1817  * @dp_rxdma_srng: dp rxdma circular ring
1818  * @rx_desc_pool: Pointer to free Rx descriptor pool
1819  * @num_req_buffers: number of buffer to be replenished
1820  *
1821  * Return: return success or failure
1822  */
1823 QDF_STATUS
1824 dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
1825 			  struct dp_srng *dp_rxdma_srng,
1826 			  struct rx_desc_pool *rx_desc_pool,
1827 			  uint32_t num_req_buffers);
1828 
1829 /**
1830  * dp_rx_link_desc_return() - Return an MPDU link descriptor to HW
1831  *			      (WBM), following error handling
1832  *
1833  * @soc: core DP main context
1834  * @ring_desc: opaque pointer to the REO error ring descriptor
1835  * @bm_action: put to idle_list or release to msdu_list
1836  *
1837  * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
1838  */
1839 QDF_STATUS
1840 dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
1841 		       uint8_t bm_action);
1842 
1843 /**
1844  * dp_rx_link_desc_return_by_addr() - Return an MPDU link descriptor to
1845  *					HW (WBM) by address
1846  *
1847  * @soc: core DP main context
1848  * @link_desc_addr: link descriptor addr
1849  * @bm_action: put to idle_list or release to msdu_list
1850  *
1851  * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
1852  */
1853 QDF_STATUS
1854 dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
1855 			       hal_buff_addrinfo_t link_desc_addr,
1856 			       uint8_t bm_action);
1857 
1858 /**
1859  * dp_rxdma_err_process() - RxDMA error processing functionality
1860  * @int_ctx: pointer to DP interrupt context
1861  * @soc: core txrx main context
1862  * @mac_id: mac id which is one of 3 mac_ids
1863  * @quota: No. of units (packets) that can be serviced in one shot.
1864  *
1865  * Return: num of buffers processed
1866  */
1867 uint32_t
1868 dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
1869 		     uint32_t mac_id, uint32_t quota);
1870 
1871 /**
1872  * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
1873  * @vdev: DP Virtual device handle
1874  * @nbuf: Buffer pointer
1875  * @rx_tlv_hdr: start of rx tlv header
1876  * @txrx_peer: pointer to peer
1877  *
1878  * This function allocates memory for mesh receive stats and fills the
1879  * required stats. Stores the memory address in skb cb.
1880  *
1881  * Return: void
1882  */
1883 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1884 			   uint8_t *rx_tlv_hdr,
1885 			   struct dp_txrx_peer *txrx_peer);
1886 
1887 /**
1888  * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
1889  * @vdev: DP Virtual device handle
1890  * @nbuf: Buffer pointer
1891  * @rx_tlv_hdr: start of rx tlv header
1892  *
1893  * This function checks whether the received packet matches any filter-out
1894  * category and drops the packet if it matches.
1895  *
1896  * Return: QDF_STATUS_SUCCESS indicates drop,
1897  *         QDF_STATUS_E_FAILURE indicates not to drop
1898  */
1899 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1900 					uint8_t *rx_tlv_hdr);
1901 
1902 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
1903 			   struct dp_txrx_peer *peer);
1904 
1905 /**
1906  * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
1907  *
1908  * @soc: core txrx main context
1909  * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
1910  * @ring_desc: opaque pointer to the RX ring descriptor
1911  * @rx_desc: host rx descriptor
1912  *
1913  * Return: void
1914  */
1915 void dp_rx_dump_info_and_assert(struct dp_soc *soc,
1916 				hal_ring_handle_t hal_ring_hdl,
1917 				hal_ring_desc_t ring_desc,
1918 				struct dp_rx_desc *rx_desc);
1919 
1920 /**
1921  * dp_rx_compute_delay() - Compute and fill in all timestamps
1922  *				to pass in correct fields
1923  * @vdev: vdev handle
1924  * @nbuf: network buffer
1925  *
1926  * Return: none
1927  */
1928 void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
1929 
1930 #ifdef QCA_PEER_EXT_STATS
1931 
1932 /**
1933  * dp_rx_compute_tid_delay - Compute per TID delay stats
1934  * @stats: TID delay stats to update
1935  * @nbuf: NBuffer
1936  *
1937  * Return: Void
1938  */
1939 void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
1940 			     qdf_nbuf_t nbuf);
1941 #endif /* QCA_PEER_EXT_STATS */
1942 
1943 #ifdef RX_DESC_DEBUG_CHECK
1944 /**
1945  * dp_rx_desc_check_magic() - check the magic value in dp_rx_desc
1946  * @rx_desc: rx descriptor pointer
1947  *
1948  * Return: true, if magic is correct, else false.
1949  */
1950 static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
1951 {
1952 	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
1953 		return false;
1954 
1955 	rx_desc->magic = 0;
1956 	return true;
1957 }
1958 
1959 /**
1960  * dp_rx_desc_prep() - prepare rx desc
1961  * @rx_desc: rx descriptor pointer to be prepared
1962  * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info *
1963  *
1964  * Note: assumption is that we are associating a nbuf which is mapped
1965  *
1966  * Return: none
1967  */
1968 static inline
1969 void dp_rx_desc_prep(struct dp_rx_desc *rx_desc,
1970 		     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
1971 {
1972 	rx_desc->magic = DP_RX_DESC_MAGIC;
1973 	rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf;
1974 	rx_desc->unmapped = 0;
1975 	rx_desc->nbuf_data_addr = (uint8_t *)qdf_nbuf_data(rx_desc->nbuf);
1976 }
1977 
1978 /**
1979  * dp_rx_desc_frag_prep() - prepare rx desc
1980  * @rx_desc: rx descriptor pointer to be prepared
1981  * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info *
1982  *
1983  * Note: assumption is that the frag address is mapped
1984  *
1985  * Return: none
1986  */
1987 #ifdef DP_RX_MON_MEM_FRAG
1988 static inline
1989 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
1990 			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
1991 {
1992 	rx_desc->magic = DP_RX_DESC_MAGIC;
1993 	rx_desc->rx_buf_start =
1994 		(uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr);
1995 	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
1996 	rx_desc->unmapped = 0;
1997 }
1998 #else
1999 static inline
2000 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
2001 			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
2002 {
2003 }
2004 #endif /* DP_RX_MON_MEM_FRAG */
2005 
2006 /**
2007  * dp_rx_desc_paddr_sanity_check() - paddr sanity for ring desc vs rx_desc
2008  * @rx_desc: rx descriptor
2009  * @ring_paddr: paddr obtained from the ring
2010  *
2011  * Return: true if the ring paddr matches the rx_desc nbuf paddr, else false
2012  */
2013 static inline
2014 bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc,
2015 				   uint64_t ring_paddr)
2016 {
2017 	return (ring_paddr == qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0));
2018 }
2019 #else
2020 
2021 static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
2022 {
2023 	return true;
2024 }
2025 
2026 static inline
2027 void dp_rx_desc_prep(struct dp_rx_desc *rx_desc,
2028 		     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
2029 {
2030 	rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf;
2031 	rx_desc->unmapped = 0;
2032 }
2033 
2034 #ifdef DP_RX_MON_MEM_FRAG
2035 static inline
2036 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
2037 			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
2038 {
2039 	rx_desc->rx_buf_start =
2040 		(uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr);
2041 	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
2042 	rx_desc->unmapped = 0;
2043 }
2044 #else
2045 static inline
2046 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
2047 			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
2048 {
2049 }
2050 #endif /* DP_RX_MON_MEM_FRAG */
2051 
2052 static inline
2053 bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc,
2054 				   uint64_t ring_paddr)
2055 {
2056 	return true;
2057 }
2058 #endif /* RX_DESC_DEBUG_CHECK */
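
/*
 * Illustrative usage sketch (not part of this header): how the magic and
 * paddr sanity helpers above may be combined while reaping a ring entry.
 * The surrounding reap-loop variables (ring_paddr, hal_ring_hdl, etc.)
 * are assumptions for illustration.
 *
 *	if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc)))
 *		dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
 *					   ring_desc, rx_desc);
 *
 *	if (qdf_unlikely(!dp_rx_desc_paddr_sanity_check(rx_desc, ring_paddr)))
 *		continue;	// paddr in ring does not match rx_desc nbuf
 */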
2059 
2060 /**
2061  * dp_rx_enable_mon_dest_frag() - Enable frag processing for
2062  *              monitor destination ring via frag.
2063  * @rx_desc_pool: Rx desc pool
2064  * @is_mon_dest_desc: Is it for monitor dest buffer
2065  *
2066  * Enable this flag only for monitor destination buffer processing
2067  * if DP_RX_MON_MEM_FRAG feature is enabled.
2068  * If flag is set then frag based function will be called for alloc,
2069  * map, prep desc and free ops for desc buffer else normal nbuf based
2070  * function will be called.
2071  *
2072  * Return: None
2073  */
2074 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
2075 				bool is_mon_dest_desc);
2076 
2077 /**
2078  * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err
2079  *			       frames to OS or wifi parse errors.
2080  * @soc: core DP main context
2081  * @nbuf: buffer pointer
2082  * @rx_tlv_hdr: start of rx tlv header
2083  * @txrx_peer: peer reference
2084  * @err_code: rxdma err code
2085  * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
2086  * pool_id have the same mapping)
2087  *
2088  * Return: None
2089  */
2090 void
2091 dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
2092 			uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
2093 			uint8_t err_code, uint8_t mac_id);
2094 
2095 #ifndef QCA_MULTIPASS_SUPPORT
2096 static inline
2097 bool dp_rx_multipass_process(struct dp_txrx_peer *peer, qdf_nbuf_t nbuf,
2098 			     uint8_t tid)
2099 {
2100 	return false;
2101 }
2102 #else
2103 /**
2104  * dp_rx_multipass_process - insert vlan tag on frames for traffic separation
2105  * @txrx_peer: DP txrx peer handle
2106  * @nbuf: skb
2107  * @tid: traffic priority
2108  *
2109  * Return: bool: true in case of success else false
2110  * Success is considered if:
2111  *  i. the frame has a vlan header
2112  *  ii. the frame comes from a different peer and does not need multipass processing
2113  * Failure is considered if:
2114  *  i. Frame comes from multipass peer but doesn't contain vlan header.
2115  *  In failure case, drop such frames.
2116  */
2117 bool dp_rx_multipass_process(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf,
2118 			     uint8_t tid);
2119 #endif
2120 
2121 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2122 
2123 #ifndef WLAN_RX_PKT_CAPTURE_ENH
2124 static inline
2125 QDF_STATUS dp_peer_set_rx_capture_enabled(struct dp_pdev *pdev,
2126 					  struct dp_peer *peer_handle,
2127 					  bool value, uint8_t *mac_addr)
2128 {
2129 	return QDF_STATUS_SUCCESS;
2130 }
2131 #endif
2132 
2133 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2134 
2135 /**
2136  * dp_rx_deliver_to_stack() - deliver pkts to network stack
2137  * Caller to hold peer refcount and check for valid peer
2138  * @soc: soc
2139  * @vdev: vdev
2140  * @peer: txrx peer
2141  * @nbuf_head: skb list head
2142  * @nbuf_tail: skb list tail
2143  *
2144  * Return: QDF_STATUS
2145  */
2146 QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc,
2147 				  struct dp_vdev *vdev,
2148 				  struct dp_txrx_peer *peer,
2149 				  qdf_nbuf_t nbuf_head,
2150 				  qdf_nbuf_t nbuf_tail);
2151 
2152 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
2153 /**
2154  * dp_rx_eapol_deliver_to_stack() - deliver pkts to network stack
2155  * caller to hold peer refcount and check for valid peer
2156  * @soc: soc
2157  * @vdev: vdev
2158  * @peer: peer
2159  * @nbuf_head: skb list head
2160  * @nbuf_tail: skb list tail
2161  *
2162  * Return: QDF_STATUS
2163  */
2164 QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc,
2165 					struct dp_vdev *vdev,
2166 					struct dp_txrx_peer *peer,
2167 					qdf_nbuf_t nbuf_head,
2168 					qdf_nbuf_t nbuf_tail);
2169 #endif
2170 
2171 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2172 
2173 #ifdef QCA_OL_RX_LOCK_LESS_ACCESS
2174 /**
2175  * dp_rx_srng_access_start() - Wrapper function to log access start of a hal ring
2176  * @int_ctx: pointer to DP interrupt context
2177  * @soc: DP soc structure pointer
2178  * @hal_ring_hdl: HAL ring handle
2179  *
2180  * Return: 0 on success; error on failure
2181  */
2182 static inline int
2183 dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc,
2184 			hal_ring_handle_t hal_ring_hdl)
2185 {
2186 	return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl);
2187 }
2188 
2189 /**
2190  * dp_rx_srng_access_end() - Wrapper function to log access end of a hal ring
2191  * @int_ctx: pointer to DP interrupt context
2192  * @soc: DP soc structure pointer
2193  * @hal_ring_hdl: HAL ring handle
2194  *
2195  * Return: None
2196  */
2197 static inline void
2198 dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc,
2199 		      hal_ring_handle_t hal_ring_hdl)
2200 {
2201 	hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl);
2202 }
2203 #else
2204 static inline int
2205 dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc,
2206 			hal_ring_handle_t hal_ring_hdl)
2207 {
2208 	return dp_srng_access_start(int_ctx, soc, hal_ring_hdl);
2209 }
2210 
2211 static inline void
2212 dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc,
2213 		      hal_ring_handle_t hal_ring_hdl)
2214 {
2215 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
2216 }
2217 #endif
2218 
2219 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2220 
2221 /**
2222  * dp_rx_wbm_sg_list_reset() - Initialize sg list
2223  *
2224  * This API should be called at soc init and after every sg processing.
2225  * @soc: DP SOC handle
2226  */
2227 static inline void dp_rx_wbm_sg_list_reset(struct dp_soc *soc)
2228 {
2229 	if (soc) {
2230 		soc->wbm_sg_param.wbm_is_first_msdu_in_sg = false;
2231 		soc->wbm_sg_param.wbm_sg_nbuf_head = NULL;
2232 		soc->wbm_sg_param.wbm_sg_nbuf_tail = NULL;
2233 		soc->wbm_sg_param.wbm_sg_desc_msdu_len = 0;
2234 	}
2235 }
2236 
2237 /**
2238  * dp_rx_wbm_sg_list_deinit() - De-initialize sg list
2239  *
2240  * This API should be called in the down path, to avoid any leak.
2241  * @soc: DP SOC handle
2242  */
2243 static inline void dp_rx_wbm_sg_list_deinit(struct dp_soc *soc)
2244 {
2245 	if (soc) {
2246 		if (soc->wbm_sg_param.wbm_sg_nbuf_head)
2247 			qdf_nbuf_list_free(soc->wbm_sg_param.wbm_sg_nbuf_head);
2248 
2249 		dp_rx_wbm_sg_list_reset(soc);
2250 	}
2251 }
2252 
2253 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2254 
2255 #ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
2256 #define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \
2257 	do {								   \
2258 		if (!soc->rx_buff_pool[rx_desc->pool_id].is_initialized) { \
2259 			DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf);	   \
2260 			break;						   \
2261 		}							   \
2262 		DP_RX_LIST_APPEND(ebuf_head, ebuf_tail, rx_desc->nbuf);	   \
2263 		if (!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)) {	   \
2264 			if (!dp_rx_buffer_pool_refill(soc, ebuf_head,	   \
2265 						      rx_desc->pool_id))   \
2266 				DP_RX_MERGE_TWO_LIST(head, tail,	   \
2267 						     ebuf_head, ebuf_tail);\
2268 			ebuf_head = NULL;				   \
2269 			ebuf_tail = NULL;				   \
2270 		}							   \
2271 	} while (0)
2272 #else
2273 #define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \
2274 	DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf)
2275 #endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */
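
/*
 * Illustrative usage sketch (not part of this header): DP_RX_PROCESS_NBUF
 * is intended to be invoked from the REO reap loop, where head/tail build
 * the delivery list and ebuf_head/ebuf_tail collect buffers that may be
 * recycled into the emergency buffer pool. The loop variables below are
 * assumptions for illustration.
 *
 *	qdf_nbuf_t head = NULL, tail = NULL;
 *	qdf_nbuf_t ebuf_head = NULL, ebuf_tail = NULL;
 *
 *	// for each reaped rx_desc:
 *	DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc);
 */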
2276 
2277 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2278 
2279 /**
2280  * dp_rx_link_desc_refill_duplicate_check() - check if link desc duplicate
2281  *					      to refill
2282  * @soc: DP SOC handle
2283  * @buf_info: the last link desc buf info
2284  * @ring_buf_info: current buf address pointer including link desc
2285  *
2286  * Return: none.
2287  */
2288 void dp_rx_link_desc_refill_duplicate_check(
2289 				struct dp_soc *soc,
2290 				struct hal_buf_info *buf_info,
2291 				hal_buff_addrinfo_t ring_buf_info);
2292 
2293 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
2294 /**
2295  * dp_rx_deliver_to_pkt_capture() - deliver rx packet to packet capture
2296  * @soc : dp_soc handle
2297  * @pdev: dp_pdev handle
2298  * @peer_id: peer_id of the peer for which completion came
2299  * @is_offload: flag to indicate if the packet is an offload packet
2300  * @netbuf: Buffer pointer
2301  *
2302  * This function is used to deliver rx packet to packet capture
2303  */
2304 void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc,  struct dp_pdev *pdev,
2305 				  uint16_t peer_id, uint32_t is_offload,
2306 				  qdf_nbuf_t netbuf);
2307 void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
2308 					  uint32_t is_offload);
2309 #else
2310 static inline void
2311 dp_rx_deliver_to_pkt_capture(struct dp_soc *soc,  struct dp_pdev *pdev,
2312 			     uint16_t peer_id, uint32_t is_offload,
2313 			     qdf_nbuf_t netbuf)
2314 {
2315 }
2316 
2317 static inline void
2318 dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
2319 				     uint32_t is_offload)
2320 {
2321 }
2322 #endif
2323 
2324 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2325 #ifdef FEATURE_MEC
2326 /**
2327  * dp_rx_mcast_echo_check() - check if the mcast pkt is a loop
2328  *			      back on same vap or a different vap.
2329  * @soc: core DP main context
2330  * @peer: dp peer handler
2331  * @rx_tlv_hdr: start of the rx TLV header
2332  * @nbuf: pkt buffer
2333  *
2334  * Return: bool (true if it is a looped back pkt else false)
2335  *
2336  */
2337 bool dp_rx_mcast_echo_check(struct dp_soc *soc,
2338 			    struct dp_txrx_peer *peer,
2339 			    uint8_t *rx_tlv_hdr,
2340 			    qdf_nbuf_t nbuf);
2341 #else
2342 static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
2343 					  struct dp_txrx_peer *peer,
2344 					  uint8_t *rx_tlv_hdr,
2345 					  qdf_nbuf_t nbuf)
2346 {
2347 	return false;
2348 }
2349 #endif /* FEATURE_MEC */
2350 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2351 
2352 #ifdef RECEIVE_OFFLOAD
2353 /**
2354  * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb
2355  * @soc: DP SOC handle
2356  * @rx_tlv: RX TLV received for the msdu
2357  * @msdu: msdu for which GRO info needs to be filled
2358  * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets
2359  *
2360  * Return: None
2361  */
2362 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
2363 			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt);
2364 #else
2365 static inline
2366 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
2367 			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
2368 {
2369 }
2370 #endif
2371 
2372 /**
2373  * dp_rx_msdu_stats_update() - update per msdu stats.
2374  * @soc: core txrx main context
2375  * @nbuf: pointer to the first msdu of an amsdu.
2376  * @rx_tlv_hdr: pointer to the start of RX TLV headers.
2377  * @txrx_peer: pointer to the txrx peer object.
2378  * @ring_id: reo dest ring number on which pkt is reaped.
2379  * @tid_stats: per tid rx stats.
2380  *
2381  * update all the per msdu stats for that nbuf.
2382  *
2383  * Return: void
2384  */
2385 void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
2386 			     uint8_t *rx_tlv_hdr,
2387 			     struct dp_txrx_peer *txrx_peer,
2388 			     uint8_t ring_id,
2389 			     struct cdp_tid_rx_stats *tid_stats);
2390 
2391 /**
2392  * dp_rx_deliver_to_stack_no_peer() - try to deliver rx data even if
2393  *				      no corresponding peer is found
2394  * @soc: core txrx main context
2395  * @nbuf: pkt skb pointer
2396  *
2397  * This function will try to deliver some RX special frames to the stack
2398  * even when no matching peer is found. For instance, in the LFR case, some
2399  * EAPOL data will be sent to the host before peer_map is done.
2400  *
2401  * Return: None
2402  */
2403 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf);
2404 
2405 /**
2406  * dp_rx_srng_get_num_pending() - get number of pending entries
2407  * @hal_soc: hal soc opaque pointer
2408  * @hal_ring_hdl: opaque pointer to the HAL Rx Ring
2409  * @num_entries: number of entries in the hal_ring.
2410  * @near_full: pointer to a boolean. This is set if ring is near full.
2411  *
2412  * The function returns the number of entries in a destination ring which are
2413  * yet to be reaped. The function also checks if the ring is near full.
2414  * If more than half of the ring needs to be reaped, the ring is considered
2415  * approaching full.
2416  * The function uses hal_srng_dst_num_valid_locked to get the number of valid
2417  * entries. It should not be called within a SRNG lock. HW pointer value is
2418  * synced into cached_hp.
2419  *
2420  * Return: Number of pending entries if any
2421  */
2422 uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
2423 				    hal_ring_handle_t hal_ring_hdl,
2424 				    uint32_t num_entries,
2425 				    bool *near_full);
2426 
2427 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
2428 /**
2429  * dp_rx_ring_record_entry() - Record an entry into the rx ring history.
2430  * @soc: Datapath soc structure
2431  * @ring_num: REO ring number
2432  * @ring_desc: REO ring descriptor
2433  *
2434  * Return: None
2435  */
2436 void dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
2437 			     hal_ring_desc_t ring_desc);
2438 #else
2439 static inline void
2440 dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
2441 			hal_ring_desc_t ring_desc)
2442 {
2443 }
2444 #endif
2445 
2446 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2447 #ifdef RX_DESC_SANITY_WAR
2448 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
2449 			     hal_ring_handle_t hal_ring_hdl,
2450 			     hal_ring_desc_t ring_desc,
2451 			     struct dp_rx_desc *rx_desc);
2452 #else
2453 static inline
2454 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
2455 			     hal_ring_handle_t hal_ring_hdl,
2456 			     hal_ring_desc_t ring_desc,
2457 			     struct dp_rx_desc *rx_desc)
2458 {
2459 	return QDF_STATUS_SUCCESS;
2460 }
2461 #endif
2462 
2463 #ifdef DP_RX_DROP_RAW_FRM
2464 /**
2465  * dp_rx_is_raw_frame_dropped() - if raw frame nbuf, free and drop
2466  * @nbuf: pkt skb pointer
2467  *
2468  * Return: true - raw frame, dropped
2469  *	   false - not raw frame, do nothing
2470  */
2471 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf);
2472 #else
2473 static inline
2474 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)
2475 {
2476 	return false;
2477 }
2478 #endif
2479 
2480 #ifdef RX_DESC_DEBUG_CHECK
2481 /**
2482  * dp_rx_desc_nbuf_sanity_check - Add sanity check to catch REO rx_desc paddr
2483  *				  corruption
2484  * @soc: DP SoC context
2485  * @ring_desc: REO ring descriptor
2486  * @rx_desc: Rx descriptor
2487  *
2488  * Return: QDF_STATUS
2489  */
2490 QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
2491 					hal_ring_desc_t ring_desc,
2492 					struct dp_rx_desc *rx_desc);
2493 #else
2494 static inline
2495 QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
2496 					hal_ring_desc_t ring_desc,
2497 					struct dp_rx_desc *rx_desc)
2498 {
2499 	return QDF_STATUS_SUCCESS;
2500 }
2501 #endif
2502 
2503 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
2504 /**
2505  * dp_rx_update_stats() - Update soc level rx packet count
2506  * @soc: DP soc handle
2507  * @nbuf: nbuf received
2508  *
2509  * Return: none
2510  */
2511 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf);
2512 #else
2513 static inline
2514 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf)
2515 {
2516 }
2517 #endif
2518 
2519 /**
2520  * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
2521  * @pdev: dp_pdev handle
2522  * @nbuf: pointer to the first msdu of an amsdu.
2523  * @rx_tlv_hdr: pointer to the start of RX TLV headers.
2524  *
2525  * The ipsumed field of the skb is set based on whether HW validated the
2526  * IP/TCP/UDP checksum.
2527  *
2528  * Return: void
2529  */
2530 #if defined(MAX_PDEV_CNT) && (MAX_PDEV_CNT == 1)
2531 static inline
2532 void dp_rx_cksum_offload(struct dp_pdev *pdev,
2533 			 qdf_nbuf_t nbuf,
2534 			 uint8_t *rx_tlv_hdr)
2535 {
2536 	qdf_nbuf_rx_cksum_t cksum = {0};
2537 	//TODO - Move this to ring desc api
2538 	//HAL_RX_MSDU_DESC_IP_CHKSUM_FAIL_GET
2539 	//HAL_RX_MSDU_DESC_TCP_UDP_CHKSUM_FAIL_GET
2540 	uint32_t ip_csum_err, tcp_udp_csum_er;
2541 
2542 	hal_rx_tlv_csum_err_get(pdev->soc->hal_soc, rx_tlv_hdr, &ip_csum_err,
2543 				&tcp_udp_csum_er);
2544 
2545 	if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) {
2546 		cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
2547 		qdf_nbuf_set_rx_cksum(nbuf, &cksum);
2548 	} else {
2549 		DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err);
2550 		DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er);
2551 	}
2552 }
2553 #else
2554 static inline
2555 void dp_rx_cksum_offload(struct dp_pdev *pdev,
2556 			 qdf_nbuf_t nbuf,
2557 			 uint8_t *rx_tlv_hdr)
2558 {
2559 }
2560 #endif
2561 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
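
/*
 * Illustrative usage sketch (not part of this header): dp_rx_cksum_offload()
 * is expected to be called per msdu once the TLV start address is known, so
 * the stack can skip software checksum validation when HW already verified
 * the IP/TCP/UDP checksums. Variable names are assumptions for illustration.
 *
 *	rx_tlv_hdr = qdf_nbuf_data(nbuf);
 *	dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);
 */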
2562 
2563 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
2564 static inline
2565 bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
2566 				   int max_reap_limit)
2567 {
2568 	bool limit_hit = false;
2569 
2570 	limit_hit =
2571 		(num_reaped >= max_reap_limit) ? true : false;
2572 
2573 	if (limit_hit)
2574 		DP_STATS_INC(soc, rx.reap_loop_pkt_limit_hit, 1)
2575 
2576 	return limit_hit;
2577 }
2578 
2579 static inline
2580 bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
2581 {
2582 	return soc->wlan_cfg_ctx->rx_enable_eol_data_check;
2583 }
2584 
2585 static inline int dp_rx_get_loop_pkt_limit(struct dp_soc *soc)
2586 {
2587 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
2588 
2589 	return cfg->rx_reap_loop_pkt_limit;
2590 }
2591 #else
2592 static inline
2593 bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
2594 				   int max_reap_limit)
2595 {
2596 	return false;
2597 }
2598 
2599 static inline
2600 bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
2601 {
2602 	return false;
2603 }
2604 
2605 static inline int dp_rx_get_loop_pkt_limit(struct dp_soc *soc)
2606 {
2607 	return 0;
2608 }
2609 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
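
/*
 * Illustrative usage sketch (not part of this header): how the reap-limit
 * helpers above are typically combined in the RX processing loop. The loop
 * structure and counters are assumptions for illustration.
 *
 *	max_reap_limit = dp_rx_get_loop_pkt_limit(soc);
 *
 *	while (qdf_likely(quota)) {
 *		// reap one ring entry, increment num_rx_bufs_reaped ...
 *		if (qdf_unlikely(dp_rx_reap_loop_pkt_limit_hit(soc,
 *						num_rx_bufs_reaped,
 *						max_reap_limit)))
 *			break;
 *	}
 *
 *	if (dp_rx_enable_eol_data_check(soc)) {
 *		// optionally re-check the ring for late entries before exit
 *	}
 */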
2610 
2611 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf);
2612 
2613 #ifdef QCA_SUPPORT_WDS_EXTENDED
2614 /**
2615  * dp_rx_is_list_ready() - Make different lists for 4-address
2616  *			   and 3-address frames
2617  * @nbuf_head: skb list head
2618  * @vdev: vdev
2619  * @txrx_peer: txrx_peer
2620  * @peer_id: peer id of new received frame
2621  * @vdev_id: vdev_id of new received frame
2622  *
2623  * Return: true if peer_ids are different.
2624  */
2625 static inline bool
2626 dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
2627 		    struct dp_vdev *vdev,
2628 		    struct dp_txrx_peer *txrx_peer,
2629 		    uint16_t peer_id,
2630 		    uint8_t vdev_id)
2631 {
2632 	if (nbuf_head && txrx_peer && txrx_peer->peer_id != peer_id)
2633 		return true;
2634 
2635 	return false;
2636 }
2637 #else
2638 static inline bool
2639 dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
2640 		    struct dp_vdev *vdev,
2641 		    struct dp_txrx_peer *txrx_peer,
2642 		    uint16_t peer_id,
2643 		    uint8_t vdev_id)
2644 {
2645 	if (nbuf_head && vdev && (vdev->vdev_id != vdev_id))
2646 		return true;
2647 
2648 	return false;
2649 }
2650 #endif
2651 
2652 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
2653 /**
2654  * dp_rx_mark_first_packet_after_wow_wakeup() - mark the first packet after wow wakeup
2655  * @pdev: pointer to dp_pdev structure
2656  * @rx_tlv: pointer to rx_pkt_tlvs structure
2657  * @nbuf: pointer to skb buffer
2658  *
2659  * Return: None
2660  */
2661 void dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev,
2662 					      uint8_t *rx_tlv,
2663 					      qdf_nbuf_t nbuf);
2664 #else
2665 static inline void
2666 dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev,
2667 					 uint8_t *rx_tlv,
2668 					 qdf_nbuf_t nbuf)
2669 {
2670 }
2671 #endif
2672 
2673 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
2674 static inline uint8_t
2675 dp_rx_get_defrag_bm_id(struct dp_soc *soc)
2676 {
2677 	return DP_DEFRAG_RBM(soc->wbm_sw0_bm_id);
2678 }
2679 
2680 static inline uint8_t
2681 dp_rx_get_rx_bm_id(struct dp_soc *soc)
2682 {
2683 	return DP_WBM2SW_RBM(soc->wbm_sw0_bm_id);
2684 }
2685 #else
2686 static inline uint8_t
2687 dp_rx_get_rx_bm_id(struct dp_soc *soc)
2688 {
2689 	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
2690 	uint8_t wbm2_sw_rx_rel_ring_id;
2691 
2692 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
2693 
2694 	return HAL_RX_BUF_RBM_SW_BM(soc->wbm_sw0_bm_id,
2695 				    wbm2_sw_rx_rel_ring_id);
2696 }
2697 
2698 static inline uint8_t
2699 dp_rx_get_defrag_bm_id(struct dp_soc *soc)
2700 {
2701 	return dp_rx_get_rx_bm_id(soc);
2702 }
2703 #endif
2704 
2705 static inline uint16_t
2706 dp_rx_peer_metadata_peer_id_get(struct dp_soc *soc, uint32_t peer_metadata)
2707 {
2708 	return soc->arch_ops.dp_rx_peer_metadata_peer_id_get(soc,
2709 							     peer_metadata);
2710 }
2711 
2712 /**
2713  * dp_rx_desc_pool_init_generic() - Generic Rx descriptors initialization
2714  * @soc: SOC handle
2715  * @rx_desc_pool: pointer to RX descriptor pool
2716  * @pool_id: pool ID
2717  *
2718  * Return: None
2719  */
2720 QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
2721 				  struct rx_desc_pool *rx_desc_pool,
2722 				  uint32_t pool_id);
2723 
2724 void dp_rx_desc_pool_deinit_generic(struct dp_soc *soc,
2725 				  struct rx_desc_pool *rx_desc_pool,
2726 				  uint32_t pool_id);
2727 
2728 /**
2729  * dp_rx_pkt_tracepoints_enabled() - Get the state of rx pkt tracepoint
2730  *
2731  * Return: True if any rx pkt tracepoint is enabled else false
2732  */
2733 static inline
2734 bool dp_rx_pkt_tracepoints_enabled(void)
2735 {
2736 	return (qdf_trace_dp_rx_tcp_pkt_enabled() ||
2737 		qdf_trace_dp_rx_udp_pkt_enabled() ||
2738 		qdf_trace_dp_rx_pkt_enabled());
2739 }
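
/*
 * Illustrative usage sketch (not part of this header): callers can gate
 * per-packet work (e.g. timestamping) on dp_rx_pkt_tracepoints_enabled()
 * so the cost is only paid when an rx packet tracepoint is active. The
 * qdf_nbuf_set_timestamp() call is an assumption for illustration.
 *
 *	if (qdf_unlikely(dp_rx_pkt_tracepoints_enabled()))
 *		qdf_nbuf_set_timestamp(nbuf);
 */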
2740 
2741 #ifdef FEATURE_DIRECT_LINK
2742 /**
2743  * dp_audio_smmu_map()- Map memory region into Audio SMMU CB
2744  * @qdf_dev: pointer to QDF device structure
2745  * @paddr: physical address
2746  * @iova: DMA address
2747  * @size: memory region size
2748  *
2749  * Return: 0 on success else failure code
2750  */
2751 static inline
2752 int dp_audio_smmu_map(qdf_device_t qdf_dev, qdf_dma_addr_t paddr,
2753 		      qdf_dma_addr_t iova, qdf_size_t size)
2754 {
2755 	return pld_audio_smmu_map(qdf_dev->dev, paddr, iova, size);
2756 }
2757 
2758 /**
2759  * dp_audio_smmu_unmap()- Remove memory region mapping from Audio SMMU CB
2760  * @qdf_dev: pointer to QDF device structure
2761  * @iova: DMA address
2762  * @size: memory region size
2763  *
2764  * Return: None
2765  */
2766 static inline
2767 void dp_audio_smmu_unmap(qdf_device_t qdf_dev, qdf_dma_addr_t iova,
2768 			 qdf_size_t size)
2769 {
2770 	pld_audio_smmu_unmap(qdf_dev->dev, iova, size);
2771 }
2772 #else
2773 static inline
2774 int dp_audio_smmu_map(qdf_device_t qdf_dev, qdf_dma_addr_t paddr,
2775 		      qdf_dma_addr_t iova, qdf_size_t size)
2776 {
2777 	return 0;
2778 }
2779 
2780 static inline
2781 void dp_audio_smmu_unmap(qdf_device_t qdf_dev, qdf_dma_addr_t iova,
2782 			 qdf_size_t size)
2783 {
2784 }
2785 #endif
2786 
2787 #if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
2788 static inline
2789 QDF_STATUS dp_pdev_rx_buffers_attach_simple(struct dp_soc *soc, uint32_t mac_id,
2790 					    struct dp_srng *rxdma_srng,
2791 					    struct rx_desc_pool *rx_desc_pool,
2792 					    uint32_t num_req_buffers)
2793 {
2794 	return __dp_pdev_rx_buffers_no_map_attach(soc, mac_id,
2795 						  rxdma_srng,
2796 						  rx_desc_pool,
2797 						  num_req_buffers);
2798 }
2799 
2800 static inline
2801 void dp_rx_buffers_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
2802 				    struct dp_srng *rxdma_srng,
2803 				    struct rx_desc_pool *rx_desc_pool,
2804 				    uint32_t num_req_buffers,
2805 				    union dp_rx_desc_list_elem_t **desc_list,
2806 				    union dp_rx_desc_list_elem_t **tail)
2807 {
2808 	__dp_rx_buffers_no_map_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
2809 					 num_req_buffers, desc_list, tail);
2810 }
2811 
2812 static inline
2813 void dp_rx_buffers_lt_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
2814 				       struct dp_srng *rxdma_srng,
2815 				       struct rx_desc_pool *rx_desc_pool,
2816 				       uint32_t num_req_buffers,
2817 				       union dp_rx_desc_list_elem_t **desc_list,
2818 				       union dp_rx_desc_list_elem_t **tail)
2819 {
2820 	__dp_rx_buffers_no_map_lt_replenish(soc, mac_id, rxdma_srng,
2821 					    rx_desc_pool);
2822 }
2823 
2824 #ifndef QCA_DP_NBUF_FAST_RECYCLE_CHECK
2825 static inline
2826 qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc,
2827 				      qdf_nbuf_t nbuf,
2828 				      uint32_t buf_size)
2829 {
2830 	qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
2831 				      (void *)(nbuf->data + buf_size));
2832 
2833 	return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
2834 }
2835 #else
2836 #define L3_HEADER_PAD 2
2837 static inline
2838 qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc,
2839 				      qdf_nbuf_t nbuf,
2840 				      uint32_t buf_size)
2841 {
2842 	if (nbuf->recycled_for_ds) {
2843 		nbuf->recycled_for_ds = 0;
2844 		return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
2845 	}
2846 
2847 	if (unlikely(!nbuf->fast_recycled)) {
2848 		qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
2849 					      (void *)(nbuf->data + buf_size));
2850 	} else {
2851 		/*
2852 		 * If fast_recycled is set, we can avoid invalidating the
2853 		 * complete buffer as it would have been invalidated by the
2854 		 * TX driver before being given to the recycler.
2855 		 *
2856 		 * But we still need to invalidate rx_pkt_tlv_size bytes, as
2857 		 * this area will not be invalidated in the TX path
2858 		 */
2859 		DP_STATS_INC(dp_soc, rx.fast_recycled, 1);
2860 		qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
2861 					      (void *)(nbuf->data +
2862 						       dp_soc->rx_pkt_tlv_size +
2863 						       L3_HEADER_PAD));
2864 	}
2865 
2866 	nbuf->fast_recycled = 0;
2867 	return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
2868 }
2869 #endif
2870 
2871 static inline
2872 qdf_dma_addr_t dp_rx_nbuf_sync(struct dp_soc *dp_soc,
2873 			       qdf_nbuf_t nbuf,
2874 			       uint32_t buf_size)
2875 {
2876 	qdf_nbuf_dma_inv_range((void *)nbuf->data,
2877 			       (void *)(nbuf->data + buf_size));
2878 
2879 	return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
2880 }
2881 
2882 #if !defined(SPECULATIVE_READ_DISABLED)
2883 static inline
2884 void dp_rx_nbuf_unmap(struct dp_soc *soc,
2885 		      struct dp_rx_desc *rx_desc,
2886 		      uint8_t reo_ring_num)
2887 {
2888 	struct rx_desc_pool *rx_desc_pool;
2889 	qdf_nbuf_t nbuf;
2890 
2891 	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
2892 	nbuf = rx_desc->nbuf;
2893 
2894 	qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
2895 			       (void *)(nbuf->data + rx_desc_pool->buf_size));
2896 }
2897 
2898 static inline
2899 void dp_rx_nbuf_unmap_pool(struct dp_soc *soc,
2900 			   struct rx_desc_pool *rx_desc_pool,
2901 			   qdf_nbuf_t nbuf)
2902 {
2903 	qdf_nbuf_dma_inv_range((void *)nbuf->data,
2904 			       (void *)(nbuf->data + rx_desc_pool->buf_size));
2905 }
2906 
2907 #else
2908 static inline
2909 void dp_rx_nbuf_unmap(struct dp_soc *soc,
2910 		      struct dp_rx_desc *rx_desc,
2911 		      uint8_t reo_ring_num)
2912 {
2913 }
2914 
2915 static inline
2916 void dp_rx_nbuf_unmap_pool(struct dp_soc *soc,
2917 			   struct rx_desc_pool *rx_desc_pool,
2918 			   qdf_nbuf_t nbuf)
2919 {
2920 }
2921 #endif
2922 
2923 static inline
2924 void dp_rx_per_core_stats_update(struct dp_soc *soc, uint8_t ring_id,
2925 				 uint32_t bufs_reaped)
2926 {
2927 }
2928 
2929 static inline
2930 qdf_nbuf_t dp_rx_nbuf_alloc(struct dp_soc *soc,
2931 			    struct rx_desc_pool *rx_desc_pool)
2932 {
2933 	return qdf_nbuf_alloc_simple(soc->osdev, rx_desc_pool->buf_size,
2934 				     RX_BUFFER_RESERVATION,
2935 				     rx_desc_pool->buf_alignment, FALSE);
2936 }
2937 
2938 static inline
2939 void  dp_rx_nbuf_free(qdf_nbuf_t nbuf)
2940 {
2941 	qdf_nbuf_free_simple(nbuf);
2942 }
2943 #else
2944 static inline
2945 QDF_STATUS dp_pdev_rx_buffers_attach_simple(struct dp_soc *soc, uint32_t mac_id,
2946 					    struct dp_srng *rxdma_srng,
2947 					    struct rx_desc_pool *rx_desc_pool,
2948 					    uint32_t num_req_buffers)
2949 {
2950 	return dp_pdev_rx_buffers_attach(soc, mac_id,
2951 					 rxdma_srng,
2952 					 rx_desc_pool,
2953 					 num_req_buffers);
2954 }
2955 
2956 static inline
2957 void dp_rx_buffers_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
2958 				    struct dp_srng *rxdma_srng,
2959 				    struct rx_desc_pool *rx_desc_pool,
2960 				    uint32_t num_req_buffers,
2961 				    union dp_rx_desc_list_elem_t **desc_list,
2962 				    union dp_rx_desc_list_elem_t **tail)
2963 {
2964 	dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
2965 				num_req_buffers, desc_list, tail, false);
2966 }
2967 
2968 static inline
2969 void dp_rx_buffers_lt_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
2970 				       struct dp_srng *rxdma_srng,
2971 				       struct rx_desc_pool *rx_desc_pool,
2972 				       uint32_t num_req_buffers,
2973 				       union dp_rx_desc_list_elem_t **desc_list,
2974 				       union dp_rx_desc_list_elem_t **tail)
2975 {
2976 	dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
2977 				num_req_buffers, desc_list, tail, false);
2978 }
2979 
2980 static inline
2981 qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc,
2982 				      qdf_nbuf_t nbuf,
2983 				      uint32_t buf_size)
2984 {
2985 	return (qdf_dma_addr_t)NULL;
2986 }
2987 
2988 static inline
2989 qdf_dma_addr_t dp_rx_nbuf_sync(struct dp_soc *dp_soc,
2990 			       qdf_nbuf_t nbuf,
2991 			       uint32_t buf_size)
2992 {
2993 	return (qdf_dma_addr_t)NULL;
2994 }
2995 
2996 static inline
2997 void dp_rx_nbuf_unmap(struct dp_soc *soc,
2998 		      struct dp_rx_desc *rx_desc,
2999 		      uint8_t reo_ring_num)
3000 {
3001 	struct rx_desc_pool *rx_desc_pool;
3002 
3003 	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
3004 	dp_ipa_reo_ctx_buf_mapping_lock(soc, reo_ring_num);
3005 
3006 	dp_audio_smmu_unmap(soc->osdev,
3007 			    QDF_NBUF_CB_PADDR(rx_desc->nbuf),
3008 			    rx_desc_pool->buf_size);
3009 
3010 	dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf,
3011 					  rx_desc_pool->buf_size,
3012 					  false, __func__, __LINE__);
3013 
3014 	qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
3015 				     QDF_DMA_FROM_DEVICE,
3016 				     rx_desc_pool->buf_size);
3017 
3018 	dp_ipa_reo_ctx_buf_mapping_unlock(soc, reo_ring_num);
3019 }
3020 
3021 static inline
3022 void dp_rx_nbuf_unmap_pool(struct dp_soc *soc,
3023 			   struct rx_desc_pool *rx_desc_pool,
3024 			   qdf_nbuf_t nbuf)
3025 {
3026 	dp_audio_smmu_unmap(soc->osdev, QDF_NBUF_CB_PADDR(nbuf),
3027 			    rx_desc_pool->buf_size);
3028 	dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, rx_desc_pool->buf_size,
3029 					  false, __func__, __LINE__);
3030 	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_FROM_DEVICE,
3031 				     rx_desc_pool->buf_size);
3032 }
3033 
3034 static inline
3035 void dp_rx_per_core_stats_update(struct dp_soc *soc, uint8_t ring_id,
3036 				 uint32_t bufs_reaped)
3037 {
3038 	int cpu_id = qdf_get_cpu();
3039 
3040 	DP_STATS_INC(soc, rx.ring_packets[cpu_id][ring_id], bufs_reaped);
3041 }
3042 
3043 static inline
3044 qdf_nbuf_t dp_rx_nbuf_alloc(struct dp_soc *soc,
3045 			    struct rx_desc_pool *rx_desc_pool)
3046 {
3047 	return qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
3048 			      RX_BUFFER_RESERVATION,
3049 			      rx_desc_pool->buf_alignment, FALSE);
3050 }
3051 
3052 static inline
3053 void dp_rx_nbuf_free(qdf_nbuf_t nbuf)
3054 {
3055 	qdf_nbuf_free(nbuf);
3056 }
3057 #endif
3058 
3059 #ifdef DP_UMAC_HW_RESET_SUPPORT
3060 /**
3061  * dp_rx_desc_reuse() - Reuse the rx descriptors to fill the rx buf ring
3062  * @soc: core txrx main context
3063  * @nbuf_list: nbuf list for delayed free
3064  *
3065  * Return: void
3066  */
3067 void dp_rx_desc_reuse(struct dp_soc *soc, qdf_nbuf_t *nbuf_list);
3068 
3069 /**
3070  * dp_rx_desc_delayed_free() - Delayed free of the rx descs
3071  *
3072  * @soc: core txrx main context
3073  *
3074  * Return: void
3075  */
3076 void dp_rx_desc_delayed_free(struct dp_soc *soc);
3077 #endif
3078 
3079 /**
3080  * dp_rx_get_txrx_peer_and_vdev() - Get txrx peer and vdev from peer id
3081  * @soc: core txrx main context
3082  * @nbuf: pointer to the first msdu of an amsdu.
3083  * @peer_id: Peer id of the peer
3084  * @txrx_ref_handle: Buffer to save the handle for txrx peer's reference
3085  * @pkt_capture_offload: Flag indicating if pkt capture offload is needed
3086  * @vdev: Buffer to hold pointer to vdev
3087  * @rx_pdev: Buffer to hold pointer to rx pdev
3088  * @dsf: delay stats flag
3089  * @old_tid: Old tid
3090  *
3091  * Get txrx peer and vdev from peer id
3092  *
3093  * Return: Pointer to txrx peer
3094  */
3095 static inline struct dp_txrx_peer *
3096 dp_rx_get_txrx_peer_and_vdev(struct dp_soc *soc,
3097 			     qdf_nbuf_t nbuf,
3098 			     uint16_t peer_id,
3099 			     dp_txrx_ref_handle *txrx_ref_handle,
3100 			     bool pkt_capture_offload,
3101 			     struct dp_vdev **vdev,
3102 			     struct dp_pdev **rx_pdev,
3103 			     uint32_t *dsf,
3104 			     uint32_t *old_tid)
3105 {
3106 	struct dp_txrx_peer *txrx_peer = NULL;
3107 
3108 	txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id, txrx_ref_handle,
3109 					       DP_MOD_ID_RX);
3110 
3111 	if (qdf_likely(txrx_peer)) {
3112 		*vdev = txrx_peer->vdev;
3113 	} else {
3114 		nbuf->next = NULL;
3115 		dp_rx_deliver_to_pkt_capture_no_peer(soc, nbuf,
3116 						     pkt_capture_offload);
3117 		if (!pkt_capture_offload)
3118 			dp_rx_deliver_to_stack_no_peer(soc, nbuf);
3119 
3120 		goto end;
3121 	}
3122 
3123 	if (qdf_unlikely(!(*vdev))) {
3124 		qdf_nbuf_free(nbuf);
3125 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
3126 		goto end;
3127 	}
3128 
3129 	*rx_pdev = (*vdev)->pdev;
3130 	*dsf = (*rx_pdev)->delay_stats_flag;
3131 	*old_tid = 0xff;
3132 
3133 end:
3134 	return txrx_peer;
3135 }
3136 
3137 static inline QDF_STATUS
3138 dp_peer_rx_reorder_queue_setup(struct dp_soc *soc, struct dp_peer *peer,
3139 			       int tid, uint32_t ba_window_size)
3140 {
3141 	return soc->arch_ops.dp_peer_rx_reorder_queue_setup(soc,
3142 							    peer, tid,
3143 							    ba_window_size);
3144 }
3145 
3146 static inline
3147 void dp_rx_nbuf_list_deliver(struct dp_soc *soc,
3148 			     struct dp_vdev *vdev,
3149 			     struct dp_txrx_peer *txrx_peer,
3150 			     uint16_t peer_id,
3151 			     uint8_t pkt_capture_offload,
3152 			     qdf_nbuf_t deliver_list_head,
3153 			     qdf_nbuf_t deliver_list_tail)
3154 {
3155 	qdf_nbuf_t nbuf, next;
3156 
3157 	if (qdf_likely(deliver_list_head)) {
3158 		if (qdf_likely(txrx_peer)) {
3159 			dp_rx_deliver_to_pkt_capture(soc, vdev->pdev, peer_id,
3160 						     pkt_capture_offload,
3161 						     deliver_list_head);
3162 			if (!pkt_capture_offload)
3163 				dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
3164 						       deliver_list_head,
3165 						       deliver_list_tail);
3166 		} else {
3167 			nbuf = deliver_list_head;
3168 			while (nbuf) {
3169 				next = nbuf->next;
3170 				nbuf->next = NULL;
3171 				dp_rx_deliver_to_stack_no_peer(soc, nbuf);
3172 				nbuf = next;
3173 			}
3174 		}
3175 	}
3176 }
3177 
3178 #ifdef DP_TX_RX_TPUT_SIMULATE
3179 /*
3180  * Change this macro value to simulate a different RX throughput: if OTA
3181  * is 100 Mbps and the target is 200 Mbps, the multiplication factor is 2,
3182  * so set the macro value to 1 (multiplication factor - 1).
3183  */
3184 #define DP_RX_PKTS_DUPLICATE_CNT 0
3185 static inline
3186 void dp_rx_nbuf_list_dup_deliver(struct dp_soc *soc,
3187 				 struct dp_vdev *vdev,
3188 				 struct dp_txrx_peer *txrx_peer,
3189 				 uint16_t peer_id,
3190 				 uint8_t pkt_capture_offload,
3191 				 qdf_nbuf_t ori_list_head,
3192 				 qdf_nbuf_t ori_list_tail)
3193 {
3194 	qdf_nbuf_t new_skb = NULL;
3195 	qdf_nbuf_t new_list_head = NULL;
3196 	qdf_nbuf_t new_list_tail = NULL;
3197 	qdf_nbuf_t nbuf = NULL;
3198 	int i;
3199 
3200 	for (i = 0; i < DP_RX_PKTS_DUPLICATE_CNT; i++) {
3201 		nbuf = ori_list_head;
3202 		new_list_head = NULL;
3203 		new_list_tail = NULL;
3204 
3205 		while (nbuf) {
3206 			new_skb = qdf_nbuf_copy(nbuf);
3207 			if (qdf_likely(new_skb))
3208 				DP_RX_LIST_APPEND(new_list_head,
3209 						  new_list_tail,
3210 						  new_skb);
3211 			else
3212 				dp_err("copy skb failed");
3213 
3214 			nbuf = qdf_nbuf_next(nbuf);
3215 		}
3216 
3217 		/* deliver the copied nbuf list */
3218 		dp_rx_nbuf_list_deliver(soc, vdev, txrx_peer, peer_id,
3219 					pkt_capture_offload,
3220 					new_list_head,
3221 					new_list_tail);
3222 	}
3223 
3224 	/* deliver the original skb_list */
3225 	dp_rx_nbuf_list_deliver(soc, vdev, txrx_peer, peer_id,
3226 				pkt_capture_offload,
3227 				ori_list_head,
3228 				ori_list_tail);
3229 }
3230 
3231 #define DP_RX_DELIVER_TO_STACK dp_rx_nbuf_list_dup_deliver
3232 
3233 #else /* !DP_TX_RX_TPUT_SIMULATE */
3234 
3235 #define DP_RX_DELIVER_TO_STACK dp_rx_nbuf_list_deliver
3236 
3237 #endif /* DP_TX_RX_TPUT_SIMULATE */
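
/*
 * Illustrative usage sketch (not part of this header): the RX processing
 * loop delivers the accumulated nbuf list through DP_RX_DELIVER_TO_STACK,
 * so the throughput-simulation variant is picked up transparently when
 * DP_TX_RX_TPUT_SIMULATE is enabled. Variable names are assumptions.
 *
 *	DP_RX_DELIVER_TO_STACK(soc, vdev, txrx_peer, peer_id,
 *			       pkt_capture_offload,
 *			       deliver_list_head, deliver_list_tail);
 */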
3238 
3239 /**
3240  * dp_rx_wbm_desc_nbuf_sanity_check() - Add sanity check to for WBM rx_desc
3241  *                                      paddr corruption
3242  * @soc: core txrx main context
3243  * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring
3244  * @ring_desc: REO ring descriptor
3245  * @rx_desc: Rx descriptor
3246  *
3247  * Return: QDF_STATUS
3248  */
3249 QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
3250 					    hal_ring_handle_t hal_ring_hdl,
3251 					    hal_ring_desc_t ring_desc,
3252 					    struct dp_rx_desc *rx_desc);
3253 /**
3254  * dp_rx_is_sg_formation_required() - Check if sg formation is required
3255  * @info: WBM desc info
3256  *
3257  * Return: True if sg is required else false
3258  */
3259 bool dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info);
3260 
3261 /**
3262  * dp_rx_err_tlv_invalidate() - Invalidate network buffer
3263  * @soc: core txrx main context
3264  * @nbuf: Network buffer to invalidate
3265  *
3266  * Return: NONE
3267  */
3268 void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
3269 			      qdf_nbuf_t nbuf);
3270 
3271 /*
3272  * dp_rx_wbm_sg_list_last_msdu_war() - war for HW issue
3273  *
3274  * This is a workaround (war) for a HW issue where the length is only valid in the last msdu
3275  * @soc: DP SOC handle
3276  *
3277  * Return: NONE
3278  */
3279 void dp_rx_wbm_sg_list_last_msdu_war(struct dp_soc *soc);
3280 
3281 /**
3282  * dp_rx_check_pkt_len() - Check for pktlen validity
3283  * @soc: DP SOC context
3284  * @pkt_len: computed length of the pkt from caller in bytes
3285  *
3286  * Return: true if pktlen > RX_BUFFER_SIZE, else return false
3287  *
3288  */
3289 bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len);
3290 
3291 /**
3292  * dp_rx_null_q_handle_invalid_peer_id_exception() - handle an invalid peer_id exception
3293  * @soc: pointer to dp_soc struct
3294  * @pool_id: Pool id to find dp_pdev
3295  * @rx_tlv_hdr: TLV header of received packet
3296  * @nbuf: SKB
3297  *
3298  * For certain types of packets, if the peer_id is not correct, the
3299  * driver may not be able to find the peer. Try finding the peer by
3300  * addr_2 of the received MPDU. If the peer is found, then most likely
3301  * sw_peer_id & ast_idx are corrupted.
3302  *
3303  * Return: true if the peer is found by addr_2 of the received MPDU, else false
3304  */
3305 bool dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
3306 						   uint8_t pool_id,
3307 						   uint8_t *rx_tlv_hdr,
3308 						   qdf_nbuf_t nbuf);
3309 
3310 /**
3311  * dp_rx_err_drop_3addr_mcast() - Check if feature drop_3ddr_mcast is enabled
3312  *                                If so, drop the multicast frame.
3313  * @vdev: datapath vdev
3314  * @rx_tlv_hdr: TLV header
3315  *
3316  * Return: true if packet is to be dropped,
3317  *         false, if packet is not dropped.
3318  */
3319 bool dp_rx_err_drop_3addr_mcast(struct dp_vdev *vdev, uint8_t *rx_tlv_hdr);
3320 
3321 /*
3322  * dp_rx_deliver_to_osif_stack() - function to deliver rx pkts to stack
3323  * @soc: DP soc
3324  * @vdev: DP vdev handle
3325  * @txrx_peer: pointer to the txrx_peer object
3326  * @nbuf: skb list head
3327  * @tail: skb list tail
3328  * @is_eapol: eapol pkt check
3329  *
3330  * Return: None
3331  */
3332 void
3333 dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
3334 			    struct dp_vdev *vdev,
3335 			    struct dp_txrx_peer *txrx_peer,
3336 			    qdf_nbuf_t nbuf,
3337 			    qdf_nbuf_t tail,
3338 			    bool is_eapol);
3339 #endif /* _DP_RX_H */
3340