xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.h (revision 737b028eeab9d1c8c0971fb81ffcb33313bb90f0)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #ifndef _DP_RX_H
21 #define _DP_RX_H
22 
23 #include "hal_rx.h"
24 #include "dp_peer.h"
25 #include "dp_internal.h"
26 #include <qdf_tracepoint.h>
27 #include "dp_ipa.h"
28 
29 #ifdef RXDMA_OPTIMIZATION
30 #ifndef RX_DATA_BUFFER_ALIGNMENT
31 #define RX_DATA_BUFFER_ALIGNMENT        128
32 #endif
33 #ifndef RX_MONITOR_BUFFER_ALIGNMENT
34 #define RX_MONITOR_BUFFER_ALIGNMENT     128
35 #endif
36 #else /* RXDMA_OPTIMIZATION */
37 #define RX_DATA_BUFFER_ALIGNMENT        4
38 #define RX_MONITOR_BUFFER_ALIGNMENT     4
39 #endif /* RXDMA_OPTIMIZATION */
40 
41 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
42 #define DP_WBM2SW_RBM(sw0_bm_id)	HAL_RX_BUF_RBM_SW1_BM(sw0_bm_id)
43 /* RBM value used for re-injecting defragmented packets into REO */
44 #define DP_DEFRAG_RBM(sw0_bm_id)	HAL_RX_BUF_RBM_SW3_BM(sw0_bm_id)
45 #endif
46 
47 /* Max buffers in the invalid peer SG list */
48 #define DP_MAX_INVALID_BUFFERS 10
49 #ifdef DP_INVALID_PEER_ASSERT
50 #define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \
51 		do {                                \
52 			qdf_assert_always(!(head)); \
53 			qdf_assert_always(!(tail)); \
54 		} while (0)
55 #else
56 #define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */
57 #endif
58 
59 #define RX_BUFFER_RESERVATION   0
60 
61 #define DP_DEFAULT_NOISEFLOOR	(-96)
62 
63 #define DP_RX_DESC_MAGIC 0xdec0de
64 
65 #define dp_rx_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX, params)
66 #define dp_rx_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX, params)
67 #define dp_rx_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX, params)
68 #define dp_rx_info(params...) \
69 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX, ## params)
70 #define dp_rx_info_rl(params...) \
71 	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX, ## params)
72 #define dp_rx_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX, params)
73 #define dp_rx_err_err(params...) \
74 	QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX_ERROR, params)
75 
76 /**
77  * enum dp_rx_desc_state
78  *
79  * @RX_DESC_REPLENISHED: rx desc replenished
80  * @RX_DESC_IN_FREELIST: rx desc in freelist
81  */
82 enum dp_rx_desc_state {
83 	RX_DESC_REPLENISHED,
84 	RX_DESC_IN_FREELIST,
85 };
86 
87 #ifndef QCA_HOST_MODE_WIFI_DISABLED
88 /**
89  * struct dp_rx_desc_dbg_info
90  *
91  * @freelist_caller: name of the function that put the
92  *  rx desc in the freelist
93  * @freelist_ts: timestamp when the rx desc was put in
94  *  the freelist
95  * @replenish_caller: name of the function that last
96  *  replenished the rx desc
97  * @replenish_ts: last replenish timestamp
98  * @prev_nbuf: previous nbuf info
99  * @prev_nbuf_data_addr: previous nbuf data address
100  */
101 struct dp_rx_desc_dbg_info {
102 	char freelist_caller[QDF_MEM_FUNC_NAME_SIZE];
103 	uint64_t freelist_ts;
104 	char replenish_caller[QDF_MEM_FUNC_NAME_SIZE];
105 	uint64_t replenish_ts;
106 	qdf_nbuf_t prev_nbuf;
107 	uint8_t *prev_nbuf_data_addr;
108 };
109 
110 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
111 
112 /**
113  * struct dp_rx_desc
114  *
115  * @nbuf:		VA of the "skb" posted
116  * @rx_buf_start:	VA of the original Rx buffer, before
117  *			movement of any skb->data pointer
118  * @paddr_buf_start:	PA of the original Rx buffer, before
119  *                      movement of any frag pointer
120  * @cookie:		index into the sw array which holds
121  *			the sw Rx descriptors
122  *			Cookie space is 21 bits:
123  *			lower 18 bits -- index
124  *			upper  3 bits -- pool_id
125  * @pool_id:		pool ID from which this descriptor was allocated.
126  *			Can only be used if there is no flow
127  *			steering
128  * @chip_id:		chip ID indicating the MLO chip;
129  *			valid/used only in case of multi-chip MLO
130  * @reuse_nbuf:		VA of the "skb" which is being reused
131  * @magic:		magic value used to sanity-check the descriptor
132  * @nbuf_data_addr:	VA of nbuf data posted
133  * @dbg_info:		debug info recorded for this descriptor
134  * @prev_paddr_buf_start: paddr of the previous nbuf attached to this rx_desc
135  * @in_use:		rx_desc is in use
136  * @unmapped:		used to mark the rx_desc as unmapped if the
137  *			corresponding nbuf is already unmapped
138  * @in_err_state:	Nbuf sanity failed for this descriptor.
139  * @has_reuse_nbuf:	the nbuf associated with this desc is also saved in
140  *			reuse_nbuf field
141  * @msdu_done_fail:	this particular rx_desc was dequeued from REO with
142  *			msdu_done bit not set in data buffer.
143  */
144 struct dp_rx_desc {
145 	qdf_nbuf_t nbuf;
146 #ifdef WLAN_SUPPORT_PPEDS
147 	qdf_nbuf_t reuse_nbuf;
148 #endif
149 	uint8_t *rx_buf_start;
150 	qdf_dma_addr_t paddr_buf_start;
151 	uint32_t cookie;
152 	uint8_t	 pool_id;
153 	uint8_t chip_id;
154 #ifdef RX_DESC_DEBUG_CHECK
155 	uint32_t magic;
156 	uint8_t *nbuf_data_addr;
157 	struct dp_rx_desc_dbg_info *dbg_info;
158 	qdf_dma_addr_t prev_paddr_buf_start;
159 #endif
160 	uint8_t	in_use:1,
161 		unmapped:1,
162 		in_err_state:1,
163 		has_reuse_nbuf:1,
164 		msdu_done_fail:1;
165 };
166 
167 #ifndef QCA_HOST_MODE_WIFI_DISABLED
168 #ifdef ATH_RX_PRI_SAVE
169 #define DP_RX_TID_SAVE(_nbuf, _tid) \
170 	(qdf_nbuf_set_priority(_nbuf, _tid))
171 #else
172 #define DP_RX_TID_SAVE(_nbuf, _tid)
173 #endif
174 
175 /* RX Descriptor Multi Page memory alloc related */
176 #define DP_RX_DESC_OFFSET_NUM_BITS 8
177 #define DP_RX_DESC_PAGE_ID_NUM_BITS 8
178 #define DP_RX_DESC_POOL_ID_NUM_BITS 4
179 
180 #define DP_RX_DESC_PAGE_ID_SHIFT DP_RX_DESC_OFFSET_NUM_BITS
181 #define DP_RX_DESC_POOL_ID_SHIFT \
182 		(DP_RX_DESC_OFFSET_NUM_BITS + DP_RX_DESC_PAGE_ID_NUM_BITS)
183 #define RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK \
184 	(((1 << DP_RX_DESC_POOL_ID_NUM_BITS) - 1) << DP_RX_DESC_POOL_ID_SHIFT)
185 #define RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK	\
186 			(((1 << DP_RX_DESC_PAGE_ID_NUM_BITS) - 1) << \
187 			 DP_RX_DESC_PAGE_ID_SHIFT)
188 #define RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK \
189 			((1 << DP_RX_DESC_OFFSET_NUM_BITS) - 1)
190 #define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(_cookie)		\
191 	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK) >>	\
192 			DP_RX_DESC_POOL_ID_SHIFT)
193 #define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(_cookie)		\
194 	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK) >>	\
195 			DP_RX_DESC_PAGE_ID_SHIFT)
196 #define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(_cookie)		\
197 	((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK)
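/*
 * Illustrative sketch (the cookie value below is an assumption, not one
 * produced by hardware): the multi page cookie packs offset (bits 0-7),
 * page_id (bits 8-15) and pool_id (bits 16-19), so for
 * cookie = ((2 << DP_RX_DESC_POOL_ID_SHIFT) |
 *	     (5 << DP_RX_DESC_PAGE_ID_SHIFT) | 3)
 *
 *	DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie) == 2
 *	DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie) == 5
 *	DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie)  == 3
 */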
198 
199 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
200 
201 #define RX_DESC_COOKIE_INDEX_SHIFT		0
202 #define RX_DESC_COOKIE_INDEX_MASK		0x3ffff /* 18 bits */
203 #define RX_DESC_COOKIE_POOL_ID_SHIFT		18
204 #define RX_DESC_COOKIE_POOL_ID_MASK		0x1c0000
205 
206 #define DP_RX_DESC_COOKIE_MAX	\
207 	(RX_DESC_COOKIE_INDEX_MASK | RX_DESC_COOKIE_POOL_ID_MASK)
208 
209 #define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie)		\
210 	(((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >>	\
211 			RX_DESC_COOKIE_POOL_ID_SHIFT)
212 
213 #define DP_RX_DESC_COOKIE_INDEX_GET(_cookie)		\
214 	(((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >>	\
215 			RX_DESC_COOKIE_INDEX_SHIFT)
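/*
 * Illustrative sketch (the cookie value is an assumption): with the 21-bit
 * cookie layout described in struct dp_rx_desc, a cookie such as
 * ((1 << RX_DESC_COOKIE_POOL_ID_SHIFT) | 10) decodes as
 *
 *	DP_RX_DESC_COOKIE_POOL_ID_GET(cookie) == 1
 *	DP_RX_DESC_COOKIE_INDEX_GET(cookie)   == 10
 */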
216 
217 #define dp_rx_add_to_free_desc_list(head, tail, new) \
218 	__dp_rx_add_to_free_desc_list(head, tail, new, __func__)
219 
220 #define dp_rx_add_to_free_desc_list_reuse(head, tail, new) \
221 	__dp_rx_add_to_free_desc_list_reuse(head, tail, new, __func__)
222 
223 #define dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
224 				num_buffers, desc_list, tail, req_only) \
225 	__dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
226 				  num_buffers, desc_list, tail, req_only, \
227 				  false, __func__)
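/*
 * Illustrative sketch of how the wrapper above expands (argument names are
 * placeholders): a call such as
 *
 *	dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
 *				num_entries, &desc_list, &tail, false);
 *
 * becomes __dp_rx_buffers_replenish(..., false, __func__), i.e.
 * force_replenish is fixed to false and the caller name is passed along for
 * the rx descriptor debug info.
 */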
228 
229 #ifdef WLAN_SUPPORT_RX_FISA
230 /**
231  * dp_rx_set_hdr_pad() - set l3 padding in nbuf cb
232  * @nbuf: pkt skb pointer
233  * @l3_padding: l3 padding
234  *
235  * Return: None
236  */
237 static inline
238 void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
239 {
240 	QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
241 }
242 #else
243 static inline
244 void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
245 {
246 }
247 #endif
248 
249 #ifdef DP_RX_SPECIAL_FRAME_NEED
250 /**
251  * dp_rx_is_special_frame() - check whether the RX frame is a special frame
252  *
253  * @nbuf: RX skb pointer
254  * @frame_mask: bitmask of the special frame types of interest
255  *
256  * Check whether the RX frame type matches the given mask
257  *
258  * Return: true - special frame, false - not a special frame
259  */
260 static inline
261 bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
262 {
263 	if (((frame_mask & FRAME_MASK_IPV4_ARP) &&
264 	     qdf_nbuf_is_ipv4_arp_pkt(nbuf)) ||
265 	    ((frame_mask & FRAME_MASK_IPV4_DHCP) &&
266 	     qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) ||
267 	    ((frame_mask & FRAME_MASK_IPV4_EAPOL) &&
268 	     qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) ||
269 	    ((frame_mask & FRAME_MASK_IPV6_DHCP) &&
270 	     qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))
271 		return true;
272 
273 	return false;
274 }
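/*
 * Illustrative usage sketch (mask choice and surrounding variables are
 * assumptions): a caller interested only in ARP and EAPOL could do
 *
 *	uint32_t mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_EAPOL;
 *
 *	if (dp_rx_is_special_frame(nbuf, mask))
 *		dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, mask,
 *					    rx_tlv_hdr);
 */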
275 
276 /**
277  * dp_rx_deliver_special_frame() - Deliver the RX special frame to stack
278  *				   if matches mask
279  *
280  * @soc: Datapath soc handler
281  * @peer: pointer to DP peer
282  * @nbuf: pointer to the skb of RX frame
283  * @frame_mask: bitmask of the special frame types of interest
284  * @rx_tlv_hdr: start of rx tlv header
285  *
286  * note: Msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and
287  * single nbuf is expected.
288  *
289  * Return: true - nbuf has been delivered to stack, false - not.
290  */
291 bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_txrx_peer *peer,
292 				 qdf_nbuf_t nbuf, uint32_t frame_mask,
293 				 uint8_t *rx_tlv_hdr);
294 #else
295 static inline
296 bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
297 {
298 	return false;
299 }
300 
301 static inline
302 bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_txrx_peer *peer,
303 				 qdf_nbuf_t nbuf, uint32_t frame_mask,
304 				 uint8_t *rx_tlv_hdr)
305 {
306 	return false;
307 }
308 #endif
309 
310 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
311 /**
312  * dp_rx_data_is_specific() - Used to exclude specific frames
313  *                            that are not suitable for deriving rx
314  *                            stats such as rate, mcs, nss, etc.
315  *
316  * @hal_soc_hdl: soc handler
317  * @rx_tlv_hdr: rx tlv header
318  * @nbuf: RX skb pointer
319  *
320  * Return: true - a specific frame, not suitable
321  *                for deriving rx stats from it.
322  *         false - a common frame, suitable for
323  *                 deriving rx stats from it.
324  */
325 static inline
326 bool dp_rx_data_is_specific(hal_soc_handle_t hal_soc_hdl,
327 			    uint8_t *rx_tlv_hdr,
328 			    qdf_nbuf_t nbuf)
329 {
330 	if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf)))
331 		return true;
332 
333 	if (!hal_rx_tlv_first_mpdu_get(hal_soc_hdl, rx_tlv_hdr))
334 		return true;
335 
336 	if (!hal_rx_msdu_end_first_msdu_get(hal_soc_hdl, rx_tlv_hdr))
337 		return true;
338 
339 	/* ARP and EAPOL are neither IPv4 ETH nor IPv6 ETH at the L3 level */
340 	if (qdf_likely(hal_rx_tlv_l3_type_get(hal_soc_hdl, rx_tlv_hdr) ==
341 	    QDF_NBUF_TRAC_IPV4_ETH_TYPE)) {
342 		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
343 			return true;
344 	} else if (qdf_likely(hal_rx_tlv_l3_type_get(hal_soc_hdl, rx_tlv_hdr) ==
345 		   QDF_NBUF_TRAC_IPV6_ETH_TYPE)) {
346 		if (qdf_nbuf_is_ipv6_dhcp_pkt(nbuf))
347 			return true;
348 	} else {
349 		return true;
350 	}
351 	return false;
352 }
353 #else
354 static inline
355 bool dp_rx_data_is_specific(hal_soc_handle_t hal_soc_hdl,
356 			    uint8_t *rx_tlv_hdr,
357 			    qdf_nbuf_t nbuf)
358 
359 {
360 	/*
361 	 * default return is true to make sure that rx stats
362 	 * will not be handled when this feature is disabled
363 	 */
364 	return true;
365 }
366 #endif /* FEATURE_RX_LINKSPEED_ROAM_TRIGGER */
367 
368 #ifndef QCA_HOST_MODE_WIFI_DISABLED
369 #ifdef DP_RX_DISABLE_NDI_MDNS_FORWARDING
370 static inline
371 bool dp_rx_check_ndi_mdns_fwding(struct dp_txrx_peer *ta_txrx_peer,
372 				 qdf_nbuf_t nbuf, uint8_t link_id)
373 {
374 	if (ta_txrx_peer->vdev->opmode == wlan_op_mode_ndi &&
375 	    qdf_nbuf_is_ipv6_mdns_pkt(nbuf)) {
376 		DP_PEER_PER_PKT_STATS_INC(ta_txrx_peer,
377 					  rx.intra_bss.mdns_no_fwd,
378 					  1, link_id);
379 		return false;
380 	}
381 	return true;
382 }
383 #else
384 static inline
385 bool dp_rx_check_ndi_mdns_fwding(struct dp_txrx_peer *ta_txrx_peer,
386 				 qdf_nbuf_t nbuf, uint8_t link_id)
387 {
388 	return true;
389 }
390 #endif
391 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
392 
393 /* DOC: Offset to obtain LLC hdr
394  *
395  * In the case of a WiFi parse error,
396  * to reach the LLC header from the beginning
397  * of the VLAN tag we need to skip 8 bytes:
398  * VLAN tag(4) + length(2) + length added
399  * by HW(2) = 8 bytes.
400  */
401 #define DP_SKIP_VLAN		8
402 
403 #ifndef QCA_HOST_MODE_WIFI_DISABLED
404 
405 /**
406  * struct dp_rx_cached_buf - rx cached buffer
407  * @node: linked list node
408  * @buf: skb buffer
409  */
410 struct dp_rx_cached_buf {
411 	qdf_list_node_t node;
412 	qdf_nbuf_t buf;
413 };
414 
415 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
416 
417 /**
418  * dp_rx_xor_block() - xor block of data
419  * @b: destination data block
420  * @a: source data block
421  * @len: length of the data to process
422  *
423  * Return: None
424  */
425 static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len)
426 {
427 	qdf_size_t i;
428 
429 	for (i = 0; i < len; i++)
430 		b[i] ^= a[i];
431 }
432 
433 /**
434  * dp_rx_rotl() - rotate the bits left
435  * @val: unsigned integer input value
436  * @bits: number of bits
437  *
438  * Return: Integer with left rotated by number of 'bits'
439  */
440 static inline uint32_t dp_rx_rotl(uint32_t val, int bits)
441 {
442 	return (val << bits) | (val >> (32 - bits));
443 }
444 
445 /**
446  * dp_rx_rotr() - rotate the bits right
447  * @val: unsigned integer input value
448  * @bits: number of bits
449  *
450  * Return: Integer with right rotated by number of 'bits'
451  */
452 static inline uint32_t dp_rx_rotr(uint32_t val, int bits)
453 {
454 	return (val >> bits) | (val << (32 - bits));
455 }
456 
457 /**
458  * dp_set_rx_queue() - set queue_mapping in skb
459  * @nbuf: skb
460  * @queue_id: rx queue_id
461  *
462  * Return: void
463  */
464 #ifdef QCA_OL_RX_MULTIQ_SUPPORT
465 static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
466 {
467 	qdf_nbuf_record_rx_queue(nbuf, queue_id);
468 	return;
469 }
470 #else
471 static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
472 {
473 }
474 #endif
475 
476 /**
477  * dp_rx_xswap() - swap adjacent bytes in each 16-bit half of the word
478  * @val: unsigned integer input value
479  *
480  * Return: Integer with the bytes of each 16-bit half swapped
481  */
482 static inline uint32_t dp_rx_xswap(uint32_t val)
483 {
484 	return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
485 }
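/*
 * Illustrative sketch of the bit helpers above (example values only):
 *
 *	dp_rx_rotl(0x80000001, 1) == 0x00000003
 *	dp_rx_rotr(0x00000003, 1) == 0x80000001
 *	dp_rx_xswap(0x11223344)   == 0x22114433
 */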
486 
487 /**
488  * dp_rx_get_le32_split() - get little endian 32 bits split
489  * @b0: byte 0
490  * @b1: byte 1
491  * @b2: byte 2
492  * @b3: byte 3
493  *
494  * Return: Integer with split little endian 32 bits
495  */
496 static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2,
497 					uint8_t b3)
498 {
499 	return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
500 }
501 
502 /**
503  * dp_rx_get_le32() - get little endian 32 bits
504  * @p: pointer to the source byte array
505  *
506  * Return: Integer with little endian 32 bits
507  */
508 static inline uint32_t dp_rx_get_le32(const uint8_t *p)
509 {
510 	return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]);
511 }
512 
513 /**
514  * dp_rx_put_le32() - put little endian 32 bits
515  * @p: destination char array
516  * @v: source 32-bit integer
517  *
518  * Return: None
519  */
520 static inline void dp_rx_put_le32(uint8_t *p, uint32_t v)
521 {
522 	p[0] = (v) & 0xff;
523 	p[1] = (v >> 8) & 0xff;
524 	p[2] = (v >> 16) & 0xff;
525 	p[3] = (v >> 24) & 0xff;
526 }
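/*
 * Illustrative sketch (example bytes only): the little-endian helpers
 * round-trip, e.g. for uint8_t p[4] = {0x78, 0x56, 0x34, 0x12},
 * dp_rx_get_le32(p) returns 0x12345678 and dp_rx_put_le32(p, 0x12345678)
 * writes the same four bytes back.
 */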
527 
528 /* Michael MIC block function: one mixing round of the Michael algorithm */
529 #define dp_rx_michael_block(l, r)	\
530 	do {					\
531 		r ^= dp_rx_rotl(l, 17);	\
532 		l += r;				\
533 		r ^= dp_rx_xswap(l);		\
534 		l += r;				\
535 		r ^= dp_rx_rotl(l, 3);	\
536 		l += r;				\
537 		r ^= dp_rx_rotr(l, 2);	\
538 		l += r;				\
539 	} while (0)
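/*
 * Illustrative sketch of a Michael MIC update loop built from the helpers
 * above (the data/l/r variables are the caller's; this is not the driver's
 * defrag code): each 4-byte message word is mixed into the (l, r) state as
 *
 *	l ^= dp_rx_get_le32(data);
 *	dp_rx_michael_block(l, r);
 *	data += 4;
 *
 * and the final (l, r) pair is serialized with dp_rx_put_le32().
 */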
540 
541 /**
542  * union dp_rx_desc_list_elem_t - rx descriptor free list element
543  *
544  * @next: Next pointer to form free list
545  * @rx_desc: DP Rx descriptor
546  */
547 union dp_rx_desc_list_elem_t {
548 	union dp_rx_desc_list_elem_t *next;
549 	struct dp_rx_desc rx_desc;
550 };
551 
552 #ifdef RX_DESC_MULTI_PAGE_ALLOC
553 /**
554  * dp_rx_desc_find() - find dp rx descriptor from page ID and offset
555  * @page_id: Page ID
556  * @offset: Offset of the descriptor element
557  * @rx_pool: RX pool
558  *
559  * Return: RX descriptor element
560  */
561 union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
562 					      struct rx_desc_pool *rx_pool);
563 
564 static inline
565 struct dp_rx_desc *dp_get_rx_desc_from_cookie(struct dp_soc *soc,
566 					      struct rx_desc_pool *pool,
567 					      uint32_t cookie)
568 {
569 	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
570 	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
571 	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
572 	struct rx_desc_pool *rx_desc_pool;
573 	union dp_rx_desc_list_elem_t *rx_desc_elem;
574 
575 	if (qdf_unlikely(pool_id >= MAX_PDEV_CNT))
576 		return NULL;
577 
578 	rx_desc_pool = &pool[pool_id];
579 	rx_desc_elem = (union dp_rx_desc_list_elem_t *)
580 		(rx_desc_pool->desc_pages.cacheable_pages[page_id] +
581 		rx_desc_pool->elem_size * offset);
582 
583 	return &rx_desc_elem->rx_desc;
584 }
585 
586 static inline
587 struct dp_rx_desc *dp_get_rx_mon_status_desc_from_cookie(struct dp_soc *soc,
588 							 struct rx_desc_pool *pool,
589 							 uint32_t cookie)
590 {
591 	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
592 	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
593 	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
594 	struct rx_desc_pool *rx_desc_pool;
595 	union dp_rx_desc_list_elem_t *rx_desc_elem;
596 
597 	if (qdf_unlikely(pool_id >= NUM_RXDMA_STATUS_RINGS_PER_PDEV))
598 		return NULL;
599 
600 	rx_desc_pool = &pool[pool_id];
601 	rx_desc_elem = (union dp_rx_desc_list_elem_t *)
602 		(rx_desc_pool->desc_pages.cacheable_pages[page_id] +
603 		rx_desc_pool->elem_size * offset);
604 
605 	return &rx_desc_elem->rx_desc;
606 }
607 
608 /**
609  * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
610  *			 the Rx descriptor on Rx DMA source ring buffer
611  * @soc: core txrx main context
612  * @cookie: cookie used to lookup virtual address
613  *
614  * Return: Pointer to the Rx descriptor
615  */
616 static inline
617 struct dp_rx_desc *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc,
618 					       uint32_t cookie)
619 {
620 	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_buf[0], cookie);
621 }
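/*
 * Illustrative usage sketch (rx_buf_cookie is whatever SW cookie the caller
 * read from the ring entry): the SW cookie is converted back to the SW rx
 * descriptor before use, e.g.
 *
 *	struct dp_rx_desc *rx_desc;
 *
 *	rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
 *	if (qdf_unlikely(!rx_desc))
 *		return;
 */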
622 
623 /**
624  * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
625  *			 the Rx descriptor on monitor ring buffer
626  * @soc: core txrx main context
627  * @cookie: cookie used to lookup virtual address
628  *
629  * Return: Pointer to the Rx descriptor
630  */
631 static inline
632 struct dp_rx_desc *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc,
633 					     uint32_t cookie)
634 {
635 	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_mon[0], cookie);
636 }
637 
638 /**
639  * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
640  *			 the Rx descriptor on monitor status ring buffer
641  * @soc: core txrx main context
642  * @cookie: cookie used to lookup virtual address
643  *
644  * Return: Pointer to the Rx descriptor
645  */
646 static inline
647 struct dp_rx_desc *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc,
648 						uint32_t cookie)
649 {
650 	return dp_get_rx_mon_status_desc_from_cookie(soc,
651 						     &soc->rx_desc_status[0],
652 						     cookie);
653 }
654 #else
655 
656 void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
657 			  uint32_t pool_size,
658 			  struct rx_desc_pool *rx_desc_pool);
659 
660 /**
661  * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
662  *			 the Rx descriptor on Rx DMA source ring buffer
663  * @soc: core txrx main context
664  * @cookie: cookie used to lookup virtual address
665  *
666  * Return: void *: Virtual Address of the Rx descriptor
667  */
668 static inline
669 void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie)
670 {
671 	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
672 	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
673 	struct rx_desc_pool *rx_desc_pool;
674 
675 	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
676 		return NULL;
677 
678 	rx_desc_pool = &soc->rx_desc_buf[pool_id];
679 
680 	if (qdf_unlikely(index >= rx_desc_pool->pool_size))
681 		return NULL;
682 
683 	return &rx_desc_pool->array[index].rx_desc;
684 }
685 
686 /**
687  * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
688  *			 the Rx descriptor on monitor ring buffer
689  * @soc: core txrx main context
690  * @cookie: cookie used to lookup virtual address
691  *
692  * Return: void *: Virtual Address of the Rx descriptor
693  */
694 static inline
695 void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie)
696 {
697 	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
698 	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
699 	/* TODO */
700 	/* Add sanity for pool_id & index */
701 	return &(soc->rx_desc_mon[pool_id].array[index].rx_desc);
702 }
703 
704 /**
705  * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
706  *			 the Rx descriptor on monitor status ring buffer
707  * @soc: core txrx main context
708  * @cookie: cookie used to lookup virtual address
709  *
710  * Return: void *: Virtual Address of the Rx descriptor
711  */
712 static inline
713 void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
714 {
715 	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
716 	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
717 	/* TODO */
718 	/* Add sanity for pool_id & index */
719 	return &(soc->rx_desc_status[pool_id].array[index].rx_desc);
720 }
721 #endif /* RX_DESC_MULTI_PAGE_ALLOC */
722 
723 #ifndef QCA_HOST_MODE_WIFI_DISABLED
724 
725 static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
726 {
727 	return vdev->ap_bridge_enabled;
728 }
729 
730 #ifdef DP_RX_DESC_COOKIE_INVALIDATE
731 static inline QDF_STATUS
732 dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc)
733 {
734 	if (qdf_unlikely(HAL_RX_REO_BUF_COOKIE_INVALID_GET(ring_desc)))
735 		return QDF_STATUS_E_FAILURE;
736 
737 	HAL_RX_REO_BUF_COOKIE_INVALID_SET(ring_desc);
738 	return QDF_STATUS_SUCCESS;
739 }
740 
741 /**
742  * dp_rx_cookie_reset_invalid_bit() - Reset the invalid bit of the cookie
743  *  field in ring descriptor
744  * @ring_desc: ring descriptor
745  *
746  * Return: None
747  */
748 static inline void
749 dp_rx_cookie_reset_invalid_bit(hal_ring_desc_t ring_desc)
750 {
751 	HAL_RX_REO_BUF_COOKIE_INVALID_RESET(ring_desc);
752 }
753 #else
754 static inline QDF_STATUS
755 dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc)
756 {
757 	return QDF_STATUS_SUCCESS;
758 }
759 
760 static inline void
761 dp_rx_cookie_reset_invalid_bit(hal_ring_desc_t ring_desc)
762 {
763 }
764 #endif
765 
766 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
767 
768 #if defined(RX_DESC_MULTI_PAGE_ALLOC) && \
769 	defined(DP_WAR_VALIDATE_RX_ERR_MSDU_COOKIE)
770 /**
771  * dp_rx_is_sw_cookie_valid() - check whether the SW cookie is valid
772  * @soc: dp soc ref
773  * @cookie: Rx buf SW cookie value
774  *
775  * Return: true if cookie is valid else false
776  */
777 static inline bool dp_rx_is_sw_cookie_valid(struct dp_soc *soc,
778 					    uint32_t cookie)
779 {
780 	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
781 	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
782 	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
783 	struct rx_desc_pool *rx_desc_pool;
784 
785 	if (qdf_unlikely(pool_id >= MAX_PDEV_CNT))
786 		goto fail;
787 
788 	rx_desc_pool = &soc->rx_desc_buf[pool_id];
789 
790 	if (page_id >= rx_desc_pool->desc_pages.num_pages ||
791 	    offset >= rx_desc_pool->desc_pages.num_element_per_page)
792 		goto fail;
793 
794 	return true;
795 
796 fail:
797 	DP_STATS_INC(soc, rx.err.invalid_cookie, 1);
798 	return false;
799 }
800 #else
801 /**
802  * dp_rx_is_sw_cookie_valid() - check whether the SW cookie is valid
803  * @soc: dp soc ref
804  * @cookie: Rx buf SW cookie value
805  *
806  * When multi page alloc is disabled, SW cookie validity is
807  * checked while fetching the Rx descriptor, so there is no need to check here
808  *
809  * Return: true if cookie is valid else false
810  */
811 static inline bool dp_rx_is_sw_cookie_valid(struct dp_soc *soc,
812 					    uint32_t cookie)
813 {
814 	return true;
815 }
816 #endif
817 
818 /**
819  * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
820  *					rx descriptor pool
821  * @rx_desc_pool: rx descriptor pool pointer
822  *
823  * Return: QDF_STATUS  QDF_STATUS_SUCCESS
824  *		       QDF_STATUS_E_NOMEM
825  */
826 QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool);
827 
828 /**
829  * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
830  *			     descriptors
831  * @soc: core txrx main context
832  * @pool_size: number of rx descriptors (size of the pool)
833  * @rx_desc_pool: rx descriptor pool pointer
834  *
835  * Return: QDF_STATUS  QDF_STATUS_SUCCESS
836  *		       QDF_STATUS_E_NOMEM
837  *		       QDF_STATUS_E_FAULT
838  */
839 QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
840 				 uint32_t pool_size,
841 				 struct rx_desc_pool *rx_desc_pool);
842 
843 /**
844  * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool
845  * @soc: core txrx main context
846  * @pool_id: pool_id which is one of 3 mac_ids
847  * @pool_size: size of the rx descriptor pool
848  * @rx_desc_pool: rx descriptor pool pointer
849  *
850  * Convert the pool of memory into a list of rx descriptors and create
851  * locks to access this list of rx descriptors.
852  *
853  */
854 void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
855 			  uint32_t pool_size,
856 			  struct rx_desc_pool *rx_desc_pool);
857 
858 /**
859  * dp_rx_add_desc_list_to_free_list() - append unused desc_list back to
860  *					freelist.
861  * @soc: core txrx main context
862  * @local_desc_list: local desc list provided by the caller
863  * @tail: pointer to the last desc of the local desc list
864  * @pool_id: pool_id which is one of 3 mac_ids
865  * @rx_desc_pool: rx descriptor pool pointer
866  */
867 void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
868 				union dp_rx_desc_list_elem_t **local_desc_list,
869 				union dp_rx_desc_list_elem_t **tail,
870 				uint16_t pool_id,
871 				struct rx_desc_pool *rx_desc_pool);
872 
873 /**
874  * dp_rx_get_free_desc_list() - provide a list of descriptors from
875  *				the free rx desc pool.
876  * @soc: core txrx main context
877  * @pool_id: pool_id which is one of 3 mac_ids
878  * @rx_desc_pool: rx descriptor pool pointer
879  * @num_descs: number of descs requested from freelist
880  * @desc_list: attach the descs to this list (output parameter)
881  * @tail: pointer to the last desc of the free list (output parameter)
882  *
883  * Return: number of descs allocated from free list.
884  */
885 uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
886 				struct rx_desc_pool *rx_desc_pool,
887 				uint16_t num_descs,
888 				union dp_rx_desc_list_elem_t **desc_list,
889 				union dp_rx_desc_list_elem_t **tail);
890 
891 /**
892  * dp_rx_pdev_desc_pool_alloc() -  allocate memory for software rx descriptor
893  *				   pool
894  * @pdev: core txrx pdev context
895  *
896  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
897  *			QDF_STATUS_E_NOMEM
898  */
899 QDF_STATUS dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev);
900 
901 /**
902  * dp_rx_pdev_desc_pool_free() - free software rx descriptor pool
903  * @pdev: core txrx pdev context
904  */
905 void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev);
906 
907 /**
908  * dp_rx_pdev_desc_pool_init() - initialize software rx descriptors
909  * @pdev: core txrx pdev context
910  *
911  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
912  *			QDF_STATUS_E_NOMEM
913  */
914 QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev);
915 
916 /**
917  * dp_rx_pdev_desc_pool_deinit() - de-initialize software rx descriptor pools
918  * @pdev: core txrx pdev context
919  *
920  * This function resets the freelist of rx descriptors and destroys locks
921  * associated with this list of descriptors.
922  */
923 void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev);
924 
925 void dp_rx_desc_pool_deinit(struct dp_soc *soc,
926 			    struct rx_desc_pool *rx_desc_pool,
927 			    uint32_t pool_id);
928 
929 QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);
930 
931 /**
932  * dp_rx_pdev_buffers_alloc() - Allocate nbufs (skbs) and replenish RxDMA ring
933  * @pdev: core txrx pdev context
934  *
935  * Return: QDF_STATUS - QDF_STATUS_SUCCESS
936  *			QDF_STATUS_E_NOMEM
937  */
938 QDF_STATUS dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev);
939 
940 /**
941  * dp_rx_pdev_buffers_free() - Free nbufs (skbs)
942  * @pdev: core txrx pdev context
943  */
944 void dp_rx_pdev_buffers_free(struct dp_pdev *pdev);
945 
946 void dp_rx_pdev_detach(struct dp_pdev *pdev);
947 
948 /**
949  * dp_print_napi_stats() - NAPI stats
950  * @soc: soc handle
951  */
952 void dp_print_napi_stats(struct dp_soc *soc);
953 
954 /**
955  * dp_rx_vdev_detach() - detach vdev from dp rx
956  * @vdev: virtual device instance
957  *
958  * Return: QDF_STATUS_SUCCESS: success
959  *         QDF_STATUS_E_RESOURCES: Error return
960  */
961 QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev);
962 
963 #ifndef QCA_HOST_MODE_WIFI_DISABLED
964 
965 uint32_t
966 dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
967 	      uint8_t reo_ring_num,
968 	      uint32_t quota);
969 
970 /**
971  * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
972  *		     multiple nbufs.
973  * @soc: core txrx main context
974  * @nbuf: pointer to the first msdu of an amsdu.
975  *
976  * This function implements the creation of RX frag_list for cases
977  * where an MSDU is spread across multiple nbufs.
978  *
979  * Return: returns the head nbuf which contains complete frag_list.
980  */
981 qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf);
982 
983 /**
984  * dp_rx_is_sg_supported() - SG packets processing supported or not.
985  *
986  * Return: returns true when processing is supported else false.
987  */
988 bool dp_rx_is_sg_supported(void);
989 
990 /**
991  * dp_rx_desc_nbuf_and_pool_free() - free the sw rx desc pool called during
992  *				     de-initialization of wifi module.
993  *
994  * @soc: core txrx main context
995  * @pool_id: pool_id which is one of 3 mac_ids
996  * @rx_desc_pool: rx descriptor pool pointer
997  *
998  * Return: None
999  */
1000 void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
1001 				   struct rx_desc_pool *rx_desc_pool);
1002 
1003 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1004 
1005 /**
1006  * dp_rx_desc_nbuf_free() - free the sw rx desc nbufs called during
1007  *			    de-initialization of wifi module.
1008  *
1009  * @soc: core txrx main context
1010  * @rx_desc_pool: rx descriptor pool pointer
1011  * @is_mon_pool: true if this is a monitor pool
1012  *
1013  * Return: None
1014  */
1015 void dp_rx_desc_nbuf_free(struct dp_soc *soc,
1016 			  struct rx_desc_pool *rx_desc_pool,
1017 			  bool is_mon_pool);
1018 
1019 #ifdef DP_RX_MON_MEM_FRAG
1020 /**
1021  * dp_rx_desc_frag_free() - free the sw rx desc frag called during
1022  *			    de-initialization of wifi module.
1023  *
1024  * @soc: core txrx main context
1025  * @rx_desc_pool: rx descriptor pool pointer
1026  *
1027  * Return: None
1028  */
1029 void dp_rx_desc_frag_free(struct dp_soc *soc,
1030 			  struct rx_desc_pool *rx_desc_pool);
1031 #else
1032 static inline
1033 void dp_rx_desc_frag_free(struct dp_soc *soc,
1034 			  struct rx_desc_pool *rx_desc_pool)
1035 {
1036 }
1037 #endif
1038 /**
1039  * dp_rx_desc_pool_free() - free the sw rx desc array called during
1040  *			    de-initialization of wifi module.
1041  *
1042  * @soc: core txrx main context
1043  * @rx_desc_pool: rx descriptor pool pointer
1044  *
1045  * Return: None
1046  */
1047 void dp_rx_desc_pool_free(struct dp_soc *soc,
1048 			  struct rx_desc_pool *rx_desc_pool);
1049 
1050 /**
1051  * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
1052  *				pkts to RAW mode simulation to
1053  *				decapsulate the pkt.
1054  * @vdev: vdev on which RAW mode is enabled
1055  * @nbuf_list: list of RAW pkts to process
1056  * @peer: peer object from which the pkt is received
1057  * @link_id: link Id on which the packet is received
1058  *
1059  * Return: void
1060  */
1061 void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
1062 		       struct dp_txrx_peer *peer, uint8_t link_id);
1063 
1064 #ifdef RX_DESC_LOGGING
1065 /**
1066  * dp_rx_desc_alloc_dbg_info() - Alloc memory for rx descriptor debug
1067  *  structure
1068  * @rx_desc: rx descriptor pointer
1069  *
1070  * Return: None
1071  */
1072 static inline
1073 void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc)
1074 {
1075 	rx_desc->dbg_info = qdf_mem_malloc(sizeof(struct dp_rx_desc_dbg_info));
1076 }
1077 
1078 /**
1079  * dp_rx_desc_free_dbg_info() - Free rx descriptor debug
1080  *  structure memory
1081  * @rx_desc: rx descriptor pointer
1082  *
1083  * Return: None
1084  */
1085 static inline
1086 void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc)
1087 {
1088 	qdf_mem_free(rx_desc->dbg_info);
1089 }
1090 
1091 /**
1092  * dp_rx_desc_update_dbg_info() - Update rx descriptor debug info
1093  *  structure memory
1094  * @rx_desc: rx descriptor pointer
1095  * @func_name: name of calling function
1096  * @flag:
1097  *
1098  * Return: None
1099  */
1100 static
1101 void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc,
1102 				const char *func_name, uint8_t flag)
1103 {
1104 	struct dp_rx_desc_dbg_info *info = rx_desc->dbg_info;
1105 
1106 	if (!info)
1107 		return;
1108 
1109 	if (flag == RX_DESC_REPLENISHED) {
1110 		qdf_str_lcopy(info->replenish_caller, func_name,
1111 			      QDF_MEM_FUNC_NAME_SIZE);
1112 		info->replenish_ts = qdf_get_log_timestamp();
1113 	} else {
1114 		qdf_str_lcopy(info->freelist_caller, func_name,
1115 			      QDF_MEM_FUNC_NAME_SIZE);
1116 		info->freelist_ts = qdf_get_log_timestamp();
1117 		info->prev_nbuf = rx_desc->nbuf;
1118 		info->prev_nbuf_data_addr = rx_desc->nbuf_data_addr;
1119 		rx_desc->nbuf_data_addr = NULL;
1120 	}
1121 }
1122 #else
1123 
1124 static inline
1125 void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc)
1126 {
1127 }
1128 
1129 static inline
1130 void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc)
1131 {
1132 }
1133 
1134 static inline
1135 void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc,
1136 				const char *func_name, uint8_t flag)
1137 {
1138 }
1139 #endif /* RX_DESC_LOGGING */
1140 
1141 /**
1142  * __dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list
1143  *
1144  * @head: pointer to the head of local free list
1145  * @tail: pointer to the tail of local free list
1146  * @new: new descriptor that is added to the free list
1147  * @func_name: caller func name
1148  *
1149  * Return: void
1150  */
1151 static inline
1152 void __dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head,
1153 				 union dp_rx_desc_list_elem_t **tail,
1154 				 struct dp_rx_desc *new, const char *func_name)
1155 {
1156 	qdf_assert(head && new);
1157 
1158 	dp_rx_desc_update_dbg_info(new, func_name, RX_DESC_IN_FREELIST);
1159 
1160 	new->nbuf = NULL;
1161 	new->in_use = 0;
1162 
1163 	((union dp_rx_desc_list_elem_t *)new)->next = *head;
1164 	*head = (union dp_rx_desc_list_elem_t *)new;
1165 	/* reset tail if head->next is NULL */
1166 	if (!*tail || !(*head)->next)
1167 		*tail = *head;
1168 }
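/*
 * Illustrative usage sketch (head/tail are local, caller-owned lists):
 * callers normally go through the dp_rx_add_to_free_desc_list() wrapper
 * defined earlier, which records the caller name for the debug info, e.g.
 *
 *	union dp_rx_desc_list_elem_t *head = NULL, *tail = NULL;
 *
 *	dp_rx_add_to_free_desc_list(&head, &tail, rx_desc);
 */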
1169 
1170 /**
1171  * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
1172  * @soc: DP SOC handle
1173  * @nbuf: network buffer
1174  * @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and
1175  * pool_id has same mapping)
1176  *
1177  * Return: integer type
1178  */
1179 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
1180 				   uint8_t mac_id);
1181 
1182 /**
1183  * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
1184  * @soc: DP SOC handle
1185  * @mpdu: mpdu for which peer is invalid
1186  * @mpdu_done: if an mpdu is completed
1187  * @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and
1188  * pool_id has same mapping)
1189  *
1190  * Return: integer type
1191  */
1192 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
1193 		qdf_nbuf_t mpdu, bool mpdu_done, uint8_t mac_id);
1194 
1195 #define DP_RX_HEAD_APPEND(head, elem) \
1196 	do {                                                            \
1197 		qdf_nbuf_set_next((elem), (head));			\
1198 		(head) = (elem);                                        \
1199 	} while (0)
1200 
1201 
1202 #define DP_RX_LIST_APPEND(head, tail, elem) \
1203 	do {                                                          \
1204 		if (!(head)) {                                        \
1205 			(head) = (elem);                              \
1206 			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) = 1;\
1207 		} else {                                              \
1208 			qdf_nbuf_set_next((tail), (elem));            \
1209 			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head)++;  \
1210 		}                                                     \
1211 		(tail) = (elem);                                      \
1212 		qdf_nbuf_set_next((tail), NULL);                      \
1213 	} while (0)
1214 
1215 #define DP_RX_MERGE_TWO_LIST(phead, ptail, chead, ctail) \
1216 	do {                                                          \
1217 		if (!(phead)) {                                       \
1218 			(phead) = (chead);                            \
1219 		} else {                                              \
1220 			qdf_nbuf_set_next((ptail), (chead));          \
1221 			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(phead) += \
1222 			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(chead);   \
1223 		}                                                     \
1224 		(ptail) = (ctail);                                    \
1225 		qdf_nbuf_set_next((ptail), NULL);                     \
1226 	} while (0)
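/*
 * Illustrative sketch of the nbuf list helpers above (variable names are
 * placeholders): a delivery list is typically built with
 *
 *	qdf_nbuf_t head = NULL, tail = NULL;
 *
 *	DP_RX_LIST_APPEND(head, tail, nbuf);
 *
 * which also maintains QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head); two such
 * lists can later be chained with DP_RX_MERGE_TWO_LIST().
 */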
1227 
1228 #if defined(QCA_PADDR_CHECK_ON_3RD_PARTY_PLATFORM)
1229 /*
1230  * on some third-party platforms, the memory below 0x2000
1231  * is reserved for target use, so any memory allocated in this
1232  * region should not be used by the host
1233  */
1234 #define MAX_RETRY 50
1235 #define DP_PHY_ADDR_RESERVED	0x2000
1236 #elif defined(BUILD_X86)
1237 /*
1238  * in M2M emulation platforms (x86), the memory below 0x50000000
1239  * is reserved for target use, so any memory allocated in this
1240  * region should not be used by the host
1241  */
1242 #define MAX_RETRY 100
1243 #define DP_PHY_ADDR_RESERVED	0x50000000
1244 #endif
1245 
1246 #if defined(QCA_PADDR_CHECK_ON_3RD_PARTY_PLATFORM) || defined(BUILD_X86)
1247 /**
1248  * dp_check_paddr() - check if current phy address is valid or not
1249  * @dp_soc: core txrx main context
1250  * @rx_netbuf: skb buffer
1251  * @paddr: physical address
1252  * @rx_desc_pool: struct of rx descriptor pool
1253  * Check if the physical address of nbuf->data is less
1254  * than DP_PHY_ADDR_RESERVED; if so, free the nbuf and try
1255  * allocating a new nbuf. We retry up to MAX_RETRY times.
1256  *
1257  * This is a temp WAR till we fix it properly.
1258  *
1259  * Return: success or failure.
1260  */
1261 static inline
1262 int dp_check_paddr(struct dp_soc *dp_soc,
1263 		   qdf_nbuf_t *rx_netbuf,
1264 		   qdf_dma_addr_t *paddr,
1265 		   struct rx_desc_pool *rx_desc_pool)
1266 {
1267 	uint32_t nbuf_retry = 0;
1268 	int32_t ret;
1269 
1270 	if (qdf_likely(*paddr > DP_PHY_ADDR_RESERVED))
1271 		return QDF_STATUS_SUCCESS;
1272 
1273 	do {
1274 		dp_debug("invalid phy addr 0x%llx, trying again",
1275 			 (uint64_t)(*paddr));
1276 		nbuf_retry++;
1277 		if ((*rx_netbuf)) {
1278 			/* Not freeing the buffer intentionally.
1279 			 * Observed that the same buffer gets
1280 			 * re-allocated, resulting in longer load time and
1281 			 * WMI init timeout.
1282 			 * This buffer is anyway not useful, so skip it.
1283 			 * Add such buffers to the invalid list and free
1284 			 * them at driver unload.
1285 			 */
1286 			qdf_nbuf_unmap_nbytes_single(dp_soc->osdev,
1287 						     *rx_netbuf,
1288 						     QDF_DMA_FROM_DEVICE,
1289 						     rx_desc_pool->buf_size);
1290 			qdf_nbuf_queue_add(&dp_soc->invalid_buf_queue,
1291 					   *rx_netbuf);
1292 		}
1293 
1294 		*rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
1295 					    rx_desc_pool->buf_size,
1296 					    RX_BUFFER_RESERVATION,
1297 					    rx_desc_pool->buf_alignment,
1298 					    FALSE);
1299 
1300 		if (qdf_unlikely(!(*rx_netbuf)))
1301 			return QDF_STATUS_E_FAILURE;
1302 
1303 		ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev,
1304 						 *rx_netbuf,
1305 						 QDF_DMA_FROM_DEVICE,
1306 						 rx_desc_pool->buf_size);
1307 
1308 		if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
1309 			qdf_nbuf_free(*rx_netbuf);
1310 			*rx_netbuf = NULL;
1311 			continue;
1312 		}
1313 
1314 		*paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0);
1315 
1316 		if (qdf_likely(*paddr > DP_PHY_ADDR_RESERVED))
1317 			return QDF_STATUS_SUCCESS;
1318 
1319 	} while (nbuf_retry < MAX_RETRY);
1320 
1321 	if ((*rx_netbuf)) {
1322 		qdf_nbuf_unmap_nbytes_single(dp_soc->osdev,
1323 					     *rx_netbuf,
1324 					     QDF_DMA_FROM_DEVICE,
1325 					     rx_desc_pool->buf_size);
1326 		qdf_nbuf_queue_add(&dp_soc->invalid_buf_queue,
1327 				   *rx_netbuf);
1328 	}
1329 
1330 	return QDF_STATUS_E_FAILURE;
1331 }
1332 
1333 #else
1334 static inline
1335 int dp_check_paddr(struct dp_soc *dp_soc,
1336 		   qdf_nbuf_t *rx_netbuf,
1337 		   qdf_dma_addr_t *paddr,
1338 		   struct rx_desc_pool *rx_desc_pool)
1339 {
1340 	return QDF_STATUS_SUCCESS;
1341 }
1342 
1343 #endif
1344 
1345 /**
1346  * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
1347  *				   the MSDU Link Descriptor
1348  * @soc: core txrx main context
1349  * @buf_info: buf_info includes the cookie that is used to look up the
1350  * virtual address of the link descriptor after deriving the page id
1351  * and the offset or index of the desc on the associated page.
1352  *
1353  * This is the VA of the link descriptor, that HAL layer later uses to
1354  * retrieve the list of MSDU's for a given MPDU.
1355  *
1356  * Return: void *: Virtual Address of the Rx descriptor
1357  */
1358 static inline
1359 void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
1360 				  struct hal_buf_info *buf_info)
1361 {
1362 	void *link_desc_va;
1363 	struct qdf_mem_multi_page_t *pages;
1364 	uint16_t page_id = LINK_DESC_COOKIE_PAGE_ID(buf_info->sw_cookie);
1365 
1366 	pages = &soc->link_desc_pages;
1367 	if (!pages)
1368 		return NULL;
1369 	if (qdf_unlikely(page_id >= pages->num_pages))
1370 		return NULL;
1371 	link_desc_va = pages->dma_pages[page_id].page_v_addr_start +
1372 		(buf_info->paddr - pages->dma_pages[page_id].page_p_addr);
1373 	return link_desc_va;
1374 }
1375 
1376 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1377 #ifdef DISABLE_EAPOL_INTRABSS_FWD
1378 #ifdef WLAN_FEATURE_11BE_MLO
1379 static inline bool dp_nbuf_dst_addr_is_mld_addr(struct dp_vdev *vdev,
1380 						qdf_nbuf_t nbuf)
1381 {
1382 	struct qdf_mac_addr *self_mld_mac_addr =
1383 				(struct qdf_mac_addr *)vdev->mld_mac_addr.raw;
1384 	return qdf_is_macaddr_equal(self_mld_mac_addr,
1385 				    (struct qdf_mac_addr *)qdf_nbuf_data(nbuf) +
1386 				    QDF_NBUF_DEST_MAC_OFFSET);
1387 }
1388 #else
1389 static inline bool dp_nbuf_dst_addr_is_mld_addr(struct dp_vdev *vdev,
1390 						qdf_nbuf_t nbuf)
1391 {
1392 	return false;
1393 }
1394 #endif
1395 
1396 static inline bool dp_nbuf_dst_addr_is_self_addr(struct dp_vdev *vdev,
1397 						 qdf_nbuf_t nbuf)
1398 {
1399 	return qdf_is_macaddr_equal((struct qdf_mac_addr *)vdev->mac_addr.raw,
1400 				    (struct qdf_mac_addr *)qdf_nbuf_data(nbuf) +
1401 				    QDF_NBUF_DEST_MAC_OFFSET);
1402 }
1403 
1404 /**
1405  * dp_rx_intrabss_eapol_drop_check() - drop intra-BSS EAPOL frames whose
1406  *  DA does not match the vdev (or MLD) mac addr; forwarding is not allowed.
1407  * @soc: core txrx main context
1408  * @ta_txrx_peer: source peer entry
1409  * @rx_tlv_hdr: start address of rx tlvs
1410  * @nbuf: nbuf that has to be intrabss forwarded
1411  *
1412  * Return: true if the EAPOL frame is dropped (freed), false otherwise
1413  */
1414 static inline
1415 bool dp_rx_intrabss_eapol_drop_check(struct dp_soc *soc,
1416 				     struct dp_txrx_peer *ta_txrx_peer,
1417 				     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf)
1418 {
1419 	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf) &&
1420 			 !(dp_nbuf_dst_addr_is_self_addr(ta_txrx_peer->vdev,
1421 							 nbuf) ||
1422 			   dp_nbuf_dst_addr_is_mld_addr(ta_txrx_peer->vdev,
1423 							nbuf)))) {
1424 		qdf_nbuf_free(nbuf);
1425 		DP_STATS_INC(soc, rx.err.intrabss_eapol_drop, 1);
1426 		return true;
1427 	}
1428 
1429 	return false;
1430 }
1431 #else /* DISABLE_EAPOL_INTRABSS_FWD */
1432 
1433 static inline
1434 bool dp_rx_intrabss_eapol_drop_check(struct dp_soc *soc,
1435 				     struct dp_txrx_peer *ta_txrx_peer,
1436 				     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf)
1437 {
1438 	return false;
1439 }
1440 #endif /* DISABLE_EAPOL_INTRABSS_FWD */
1441 
1442 /**
1443  * dp_rx_intrabss_mcbc_fwd() - Does intrabss forward for mcast packets
1444  * @soc: core txrx main context
1445  * @ta_peer: source peer entry
1446  * @rx_tlv_hdr: start address of rx tlvs
1447  * @nbuf: nbuf that has to be intrabss forwarded
1448  * @tid_stats: tid stats pointer
1449  * @link_id: link Id on which packet is received
1450  *
1451  * Return: bool: true if it is forwarded else false
1452  */
1453 bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc,
1454 			     struct dp_txrx_peer *ta_peer,
1455 			     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
1456 			     struct cdp_tid_rx_stats *tid_stats,
1457 			     uint8_t link_id);
1458 
1459 /**
1460  * dp_rx_intrabss_ucast_fwd() - Does intrabss forward for unicast packets
1461  * @soc: core txrx main context
1462  * @ta_peer: source peer entry
1463  * @tx_vdev_id: VDEV ID for Intra-BSS TX
1464  * @rx_tlv_hdr: start address of rx tlvs
1465  * @nbuf: nbuf that has to be intrabss forwarded
1466  * @tid_stats: tid stats pointer
1467  * @link_id: link Id on which packet is received
1468  *
1469  * Return: bool: true if it is forwarded else false
1470  */
1471 bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc,
1472 			      struct dp_txrx_peer *ta_peer,
1473 			      uint8_t tx_vdev_id,
1474 			      uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
1475 			      struct cdp_tid_rx_stats *tid_stats,
1476 			      uint8_t link_id);
1477 
1478 /**
1479  * dp_rx_defrag_concat() - Concatenate the fragments
1480  *
1481  * @dst: destination pointer to the buffer
1482  * @src: source pointer from where the fragment payload is to be copied
1483  *
1484  * Return: QDF_STATUS
1485  */
1486 static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src)
1487 {
1488 	/*
1489 	 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst
1490 	 * to provide space for src, the headroom portion is copied from
1491 	 * the original dst buffer to the larger new dst buffer.
1492 	 * (This is needed, because the headroom of the dst buffer
1493 	 * contains the rx desc.)
1494 	 */
1495 	if (!qdf_nbuf_cat(dst, src)) {
1496 		/*
1497 		 * qdf_nbuf_cat does not free the src memory.
1498 		 * Free src nbuf before returning
1499 		 * In the failure case, the caller takes care of freeing the nbuf
1500 		 */
1501 		qdf_nbuf_free(src);
1502 		return QDF_STATUS_SUCCESS;
1503 	}
1504 
1505 	return QDF_STATUS_E_DEFRAG_ERROR;
1506 }
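/*
 * Illustrative usage sketch (fragment handling is simplified and the names
 * are assumptions): successive fragments are folded into the head fragment
 * one at a time, e.g.
 *
 *	if (dp_rx_defrag_concat(head_frag, next_frag) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_DEFRAG_ERROR;
 *
 * On success dp_rx_defrag_concat() has already freed next_frag; on failure
 * the caller is responsible for freeing the fragments.
 */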
1507 
1508 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1509 
1510 #ifndef FEATURE_WDS
1511 void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
1512 		    struct dp_txrx_peer *ta_txrx_peer, qdf_nbuf_t nbuf);
1513 
1514 static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc, uint16_t sa_idx, bool is_active)
1515 {
1516 	return QDF_STATUS_SUCCESS;
1517 }
1518 
1519 static inline void
1520 dp_rx_wds_srcport_learn(struct dp_soc *soc,
1521 			uint8_t *rx_tlv_hdr,
1522 			struct dp_txrx_peer *txrx_peer,
1523 			qdf_nbuf_t nbuf,
1524 			struct hal_rx_msdu_metadata msdu_metadata)
1525 {
1526 }
1527 
1528 static inline void
1529 dp_rx_ipa_wds_srcport_learn(struct dp_soc *soc,
1530 			    struct dp_peer *ta_peer, qdf_nbuf_t nbuf,
1531 			    struct hal_rx_msdu_metadata msdu_end_info,
1532 			    bool ad4_valid, bool chfrag_start)
1533 {
1534 }
1535 #endif
1536 
1537 /**
1538  * dp_rx_desc_dump() - dump the sw rx descriptor
1539  *
1540  * @rx_desc: sw rx descriptor
1541  */
1542 static inline void dp_rx_desc_dump(struct dp_rx_desc *rx_desc)
1543 {
1544 	dp_info("rx_desc->nbuf: %pK, rx_desc->cookie: %d, rx_desc->pool_id: %d, rx_desc->in_use: %d, rx_desc->unmapped: %d",
1545 		rx_desc->nbuf, rx_desc->cookie, rx_desc->pool_id,
1546 		rx_desc->in_use, rx_desc->unmapped);
1547 }
1548 
1549 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1550 
1551 /**
1552  * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet.
1553  *					In qwrap mode, packets originating from
1554  *					any vdev should not loop back and
1555  *					should be dropped.
1556  * @vdev: vdev on which rx packet is received
1557  * @nbuf: rx pkt
1558  *
1559  */
1560 #if ATH_SUPPORT_WRAP
1561 static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
1562 						qdf_nbuf_t nbuf)
1563 {
1564 	struct dp_vdev *psta_vdev;
1565 	struct dp_pdev *pdev = vdev->pdev;
1566 	uint8_t *data = qdf_nbuf_data(nbuf);
1567 
1568 	if (qdf_unlikely(vdev->proxysta_vdev)) {
1569 		/* In qwrap isolation mode, allow loopback packets as all
1570 		 * packets go to RootAP and Loopback on the mpsta.
1571 		 */
1572 		if (vdev->isolation_vdev)
1573 			return false;
1574 		TAILQ_FOREACH(psta_vdev, &pdev->vdev_list, vdev_list_elem) {
1575 			if (qdf_unlikely(psta_vdev->proxysta_vdev &&
1576 					 !qdf_mem_cmp(psta_vdev->mac_addr.raw,
1577 						      &data[QDF_MAC_ADDR_SIZE],
1578 						      QDF_MAC_ADDR_SIZE))) {
1579 				/* Drop packet if source address is equal to
1580 				 * any of the vdev addresses.
1581 				 */
1582 				return true;
1583 			}
1584 		}
1585 	}
1586 	return false;
1587 }
1588 #else
1589 static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
1590 						qdf_nbuf_t nbuf)
1591 {
1592 	return false;
1593 }
1594 #endif
1595 
1596 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1597 
1598 #if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\
1599 	defined(WLAN_SUPPORT_RX_TAG_STATISTICS) ||\
1600 	defined(WLAN_SUPPORT_RX_FLOW_TAG)
1601 #include "dp_rx_tag.h"
1602 #endif
1603 
1604 #if !defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) &&\
1605 	!defined(WLAN_SUPPORT_RX_FLOW_TAG)
1606 /**
1607  * dp_rx_update_protocol_tag() - Reads CCE metadata from the RX MSDU end TLV
1608  *                              and set the corresponding tag in QDF packet
1609  * @soc: core txrx main context
1610  * @vdev: vdev on which the packet is received
1611  * @nbuf: QDF pkt buffer on which the protocol tag should be set
1612  * @rx_tlv_hdr: base address where the RX TLVs start
1613  * @ring_index: REO ring number, not used for error & monitor ring
1614  * @is_reo_exception: flag to indicate if rx from REO ring or exception ring
1615  * @is_update_stats: flag to indicate whether to update stats or not
1616  *
1617  * Return: void
1618  */
1619 static inline void
1620 dp_rx_update_protocol_tag(struct dp_soc *soc, struct dp_vdev *vdev,
1621 			  qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
1622 			  uint16_t ring_index,
1623 			  bool is_reo_exception, bool is_update_stats)
1624 {
1625 }
1626 #endif
1627 
1628 #ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
1629 /**
1630  * dp_rx_err_cce_drop() - Reads CCE metadata from the RX MSDU end TLV
1631  *                        and returns whether cce metadata matches
1632  * @soc: core txrx main context
1633  * @vdev: vdev on which the packet is received
1634  * @nbuf: QDF pkt buffer on which the protocol tag should be set
1635  * @rx_tlv_hdr: base address where the RX TLVs start
1636  *
1637  * Return: bool
1638  */
1639 static inline bool
1640 dp_rx_err_cce_drop(struct dp_soc *soc, struct dp_vdev *vdev,
1641 		   qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
1642 {
1643 	return false;
1644 }
1645 
1646 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
1647 
1648 #ifndef WLAN_SUPPORT_RX_FLOW_TAG
1649 /**
1650  * dp_rx_update_flow_tag() - Reads FSE metadata from the RX MSDU end TLV
1651  *                           and set the corresponding tag in QDF packet
1652  * @soc: core txrx main context
1653  * @vdev: vdev on which the packet is received
1654  * @nbuf: QDF pkt buffer on which the protocol tag should be set
1655  * @rx_tlv_hdr: base address where the RX TLVs start
1656  * @update_stats: flag to indicate whether to update stats or not
1657  *
1658  * Return: void
1659  */
1660 static inline void
1661 dp_rx_update_flow_tag(struct dp_soc *soc, struct dp_vdev *vdev,
1662 		      qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, bool update_stats)
1663 {
1664 }
1665 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
1666 
1667 #define CRITICAL_BUFFER_THRESHOLD	64
1668 /**
1669  * __dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
1670  *			       called during dp rx initialization
1671  *			       and at the end of dp_rx_process.
1672  *
1673  * @dp_soc: core txrx main context
1674  * @mac_id: mac_id which is one of 3 mac_ids
1675  * @dp_rxdma_srng: dp rxdma circular ring
1676  * @rx_desc_pool: Pointer to free Rx descriptor pool
1677  * @num_req_buffers: number of buffer to be replenished
1678  * @desc_list: list of descs if called from dp_rx_process
1679  *	       or NULL during dp rx initialization or out of buffer
1680  *	       interrupt.
1681  * @tail: tail of descs list
1682  * @req_only: If true don't replenish more than req buffers
1683  * @force_replenish: replenish the full ring without limit check;
1684  *                   this field will be considered only when desc_list
1685  *                   is NULL and req_only is false
1686  * @func_name: name of the caller function
1687  *
1688  * Return: return success or failure
1689  */
1690 QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
1691 				 struct dp_srng *dp_rxdma_srng,
1692 				 struct rx_desc_pool *rx_desc_pool,
1693 				 uint32_t num_req_buffers,
1694 				 union dp_rx_desc_list_elem_t **desc_list,
1695 				 union dp_rx_desc_list_elem_t **tail,
1696 				 bool req_only,
1697 				 bool force_replenish,
1698 				 const char *func_name);
1699 
1700 /**
1701  * __dp_rx_buffers_no_map_replenish() - replenish rxdma ring with rx nbufs
1702  *					use direct APIs to get invalidate
1703  *					and get the physical address of the
1704  *					nbuf instead of map api,called during
1705  *					dp rx initialization and at the end
1706  *					of dp_rx_process.
1707  *
1708  * @dp_soc: core txrx main context
1709  * @mac_id: mac_id which is one of 3 mac_ids
1710  * @dp_rxdma_srng: dp rxdma circular ring
1711  * @rx_desc_pool: Pointer to free Rx descriptor pool
1712  * @num_req_buffers: number of buffer to be replenished
1713  * @desc_list: list of descs if called from dp_rx_process
1714  *	       or NULL during dp rx initialization or out of buffer
1715  *	       interrupt.
1716  * @tail: tail of descs list
1717  *
1718  * Return: return success or failure
1719  */
1720 QDF_STATUS
1721 __dp_rx_buffers_no_map_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
1722 				 struct dp_srng *dp_rxdma_srng,
1723 				 struct rx_desc_pool *rx_desc_pool,
1724 				 uint32_t num_req_buffers,
1725 				 union dp_rx_desc_list_elem_t **desc_list,
1726 				 union dp_rx_desc_list_elem_t **tail);
1727 
1728 /**
1729  * __dp_rx_comp2refill_replenish() - replenish rxdma ring with rx nbufs;
1730  *					uses direct APIs to invalidate the
1731  *					nbuf and get its physical address
1732  *					instead of the map API. Called during
1733  *					dp rx initialization and at the end
1734  *					of dp_rx_process.
1735  *
1736  * @dp_soc: core txrx main context
1737  * @mac_id: mac_id which is one of 3 mac_ids
1738  * @dp_rxdma_srng: dp rxdma circular ring
1739  * @rx_desc_pool: Pointer to free Rx descriptor pool
1740  * @num_req_buffers: number of buffer to be replenished
1741  * @desc_list: list of descs if called from dp_rx_process
1742  *	       or NULL during dp rx initialization or out of buffer
1743  *	       interrupt.
1744  * @tail: tail of descs list
1745  * Return: return success or failure
1746  */
1747 QDF_STATUS
1748 __dp_rx_comp2refill_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
1749 			      struct dp_srng *dp_rxdma_srng,
1750 			      struct rx_desc_pool *rx_desc_pool,
1751 			      uint32_t num_req_buffers,
1752 			      union dp_rx_desc_list_elem_t **desc_list,
1753 			      union dp_rx_desc_list_elem_t **tail);
1754 
1755 /**
1756  * __dp_rx_buffers_no_map_lt_replenish() - replenish rxdma ring with rx nbufs;
1757  *					uses direct APIs to invalidate the
1758  *					nbuf and get its physical address
1759  *					instead of the map API. Called when
1760  *					the low threshold interrupt is triggered.
1761  *
1762  * @dp_soc: core txrx main context
1763  * @mac_id: mac_id which is one of 3 mac_ids
1764  * @dp_rxdma_srng: dp rxdma circular ring
1765  * @rx_desc_pool: Pointer to free Rx descriptor pool
1766  * @force_replenish: Force replenish the ring fully
1767  *
1768  * Return: return success or failure
1769  */
1770 QDF_STATUS
1771 __dp_rx_buffers_no_map_lt_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
1772 				    struct dp_srng *dp_rxdma_srng,
1773 				    struct rx_desc_pool *rx_desc_pool,
1774 				    bool force_replenish);
1775 
1776 /**
1777  * __dp_pdev_rx_buffers_no_map_attach() - replenish rxdma ring with rx nbufs;
1778  *					uses direct APIs to invalidate the
1779  *					nbuf and get its physical address
1780  *					instead of the map API. Called during
1781  *					dp rx initialization.
1782  *
1783  * @dp_soc: core txrx main context
1784  * @mac_id: mac_id which is one of 3 mac_ids
1785  * @dp_rxdma_srng: dp rxdma circular ring
1786  * @rx_desc_pool: Pointer to free Rx descriptor pool
1787  * @num_req_buffers: number of buffer to be replenished
1788  *
1789  * Return: return success or failure
1790  */
1791 QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *dp_soc,
1792 					      uint32_t mac_id,
1793 					      struct dp_srng *dp_rxdma_srng,
1794 					      struct rx_desc_pool *rx_desc_pool,
1795 					      uint32_t num_req_buffers);
1796 
1797 /**
1798  * dp_pdev_rx_buffers_attach() - replenish rxdma ring with rx nbufs
1799  *                               called during dp rx initialization
1800  *
1801  * @dp_soc: core txrx main context
1802  * @mac_id: mac_id which is one of 3 mac_ids
1803  * @dp_rxdma_srng: dp rxdma circular ring
1804  * @rx_desc_pool: Pointer to free Rx descriptor pool
1805  * @num_req_buffers: number of buffer to be replenished
1806  *
1807  * Return: return success or failure
1808  */
1809 QDF_STATUS
1810 dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
1811 			  struct dp_srng *dp_rxdma_srng,
1812 			  struct rx_desc_pool *rx_desc_pool,
1813 			  uint32_t num_req_buffers);
1814 
1815 /**
1816  * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
1817  * @vdev: DP Virtual device handle
1818  * @nbuf: Buffer pointer
1819  * @rx_tlv_hdr: start of rx tlv header
1820  * @txrx_peer: pointer to peer
1821  *
1822  * This function allocates memory for mesh receive stats and fills the
1823  * required stats. Stores the memory address in skb cb.
1824  *
1825  * Return: void
1826  */
1827 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1828 			   uint8_t *rx_tlv_hdr,
1829 			   struct dp_txrx_peer *txrx_peer);
1830 
1831 /**
1832  * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
1833  * @vdev: DP Virtual device handle
1834  * @nbuf: Buffer pointer
1835  * @rx_tlv_hdr: start of rx tlv header
1836  *
1837  * This checks if the received packet matches any filter out
1838  * category and drops the packet if it matches.
1839  *
1840  * Return: QDF_STATUS_SUCCESS indicates drop,
1841  *         QDF_STATUS_E_FAILURE indicates not to drop
1842  */
1843 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
1844 					uint8_t *rx_tlv_hdr);
1845 
1846 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
1847 			   struct dp_txrx_peer *peer);
1848 
1849 /**
1850  * dp_rx_compute_delay() - Compute and fill in all timestamps
1851  *				to pass in correct fields
1852  * @vdev: vdev handle
1853  * @nbuf: network buffer
1854  *
1855  * Return: none
1856  */
1857 void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
1858 
1859 #ifdef QCA_PEER_EXT_STATS
1860 
1861 /**
1862  * dp_rx_compute_tid_delay() - Compute per TID delay stats
1863  * @stats: TID delay stats to update
1864  * @nbuf: NBuffer
1865  *
1866  * Return: Void
1867  */
1868 void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
1869 			     qdf_nbuf_t nbuf);
1870 #endif /* QCA_PEER_EXT_STATS */
1871 
1872 #ifdef WLAN_SUPPORT_PPEDS
1873 static inline
1874 void dp_rx_set_reuse_nbuf(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
1875 {
1876 	rx_desc->reuse_nbuf = nbuf;
1877 	rx_desc->has_reuse_nbuf = true;
1878 }
1879 
1880 /**
1881  * __dp_rx_add_to_free_desc_list_reuse() - Adds to a local free descriptor list
1882  *					   that will be reused
1883  *
1884  * @head: pointer to the head of local free list
1885  * @tail: pointer to the tail of local free list
1886  * @new: new descriptor that is added to the free list
1887  * @func_name: caller func name
1888  *
1889  * Return: void
1890  */
1891 static inline
1892 void __dp_rx_add_to_free_desc_list_reuse(union dp_rx_desc_list_elem_t **head,
1893 					 union dp_rx_desc_list_elem_t **tail,
1894 					 struct dp_rx_desc *new,
1895 					 const char *func_name)
1896 {
1897 	qdf_assert(head && new);
1898 
1899 	dp_rx_desc_update_dbg_info(new, func_name, RX_DESC_IN_FREELIST);
1900 
1901 	new->nbuf = NULL;
1902 
1903 	((union dp_rx_desc_list_elem_t *)new)->next = *head;
1904 	*head = (union dp_rx_desc_list_elem_t *)new;
1905 	/* reset tail if head->next is NULL */
1906 	if (!*tail || !(*head)->next)
1907 		*tail = *head;
1908 }
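
/*
 * Usage sketch (illustrative, assumes the WLAN_SUPPORT_PPEDS reap path):
 * a reaped descriptor is pushed onto the local reuse list instead of the
 * global free list; head and tail below are hypothetical locals.
 *
 *	__dp_rx_add_to_free_desc_list_reuse(&head, &tail, rx_desc, __func__);
 */
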
1909 #else
1910 static inline
1911 void dp_rx_set_reuse_nbuf(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
1912 {
1913 }
1914 
1915 static inline
1916 void __dp_rx_add_to_free_desc_list_reuse(union dp_rx_desc_list_elem_t **head,
1917 					 union dp_rx_desc_list_elem_t **tail,
1918 					 struct dp_rx_desc *new,
1919 					 const char *func_name)
1920 {
1921 }
1922 #endif
1923 
1924 #ifdef RX_DESC_DEBUG_CHECK
1925 /**
1926  * dp_rx_desc_check_magic() - check the magic value in dp_rx_desc
1927  * @rx_desc: rx descriptor pointer
1928  *
1929  * Return: true, if magic is correct, else false.
1930  */
1931 static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
1932 {
1933 	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
1934 		return false;
1935 
1936 	rx_desc->magic = 0;
1937 	return true;
1938 }
1939 
1940 /**
1941  * dp_rx_desc_prep() - prepare rx desc
1942  * @rx_desc: rx descriptor pointer to be prepared
1943  * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info *
1944  *
1945  * Note: assumption is that we are associating a nbuf which is mapped
1946  *
1947  * Return: none
1948  */
1949 static inline
1950 void dp_rx_desc_prep(struct dp_rx_desc *rx_desc,
1951 		     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
1952 {
1953 	rx_desc->magic = DP_RX_DESC_MAGIC;
1954 	rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf;
1955 	rx_desc->unmapped = 0;
1956 	rx_desc->nbuf_data_addr = (uint8_t *)qdf_nbuf_data(rx_desc->nbuf);
1957 	dp_rx_set_reuse_nbuf(rx_desc, rx_desc->nbuf);
1958 	rx_desc->prev_paddr_buf_start = rx_desc->paddr_buf_start;
1959 	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
1960 }
1961 
1962 /**
1963  * dp_rx_desc_frag_prep() - prepare rx desc
1964  * @rx_desc: rx descriptor pointer to be prepared
1965  * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info *
1966  *
1967  * Note: assumption is that the frag address is mapped
1968  *
1969  * Return: none
1970  */
1971 #ifdef DP_RX_MON_MEM_FRAG
1972 static inline
1973 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
1974 			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
1975 {
1976 	rx_desc->magic = DP_RX_DESC_MAGIC;
1977 	rx_desc->rx_buf_start =
1978 		(uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr);
1979 	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
1980 	rx_desc->unmapped = 0;
1981 }
1982 #else
1983 static inline
1984 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
1985 			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
1986 {
1987 }
1988 #endif /* DP_RX_MON_MEM_FRAG */
1989 
1990 /**
1991  * dp_rx_desc_paddr_sanity_check() - paddr sanity for ring desc vs rx_desc
1992  * @rx_desc: rx descriptor
1993  * @ring_paddr: paddr obtained from the ring
1994  *
1995  * Return: true if the ring paddr matches the rx_desc nbuf paddr, else false
1996  */
1997 static inline
1998 bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc,
1999 				   uint64_t ring_paddr)
2000 {
2001 	return (ring_paddr == qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0));
2002 }
2003 #else
2004 
2005 static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
2006 {
2007 	return true;
2008 }
2009 
2010 static inline
2011 void dp_rx_desc_prep(struct dp_rx_desc *rx_desc,
2012 		     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
2013 {
2014 	rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf;
2015 	dp_rx_set_reuse_nbuf(rx_desc, rx_desc->nbuf);
2016 	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
2017 	rx_desc->unmapped = 0;
2018 }
2019 
2020 #ifdef DP_RX_MON_MEM_FRAG
2021 static inline
2022 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
2023 			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
2024 {
2025 	rx_desc->rx_buf_start =
2026 		(uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr);
2027 	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
2028 	rx_desc->unmapped = 0;
2029 }
2030 #else
2031 static inline
2032 void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
2033 			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
2034 {
2035 }
2036 #endif /* DP_RX_MON_MEM_FRAG */
2037 
2038 static inline
2039 bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc,
2040 				   uint64_t ring_paddr)
2041 {
2042 	return true;
2043 }
2044 #endif /* RX_DESC_DEBUG_CHECK */
2045 
2046 /**
2047  * dp_rx_enable_mon_dest_frag() - Enable frag processing for
2048  *              monitor destination ring via frag.
2049  * @rx_desc_pool: Rx desc pool
2050  * @is_mon_dest_desc: Is it for monitor dest buffer
2051  *
2052  * Enable this flag only for monitor destination buffer processing
2053  * if DP_RX_MON_MEM_FRAG feature is enabled.
2054  * If flag is set then frag based function will be called for alloc,
2055  * map, prep desc and free ops for desc buffer else normal nbuf based
2056  * function will be called.
2057  *
2058  * Return: None
2059  */
2060 void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
2061 				bool is_mon_dest_desc);
2062 
2063 #ifndef QCA_MULTIPASS_SUPPORT
2064 static inline
2065 bool dp_rx_multipass_process(struct dp_txrx_peer *peer, qdf_nbuf_t nbuf,
2066 			     uint8_t tid)
2067 {
2068 	return false;
2069 }
2070 #else
2071 /**
2072  * dp_rx_multipass_process() - insert vlan tag on frames for traffic separation
2073  * @txrx_peer: DP txrx peer handle
2074  * @nbuf: skb
2075  * @tid: traffic priority
2076  *
2077  * Return: bool: true in case of success else false
2078  * Success is considered if:
2079  *  i. If frame has vlan header
2080  *  ii. If the frame comes from a different peer and doesn't need multipass processing
2081  * Failure is considered if:
2082  *  i. Frame comes from multipass peer but doesn't contain vlan header.
2083  *  In failure case, drop such frames.
2084  */
2085 bool dp_rx_multipass_process(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf,
2086 			     uint8_t tid);
2087 #endif
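
/*
 * Usage sketch (illustrative): per the contract above, the rx path is
 * expected to drop a frame from a multipass peer when this returns false;
 * the surrounding loop below is only a hypothetical outline.
 *
 *	if (!dp_rx_multipass_process(txrx_peer, nbuf, tid)) {
 *		qdf_nbuf_free(nbuf);
 *		continue;
 *	}
 */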
2088 
2089 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2090 
2091 #ifndef WLAN_RX_PKT_CAPTURE_ENH
2092 static inline
2093 QDF_STATUS dp_peer_set_rx_capture_enabled(struct dp_pdev *pdev,
2094 					  struct dp_peer *peer_handle,
2095 					  bool value, uint8_t *mac_addr)
2096 {
2097 	return QDF_STATUS_SUCCESS;
2098 }
2099 #endif
2100 
2101 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2102 
2103 /**
2104  * dp_rx_deliver_to_stack() - deliver pkts to network stack
2105  * Caller to hold peer refcount and check for valid peer
2106  * @soc: soc
2107  * @vdev: vdev
2108  * @peer: txrx peer
2109  * @nbuf_head: skb list head
2110  * @nbuf_tail: skb list tail
2111  *
2112  * Return: QDF_STATUS
2113  */
2114 QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc,
2115 				  struct dp_vdev *vdev,
2116 				  struct dp_txrx_peer *peer,
2117 				  qdf_nbuf_t nbuf_head,
2118 				  qdf_nbuf_t nbuf_tail);
2119 
2120 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
2121 /**
2122  * dp_rx_eapol_deliver_to_stack() - deliver pkts to network stack
2123  * caller to hold peer refcount and check for valid peer
2124  * @soc: soc
2125  * @vdev: vdev
2126  * @peer: peer
2127  * @nbuf_head: skb list head
2128  * @nbuf_tail: skb list tail
2129  *
2130  * Return: QDF_STATUS
2131  */
2132 QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc,
2133 					struct dp_vdev *vdev,
2134 					struct dp_txrx_peer *peer,
2135 					qdf_nbuf_t nbuf_head,
2136 					qdf_nbuf_t nbuf_tail);
2137 #endif
2138 
2139 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2140 
2141 #ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
2142 #define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \
2143 	do {								   \
2144 		if (!soc->rx_buff_pool[rx_desc->pool_id].is_initialized) { \
2145 			DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf);	   \
2146 			break;						   \
2147 		}							   \
2148 		DP_RX_LIST_APPEND(ebuf_head, ebuf_tail, rx_desc->nbuf);	   \
2149 		if (!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)) {	   \
2150 			if (!dp_rx_buffer_pool_refill(soc, ebuf_head,	   \
2151 						      rx_desc->pool_id))   \
2152 				DP_RX_MERGE_TWO_LIST(head, tail,	   \
2153 						     ebuf_head, ebuf_tail);\
2154 			ebuf_head = NULL;				   \
2155 			ebuf_tail = NULL;				   \
2156 		}							   \
2157 	} while (0)
2158 #else
2159 #define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \
2160 	DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf)
2161 #endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */
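
/*
 * Usage sketch (illustrative): inside the reap loop each reaped rx_desc is
 * appended to the delivery list through this macro; ebuf_head/ebuf_tail
 * act as scratch lists when the prealloc buffer pool is enabled. All
 * locals below are hypothetical.
 *
 *	qdf_nbuf_t nbuf_head = NULL, nbuf_tail = NULL;
 *	qdf_nbuf_t ebuf_head = NULL, ebuf_tail = NULL;
 *
 *	DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail,
 *			   ebuf_head, ebuf_tail, rx_desc);
 */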
2162 
2163 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2164 
2165 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
2166 /**
2167  * dp_rx_deliver_to_pkt_capture() - deliver rx packet to packet capture
2168  * @soc : dp_soc handle
2169  * @pdev: dp_pdev handle
2170  * @peer_id: peer_id of the peer for which completion came
2171  * @is_offload: offload indication flag
2172  * @netbuf: Buffer pointer
2173  *
2174  * This function is used to deliver rx packet to packet capture
2175  */
2176 void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc,  struct dp_pdev *pdev,
2177 				  uint16_t peer_id, uint32_t is_offload,
2178 				  qdf_nbuf_t netbuf);
2179 void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
2180 					  uint32_t is_offload);
2181 #else
2182 static inline void
2183 dp_rx_deliver_to_pkt_capture(struct dp_soc *soc,  struct dp_pdev *pdev,
2184 			     uint16_t peer_id, uint32_t is_offload,
2185 			     qdf_nbuf_t netbuf)
2186 {
2187 }
2188 
2189 static inline void
2190 dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
2191 				     uint32_t is_offload)
2192 {
2193 }
2194 #endif
2195 
2196 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2197 #ifdef FEATURE_MEC
2198 /**
2199  * dp_rx_mcast_echo_check() - check if the mcast pkt is a loop
2200  *			      back on same vap or a different vap.
2201  * @soc: core DP main context
2202  * @peer: dp peer handler
2203  * @rx_tlv_hdr: start of the rx TLV header
2204  * @nbuf: pkt buffer
2205  *
2206  * Return: bool (true if it is a looped back pkt else false)
2207  *
2208  */
2209 bool dp_rx_mcast_echo_check(struct dp_soc *soc,
2210 			    struct dp_txrx_peer *peer,
2211 			    uint8_t *rx_tlv_hdr,
2212 			    qdf_nbuf_t nbuf);
2213 #else
2214 static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
2215 					  struct dp_txrx_peer *peer,
2216 					  uint8_t *rx_tlv_hdr,
2217 					  qdf_nbuf_t nbuf)
2218 {
2219 	return false;
2220 }
2221 #endif /* FEATURE_MEC */
2222 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2223 
2224 #ifdef RECEIVE_OFFLOAD
2225 /**
2226  * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb
2227  * @soc: DP SOC handle
2228  * @rx_tlv: RX TLV received for the msdu
2229  * @msdu: msdu for which GRO info needs to be filled
2230  * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets
2231  *
2232  * Return: None
2233  */
2234 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
2235 			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt);
2236 #else
2237 static inline
2238 void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
2239 			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
2240 {
2241 }
2242 #endif
2243 
2244 /**
2245  * dp_rx_msdu_stats_update() - update per msdu stats.
2246  * @soc: core txrx main context
2247  * @nbuf: pointer to the first msdu of an amsdu.
2248  * @rx_tlv_hdr: pointer to the start of RX TLV headers.
2249  * @txrx_peer: pointer to the txrx peer object.
2250  * @ring_id: reo dest ring number on which pkt is reaped.
2251  * @tid_stats: per tid rx stats.
2252  * @link_id: link Id on which packet is received
2253  *
2254  * update all the per msdu stats for that nbuf.
2255  *
2256  * Return: void
2257  */
2258 void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
2259 			     uint8_t *rx_tlv_hdr,
2260 			     struct dp_txrx_peer *txrx_peer,
2261 			     uint8_t ring_id,
2262 			     struct cdp_tid_rx_stats *tid_stats,
2263 			     uint8_t link_id);
2264 
2265 /**
2266  * dp_rx_deliver_to_stack_no_peer() - try deliver rx data even if
2267  *				      no corresponding peer found
2268  * @soc: core txrx main context
2269  * @nbuf: pkt skb pointer
2270  *
2271  * This function will try to deliver some RX special frames to stack
2272  * even when no matched peer is found. For instance, in the LFR case, some
2273  * EAPOL data will be sent to the host before peer_map is done.
2274  *
2275  * Return: None
2276  */
2277 void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf);
2278 
2279 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2280 #ifdef DP_RX_DROP_RAW_FRM
2281 /**
2282  * dp_rx_is_raw_frame_dropped() - if raw frame nbuf, free and drop
2283  * @nbuf: pkt skb pointer
2284  *
2285  * Return: true - raw frame, dropped
2286  *	   false - not raw frame, do nothing
2287  */
2288 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf);
2289 #else
2290 static inline
2291 bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)
2292 {
2293 	return false;
2294 }
2295 #endif
2296 
2297 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
2298 /**
2299  * dp_rx_update_stats() - Update soc level rx packet count
2300  * @soc: DP soc handle
2301  * @nbuf: nbuf received
2302  *
2303  * Return: none
2304  */
2305 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf);
2306 #else
2307 static inline
2308 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf)
2309 {
2310 }
2311 #endif
2312 
2313 /**
2314  * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
2315  * @pdev: dp_pdev handle
2316  * @nbuf: pointer to the first msdu of an amsdu.
2317  * @rx_tlv_hdr: pointer to the start of RX TLV headers.
2318  *
2319  * The ip_summed field of the skb is set based on whether HW validated the
2320  * IP/TCP/UDP checksum.
2321  *
2322  * Return: void
2323  */
2324 #if defined(MAX_PDEV_CNT) && (MAX_PDEV_CNT == 1)
2325 static inline
2326 void dp_rx_cksum_offload(struct dp_pdev *pdev,
2327 			 qdf_nbuf_t nbuf,
2328 			 uint8_t *rx_tlv_hdr)
2329 {
2330 	qdf_nbuf_rx_cksum_t cksum = {0};
2331 	//TODO - Move this to ring desc api
2332 	//HAL_RX_MSDU_DESC_IP_CHKSUM_FAIL_GET
2333 	//HAL_RX_MSDU_DESC_TCP_UDP_CHKSUM_FAIL_GET
2334 	uint32_t ip_csum_err, tcp_udp_csum_er;
2335 
2336 	hal_rx_tlv_csum_err_get(pdev->soc->hal_soc, rx_tlv_hdr, &ip_csum_err,
2337 				&tcp_udp_csum_er);
2338 
2339 	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
2340 		if (qdf_likely(!ip_csum_err)) {
2341 			cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
2342 			if (qdf_nbuf_is_ipv4_udp_pkt(nbuf) ||
2343 			    qdf_nbuf_is_ipv4_tcp_pkt(nbuf)) {
2344 				if (qdf_likely(!tcp_udp_csum_er)) {
2345 					cksum.csum_level = 1;
2346 				} else {
2347 					cksum.l4_result =
2348 						QDF_NBUF_RX_CKSUM_NONE;
2349 					DP_STATS_INC(pdev,
2350 						     err.tcp_udp_csum_err, 1);
2351 				}
2352 			}
2353 		} else {
2354 			DP_STATS_INC(pdev, err.ip_csum_err, 1);
2355 		}
2356 	} else if (qdf_nbuf_is_ipv6_udp_pkt(nbuf) ||
2357 		   qdf_nbuf_is_ipv6_tcp_pkt(nbuf)) {
2358 		if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er))
2359 			cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
2360 		else if (ip_csum_err) {
2361 			DP_STATS_INC(pdev, err.ip_csum_err, 1);
2362 		} else {
2363 			DP_STATS_INC(pdev, err.tcp_udp_csum_err, 1);
2364 		}
2365 	}
2366 
2367 	qdf_nbuf_set_rx_cksum(nbuf, &cksum);
2368 }
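
/*
 * Usage sketch (illustrative): the rx path typically invokes this once per
 * MSDU before stack delivery so the network stack can skip software
 * checksum validation whenever HW has already verified it.
 *
 *	dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);
 */
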
2369 #else
2370 static inline
2371 void dp_rx_cksum_offload(struct dp_pdev *pdev,
2372 			 qdf_nbuf_t nbuf,
2373 			 uint8_t *rx_tlv_hdr)
2374 {
2375 }
2376 #endif
2377 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2378 
2379 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
2380 static inline
2381 bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
2382 				   int max_reap_limit)
2383 {
2384 	bool limit_hit = false;
2385 
2386 	limit_hit =
2387 		(num_reaped >= max_reap_limit) ? true : false;
2388 
2389 	if (limit_hit)
2390 		DP_STATS_INC(soc, rx.reap_loop_pkt_limit_hit, 1)
2391 
2392 	return limit_hit;
2393 }
2394 
2395 static inline
2396 bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
2397 {
2398 	return soc->wlan_cfg_ctx->rx_enable_eol_data_check;
2399 }
2400 
2401 static inline int dp_rx_get_loop_pkt_limit(struct dp_soc *soc)
2402 {
2403 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
2404 
2405 	return cfg->rx_reap_loop_pkt_limit;
2406 }
2407 #else
2408 static inline
2409 bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
2410 				   int max_reap_limit)
2411 {
2412 	return false;
2413 }
2414 
2415 static inline
2416 bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
2417 {
2418 	return false;
2419 }
2420 
2421 static inline int dp_rx_get_loop_pkt_limit(struct dp_soc *soc)
2422 {
2423 	return 0;
2424 }
2425 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
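
/*
 * Usage sketch (illustrative): the reap loop typically fetches the limit
 * once and bails out when the per-pass quota of reaped entries is hit;
 * reap_next_entry() and num_reaped are hypothetical placeholders.
 *
 *	int max_reap_limit = dp_rx_get_loop_pkt_limit(soc);
 *	int num_reaped = 0;
 *
 *	while (reap_next_entry(soc, hal_ring_hdl)) {
 *		num_reaped++;
 *		if (dp_rx_reap_loop_pkt_limit_hit(soc, num_reaped,
 *						  max_reap_limit))
 *			break;
 *	}
 */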
2426 
2427 void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf);
2428 
2429 static inline uint16_t
2430 dp_rx_peer_metadata_peer_id_get(struct dp_soc *soc, uint32_t peer_metadata)
2431 {
2432 	return soc->arch_ops.dp_rx_peer_metadata_peer_id_get(soc,
2433 							     peer_metadata);
2434 }
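
/*
 * Usage sketch (illustrative): the peer metadata carried in the REO ring
 * descriptor or rx TLVs is converted to a sw peer id through the arch
 * specific hook before the peer lookup; peer_metadata below is a
 * hypothetical local.
 *
 *	peer_id = dp_rx_peer_metadata_peer_id_get(soc, peer_metadata);
 *	txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id,
 *					       &txrx_ref_handle,
 *					       DP_MOD_ID_RX);
 */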
2435 
2436 #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
2437 /**
2438  * dp_rx_nbuf_set_link_id_from_tlv() - Set link id in nbuf cb
2439  * @soc: SOC handle
2440  * @tlv_hdr: rx tlv header
2441  * @nbuf: nbuf pointer
2442  *
2443  * Return: None
2444  */
2445 static inline void
2446 dp_rx_nbuf_set_link_id_from_tlv(struct dp_soc *soc, uint8_t *tlv_hdr,
2447 				qdf_nbuf_t nbuf)
2448 {
2449 	uint32_t peer_metadata = hal_rx_tlv_peer_meta_data_get(soc->hal_soc,
2450 								tlv_hdr);
2451 
2452 	if (soc->arch_ops.dp_rx_peer_set_link_id)
2453 		soc->arch_ops.dp_rx_peer_set_link_id(nbuf, peer_metadata);
2454 }
2455 
2456 /**
2457  * dp_rx_set_nbuf_band() - Set band info in nbuf cb
2458  * @nbuf: nbuf pointer
2459  * @txrx_peer: txrx_peer pointer
2460  * @link_id: Peer Link ID
2461  *
2462  * Return: None
2463  */
2464 static inline void
2465 dp_rx_set_nbuf_band(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer,
2466 		    uint8_t link_id)
2467 {
2468 	qdf_nbuf_rx_set_band(nbuf, txrx_peer->band[link_id]);
2469 }
2470 #else
2471 static inline void
2472 dp_rx_nbuf_set_link_id_from_tlv(struct dp_soc *soc, uint8_t *tlv_hdr,
2473 				qdf_nbuf_t nbuf)
2474 {
2475 }
2476 
2477 static inline void
2478 dp_rx_set_nbuf_band(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer,
2479 		    uint8_t link_id)
2480 {
2481 }
2482 #endif
2483 
2484 /**
2485  * dp_rx_desc_pool_init_generic() - Generic Rx descriptors initialization
2486  * @soc: SOC handle
2487  * @rx_desc_pool: pointer to RX descriptor pool
2488  * @pool_id: pool ID
2489  *
2490  * Return: QDF_STATUS
2491  */
2492 QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
2493 				  struct rx_desc_pool *rx_desc_pool,
2494 				  uint32_t pool_id);
2495 
2496 void dp_rx_desc_pool_deinit_generic(struct dp_soc *soc,
2497 				  struct rx_desc_pool *rx_desc_pool,
2498 				  uint32_t pool_id);
2499 
2500 /**
2501  * dp_rx_pkt_tracepoints_enabled() - Get the state of rx pkt tracepoint
2502  *
2503  * Return: True if any rx pkt tracepoint is enabled else false
2504  */
2505 static inline
2506 bool dp_rx_pkt_tracepoints_enabled(void)
2507 {
2508 	return (qdf_trace_dp_rx_tcp_pkt_enabled() ||
2509 		qdf_trace_dp_rx_udp_pkt_enabled() ||
2510 		qdf_trace_dp_rx_pkt_enabled());
2511 }
2512 
2513 #ifdef FEATURE_DIRECT_LINK
2514 /**
2515  * dp_audio_smmu_map() - Map memory region into Audio SMMU CB
2516  * @qdf_dev: pointer to QDF device structure
2517  * @paddr: physical address
2518  * @iova: DMA address
2519  * @size: memory region size
2520  *
2521  * Return: 0 on success else failure code
2522  */
2523 static inline
2524 int dp_audio_smmu_map(qdf_device_t qdf_dev, qdf_dma_addr_t paddr,
2525 		      qdf_dma_addr_t iova, qdf_size_t size)
2526 {
2527 	return pld_audio_smmu_map(qdf_dev->dev, paddr, iova, size);
2528 }
2529 
2530 /**
2531  * dp_audio_smmu_unmap() - Remove memory region mapping from Audio SMMU CB
2532  * @qdf_dev: pointer to QDF device structure
2533  * @iova: DMA address
2534  * @size: memory region size
2535  *
2536  * Return: None
2537  */
2538 static inline
2539 void dp_audio_smmu_unmap(qdf_device_t qdf_dev, qdf_dma_addr_t iova,
2540 			 qdf_size_t size)
2541 {
2542 	pld_audio_smmu_unmap(qdf_dev->dev, iova, size);
2543 }
2544 #else
2545 static inline
2546 int dp_audio_smmu_map(qdf_device_t qdf_dev, qdf_dma_addr_t paddr,
2547 		      qdf_dma_addr_t iova, qdf_size_t size)
2548 {
2549 	return 0;
2550 }
2551 
2552 static inline
2553 void dp_audio_smmu_unmap(qdf_device_t qdf_dev, qdf_dma_addr_t iova,
2554 			 qdf_size_t size)
2555 {
2556 }
2557 #endif
2558 
2559 #if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
2560 static inline
2561 QDF_STATUS dp_pdev_rx_buffers_attach_simple(struct dp_soc *soc, uint32_t mac_id,
2562 					    struct dp_srng *rxdma_srng,
2563 					    struct rx_desc_pool *rx_desc_pool,
2564 					    uint32_t num_req_buffers)
2565 {
2566 	return __dp_pdev_rx_buffers_no_map_attach(soc, mac_id,
2567 						  rxdma_srng,
2568 						  rx_desc_pool,
2569 						  num_req_buffers);
2570 }
2571 
2572 static inline
2573 void dp_rx_buffers_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
2574 				    struct dp_srng *rxdma_srng,
2575 				    struct rx_desc_pool *rx_desc_pool,
2576 				    uint32_t num_req_buffers,
2577 				    union dp_rx_desc_list_elem_t **desc_list,
2578 				    union dp_rx_desc_list_elem_t **tail)
2579 {
2580 	__dp_rx_buffers_no_map_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
2581 					 num_req_buffers, desc_list, tail);
2582 }
2583 
2584 static inline
2585 void dp_rx_comp2refill_replenish(struct dp_soc *soc, uint32_t mac_id,
2586 				 struct dp_srng *rxdma_srng,
2587 				 struct rx_desc_pool *rx_desc_pool,
2588 				 uint32_t num_req_buffers,
2589 				 union dp_rx_desc_list_elem_t **desc_list,
2590 				 union dp_rx_desc_list_elem_t **tail)
2591 {
2592 	__dp_rx_comp2refill_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
2593 				      num_req_buffers, desc_list, tail);
2594 }
2595 
2596 static inline
2597 void dp_rx_buffers_lt_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
2598 				       struct dp_srng *rxdma_srng,
2599 				       struct rx_desc_pool *rx_desc_pool,
2600 				       bool force_replenish)
2601 {
2602 	__dp_rx_buffers_no_map_lt_replenish(soc, mac_id, rxdma_srng,
2603 					    rx_desc_pool,
2604 					    force_replenish);
2605 }
2606 
2607 #ifndef QCA_DP_NBUF_FAST_RECYCLE_CHECK
2608 static inline
2609 qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc,
2610 				      qdf_nbuf_t nbuf,
2611 				      uint32_t buf_size)
2612 {
2613 	qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
2614 				      (void *)(nbuf->data + buf_size));
2615 
2616 	return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
2617 }
2618 #else
2619 #define L3_HEADER_PAD 2
2620 static inline
2621 qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc,
2622 				      qdf_nbuf_t nbuf,
2623 				      uint32_t buf_size)
2624 {
2625 	if (nbuf->recycled_for_ds)
2626 		return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
2627 
2628 	if (unlikely(!nbuf->fast_recycled)) {
2629 		qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
2630 					      (void *)(nbuf->data + buf_size));
2631 	}
2632 
2633 	DP_STATS_INC(dp_soc, rx.fast_recycled, 1);
2634 	nbuf->fast_recycled = 0;
2635 
2636 	return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
2637 }
2638 #endif
2639 
2640 static inline
2641 qdf_dma_addr_t dp_rx_nbuf_sync(struct dp_soc *dp_soc,
2642 			       qdf_nbuf_t nbuf,
2643 			       uint32_t buf_size)
2644 {
2645 	qdf_nbuf_dma_inv_range((void *)nbuf->data,
2646 			       (void *)(nbuf->data + buf_size));
2647 
2648 	return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
2649 }
2650 
2651 #if !defined(SPECULATIVE_READ_DISABLED)
2652 static inline
2653 void dp_rx_nbuf_unmap(struct dp_soc *soc,
2654 		      struct dp_rx_desc *rx_desc,
2655 		      uint8_t reo_ring_num)
2656 {
2657 	struct rx_desc_pool *rx_desc_pool;
2658 	qdf_nbuf_t nbuf;
2659 
2660 	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
2661 	nbuf = rx_desc->nbuf;
2662 
2663 	qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
2664 			       (void *)(nbuf->data + rx_desc_pool->buf_size));
2665 }
2666 
2667 static inline
2668 void dp_rx_nbuf_unmap_pool(struct dp_soc *soc,
2669 			   struct rx_desc_pool *rx_desc_pool,
2670 			   qdf_nbuf_t nbuf)
2671 {
2672 	qdf_nbuf_dma_inv_range((void *)nbuf->data,
2673 			       (void *)(nbuf->data + rx_desc_pool->buf_size));
2674 }
2675 
2676 #else
2677 static inline
2678 void dp_rx_nbuf_unmap(struct dp_soc *soc,
2679 		      struct dp_rx_desc *rx_desc,
2680 		      uint8_t reo_ring_num)
2681 {
2682 }
2683 
2684 static inline
2685 void dp_rx_nbuf_unmap_pool(struct dp_soc *soc,
2686 			   struct rx_desc_pool *rx_desc_pool,
2687 			   qdf_nbuf_t nbuf)
2688 {
2689 }
2690 #endif
2691 
2692 static inline
2693 void dp_rx_per_core_stats_update(struct dp_soc *soc, uint8_t ring_id,
2694 				 uint32_t bufs_reaped)
2695 {
2696 }
2697 
2698 static inline
2699 qdf_nbuf_t dp_rx_nbuf_alloc(struct dp_soc *soc,
2700 			    struct rx_desc_pool *rx_desc_pool)
2701 {
2702 	return qdf_nbuf_alloc_simple(soc->osdev, rx_desc_pool->buf_size,
2703 				     RX_BUFFER_RESERVATION,
2704 				     rx_desc_pool->buf_alignment, FALSE);
2705 }
2706 
2707 static inline
2708 void  dp_rx_nbuf_free(qdf_nbuf_t nbuf)
2709 {
2710 	qdf_nbuf_free_simple(nbuf);
2711 }
2712 #else
2713 static inline
2714 QDF_STATUS dp_pdev_rx_buffers_attach_simple(struct dp_soc *soc, uint32_t mac_id,
2715 					    struct dp_srng *rxdma_srng,
2716 					    struct rx_desc_pool *rx_desc_pool,
2717 					    uint32_t num_req_buffers)
2718 {
2719 	return dp_pdev_rx_buffers_attach(soc, mac_id,
2720 					 rxdma_srng,
2721 					 rx_desc_pool,
2722 					 num_req_buffers);
2723 }
2724 
2725 static inline
2726 void dp_rx_buffers_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
2727 				    struct dp_srng *rxdma_srng,
2728 				    struct rx_desc_pool *rx_desc_pool,
2729 				    uint32_t num_req_buffers,
2730 				    union dp_rx_desc_list_elem_t **desc_list,
2731 				    union dp_rx_desc_list_elem_t **tail)
2732 {
2733 	dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
2734 				num_req_buffers, desc_list, tail, false);
2735 }
2736 
2737 static inline
2738 void dp_rx_buffers_lt_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
2739 				       struct dp_srng *rxdma_srng,
2740 				       struct rx_desc_pool *rx_desc_pool,
2741 				       bool force_replenish)
2742 {
2743 	__dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
2744 				  0, NULL, NULL, false, force_replenish,
2745 				  __func__);
2746 }
2747 
2748 static inline
2749 qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc,
2750 				      qdf_nbuf_t nbuf,
2751 				      uint32_t buf_size)
2752 {
2753 	return (qdf_dma_addr_t)NULL;
2754 }
2755 
2756 static inline
2757 qdf_dma_addr_t dp_rx_nbuf_sync(struct dp_soc *dp_soc,
2758 			       qdf_nbuf_t nbuf,
2759 			       uint32_t buf_size)
2760 {
2761 	return (qdf_dma_addr_t)NULL;
2762 }
2763 
2764 static inline
2765 void dp_rx_nbuf_unmap(struct dp_soc *soc,
2766 		      struct dp_rx_desc *rx_desc,
2767 		      uint8_t reo_ring_num)
2768 {
2769 	struct rx_desc_pool *rx_desc_pool;
2770 
2771 	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
2772 	dp_ipa_reo_ctx_buf_mapping_lock(soc, reo_ring_num);
2773 
2774 	dp_audio_smmu_unmap(soc->osdev,
2775 			    QDF_NBUF_CB_PADDR(rx_desc->nbuf),
2776 			    rx_desc_pool->buf_size);
2777 
2778 	dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf,
2779 					  rx_desc_pool->buf_size,
2780 					  false, __func__, __LINE__);
2781 	qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
2782 				     QDF_DMA_FROM_DEVICE,
2783 				     rx_desc_pool->buf_size);
2784 	rx_desc->unmapped = 1;
2785 
2786 	dp_ipa_reo_ctx_buf_mapping_unlock(soc, reo_ring_num);
2787 }
2788 
2789 static inline
2790 void dp_rx_nbuf_unmap_pool(struct dp_soc *soc,
2791 			   struct rx_desc_pool *rx_desc_pool,
2792 			   qdf_nbuf_t nbuf)
2793 {
2794 	dp_audio_smmu_unmap(soc->osdev, QDF_NBUF_CB_PADDR(nbuf),
2795 			    rx_desc_pool->buf_size);
2796 	dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
2797 					  rx_desc_pool->buf_size,
2798 					  false, __func__, __LINE__);
2799 	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_FROM_DEVICE,
2800 				     rx_desc_pool->buf_size);
2801 }
2802 
2803 static inline
2804 void dp_rx_per_core_stats_update(struct dp_soc *soc, uint8_t ring_id,
2805 				 uint32_t bufs_reaped)
2806 {
2807 	int cpu_id = qdf_get_cpu();
2808 
2809 	DP_STATS_INC(soc, rx.ring_packets[cpu_id][ring_id], bufs_reaped);
2810 }
2811 
2812 static inline
2813 qdf_nbuf_t dp_rx_nbuf_alloc(struct dp_soc *soc,
2814 			    struct rx_desc_pool *rx_desc_pool)
2815 {
2816 	return qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
2817 			      RX_BUFFER_RESERVATION,
2818 			      rx_desc_pool->buf_alignment, FALSE);
2819 }
2820 
2821 static inline
2822 void dp_rx_nbuf_free(qdf_nbuf_t nbuf)
2823 {
2824 	qdf_nbuf_free(nbuf);
2825 }
2826 #endif
2827 
2828 #ifdef DP_UMAC_HW_RESET_SUPPORT
2829 /**
2830  * dp_rx_desc_reuse() - Reuse the rx descriptors to fill the rx buf ring
2831  * @soc: core txrx main context
2832  * @nbuf_list: nbuf list for delayed free
2833  *
2834  * Return: void
2835  */
2836 void dp_rx_desc_reuse(struct dp_soc *soc, qdf_nbuf_t *nbuf_list);
2837 
2838 /**
2839  * dp_rx_desc_delayed_free() - Delayed free of the rx descs
2840  *
2841  * @soc: core txrx main context
2842  *
2843  * Return: void
2844  */
2845 void dp_rx_desc_delayed_free(struct dp_soc *soc);
2846 #endif
2847 
2848 /**
2849  * dp_rx_get_txrx_peer_and_vdev() - Get txrx peer and vdev from peer id
2850  * @soc: core txrx main context
2851  * @nbuf : pointer to the first msdu of an amsdu.
2852  * @peer_id : Peer id of the peer
2853  * @txrx_ref_handle : Buffer to save the handle for txrx peer's reference
2854  * @pkt_capture_offload : Flag indicating if pkt capture offload is needed
2855  * @vdev : Buffer to hold pointer to vdev
2856  * @rx_pdev : Buffer to hold pointer to rx pdev
2857  * @dsf : delay stats flag
2858  * @old_tid : Old tid
2859  *
2860  * Get txrx peer and vdev from peer id
2861  *
2862  * Return: Pointer to txrx peer
2863  */
2864 static inline struct dp_txrx_peer *
2865 dp_rx_get_txrx_peer_and_vdev(struct dp_soc *soc,
2866 			     qdf_nbuf_t nbuf,
2867 			     uint16_t peer_id,
2868 			     dp_txrx_ref_handle *txrx_ref_handle,
2869 			     bool pkt_capture_offload,
2870 			     struct dp_vdev **vdev,
2871 			     struct dp_pdev **rx_pdev,
2872 			     uint32_t *dsf,
2873 			     uint32_t *old_tid)
2874 {
2875 	struct dp_txrx_peer *txrx_peer = NULL;
2876 
2877 	txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id, txrx_ref_handle,
2878 					       DP_MOD_ID_RX);
2879 
2880 	if (qdf_likely(txrx_peer)) {
2881 		*vdev = txrx_peer->vdev;
2882 	} else {
2883 		nbuf->next = NULL;
2884 		dp_rx_deliver_to_pkt_capture_no_peer(soc, nbuf,
2885 						     pkt_capture_offload);
2886 		if (!pkt_capture_offload)
2887 			dp_rx_deliver_to_stack_no_peer(soc, nbuf);
2888 
2889 		goto end;
2890 	}
2891 
2892 	if (qdf_unlikely(!(*vdev))) {
2893 		qdf_nbuf_free(nbuf);
2894 		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
2895 		goto end;
2896 	}
2897 
2898 	*rx_pdev = (*vdev)->pdev;
2899 	*dsf = (*rx_pdev)->delay_stats_flag;
2900 	*old_tid = 0xff;
2901 
2902 end:
2903 	return txrx_peer;
2904 }
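
/*
 * Usage sketch (illustrative): the caller holds the returned reference for
 * the duration of per-msdu processing and releases it afterwards; the
 * unref call shown is assumed from the ref/unref pairing used in the
 * datapath and the other names are hypothetical locals.
 *
 *	txrx_peer = dp_rx_get_txrx_peer_and_vdev(soc, nbuf, peer_id,
 *						 &txrx_ref_handle,
 *						 pkt_capture_offload,
 *						 &vdev, &rx_pdev,
 *						 &dsf, &old_tid);
 *	if (!txrx_peer || !vdev)
 *		goto next_msdu;
 *
 *	... per-msdu processing ...
 *
 *	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
 */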
2905 
2906 static inline QDF_STATUS
2907 dp_peer_rx_reorder_queue_setup(struct dp_soc *soc, struct dp_peer *peer,
2908 			       uint32_t tid_bitmap, uint32_t ba_window_size)
2909 {
2910 	return soc->arch_ops.dp_peer_rx_reorder_queue_setup(soc,
2911 							    peer, tid_bitmap,
2912 							    ba_window_size);
2913 }
2914 
2915 static inline
2916 void dp_rx_nbuf_list_deliver(struct dp_soc *soc,
2917 			     struct dp_vdev *vdev,
2918 			     struct dp_txrx_peer *txrx_peer,
2919 			     uint16_t peer_id,
2920 			     uint8_t pkt_capture_offload,
2921 			     qdf_nbuf_t deliver_list_head,
2922 			     qdf_nbuf_t deliver_list_tail)
2923 {
2924 	qdf_nbuf_t nbuf, next;
2925 
2926 	if (qdf_likely(deliver_list_head)) {
2927 		if (qdf_likely(txrx_peer)) {
2928 			dp_rx_deliver_to_pkt_capture(soc, vdev->pdev, peer_id,
2929 						     pkt_capture_offload,
2930 						     deliver_list_head);
2931 			if (!pkt_capture_offload)
2932 				dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
2933 						       deliver_list_head,
2934 						       deliver_list_tail);
2935 		} else {
2936 			nbuf = deliver_list_head;
2937 			while (nbuf) {
2938 				next = nbuf->next;
2939 				nbuf->next = NULL;
2940 				dp_rx_deliver_to_stack_no_peer(soc, nbuf);
2941 				nbuf = next;
2942 			}
2943 		}
2944 	}
2945 }
2946 
2947 #ifdef DP_TX_RX_TPUT_SIMULATE
2948 /*
2949  * Change this macro value to simulate different RX T-put. For example,
2950  * if OTA is 100 Mbps and 200 Mbps is to be simulated, the multiplication
2951  * factor is 2, so set the macro value to 1 (multiplication factor - 1).
2952  */
2953 #define DP_RX_PKTS_DUPLICATE_CNT 0
2954 static inline
2955 void dp_rx_nbuf_list_dup_deliver(struct dp_soc *soc,
2956 				 struct dp_vdev *vdev,
2957 				 struct dp_txrx_peer *txrx_peer,
2958 				 uint16_t peer_id,
2959 				 uint8_t pkt_capture_offload,
2960 				 qdf_nbuf_t ori_list_head,
2961 				 qdf_nbuf_t ori_list_tail)
2962 {
2963 	qdf_nbuf_t new_skb = NULL;
2964 	qdf_nbuf_t new_list_head = NULL;
2965 	qdf_nbuf_t new_list_tail = NULL;
2966 	qdf_nbuf_t nbuf = NULL;
2967 	int i;
2968 
2969 	for (i = 0; i < DP_RX_PKTS_DUPLICATE_CNT; i++) {
2970 		nbuf = ori_list_head;
2971 		new_list_head = NULL;
2972 		new_list_tail = NULL;
2973 
2974 		while (nbuf) {
2975 			new_skb = qdf_nbuf_copy(nbuf);
2976 			if (qdf_likely(new_skb))
2977 				DP_RX_LIST_APPEND(new_list_head,
2978 						  new_list_tail,
2979 						  new_skb);
2980 			else
2981 				dp_err("copy skb failed");
2982 
2983 			nbuf = qdf_nbuf_next(nbuf);
2984 		}
2985 
2986 		/* deliver the copied nbuf list */
2987 		dp_rx_nbuf_list_deliver(soc, vdev, txrx_peer, peer_id,
2988 					pkt_capture_offload,
2989 					new_list_head,
2990 					new_list_tail);
2991 	}
2992 
2993 	/* deliver the original skb_list */
2994 	dp_rx_nbuf_list_deliver(soc, vdev, txrx_peer, peer_id,
2995 				pkt_capture_offload,
2996 				ori_list_head,
2997 				ori_list_tail);
2998 }
2999 
3000 #define DP_RX_DELIVER_TO_STACK dp_rx_nbuf_list_dup_deliver
3001 
3002 #else /* !DP_TX_RX_TPUT_SIMULATE */
3003 
3004 #define DP_RX_DELIVER_TO_STACK dp_rx_nbuf_list_deliver
3005 
3006 #endif /* DP_TX_RX_TPUT_SIMULATE */
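
/*
 * Usage sketch (illustrative): the rx processing loop delivers the
 * accumulated per-peer list through this macro once reaping is done, so
 * the T-put simulation hook is picked up transparently when enabled.
 *
 *	DP_RX_DELIVER_TO_STACK(soc, vdev, txrx_peer, peer_id,
 *			       pkt_capture_offload,
 *			       deliver_list_head, deliver_list_tail);
 */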
3007 
3008 /**
3009  * dp_rx_wbm_desc_nbuf_sanity_check() - Add sanity check for WBM rx_desc
3010  *                                      paddr corruption
3011  * @soc: core txrx main context
3012  * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring
3013  * @ring_desc: REO ring descriptor
3014  * @rx_desc: Rx descriptor
3015  *
3016  * Return: QDF_STATUS
3017  */
3018 QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
3019 					    hal_ring_handle_t hal_ring_hdl,
3020 					    hal_ring_desc_t ring_desc,
3021 					    struct dp_rx_desc *rx_desc);
3022 /**
3023  * dp_rx_is_sg_formation_required() - Check if sg formation is required
3024  * @info: WBM desc info
3025  *
3026  * Return: True if sg is required else false
3027  */
3028 bool dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info);
3029 
3030 /**
3031  * dp_rx_err_tlv_invalidate() - Invalidate network buffer
3032  * @soc: core txrx main context
3033  * @nbuf: Network buffer to invalidate
3034  *
3035  * Return: NONE
3036  */
3037 void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
3038 			      qdf_nbuf_t nbuf);
3039 
3040 /**
3041  * dp_rx_wbm_sg_list_last_msdu_war() - war for HW issue
3042  * @soc: DP SOC handle
3043  *
3044  * This is a war for a HW issue where the length is only valid in the last msdu
3045  *
3046  * Return: NONE
3047  */
3048 void dp_rx_wbm_sg_list_last_msdu_war(struct dp_soc *soc);
3049 
3050 /**
3051  * dp_rx_check_pkt_len() - Check for pktlen validity
3052  * @soc: DP SOC context
3053  * @pkt_len: computed length of the pkt from caller in bytes
3054  *
3055  * Return: true if pktlen > RX_BUFFER_SIZE, else return false
3056  *
3057  */
3058 bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len);
3059 
3060 /**
3061  * dp_rx_null_q_handle_invalid_peer_id_exception() - find the peer when the peer_id is invalid
3062  * @soc: pointer to dp_soc struct
3063  * @pool_id: Pool id to find dp_pdev
3064  * @rx_tlv_hdr: TLV header of received packet
3065  * @nbuf: SKB
3066  *
3067  * In certain types of packets, if the peer_id is not correct, the
3068  * driver may not be able to find the peer. Try finding the peer by
3069  * addr_2 of the received MPDU. If the peer is found, then most likely
3070  * the sw_peer_id & ast_idx are corrupted.
3071  *
3072  * Return: true if the peer is found by addr_2 of the received MPDU, else false
3073  */
3074 bool dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
3075 						   uint8_t pool_id,
3076 						   uint8_t *rx_tlv_hdr,
3077 						   qdf_nbuf_t nbuf);
3078 
3079 /**
3080  * dp_rx_err_drop_3addr_mcast() - Check if feature drop_3addr_mcast is enabled
3081  *                                If so, drop the multicast frame.
3082  * @vdev: datapath vdev
3083  * @rx_tlv_hdr: TLV header
3084  *
3085  * Return: true if packet is to be dropped,
3086  *         false, if packet is not dropped.
3087  */
3088 bool dp_rx_err_drop_3addr_mcast(struct dp_vdev *vdev, uint8_t *rx_tlv_hdr);
3089 
3090 /**
3091  * dp_rx_deliver_to_osif_stack() - function to deliver rx pkts to stack
3092  * @soc: DP soc
3093  * @vdev: DP vdev handle
3094  * @txrx_peer: pointer to the txrx_peer object
3095  * @nbuf: skb list head
3096  * @tail: skb list tail
3097  * @is_eapol: eapol pkt check
3098  *
3099  * Return: None
3100  */
3101 void
3102 dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
3103 			    struct dp_vdev *vdev,
3104 			    struct dp_txrx_peer *txrx_peer,
3105 			    qdf_nbuf_t nbuf,
3106 			    qdf_nbuf_t tail,
3107 			    bool is_eapol);
3108 
3109 /**
3110  * dp_rx_set_wbm_err_info_in_nbuf() - function to set wbm err info in nbuf
3111  * @soc: DP soc
3112  * @nbuf: skb list head
3113  * @wbm_err: wbm error info details
3114  *
3115  * Return: None
3116  */
3117 void
3118 dp_rx_set_wbm_err_info_in_nbuf(struct dp_soc *soc,
3119 			       qdf_nbuf_t nbuf,
3120 			       union hal_wbm_err_info_u wbm_err);
3121 
3122 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
3123 static inline uint8_t
3124 dp_rx_get_defrag_bm_id(struct dp_soc *soc)
3125 {
3126 	return DP_DEFRAG_RBM(soc->wbm_sw0_bm_id);
3127 }
3128 
3129 static inline uint8_t
3130 dp_rx_get_rx_bm_id(struct dp_soc *soc)
3131 {
3132 	return DP_WBM2SW_RBM(soc->wbm_sw0_bm_id);
3133 }
3134 #else
3135 static inline uint8_t
3136 dp_rx_get_rx_bm_id(struct dp_soc *soc)
3137 {
3138 	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
3139 	uint8_t wbm2_sw_rx_rel_ring_id;
3140 
3141 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
3142 
3143 	return HAL_RX_BUF_RBM_SW_BM(soc->wbm_sw0_bm_id,
3144 				    wbm2_sw_rx_rel_ring_id);
3145 }
3146 
3147 static inline uint8_t
3148 dp_rx_get_defrag_bm_id(struct dp_soc *soc)
3149 {
3150 	return dp_rx_get_rx_bm_id(soc);
3151 }
3152 #endif
3153 
3154 #ifndef WLAN_SOFTUMAC_SUPPORT /* WLAN_SOFTUMAC_SUPPORT */
3155 /**
3156  * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
3157  *
3158  * @soc: core txrx main context
3159  * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
3160  * @ring_desc: opaque pointer to the RX ring descriptor
3161  * @rx_desc: host rx descriptor
3162  *
3163  * Return: void
3164  */
3165 void dp_rx_dump_info_and_assert(struct dp_soc *soc,
3166 				hal_ring_handle_t hal_ring_hdl,
3167 				hal_ring_desc_t ring_desc,
3168 				struct dp_rx_desc *rx_desc);
3169 
3170 /**
3171  * dp_rx_link_desc_return() - Return an MPDU link descriptor to HW
3172  *			      (WBM), following error handling
3173  *
3174  * @soc: core DP main context
3175  * @ring_desc: opaque pointer to the REO error ring descriptor
3176  * @bm_action: put to idle_list or release to msdu_list
3177  *
3178  * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
3179  */
3180 QDF_STATUS
3181 dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
3182 		       uint8_t bm_action);
3183 
3184 /**
3185  * dp_rx_link_desc_return_by_addr() - Return an MPDU link descriptor to
3186  *					HW (WBM) by address
3187  *
3188  * @soc: core DP main context
3189  * @link_desc_addr: link descriptor addr
3190  * @bm_action: put to idle_list or release to msdu_list
3191  *
3192  * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
3193  */
3194 QDF_STATUS
3195 dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
3196 			       hal_buff_addrinfo_t link_desc_addr,
3197 			       uint8_t bm_action);
3198 
3199 /**
3200  * dp_rxdma_err_process() - RxDMA error processing functionality
3201  * @int_ctx: pointer to DP interrupt context
3202  * @soc: core txrx main context
3203  * @mac_id: mac id which is one of 3 mac_ids
3204  * @quota: No. of units (packets) that can be serviced in one shot.
3205  *
3206  * Return: num of buffers processed
3207  */
3208 uint32_t
3209 dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
3210 		     uint32_t mac_id, uint32_t quota);
3211 
3212 /**
3213  * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err
3214  *			       frames to OS or wifi parse errors.
3215  * @soc: core DP main context
3216  * @nbuf: buffer pointer
3217  * @rx_tlv_hdr: start of rx tlv header
3218  * @txrx_peer: peer reference
3219  * @err_code: rxdma err code
3220  * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
3221  * pool_id have the same mapping)
3222  * @link_id: link Id on which the packet is received
3223  *
3224  * Return: None
3225  */
3226 void
3227 dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
3228 			uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
3229 			uint8_t err_code, uint8_t mac_id, uint8_t link_id);
3230 
3231 /**
3232  * dp_rx_process_mic_error(): Function to pass mic error indication to umac
3233  * @soc: core DP main context
3234  * @nbuf: buffer pointer
3235  * @rx_tlv_hdr: start of rx tlv header
3236  * @txrx_peer: txrx peer handle
3237  *
3238  * Return: void
3239  */
3240 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
3241 			     uint8_t *rx_tlv_hdr,
3242 			     struct dp_txrx_peer *txrx_peer);
3243 
3244 /**
3245  * dp_2k_jump_handle() - Function to handle 2k jump exception
3246  *                        on WBM ring
3247  * @soc: core DP main context
3248  * @nbuf: buffer pointer
3249  * @rx_tlv_hdr: start of rx tlv header
3250  * @peer_id: peer id of first msdu
3251  * @tid: Tid for which exception occurred
3252  *
3253  * This function handles 2k jump violations arising out
3254  * of receiving aggregates in non BA case. This typically
3255  * may happen if aggregates are received on a QOS enabled TID
3256  * while Rx window size is still initialized to value of 2. Or
3257  * it may also happen if negotiated window size is 1 but peer
3258  * sends aggregates.
3259  */
3260 void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
3261 		       uint16_t peer_id, uint8_t tid);
3262 
3263 #ifndef QCA_HOST_MODE_WIFI_DISABLED
3264 
3265 /**
3266  * dp_rx_err_process() - Processes error frames routed to REO error ring
3267  * @int_ctx: pointer to DP interrupt context
3268  * @soc: core txrx main context
3269  * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be serviced
3270  * @quota: No. of units (packets) that can be serviced in one shot.
3271  *
3272  * This function implements error processing and top level demultiplexer
3273  * for all the frames routed to REO error ring.
3274  *
3275  * Return: uint32_t: No. of elements processed
3276  */
3277 uint32_t dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
3278 			   hal_ring_handle_t hal_ring_hdl, uint32_t quota);
3279 
3280 /**
3281  * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
3282  * @int_ctx: pointer to DP interrupt context
3283  * @soc: core txrx main context
3284  * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be
3285  *                serviced
3286  * @quota: No. of units (packets) that can be serviced in one shot.
3287  *
3288  * This function implements error processing and top level demultiplexer
3289  * for all the frames routed to WBM2HOST sw release ring.
3290  *
3291  * Return: uint32_t: No. of elements processed
3292  */
3293 uint32_t
3294 dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
3295 		      hal_ring_handle_t hal_ring_hdl, uint32_t quota);
3296 
3297 #ifdef QCA_OL_RX_LOCK_LESS_ACCESS
3298 /**
3299  * dp_rx_srng_access_start() - Wrapper function to log access start of a hal ring
3300  * @int_ctx: pointer to DP interrupt context
3301  * @soc: DP soc structure pointer
3302  * @hal_ring_hdl: HAL ring handle
3303  *
3304  * Return: 0 on success; error on failure
3305  */
3306 static inline int
3307 dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc,
3308 			hal_ring_handle_t hal_ring_hdl)
3309 {
3310 	return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl);
3311 }
3312 
3313 /**
3314  * dp_rx_srng_access_end() - Wrapper function to log access end of a hal ring
3315  * @int_ctx: pointer to DP interrupt context
3316  * @soc: DP soc structure pointer
3317  * @hal_ring_hdl: HAL ring handle
3318  *
3319  * Return: None
3320  */
3321 static inline void
3322 dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc,
3323 		      hal_ring_handle_t hal_ring_hdl)
3324 {
3325 	hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl);
3326 }
3327 #else
3328 static inline int
3329 dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc,
3330 			hal_ring_handle_t hal_ring_hdl)
3331 {
3332 	return dp_srng_access_start(int_ctx, soc, hal_ring_hdl);
3333 }
3334 
3335 static inline void
3336 dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc,
3337 		      hal_ring_handle_t hal_ring_hdl)
3338 {
3339 	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
3340 }
3341 #endif
3342 
3343 #ifdef RX_DESC_SANITY_WAR
3344 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
3345 			     hal_ring_handle_t hal_ring_hdl,
3346 			     hal_ring_desc_t ring_desc,
3347 			     struct dp_rx_desc *rx_desc);
3348 #else
3349 static inline
3350 QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
3351 			     hal_ring_handle_t hal_ring_hdl,
3352 			     hal_ring_desc_t ring_desc,
3353 			     struct dp_rx_desc *rx_desc)
3354 {
3355 	return QDF_STATUS_SUCCESS;
3356 }
3357 #endif
3358 
3359 #ifdef RX_DESC_DEBUG_CHECK
3360 /**
3361  * dp_rx_desc_nbuf_sanity_check() - Add sanity check to catch REO rx_desc paddr
3362  *				  corruption
3363  * @soc: DP SoC context
3364  * @ring_desc: REO ring descriptor
3365  * @rx_desc: Rx descriptor
3366  *
3367  * Return: QDF_STATUS
3368  */
3369 QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
3370 					hal_ring_desc_t ring_desc,
3371 					struct dp_rx_desc *rx_desc);
3372 #else
3373 static inline
3374 QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
3375 					hal_ring_desc_t ring_desc,
3376 					struct dp_rx_desc *rx_desc)
3377 {
3378 	return QDF_STATUS_SUCCESS;
3379 }
3380 #endif
3381 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
3382 
3383 /**
3384  * dp_rx_wbm_sg_list_reset() - Initialize sg list
3385  *
3386  * This api should be called at soc init and after every sg processing.
3387  * @soc: DP SOC handle
3388  */
3389 static inline void dp_rx_wbm_sg_list_reset(struct dp_soc *soc)
3390 {
3391 	if (soc) {
3392 		soc->wbm_sg_param.wbm_is_first_msdu_in_sg = false;
3393 		soc->wbm_sg_param.wbm_sg_nbuf_head = NULL;
3394 		soc->wbm_sg_param.wbm_sg_nbuf_tail = NULL;
3395 		soc->wbm_sg_param.wbm_sg_desc_msdu_len = 0;
3396 	}
3397 }
3398 
3399 /**
3400  * dp_rx_wbm_sg_list_deinit() - De-initialize sg list
3401  *
3402  * This api should be called in down path, to avoid any leak.
3403  * @soc: DP SOC handle
3404  */
3405 static inline void dp_rx_wbm_sg_list_deinit(struct dp_soc *soc)
3406 {
3407 	if (soc) {
3408 		if (soc->wbm_sg_param.wbm_sg_nbuf_head)
3409 			qdf_nbuf_list_free(soc->wbm_sg_param.wbm_sg_nbuf_head);
3410 
3411 		dp_rx_wbm_sg_list_reset(soc);
3412 	}
3413 }
3414 
3415 /**
3416  * dp_rx_link_desc_refill_duplicate_check() - check whether the link desc
3417  *					      to be refilled is a duplicate
3418  * @soc: DP SOC handle
3419  * @buf_info: the last link desc buf info
3420  * @ring_buf_info: current buf address pointer including link desc
3421  *
3422  * Return: none.
3423  */
3424 void dp_rx_link_desc_refill_duplicate_check(
3425 				struct dp_soc *soc,
3426 				struct hal_buf_info *buf_info,
3427 				hal_buff_addrinfo_t ring_buf_info);
3428 /**
3429  * dp_rx_srng_get_num_pending() - get number of pending entries
3430  * @hal_soc: hal soc opaque pointer
3431  * @hal_ring_hdl: opaque pointer to the HAL Rx Ring
3432  * @num_entries: number of entries in the hal_ring.
3433  * @near_full: pointer to a boolean. This is set if ring is near full.
3434  *
3435  * The function returns the number of entries in a destination ring which are
3436  * yet to be reaped. The function also checks if the ring is near full.
3437  * If more than half of the ring needs to be reaped, the ring is considered
3438  * approaching full.
3439  * The function uses hal_srng_dst_num_valid_locked to get the number of valid
3440  * entries. It should not be called within a SRNG lock. HW pointer value is
3441  * synced into cached_hp.
3442  *
3443  * Return: Number of pending entries if any
3444  */
3445 uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
3446 				    hal_ring_handle_t hal_ring_hdl,
3447 				    uint32_t num_entries,
3448 				    bool *near_full);
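
/*
 * Usage sketch (illustrative): callers typically check the pending count
 * outside the SRNG lock and arm another processing pass when the ring is
 * reported as near full; the locals and schedule_another_rx_pass() below
 * are hypothetical placeholders.
 *
 *	uint32_t pending;
 *	bool near_full;
 *
 *	pending = dp_rx_srng_get_num_pending(hal_soc, hal_ring_hdl,
 *					     num_entries, &near_full);
 *	if (near_full)
 *		schedule_another_rx_pass();
 */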
3449 
3450 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
3451 /**
3452  * dp_rx_ring_record_entry() - Record an entry into the rx ring history.
3453  * @soc: Datapath soc structure
3454  * @ring_num: REO ring number
3455  * @ring_desc: REO ring descriptor
3456  *
3457  * Return: None
3458  */
3459 void dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
3460 			     hal_ring_desc_t ring_desc);
3461 #else
3462 static inline void
3463 dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
3464 			hal_ring_desc_t ring_desc)
3465 {
3466 }
3467 #endif
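
/*
 * Illustrative sketch (not part of this header): recording each reaped REO
 * descriptor so the per-ring history can be inspected after a failure. The
 * surrounding loop is an assumption for the example; with the feature
 * disabled the call compiles to a no-op.
 *
 *	while ((ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl))) {
 *		dp_rx_ring_record_entry(soc, reo_ring_num, ring_desc);
 *		... continue normal descriptor processing ...
 *	}
 */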
3468 
3469 #ifdef QCA_SUPPORT_WDS_EXTENDED
3470 /**
3471  * dp_rx_is_list_ready() - Check if the pending list should be flushed so that
3472  *			   4-address and 3-address frames go into separate lists
3473  * @nbuf_head: skb list head
3474  * @vdev: vdev
3475  * @txrx_peer: txrx_peer
3476  * @peer_id: peer id of new received frame
3477  * @vdev_id: vdev_id of new received frame
3478  *
3479  * Return: true if peer_ids are different.
3480  */
3481 static inline bool
3482 dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
3483 		    struct dp_vdev *vdev,
3484 		    struct dp_txrx_peer *txrx_peer,
3485 		    uint16_t peer_id,
3486 		    uint8_t vdev_id)
3487 {
3488 	if (nbuf_head && txrx_peer && txrx_peer->peer_id != peer_id)
3489 		return true;
3490 
3491 	return false;
3492 }
3493 
3494 /**
3495  * dp_rx_deliver_to_stack_ext() - Deliver to netdev per sta
3496  * @soc: core txrx main context
3497  * @vdev: vdev
3498  * @txrx_peer: txrx peer
3499  * @nbuf_head: skb list head
3500  *
3501  * Return: true if packet is delivered to netdev per STA.
3502  */
3503 bool
3504 dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
3505 			   struct dp_txrx_peer *txrx_peer,
3506 			   qdf_nbuf_t nbuf_head);
3507 #else
3508 static inline bool
3509 dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
3510 		    struct dp_vdev *vdev,
3511 		    struct dp_txrx_peer *txrx_peer,
3512 		    uint16_t peer_id,
3513 		    uint8_t vdev_id)
3514 {
3515 	if (nbuf_head && vdev && (vdev->vdev_id != vdev_id))
3516 		return true;
3517 
3518 	return false;
3519 }
3520 #endif
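
/*
 * Illustrative sketch (not part of this header): how an rx reap loop might use
 * dp_rx_is_list_ready() to keep traffic for different peers out of one
 * delivery list. The delivery/append helpers shown are assumptions for the
 * example.
 *
 *	if (dp_rx_is_list_ready(deliver_list_head, vdev, txrx_peer,
 *				peer_id, vdev_id)) {
 *		a frame for a different peer/vdev arrived: flush what has
 *		been accumulated before starting a new list
 *		dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
 *				       deliver_list_head, deliver_list_tail);
 *		deliver_list_head = NULL;
 *		deliver_list_tail = NULL;
 *	}
 *	DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
 */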
3521 
3522 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
3523 /**
3524  * dp_rx_mark_first_packet_after_wow_wakeup() - mark the first packet received after wow wakeup
3525  * @pdev: pointer to dp_pdev structure
3526  * @rx_tlv: pointer to rx_pkt_tlvs structure
3527  * @nbuf: pointer to skb buffer
3528  *
3529  * Return: None
3530  */
3531 void dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev,
3532 					      uint8_t *rx_tlv,
3533 					      qdf_nbuf_t nbuf);
3534 #else
3535 static inline void
3536 dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev,
3537 					 uint8_t *rx_tlv,
3538 					 qdf_nbuf_t nbuf)
3539 {
3540 }
3541 #endif
3542 
3543 #else
3544 static inline QDF_STATUS
3545 dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
3546 			       hal_buff_addrinfo_t link_desc_addr,
3547 			       uint8_t bm_action)
3548 {
3549 	return QDF_STATUS_SUCCESS;
3550 }
3551 
3552 static inline void dp_rx_wbm_sg_list_reset(struct dp_soc *soc)
3553 {
3554 }
3555 
3556 static inline void dp_rx_wbm_sg_list_deinit(struct dp_soc *soc)
3557 {
3558 }
3559 
3560 static inline uint32_t
3561 dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
3562 		     uint32_t mac_id, uint32_t quota)
3563 {
3564 	return 0;
3565 }
3566 #endif /* WLAN_SOFTUMAC_SUPPORT */
3567 
3568 #ifndef CONFIG_NBUF_AP_PLATFORM
3569 static inline uint8_t
3570 dp_rx_get_stats_arr_idx_from_link_id(qdf_nbuf_t nbuf,
3571 				     struct dp_txrx_peer *txrx_peer)
3572 {
3573 	return QDF_NBUF_CB_RX_LOGICAL_LINK_ID(nbuf);
3574 }
3575 #else
3576 static inline uint8_t
3577 dp_rx_get_stats_arr_idx_from_link_id(qdf_nbuf_t nbuf,
3578 				     struct dp_txrx_peer *txrx_peer)
3579 {
3580 	uint8_t link_id = 0;
3581 
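	/* Map the 0-based HW link id to a 1-based stats index; anything
	 * beyond DP_MAX_MLO_LINKS is treated as invalid, falling back to
	 * index 0 and bumping the invalid-link-id counter.
	 */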
3582 	link_id = (QDF_NBUF_CB_RX_HW_LINK_ID(nbuf) + 1);
3583 	if (link_id > DP_MAX_MLO_LINKS) {
3584 		link_id = 0;
3585 		DP_PEER_PER_PKT_STATS_INC(txrx_peer,
3586 					  rx.inval_link_id_pkt_cnt,
3587 					  1, link_id);
3588 	}
3589 
3590 	return link_id;
3591 }
3592 #endif /* CONFIG_NBUF_AP_PLATFORM */
3593 
3594 #endif /* _DP_RX_H */
3595