/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_RX_H
#define _DP_RX_H

#include "hal_rx.h"
#include "dp_tx.h"
#include "dp_peer.h"

#ifdef RXDMA_OPTIMIZATION
#define RX_BUFFER_ALIGNMENT     128
#else /* RXDMA_OPTIMIZATION */
#define RX_BUFFER_ALIGNMENT     4
#endif /* RXDMA_OPTIMIZATION */

#define RX_BUFFER_SIZE			2048
#define RX_BUFFER_RESERVATION   0

#define DP_PEER_METADATA_PEER_ID_MASK	0x0000ffff
#define DP_PEER_METADATA_PEER_ID_SHIFT	0
#define DP_PEER_METADATA_VDEV_ID_MASK	0x00070000
#define DP_PEER_METADATA_VDEV_ID_SHIFT	16

#define DP_PEER_METADATA_PEER_ID_GET(_peer_metadata)		\
	(((_peer_metadata) & DP_PEER_METADATA_PEER_ID_MASK)	\
			>> DP_PEER_METADATA_PEER_ID_SHIFT)

#define DP_PEER_METADATA_ID_GET(_peer_metadata)			\
	(((_peer_metadata) & DP_PEER_METADATA_VDEV_ID_MASK)	\
			>> DP_PEER_METADATA_VDEV_ID_SHIFT)
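
/*
 * Example (illustrative only, not part of this header): unpacking the
 * peer_metadata word delivered in the REO destination ring descriptor
 * with the accessors above. The value 0x00025003 is a made-up sample,
 * not taken from any real descriptor.
 *
 *	uint32_t peer_metadata = 0x00025003;
 *	uint16_t peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_metadata);
 *	uint8_t vdev_id = DP_PEER_METADATA_ID_GET(peer_metadata);
 *
 *	// peer_id = 0x00025003 & 0xffff        = 0x5003
 *	// vdev_id = (0x00025003 & 0x70000) >> 16 = 2
 */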

#define DP_RX_DESC_MAGIC 0xdec0de

/**
 * struct dp_rx_desc
 *
 * @nbuf		: VA of the "skb" posted
 * @rx_buf_start	: VA of the original Rx buffer, before
 *			  movement of any skb->data pointer
 * @cookie		: index into the sw array which holds
 *			  the sw Rx descriptors
 *			  Cookie space is 21 bits:
 *			  lower 18 bits -- index
 *			  upper  3 bits -- pool_id
 * @pool_id		: pool ID for which this descriptor was allocated.
 *			  Can only be used if there is no flow
 *			  steering
 * @in_use		: rx_desc is in use
 * @unmapped		: marks the rx_desc as unmapped when the
 *			  corresponding nbuf has already been unmapped
 */
struct dp_rx_desc {
	qdf_nbuf_t nbuf;
	uint8_t *rx_buf_start;
	uint32_t cookie;
	uint8_t	 pool_id;
#ifdef RX_DESC_DEBUG_CHECK
	uint32_t magic;
#endif
	uint8_t	in_use:1,
		unmapped:1;
};

#define RX_DESC_COOKIE_INDEX_SHIFT		0
#define RX_DESC_COOKIE_INDEX_MASK		0x3ffff /* 18 bits */
#define RX_DESC_COOKIE_POOL_ID_SHIFT		18
#define RX_DESC_COOKIE_POOL_ID_MASK		0x1c0000

#define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >>	\
			RX_DESC_COOKIE_POOL_ID_SHIFT)

#define DP_RX_DESC_COOKIE_INDEX_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >>	\
			RX_DESC_COOKIE_INDEX_SHIFT)
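
/*
 * Example (illustrative only): composing a cookie from a pool id and an
 * index, then recovering both with the accessors above. The layout is
 * pool_id[20:18] | index[17:0], matching the masks/shifts defined here;
 * the values 3 and 0x1234 are arbitrary samples.
 *
 *	uint32_t cookie = (3 << RX_DESC_COOKIE_POOL_ID_SHIFT) |
 *			  (0x1234 << RX_DESC_COOKIE_INDEX_SHIFT);
 *
 *	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie); // 3
 *	uint32_t index  = DP_RX_DESC_COOKIE_INDEX_GET(cookie);   // 0x1234
 */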

/*
 * dp_rx_xor_block() - XOR a block of data
 * @b: destination data block
 * @a: source data block
 * @len: length of the data to process
 *
 * Return: None
 */
static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len)
{
	qdf_size_t i;

	for (i = 0; i < len; i++)
		b[i] ^= a[i];
}

/*
 * dp_rx_rotl() - rotate the bits left
 * @val: unsigned integer input value
 * @bits: number of bits to rotate
 *
 * Return: input value rotated left by 'bits'
 */
static inline uint32_t dp_rx_rotl(uint32_t val, int bits)
{
	return (val << bits) | (val >> (32 - bits));
}

/*
 * dp_rx_rotr() - rotate the bits right
 * @val: unsigned integer input value
 * @bits: number of bits to rotate
 *
 * Return: input value rotated right by 'bits'
 */
static inline uint32_t dp_rx_rotr(uint32_t val, int bits)
{
	return (val >> bits) | (val << (32 - bits));
}

/*
 * dp_rx_xswap() - swap adjacent bytes within each 16-bit half
 * @val: unsigned integer input value
 *
 * Return: input value with the two bytes of each 16-bit half swapped
 */
static inline uint32_t dp_rx_xswap(uint32_t val)
{
	return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
}

/*
 * dp_rx_get_le32_split() - assemble 32 bits from four split bytes
 * @b0: byte 0
 * @b1: byte 1
 * @b2: byte 2
 * @b3: byte 3
 *
 * Return: 32-bit value assembled from the bytes in little-endian order
 */
static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2,
					uint8_t b3)
{
	return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
}

/*
 * dp_rx_get_le32() - get little-endian 32 bits
 * @p: source byte array
 *
 * Return: 32-bit value read from 'p' in little-endian byte order
 */
static inline uint32_t dp_rx_get_le32(const uint8_t *p)
{
	return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]);
}

/*
 * dp_rx_put_le32() - put little endian 32 bits
 * @p: destination char array
 * @v: source 32-bit integer
 *
 * Return: None
 */
static inline void dp_rx_put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (v) & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}
/* Single-block mixing step of the Michael MIC */
#define dp_rx_michael_block(l, r)		\
	do {					\
		r ^= dp_rx_rotl(l, 17);		\
		l += r;				\
		r ^= dp_rx_xswap(l);		\
		l += r;				\
		r ^= dp_rx_rotl(l, 3);		\
		l += r;				\
		r ^= dp_rx_rotr(l, 2);		\
		l += r;				\
	} while (0)
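
/*
 * Example (illustrative sketch, not the driver's MIC routine): the helpers
 * above are the building blocks of the Michael MIC. A minimal keyed digest
 * over 4-byte-aligned data could look like the following; key, data,
 * data_len and mic are hypothetical caller-provided variables, and real
 * Michael additionally appends 0x5a padding before the final blocks.
 *
 *	uint32_t l = dp_rx_get_le32(key);	// key is 8 bytes
 *	uint32_t r = dp_rx_get_le32(key + 4);
 *	qdf_size_t i;
 *
 *	for (i = 0; i < data_len; i += 4) {
 *		l ^= dp_rx_get_le32(data + i);
 *		dp_rx_michael_block(l, r);
 *	}
 *
 *	dp_rx_put_le32(mic, l);			// 8-byte MIC output
 *	dp_rx_put_le32(mic + 4, r);
 */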

/**
 * union dp_rx_desc_list_elem_t
 *
 * @next		: Next pointer to form free list
 * @rx_desc		: DP Rx descriptor
 */
union dp_rx_desc_list_elem_t {
	union dp_rx_desc_list_elem_t *next;
	struct dp_rx_desc rx_desc;
};

/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to look up the virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_buf[pool_id].array[index].rx_desc);
}
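
/*
 * Example (illustrative only): a typical reap sequence pairs the cookie
 * from a reaped ring entry with this lookup. rx_buf_cookie is assumed to
 * have been extracted from the ring descriptor by the HAL layer.
 *
 *	struct dp_rx_desc *rx_desc;
 *
 *	rx_desc = (struct dp_rx_desc *)
 *			dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
 *	if (rx_desc && rx_desc->in_use)
 *		... // process rx_desc->nbuf
 */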

/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to look up the virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_mon[pool_id].array[index].rx_desc);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor status ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to look up the virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_status[pool_id].array[index].rx_desc);
}

void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				union dp_rx_desc_list_elem_t **local_desc_list,
				union dp_rx_desc_list_elem_t **tail,
				uint16_t pool_id,
				struct rx_desc_pool *rx_desc_pool);

uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				struct rx_desc_pool *rx_desc_pool,
				uint16_t num_descs,
				union dp_rx_desc_list_elem_t **desc_list,
				union dp_rx_desc_list_elem_t **tail);

QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);

void dp_rx_pdev_detach(struct dp_pdev *pdev);

uint32_t
dp_rx_process(struct dp_intr *int_ctx, void *hal_ring, uint32_t quota);

uint32_t dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota);

uint32_t
dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota);

void
dp_rx_sg_create(qdf_nbuf_t nbuf,
		uint8_t *rx_tlv_hdr,
		uint16_t *mpdu_len,
		bool *is_first_frag,
		uint16_t *frag_list_len,
		qdf_nbuf_t *head_frag_nbuf,
		qdf_nbuf_t *frag_list_head,
		qdf_nbuf_t *frag_list_tail);

QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				uint32_t pool_id,
				uint32_t pool_size,
				struct rx_desc_pool *rx_desc_pool);

void dp_rx_desc_pool_free(struct dp_soc *soc,
				uint32_t pool_id,
				struct rx_desc_pool *rx_desc_pool);

void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
				struct dp_peer *peer);

/**
 * dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list
 *
 * @head: pointer to the head of local free list
 * @tail: pointer to the tail of local free list
 * @new: new descriptor that is added to the free list
 *
 * Return: None
 */
static inline
void dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head,
				 union dp_rx_desc_list_elem_t **tail,
				 struct dp_rx_desc *new)
{
	qdf_assert(head && new);

	new->nbuf = NULL;
	new->in_use = 0;
	new->unmapped = 0;

	((union dp_rx_desc_list_elem_t *)new)->next = *head;
	*head = (union dp_rx_desc_list_elem_t *)new;
	if (*tail == NULL)
		*tail = *head;
}
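
/*
 * Example (illustrative only): during ring reap, descriptors are chained
 * onto a local list and later handed back to the pool in one call.
 * rx_desc, pool_id and rx_desc_pool are assumed to come from the caller's
 * context.
 *
 *	union dp_rx_desc_list_elem_t *head = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *
 *	dp_rx_add_to_free_desc_list(&head, &tail, rx_desc);
 *	...
 *	dp_rx_add_desc_list_to_free_list(soc, &head, &tail,
 *					 pool_id, rx_desc_pool);
 */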

/**
 * dp_rx_wds_srcport_learn() - Add or update the STA PEER which
 *				is behind the WDS repeater.
 *
 * @soc: core txrx main context
 * @rx_tlv_hdr: base address of RX TLV header
 * @ta_peer: WDS repeater peer
 * @nbuf: rx pkt
 *
 * Return: None
 */
#ifdef FEATURE_WDS
static inline void
dp_rx_wds_srcport_learn(struct dp_soc *soc,
			uint8_t *rx_tlv_hdr,
			struct dp_peer *ta_peer,
			qdf_nbuf_t nbuf)
{
	uint16_t sa_sw_peer_id = hal_rx_msdu_end_sa_sw_peer_id_get(rx_tlv_hdr);
	uint32_t flags = IEEE80211_NODE_F_WDS_HM;
	uint32_t ret = 0;
	uint8_t wds_src_mac[IEEE80211_ADDR_LEN];

	/* Do wds source port learning only if it is a 4-address mpdu */
	if (!(qdf_nbuf_is_rx_chfrag_start(nbuf) &&
		hal_rx_get_mpdu_mac_ad4_valid(rx_tlv_hdr)))
		return;

	memcpy(wds_src_mac, (qdf_nbuf_data(nbuf) + IEEE80211_ADDR_LEN),
		IEEE80211_ADDR_LEN);

	if (qdf_unlikely(!hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr))) {
		ret = dp_peer_add_ast(soc,
					ta_peer,
					wds_src_mac,
					CDP_TXRX_AST_TYPE_WDS,
					flags);
	} else {
		/*
		 * Get the AST entry from HW SA index and mark it as active
		 */
		struct dp_ast_entry *ast;
		uint16_t sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr);

		ast = soc->ast_table[sa_idx];

		/*
		 * Ensure we are updating the right AST entry by
		 * validating ast_idx. There is a possibility we
		 * might arrive here without an AST MAP event, so
		 * this check is mandatory.
		 */
		if (ast && (ast->ast_idx == sa_idx))
			ast->is_active = TRUE;

		if (ast && sa_sw_peer_id != ta_peer->peer_ids[0])
			dp_peer_update_ast(soc, ta_peer, ast, flags);
	}
}
#else
static inline void
dp_rx_wds_srcport_learn(struct dp_soc *soc,
		uint8_t *rx_tlv_hdr,
		struct dp_peer *ta_peer,
		qdf_nbuf_t nbuf)
{
}
#endif

uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf);
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done);
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr);

#define DP_RX_LIST_APPEND(head, tail, elem) \
	do {                                                \
		if (!(head)) {                              \
			(head) = (elem);                    \
		} else {                                    \
			qdf_nbuf_set_next((tail), (elem));  \
		}                                           \
		(tail) = (elem);                            \
		qdf_nbuf_set_next((tail), NULL);            \
	} while (0)
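
/*
 * Example (illustrative only): building a delivery list of nbufs while
 * walking reaped descriptors. nbuf is assumed to be each reaped buffer in
 * turn; after the loop the head points at a NULL-terminated nbuf chain.
 *
 *	qdf_nbuf_t deliver_list_head = NULL;
 *	qdf_nbuf_t deliver_list_tail = NULL;
 *
 *	DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
 */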

#ifndef BUILD_X86
static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
				qdf_dma_addr_t *paddr, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
#else
#define MAX_RETRY 100
static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
				qdf_dma_addr_t *paddr, struct dp_pdev *pdev)
{
	uint32_t nbuf_retry = 0;
	int32_t ret;
	const uint32_t x86_phy_addr = 0x50000000;
	/*
	 * In M2M emulation platforms (x86) the memory below 0x50000000
	 * is reserved for target use, so any memory allocated in this
	 * region should not be used by the host.
	 */
	do {
		if (qdf_likely(*paddr > x86_phy_addr))
			return QDF_STATUS_SUCCESS;
		else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				"phy addr %pK is below 0x50000000, trying again\n",
				paddr);

			nbuf_retry++;
			if ((*rx_netbuf)) {
				qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
							QDF_DMA_BIDIRECTIONAL);
				/* Not freeing the buffer intentionally.
				 * Observed that the same buffer would get
				 * re-allocated, resulting in longer load time
				 * and WMI init timeout.
				 * This buffer is anyway not useful, so skip it.
				 */
			}

			*rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
							RX_BUFFER_SIZE,
							RX_BUFFER_RESERVATION,
							RX_BUFFER_ALIGNMENT,
							FALSE);

			if (qdf_unlikely(!(*rx_netbuf)))
				return QDF_STATUS_E_FAILURE;

			ret = qdf_nbuf_map_single(dp_soc->osdev, *rx_netbuf,
							QDF_DMA_BIDIRECTIONAL);

			if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
				qdf_nbuf_free(*rx_netbuf);
				*rx_netbuf = NULL;
				continue;
			}

			*paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0);
		}
	} while (nbuf_retry < MAX_RETRY);

	if ((*rx_netbuf)) {
		qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
					QDF_DMA_BIDIRECTIONAL);
		qdf_nbuf_free(*rx_netbuf);
	}

	return QDF_STATUS_E_FAILURE;
}
#endif

/**
 * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
 *				   the MSDU Link Descriptor
 * @soc: core txrx main context
 * @buf_info: buf_info includes the cookie that is used to look up the
 * virtual address of the link descriptor. Normally this is just an index
 * into a per-SOC array.
 *
 * This is the VA of the link descriptor, that HAL layer later uses to
 * retrieve the list of MSDU's for a given MPDU.
 *
 * Return: void *: Virtual Address of the MSDU link descriptor
 */
static inline
void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
				  struct hal_buf_info *buf_info)
{
	void *link_desc_va;
	uint32_t bank_id = LINK_DESC_COOKIE_BANK_ID(buf_info->sw_cookie);

	/* TODO */
	/* Add sanity for cookie */

	link_desc_va = soc->link_desc_banks[bank_id].base_vaddr +
		(buf_info->paddr -
			soc->link_desc_banks[bank_id].base_paddr);

	return link_desc_va;
}
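
/*
 * Example (illustrative only): buf_info is assumed to have been populated
 * from a REO destination ring entry by the HAL layer before this lookup.
 *
 *	struct hal_buf_info buf_info = {0};
 *	void *link_desc_va;
 *
 *	// HAL fills buf_info.paddr and buf_info.sw_cookie from the entry
 *	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
 */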

/**
 * dp_rx_cookie_2_mon_link_desc_va() - Converts cookie to a virtual address of
 *				   the MSDU Link Descriptor
 * @pdev: core txrx pdev context
 * @buf_info: buf_info includes the cookie that is used to look up the
 * virtual address of the link descriptor. Normally this is just an index
 * into a per-pdev array.
 *
 * This is the VA of the link descriptor in the monitor mode destination
 * ring, that HAL layer later uses to retrieve the list of MSDU's for a
 * given MPDU.
 *
 * Return: void *: Virtual Address of the MSDU link descriptor
 */
static inline
void *dp_rx_cookie_2_mon_link_desc_va(struct dp_pdev *pdev,
				  struct hal_buf_info *buf_info)
{
	void *link_desc_va;

	/* TODO */
	/* Add sanity for cookie */

	link_desc_va = pdev->link_desc_banks[buf_info->sw_cookie].base_vaddr +
		(buf_info->paddr -
			pdev->link_desc_banks[buf_info->sw_cookie].base_paddr);
	return link_desc_va;
}

/**
 * dp_rx_defrag_concat() - Concatenate the fragments
 *
 * @dst: destination pointer to the buffer
 * @src: source pointer from where the fragment payload is to be copied
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src)
{
	/*
	 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst
	 * to provide space for src, the headroom portion is copied from
	 * the original dst buffer to the larger new dst buffer.
	 * (This is needed, because the headroom of the dst buffer
	 * contains the rx desc.)
	 */
	if (qdf_nbuf_cat(dst, src))
		return QDF_STATUS_E_DEFRAG_ERROR;

	return QDF_STATUS_SUCCESS;
}
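
/*
 * Example (illustrative only): during defragmentation each subsequent
 * fragment is appended to the head fragment; on failure the caller is
 * expected to drop the whole MPDU. head_nbuf and frag_nbuf are
 * hypothetical caller-held buffers.
 *
 *	if (dp_rx_defrag_concat(head_nbuf, frag_nbuf) !=
 *	    QDF_STATUS_SUCCESS)
 *		... // flush the partial MPDU and free the fragments
 */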

/*
 * dp_rx_ast_set_active() - set the active flag of the AST entry
 *			    corresponding to a hw index.
 * @soc: core txrx main context
 * @sa_idx: hw idx
 * @is_active: active flag
 *
 * Return: QDF_STATUS
 */
#ifdef FEATURE_WDS
static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc,
					      uint16_t sa_idx, bool is_active)
{
	struct dp_ast_entry *ast;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast = soc->ast_table[sa_idx];

	/*
	 * Ensure we are updating the right AST entry by
	 * validating ast_idx. There is a possibility we
	 * might arrive here without an AST MAP event, so
	 * this check is mandatory.
	 */
	if (ast && (ast->ast_idx == sa_idx)) {
		ast->is_active = is_active;
		qdf_spin_unlock_bh(&soc->ast_lock);
		return QDF_STATUS_SUCCESS;
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
	return QDF_STATUS_E_FAILURE;
}
#else
static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc,
					      uint16_t sa_idx, bool is_active)
{
	return QDF_STATUS_SUCCESS;
}
#endif
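
/*
 * Example (illustrative only): aging logic can mark an entry inactive and
 * use the return status to detect a stale hw index.
 *
 *	if (dp_rx_ast_set_active(soc, sa_idx, false) !=
 *	    QDF_STATUS_SUCCESS)
 *		... // no valid AST entry at sa_idx
 */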

/*
 * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet.
 *					In qwrap mode, packets originating
 *					from any vdev should not be looped
 *					back and should be dropped.
 * @vdev: vdev on which rx packet is received
 * @nbuf: rx pkt
 *
 * Return: true if the packet should be dropped as a loopback packet
 */
#if ATH_SUPPORT_WRAP
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						qdf_nbuf_t nbuf)
{
	struct dp_vdev *psta_vdev;
	struct dp_pdev *pdev = vdev->pdev;
	uint8_t *data = qdf_nbuf_data(nbuf);

	if (qdf_unlikely(vdev->proxysta_vdev)) {
		/* In qwrap isolation mode, allow loopback packets as all
		 * packets go to the RootAP and loop back on the mpsta.
		 */
		if (vdev->isolation_vdev)
			return false;
		TAILQ_FOREACH(psta_vdev, &pdev->vdev_list, vdev_list_elem) {
			if (qdf_unlikely(psta_vdev->proxysta_vdev &&
				!qdf_mem_cmp(psta_vdev->mac_addr.raw,
				&data[DP_MAC_ADDR_LEN], DP_MAC_ADDR_LEN))) {
				/* Drop packet if source address is equal to
				 * any of the vdev addresses.
				 */
				return true;
			}
		}
	}
	return false;
}
#else
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						qdf_nbuf_t nbuf)
{
	return false;
}
#endif

/*
 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *			       called during dp rx initialization
 *			       and at the end of dp_rx_process.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 * @owner: who owns the nbuf (host, NSS etc...)
 *
 * Return: return success or failure
 */
QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				 struct dp_srng *dp_rxdma_srng,
				 struct rx_desc_pool *rx_desc_pool,
				 uint32_t num_req_buffers,
				 union dp_rx_desc_list_elem_t **desc_list,
				 union dp_rx_desc_list_elem_t **tail,
				 uint8_t owner);
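
/*
 * Example (illustrative only): an init-time call passes no local free
 * list, so the pool itself supplies descriptors; desc_list/tail are only
 * non-NULL when called from dp_rx_process with reaped descriptors.
 * mac_id, rxdma_srng, rx_desc_pool, num_entries and owner are assumed to
 * come from the caller's attach context.
 *
 *	union dp_rx_desc_list_elem_t *desc_list = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *
 *	dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
 *				num_entries, &desc_list, &tail, owner);
 */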

/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 *	       containing the buffer_addr_info
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action);

QDF_STATUS
dp_rx_link_desc_buf_return(struct dp_soc *soc, struct dp_srng *dp_rxdma_srng,
				void *buf_addr_info, uint8_t bm_action);

/**
 * dp_rx_link_desc_return_by_addr() - Return a MPDU link descriptor to
 *					HW (WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc, void *link_desc_addr,
					uint8_t bm_action);

uint32_t
dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id,
						uint32_t quota);

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				uint8_t *rx_tlv_hdr, struct dp_peer *peer);
QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					uint8_t *rx_tlv_hdr);

int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
				struct dp_peer *peer, int rx_mcast);

#endif /* _DP_RX_H */