xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.h (revision 6ecd284e5a94a1c96e26d571dd47419ac305990d)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _DP_RX_H
20 #define _DP_RX_H
21 
22 #include "hal_rx.h"
23 #include "dp_tx.h"
24 #include "dp_peer.h"
25 #include "dp_internal.h"
26 
27 #ifdef RXDMA_OPTIMIZATION
28 #define RX_BUFFER_ALIGNMENT     128
29 #else /* RXDMA_OPTIMIZATION */
30 #define RX_BUFFER_ALIGNMENT     4
31 #endif /* RXDMA_OPTIMIZATION */
32 
33 #ifdef QCA_HOST2FW_RXBUF_RING
34 #define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW1_BM
35 #else
36 #define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW3_BM
37 #endif
38 #define RX_BUFFER_SIZE			2048
39 #define RX_BUFFER_RESERVATION   0
40 
41 #define DP_PEER_METADATA_PEER_ID_MASK	0x0000ffff
42 #define DP_PEER_METADATA_PEER_ID_SHIFT	0
43 #define DP_PEER_METADATA_VDEV_ID_MASK	0x00070000
44 #define DP_PEER_METADATA_VDEV_ID_SHIFT	16
45 
46 #define DP_PEER_METADATA_PEER_ID_GET(_peer_metadata)		\
47 	(((_peer_metadata) & DP_PEER_METADATA_PEER_ID_MASK)	\
48 			>> DP_PEER_METADATA_PEER_ID_SHIFT)
49 
50 #define DP_PEER_METADATA_ID_GET(_peer_metadata)			\
51 	(((_peer_metadata) & DP_PEER_METADATA_VDEV_ID_MASK)	\
52 			>> DP_PEER_METADATA_VDEV_ID_SHIFT)
53 
54 #define DP_RX_DESC_MAGIC 0xdec0de
55 
/**
 * struct dp_rx_desc - host SW descriptor tracking one Rx buffer posted to HW
 *
 * @nbuf		: VA of the "skb" posted
 * @rx_buf_start	: VA of the original Rx buffer, before
 *			  movement of any skb->data pointer
 * @cookie		: index into the sw array which holds
 *			  the sw Rx descriptors
 *			  Cookie space is 21 bits:
 *			  lower 18 bits -- index
 *			  upper  3 bits -- pool_id
 * @pool_id		: pool Id for which this allocated.
 *			  Can only be used if there is no flow
 *			  steering
 * @magic		: debug pattern (DP_RX_DESC_MAGIC) used to detect
 *			  descriptor corruption; only present when
 *			  RX_DESC_DEBUG_CHECK is defined
 * @in_use		: set while this rx_desc is in use (buffer posted)
 * @unmapped		: set to mark the rx_desc as unmapped when the
 *			  corresponding nbuf is already unmapped
 */
struct dp_rx_desc {
	qdf_nbuf_t nbuf;
	uint8_t *rx_buf_start;
	uint32_t cookie;
	uint8_t	 pool_id;
#ifdef RX_DESC_DEBUG_CHECK
	uint32_t magic;
#endif
	uint8_t	in_use:1,
	unmapped:1;
};
85 
86 #define RX_DESC_COOKIE_INDEX_SHIFT		0
87 #define RX_DESC_COOKIE_INDEX_MASK		0x3ffff /* 18 bits */
88 #define RX_DESC_COOKIE_POOL_ID_SHIFT		18
89 #define RX_DESC_COOKIE_POOL_ID_MASK		0x1c0000
90 
91 #define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie)		\
92 	(((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >>	\
93 			RX_DESC_COOKIE_POOL_ID_SHIFT)
94 
95 #define DP_RX_DESC_COOKIE_INDEX_GET(_cookie)		\
96 	(((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >>	\
97 			RX_DESC_COOKIE_INDEX_SHIFT)
98 
99 /*
100  *dp_rx_xor_block() - xor block of data
101  *@b: destination data block
102  *@a: source data block
103  *@len: length of the data to process
104  *
105  *Returns: None
106  */
107 static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len)
108 {
109 	qdf_size_t i;
110 
111 	for (i = 0; i < len; i++)
112 		b[i] ^= a[i];
113 }
114 
/*
 *dp_rx_rotl() - rotate the bits left
 *@val: unsigned integer input value
 *@bits: number of bit positions to rotate; reduced modulo 32
 *
 * The shift counts are masked to 0..31: the original expression computed
 * val >> (32 - bits), which shifts by the full type width when bits == 0
 * and is undefined behavior in C.
 *
 *Returns: Integer with left rotated by number of 'bits'
 */
static inline uint32_t dp_rx_rotl(uint32_t val, int bits)
{
	bits &= 31;
	return (val << bits) | (val >> ((32 - bits) & 31));
}
126 
/*
 *dp_rx_rotr() - rotate the bits right
 *@val: unsigned integer input value
 *@bits: number of bit positions to rotate; reduced modulo 32
 *
 * The shift counts are masked to 0..31: the original expression computed
 * val << (32 - bits), which shifts by the full type width when bits == 0
 * and is undefined behavior in C.
 *
 *Returns: Integer with right rotated by number of 'bits'
 */
static inline uint32_t dp_rx_rotr(uint32_t val, int bits)
{
	bits &= 31;
	return (val >> bits) | (val << ((32 - bits) & 31));
}
138 
/*
 *dp_rx_xswap() - swap adjacent bytes within each 16-bit half
 *@val: unsigned integer input value
 *
 *Returns: Integer with bytes 0/1 and 2/3 exchanged
 */
static inline uint32_t dp_rx_xswap(uint32_t val)
{
	uint32_t even_bytes = val & 0x00ff00ffU;
	uint32_t odd_bytes = val & 0xff00ff00U;

	return (even_bytes << 8) | (odd_bytes >> 8);
}
149 
/*
 *dp_rx_get_le32_split() - assemble a little-endian 32-bit value from bytes
 *@b0: byte 0 (least significant)
 *@b1: byte 1
 *@b2: byte 2
 *@b3: byte 3 (most significant)
 *
 * Each byte is cast to uint32_t before shifting: a uint8_t promotes to
 * signed int, and shifting a value >= 0x80 left by 24 would overflow
 * into the sign bit, which is undefined behavior.
 *
 *Returns: Integer with split little endian 32 bits
 */
static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2,
					uint8_t b3)
{
	return (uint32_t)b0 | ((uint32_t)b1 << 8) |
		((uint32_t)b2 << 16) | ((uint32_t)b3 << 24);
}
164 
/*
 *dp_rx_get_le32() - load a little-endian 32-bit value from a byte array
 *@p: source byte array; at least 4 bytes are read
 *
 * (The previous comment documented parameters b0..b3, which belong to
 * dp_rx_get_le32_split(), not to this function.)
 *
 *Returns: 32-bit value assembled from p[0] (LSB) through p[3] (MSB)
 */
static inline uint32_t dp_rx_get_le32(const uint8_t *p)
{
	return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]);
}
178 
/*
 * dp_rx_put_le32() - store a 32-bit value as little-endian bytes
 * @p: destination char array; 4 bytes are written
 * @v: source 32-bit integer
 *
 * Returns: None
 */
static inline void dp_rx_put_le32(uint8_t *p, uint32_t v)
{
	int i;

	for (i = 0; i < 4; i++)
		p[i] = (v >> (8 * i)) & 0xff;
}
193 
/* One round of the Michael MIC block function (TKIP message integrity).
 * Implemented as a macro, not an inline function, because it updates
 * both halves l and r in place; note the arguments are evaluated
 * multiple times, so pass plain variables only.
 */
#define dp_rx_michael_block(l, r)	\
	do {					\
		r ^= dp_rx_rotl(l, 17);	\
		l += r;				\
		r ^= dp_rx_xswap(l);		\
		l += r;				\
		r ^= dp_rx_rotl(l, 3);	\
		l += r;				\
		r ^= dp_rx_rotr(l, 2);	\
		l += r;				\
	} while (0)
206 
/**
 * union dp_rx_desc_list_elem_t - element of the SW Rx descriptor free list
 *
 * @next		: next pointer, valid only while the element sits on
 *			  the free list (overlays the descriptor storage)
 * @rx_desc		: DP Rx descriptor, valid while the element is in use
 */
union dp_rx_desc_list_elem_t {
	union dp_rx_desc_list_elem_t *next;
	struct dp_rx_desc rx_desc;
};
217 
218 /**
219  * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
220  *			 the Rx descriptor on Rx DMA source ring buffer
221  * @soc: core txrx main context
222  * @cookie: cookie used to lookup virtual address
223  *
224  * Return: void *: Virtual Address of the Rx descriptor
225  */
226 static inline
227 void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie)
228 {
229 	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
230 	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
231 	/* TODO */
232 	/* Add sanity for pool_id & index */
233 	return &(soc->rx_desc_buf[pool_id].array[index].rx_desc);
234 }
235 
236 /**
237  * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
238  *			 the Rx descriptor on monitor ring buffer
239  * @soc: core txrx main context
240  * @cookie: cookie used to lookup virtual address
241  *
242  * Return: void *: Virtual Address of the Rx descriptor
243  */
244 static inline
245 void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie)
246 {
247 	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
248 	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
249 	/* TODO */
250 	/* Add sanity for pool_id & index */
251 	return &(soc->rx_desc_mon[pool_id].array[index].rx_desc);
252 }
253 
254 /**
255  * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
256  *			 the Rx descriptor on monitor status ring buffer
257  * @soc: core txrx main context
258  * @cookie: cookie used to lookup virtual address
259  *
260  * Return: void *: Virtual Address of the Rx descriptor
261  */
262 static inline
263 void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
264 {
265 	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
266 	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
267 	/* TODO */
268 	/* Add sanity for pool_id & index */
269 	return &(soc->rx_desc_status[pool_id].array[index].rx_desc);
270 }
271 
272 void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
273 				union dp_rx_desc_list_elem_t **local_desc_list,
274 				union dp_rx_desc_list_elem_t **tail,
275 				uint16_t pool_id,
276 				struct rx_desc_pool *rx_desc_pool);
277 
278 uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
279 				struct rx_desc_pool *rx_desc_pool,
280 				uint16_t num_descs,
281 				union dp_rx_desc_list_elem_t **desc_list,
282 				union dp_rx_desc_list_elem_t **tail);
283 
284 
285 QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);
286 
287 void dp_rx_pdev_detach(struct dp_pdev *pdev);
288 
289 
290 uint32_t
291 dp_rx_process(struct dp_intr *int_ctx, void *hal_ring, uint32_t quota);
292 
293 uint32_t dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota);
294 
295 uint32_t
296 dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota);
297 
298 void
299 dp_rx_sg_create(qdf_nbuf_t nbuf,
300 		uint8_t *rx_tlv_hdr,
301 		uint16_t *mpdu_len,
302 		bool *is_first_frag,
303 		uint16_t *frag_list_len,
304 		qdf_nbuf_t *head_frag_nbuf,
305 		qdf_nbuf_t *frag_list_head,
306 		qdf_nbuf_t *frag_list_tail);
307 
308 QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
309 				uint32_t pool_id,
310 				uint32_t pool_size,
311 				struct rx_desc_pool *rx_desc_pool);
312 
313 void dp_rx_desc_pool_free(struct dp_soc *soc,
314 				uint32_t pool_id,
315 				struct rx_desc_pool *rx_desc_pool);
316 
317 void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
318 				struct dp_peer *peer);
319 
320 /**
321  * dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list
322  *
323  * @head: pointer to the head of local free list
324  * @tail: pointer to the tail of local free list
325  * @new: new descriptor that is added to the free list
326  *
327  * Return: void:
328  */
329 static inline
330 void dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head,
331 				 union dp_rx_desc_list_elem_t **tail,
332 				 struct dp_rx_desc *new)
333 {
334 	qdf_assert(head && new);
335 
336 	new->nbuf = NULL;
337 	new->in_use = 0;
338 	new->unmapped = 0;
339 
340 	((union dp_rx_desc_list_elem_t *)new)->next = *head;
341 	*head = (union dp_rx_desc_list_elem_t *)new;
342 	if (*tail == NULL)
343 		*tail = *head;
344 
345 }
346 
/**
 * dp_rx_wds_srcport_learn() - Add or update the STA PEER which
 *				is behind the WDS repeater.
 *
 * @soc: core txrx main context
 * @rx_tlv_hdr: base address of RX TLV header
 * @ta_peer: WDS repeater peer
 * @nbuf: rx pkt
 *
 * Return: void:
 */
#ifdef FEATURE_WDS
static inline void
dp_rx_wds_srcport_learn(struct dp_soc *soc,
			 uint8_t *rx_tlv_hdr,
			 struct dp_peer *ta_peer,
			 qdf_nbuf_t nbuf)
{
	uint16_t sa_sw_peer_id = hal_rx_msdu_end_sa_sw_peer_id_get(rx_tlv_hdr);
	uint32_t flags = IEEE80211_NODE_F_WDS_HM;
	/* NOTE(review): ret captures dp_peer_add_ast's result but is never
	 * checked -- confirm whether failures should be handled/logged
	 */
	uint32_t ret = 0;
	uint8_t wds_src_mac[IEEE80211_ADDR_LEN];
	struct dp_ast_entry *ast;
	uint16_t sa_idx;

	/* Do wds source port learning only if it is a 4-address mpdu */
	if (!(qdf_nbuf_is_rx_chfrag_start(nbuf) &&
		hal_rx_get_mpdu_mac_ad4_valid(rx_tlv_hdr)))
		return;

	/* Copy the address found IEEE80211_ADDR_LEN bytes into the frame;
	 * presumably the source MAC that follows the destination MAC --
	 * TODO confirm against the frame format used by callers
	 */
	memcpy(wds_src_mac, (qdf_nbuf_data(nbuf) + IEEE80211_ADDR_LEN),
		IEEE80211_ADDR_LEN);

	/* SA unknown to HW: create a new WDS AST entry and stop here */
	if (qdf_unlikely(!hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr))) {
		ret = dp_peer_add_ast(soc,
					ta_peer,
					wds_src_mac,
					CDP_TXRX_AST_TYPE_WDS,
					flags);
		return;

	}

	/*
	 * Get the AST entry from HW SA index and mark it as active
	 */
	sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr);

	/* AST table lookups and updates must happen under ast_lock */
	qdf_spin_lock_bh(&soc->ast_lock);
	ast = soc->ast_table[sa_idx];

	if (!ast) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return;
	}

	/*
	 * Ensure we are updating the right AST entry by
	 * validating ast_idx.
	 * There is a possibility we might arrive here without
	 * AST MAP event , so this check is mandatory
	 */
	if (ast->ast_idx == sa_idx)
		ast->is_active = TRUE;

	/* Handle client roaming */
	if (sa_sw_peer_id != ta_peer->peer_ids[0])
		dp_peer_update_ast(soc, ta_peer, ast, flags);

	qdf_spin_unlock_bh(&soc->ast_lock);

	return;
}
#else
/* Stub when WDS source-port learning is compiled out: no-op */
static inline void
dp_rx_wds_srcport_learn(struct dp_soc *soc,
		uint8_t *rx_tlv_hdr,
		struct dp_peer *ta_peer,
		qdf_nbuf_t nbuf)
{
}
#endif
429 
430 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf);
431 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
432 		qdf_nbuf_t mpdu, bool mpdu_done);
433 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr);
434 
435 #define DP_RX_LIST_APPEND(head, tail, elem) \
436 	do {                                                \
437 		if (!(head)) {                              \
438 			(head) = (elem);                    \
439 		} else {                                    \
440 			qdf_nbuf_set_next((tail), (elem));  \
441 		}                                           \
442 		(tail) = (elem);                            \
443 		qdf_nbuf_set_next((tail), NULL);            \
444 	} while (0)
445 
446 #ifndef BUILD_X86
447 static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
448 		qdf_dma_addr_t *paddr, struct dp_pdev *pdev)
449 {
450 	return QDF_STATUS_SUCCESS;
451 }
452 #else
453 #define MAX_RETRY 100
454 static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
455 		qdf_dma_addr_t *paddr, struct dp_pdev *pdev)
456 {
457 	uint32_t nbuf_retry = 0;
458 	int32_t ret;
459 	const uint32_t x86_phy_addr = 0x50000000;
460 	/*
461 	 * in M2M emulation platforms (x86) the memory below 0x50000000
462 	 * is reserved for target use, so any memory allocated in this
463 	 * region should not be used by host
464 	 */
465 	do {
466 		if (qdf_likely(*paddr > x86_phy_addr))
467 			return QDF_STATUS_SUCCESS;
468 		else {
469 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
470 					"phy addr %pK exceded 0x50000000 trying again\n",
471 					paddr);
472 
473 			nbuf_retry++;
474 			if ((*rx_netbuf)) {
475 				qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
476 						QDF_DMA_BIDIRECTIONAL);
477 				/* Not freeing buffer intentionally.
478 				 * Observed that same buffer is getting
479 				 * re-allocated resulting in longer load time
480 				 * WMI init timeout.
481 				 * This buffer is anyway not useful so skip it.
482 				 **/
483 			}
484 
485 			*rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
486 							RX_BUFFER_SIZE,
487 							RX_BUFFER_RESERVATION,
488 							RX_BUFFER_ALIGNMENT,
489 							FALSE);
490 
491 			if (qdf_unlikely(!(*rx_netbuf)))
492 				return QDF_STATUS_E_FAILURE;
493 
494 			ret = qdf_nbuf_map_single(dp_soc->osdev, *rx_netbuf,
495 							QDF_DMA_BIDIRECTIONAL);
496 
497 			if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
498 				qdf_nbuf_free(*rx_netbuf);
499 				*rx_netbuf = NULL;
500 				continue;
501 			}
502 
503 			*paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0);
504 		}
505 	} while (nbuf_retry < MAX_RETRY);
506 
507 	if ((*rx_netbuf)) {
508 		qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
509 					QDF_DMA_BIDIRECTIONAL);
510 		qdf_nbuf_free(*rx_netbuf);
511 	}
512 
513 	return QDF_STATUS_E_FAILURE;
514 }
515 #endif
516 
517 /**
518  * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
519  *				   the MSDU Link Descriptor
520  * @soc: core txrx main context
521  * @buf_info: buf_info include cookie that used to lookup virtual address of
522  * link descriptor Normally this is just an index into a per SOC array.
523  *
524  * This is the VA of the link descriptor, that HAL layer later uses to
525  * retrieve the list of MSDU's for a given MPDU.
526  *
527  * Return: void *: Virtual Address of the Rx descriptor
528  */
529 static inline
530 void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
531 				  struct hal_buf_info *buf_info)
532 {
533 	void *link_desc_va;
534 	uint32_t bank_id = LINK_DESC_COOKIE_BANK_ID(buf_info->sw_cookie);
535 
536 
537 	/* TODO */
538 	/* Add sanity for  cookie */
539 
540 	link_desc_va = soc->link_desc_banks[bank_id].base_vaddr +
541 		(buf_info->paddr -
542 			soc->link_desc_banks[bank_id].base_paddr);
543 
544 	return link_desc_va;
545 }
546 
547 /**
548  * dp_rx_cookie_2_mon_link_desc_va() - Converts cookie to a virtual address of
549  *				   the MSDU Link Descriptor
550  * @pdev: core txrx pdev context
551  * @buf_info: buf_info includes cookie that used to lookup virtual address of
552  * link descriptor. Normally this is just an index into a per pdev array.
553  *
554  * This is the VA of the link descriptor in monitor mode destination ring,
555  * that HAL layer later uses to retrieve the list of MSDU's for a given MPDU.
556  *
557  * Return: void *: Virtual Address of the Rx descriptor
558  */
559 static inline
560 void *dp_rx_cookie_2_mon_link_desc_va(struct dp_pdev *pdev,
561 				  struct hal_buf_info *buf_info,
562 				  int mac_id)
563 {
564 	void *link_desc_va;
565 	int mac_for_pdev = dp_get_mac_id_for_mac(pdev->soc, mac_id);
566 
567 	/* TODO */
568 	/* Add sanity for  cookie */
569 
570 	link_desc_va =
571 	   pdev->link_desc_banks[mac_for_pdev][buf_info->sw_cookie].base_vaddr +
572 	   (buf_info->paddr -
573 	   pdev->link_desc_banks[mac_for_pdev][buf_info->sw_cookie].base_paddr);
574 
575 	return link_desc_va;
576 }
577 
578 /**
579  * dp_rx_defrag_concat() - Concatenate the fragments
580  *
581  * @dst: destination pointer to the buffer
582  * @src: source pointer from where the fragment payload is to be copied
583  *
584  * Return: QDF_STATUS
585  */
586 static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src)
587 {
588 	/*
589 	 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst
590 	 * to provide space for src, the headroom portion is copied from
591 	 * the original dst buffer to the larger new dst buffer.
592 	 * (This is needed, because the headroom of the dst buffer
593 	 * contains the rx desc.)
594 	 */
595 	if (qdf_nbuf_cat(dst, src))
596 		return QDF_STATUS_E_DEFRAG_ERROR;
597 
598 	return QDF_STATUS_SUCCESS;
599 }
600 
/*
 * dp_rx_ast_set_active() - set the active flag of the astentry
 *				    corresponding to a hw index.
 * @soc: core txrx main context
 * @sa_idx: hw idx
 * @is_active: active flag
 *
 * Return: QDF_STATUS_SUCCESS if a matching AST entry was found and
 * updated, QDF_STATUS_E_FAILURE otherwise
 */
#ifdef FEATURE_WDS
static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc, uint16_t sa_idx, bool is_active)
{
	struct dp_ast_entry *ast;
	/* AST table access must be serialized via ast_lock */
	qdf_spin_lock_bh(&soc->ast_lock);
	ast = soc->ast_table[sa_idx];

	/*
	 * Ensure we are updating the right AST entry by
	 * validating ast_idx.
	 * There is a possibility we might arrive here without
	 * AST MAP event , so this check is mandatory
	 */
	if (ast && (ast->ast_idx == sa_idx)) {
		ast->is_active = is_active;
		qdf_spin_unlock_bh(&soc->ast_lock);
		return QDF_STATUS_SUCCESS;
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
	return QDF_STATUS_E_FAILURE;
}
#else
/* Stub when WDS is compiled out: report success without touching state */
static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc, uint16_t sa_idx, bool is_active)
{
	return QDF_STATUS_SUCCESS;
}
#endif
637 
/*
 * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet.
 *					In qwrap mode, packets originated from
 *					any vdev should not loopback and
 *					should be dropped.
 * @vdev: vdev on which rx packet is received
 * @nbuf: rx pkt
 *
 * Return: true if the packet should be dropped as a loopback, else false
 */
#if ATH_SUPPORT_WRAP
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						qdf_nbuf_t nbuf)
{
	struct dp_vdev *psta_vdev;
	struct dp_pdev *pdev = vdev->pdev;
	uint8_t *data = qdf_nbuf_data(nbuf);

	/* Only proxy-STA vdevs can generate loopback traffic */
	if (qdf_unlikely(vdev->proxysta_vdev)) {
		/* In qwrap isolation mode, allow loopback packets as all
		 * packets go to RootAP and Loopback on the mpsta.
		 */
		if (vdev->isolation_vdev)
			return false;
		/* Compare the frame's source address (the MAC address that
		 * follows the destination MAC) against every proxy-STA vdev
		 * MAC on this pdev
		 */
		TAILQ_FOREACH(psta_vdev, &pdev->vdev_list, vdev_list_elem) {
			if (qdf_unlikely(psta_vdev->proxysta_vdev &&
				!qdf_mem_cmp(psta_vdev->mac_addr.raw,
				&data[DP_MAC_ADDR_LEN], DP_MAC_ADDR_LEN))) {
				/* Drop packet if source address is equal to
				 * any of the vdev addresses.
				 */
				return true;
			}
		}
	}
	return false;
}
#else
/* Stub when qwrap support is compiled out: never a loopback */
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						qdf_nbuf_t nbuf)
{
	return false;
}
#endif
681 
682 /*
683  * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
684  *			       called during dp rx initialization
685  *			       and at the end of dp_rx_process.
686  *
687  * @soc: core txrx main context
688  * @mac_id: mac_id which is one of 3 mac_ids
689  * @dp_rxdma_srng: dp rxdma circular ring
690  * @rx_desc_pool: Poiter to free Rx descriptor pool
691  * @num_req_buffers: number of buffer to be replenished
692  * @desc_list: list of descs if called from dp_rx_process
693  *	       or NULL during dp rx initialization or out of buffer
694  *	       interrupt.
695  * @tail: tail of descs list
696  * Return: return success or failure
697  */
698 QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
699 				 struct dp_srng *dp_rxdma_srng,
700 				 struct rx_desc_pool *rx_desc_pool,
701 				 uint32_t num_req_buffers,
702 				 union dp_rx_desc_list_elem_t **desc_list,
703 				 union dp_rx_desc_list_elem_t **tail);
704 
705 /**
706  * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
707  *			      (WBM), following error handling
708  *
709  * @soc: core DP main context
710  * @buf_addr_info: opaque pointer to the REO error ring descriptor
711  * @buf_addr_info: void pointer to the buffer_addr_info
712  * @bm_action: put to idle_list or release to msdu_list
713  * Return: QDF_STATUS
714  */
715 QDF_STATUS
716 dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action);
717 
718 QDF_STATUS
719 dp_rx_link_desc_buf_return(struct dp_soc *soc, struct dp_srng *dp_rxdma_srng,
720 				void *buf_addr_info, uint8_t bm_action);
721 /**
722  * dp_rx_link_desc_return_by_addr - Return a MPDU link descriptor to
723  *					(WBM) by address
724  *
725  * @soc: core DP main context
726  * @link_desc_addr: link descriptor addr
727  *
728  * Return: QDF_STATUS
729  */
730 QDF_STATUS
731 dp_rx_link_desc_return_by_addr(struct dp_soc *soc, void *link_desc_addr,
732 					uint8_t bm_action);
733 
734 uint32_t
735 dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id,
736 						uint32_t quota);
737 
738 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
739 				uint8_t *rx_tlv_hdr, struct dp_peer *peer);
740 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
741 					uint8_t *rx_tlv_hdr);
742 
743 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
744 				struct dp_peer *peer, int rx_mcast);
745 
746 qdf_nbuf_t
747 dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev);
748 
749 #endif /* _DP_RX_H */
750