xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.h (revision 27d564647e9b50e713c60b0d7e5ea2a9b0a3ae74)
/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_RX_H
#define _DP_RX_H

#include "hal_rx.h"
#include "dp_tx.h"
#include "dp_peer.h"
#include "dp_internal.h"

#ifdef RXDMA_OPTIMIZATION
#define RX_BUFFER_ALIGNMENT     128
#else /* RXDMA_OPTIMIZATION */
#define RX_BUFFER_ALIGNMENT     4
#endif /* RXDMA_OPTIMIZATION */

#ifdef QCA_HOST2FW_RXBUF_RING
#define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW1_BM
#else
#define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW3_BM
#endif
#define RX_BUFFER_SIZE		2048
#define RX_BUFFER_RESERVATION	0

#define DP_PEER_METADATA_PEER_ID_MASK	0x0000ffff
#define DP_PEER_METADATA_PEER_ID_SHIFT	0
#define DP_PEER_METADATA_VDEV_ID_MASK	0x00070000
#define DP_PEER_METADATA_VDEV_ID_SHIFT	16

#define DP_PEER_METADATA_PEER_ID_GET(_peer_metadata)		\
	(((_peer_metadata) & DP_PEER_METADATA_PEER_ID_MASK)	\
			>> DP_PEER_METADATA_PEER_ID_SHIFT)

#define DP_PEER_METADATA_ID_GET(_peer_metadata)			\
	(((_peer_metadata) & DP_PEER_METADATA_VDEV_ID_MASK)	\
			>> DP_PEER_METADATA_VDEV_ID_SHIFT)

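/*
 * Example (illustrative sketch, not part of the driver API): decoding the
 * 32-bit peer_metadata word that the target places in the REO destination
 * ring descriptor. Only the two GET macros above are real; the HAL
 * accessor and local names below are hypothetical.
 *
 *	uint32_t peer_mdata = hal_rx_mpdu_peer_meta_data_get(rx_tlv_hdr);
 *	uint16_t peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_mdata);
 *	uint8_t vdev_id = DP_PEER_METADATA_ID_GET(peer_mdata);
 *	struct dp_peer *peer = dp_peer_find_by_id(soc, peer_id);
 */
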
#define DP_RX_DESC_MAGIC 0xdec0de

/**
 * struct dp_rx_desc
 *
 * @nbuf		: VA of the "skb" posted
 * @rx_buf_start	: VA of the original Rx buffer, before
 *			  movement of any skb->data pointer
 * @cookie		: index into the sw array which holds
 *			  the sw Rx descriptors
 *			  Cookie space is 21 bits:
 *			  lower 18 bits -- index
 *			  upper  3 bits -- pool_id
 * @pool_id		: pool ID from which this descriptor was allocated.
 *			  Can only be used if there is no flow
 *			  steering
 * @in_use		: rx_desc is in use
 * @unmapped		: marks the rx_desc as unmapped once the
 *			  corresponding nbuf has been unmapped
 */
struct dp_rx_desc {
	qdf_nbuf_t nbuf;
	uint8_t *rx_buf_start;
	uint32_t cookie;
	uint8_t	 pool_id;
#ifdef RX_DESC_DEBUG_CHECK
	uint32_t magic;
#endif
	uint8_t	in_use:1,
		unmapped:1;
};

#define RX_DESC_COOKIE_INDEX_SHIFT		0
#define RX_DESC_COOKIE_INDEX_MASK		0x3ffff /* 18 bits */
#define RX_DESC_COOKIE_POOL_ID_SHIFT		18
#define RX_DESC_COOKIE_POOL_ID_MASK		0x1c0000

#define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >>	\
			RX_DESC_COOKIE_POOL_ID_SHIFT)

#define DP_RX_DESC_COOKIE_INDEX_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >>	\
			RX_DESC_COOKIE_INDEX_SHIFT)

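/*
 * Example (illustrative sketch): a cookie is composed once at pool attach
 * time and decoded on every ring reap. The encode expression below mirrors
 * what the masks above imply; pool_id and index are hypothetical locals.
 *
 *	uint32_t cookie = (pool_id << RX_DESC_COOKIE_POOL_ID_SHIFT) |
 *			  (index << RX_DESC_COOKIE_INDEX_SHIFT);
 *
 * DP_RX_DESC_COOKIE_POOL_ID_GET(cookie) then recovers pool_id, and
 * DP_RX_DESC_COOKIE_INDEX_GET(cookie) recovers index.
 */
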
/*
 * dp_rx_xor_block() - XOR a block of data
 * @b: destination data block
 * @a: source data block
 * @len: length of the data to process
 *
 * Returns: None
 */
static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len)
{
	qdf_size_t i;

	for (i = 0; i < len; i++)
		b[i] ^= a[i];
}

/*
 * dp_rx_rotl() - rotate the bits left
 * @val: unsigned integer input value
 * @bits: number of bits (1..31)
 *
 * Returns: Input value rotated left by 'bits'
 */
static inline uint32_t dp_rx_rotl(uint32_t val, int bits)
{
	return (val << bits) | (val >> (32 - bits));
}

/*
 * dp_rx_rotr() - rotate the bits right
 * @val: unsigned integer input value
 * @bits: number of bits (1..31)
 *
 * Returns: Input value rotated right by 'bits'
 */
static inline uint32_t dp_rx_rotr(uint32_t val, int bits)
{
	return (val >> bits) | (val << (32 - bits));
}

/*
 * dp_set_rx_queue() - set queue_mapping in skb
 * @nbuf: skb
 * @queue_id: rx queue_id
 *
 * Return: void
 */
#ifdef QCA_OL_RX_MULTIQ_SUPPORT
static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
{
	qdf_nbuf_record_rx_queue(nbuf, queue_id);
}
#else
static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
{
}
#endif

/*
 * dp_rx_xswap() - swap the two bytes within each 16-bit half of the word
 * @val: unsigned integer input value
 *
 * Returns: Integer with the bytes of each halfword swapped
 */
static inline uint32_t dp_rx_xswap(uint32_t val)
{
	return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
}

/*
 * dp_rx_get_le32_split() - assemble 32 bits from four little endian bytes
 * @b0: byte 0
 * @b1: byte 1
 * @b2: byte 2
 * @b3: byte 3
 *
 * Returns: Integer assembled from the four bytes, b0 least significant
 */
static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2,
					uint8_t b3)
{
	return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
}

/*
 * dp_rx_get_le32() - read little endian 32 bits from a byte array
 * @p: source byte array
 *
 * Returns: Integer read from the first four bytes of 'p'
 */
static inline uint32_t dp_rx_get_le32(const uint8_t *p)
{
	return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]);
}

/*
 * dp_rx_put_le32() - put little endian 32 bits
 * @p: destination char array
 * @v: source 32-bit integer
 *
 * Returns: None
 */
static inline void dp_rx_put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

/* Apply one block (the "b" function) of the Michael MIC computation */
#define dp_rx_michael_block(l, r)	\
	do {				\
		r ^= dp_rx_rotl(l, 17);	\
		l += r;			\
		r ^= dp_rx_xswap(l);	\
		l += r;			\
		r ^= dp_rx_rotl(l, 3);	\
		l += r;			\
		r ^= dp_rx_rotr(l, 2);	\
		l += r;			\
	} while (0)

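/*
 * Example (illustrative sketch): the helpers above are the building blocks
 * of the Michael message integrity check (TKIP). A MIC computation seeds
 * (l, r) from the 64-bit key and absorbs the message one little endian
 * 32-bit word at a time; the padding and the DA/SA/priority header block
 * that the real defrag code mixes in first are omitted here, and the
 * key/data/mic locals are hypothetical.
 *
 *	uint32_t l = dp_rx_get_le32(key);
 *	uint32_t r = dp_rx_get_le32(key + 4);
 *
 *	while (data_len >= 4) {
 *		l ^= dp_rx_get_le32(data);
 *		dp_rx_michael_block(l, r);
 *		data += 4;
 *		data_len -= 4;
 *	}
 *	dp_rx_put_le32(mic, l);
 *	dp_rx_put_le32(mic + 4, r);
 */
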
/**
 * union dp_rx_desc_list_elem_t
 *
 * @next		: Next pointer to form free list
 * @rx_desc		: DP Rx descriptor
 */
union dp_rx_desc_list_elem_t {
	union dp_rx_desc_list_elem_t *next;
	struct dp_rx_desc rx_desc;
};

/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	struct rx_desc_pool *rx_desc_pool;

	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
		return NULL;

	rx_desc_pool = &soc->rx_desc_buf[pool_id];

	if (qdf_unlikely(index >= rx_desc_pool->pool_size))
		return NULL;

	return &rx_desc_pool->array[index].rx_desc;
}

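/*
 * Example (illustrative sketch): typical use while reaping the REO
 * destination ring, as in dp_rx_process(); error handling is elided and
 * ring_desc is a hypothetical local.
 *
 *	rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
 *	rx_desc = (struct dp_rx_desc *)
 *			dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
 *	if (qdf_unlikely(!rx_desc))
 *		continue;
 *	nbuf = rx_desc->nbuf;
 */
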
/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_mon[pool_id].array[index].rx_desc);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor status ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_status[pool_id].array[index].rx_desc);
}

void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				union dp_rx_desc_list_elem_t **local_desc_list,
				union dp_rx_desc_list_elem_t **tail,
				uint16_t pool_id,
				struct rx_desc_pool *rx_desc_pool);

uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				struct rx_desc_pool *rx_desc_pool,
				uint16_t num_descs,
				union dp_rx_desc_list_elem_t **desc_list,
				union dp_rx_desc_list_elem_t **tail);

QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);

void dp_rx_pdev_detach(struct dp_pdev *pdev);

uint32_t
dp_rx_process(struct dp_intr *int_ctx, void *hal_ring, uint8_t reo_ring_num,
	      uint32_t quota);

uint32_t dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota);

uint32_t
dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota);

/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *		     multiple nbufs.
 * @nbuf: pointer to the first nbuf of the scattered MSDU.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 * This function implements the creation of an RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: the head nbuf carrying the complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr);

QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				uint32_t pool_id,
				uint32_t pool_size,
				struct rx_desc_pool *rx_desc_pool);

void dp_rx_desc_pool_free(struct dp_soc *soc,
				uint32_t pool_id,
				struct rx_desc_pool *rx_desc_pool);

void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
				struct dp_peer *peer);

/**
 * dp_rx_add_to_free_desc_list() - Adds a descriptor to a local free list
 *
 * @head: pointer to the head of the local free list
 * @tail: pointer to the tail of the local free list
 * @new: new descriptor that is added to the free list
 *
 * Return: void
 */
static inline
void dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head,
				 union dp_rx_desc_list_elem_t **tail,
				 struct dp_rx_desc *new)
{
	qdf_assert(head && new);

	new->nbuf = NULL;
	new->in_use = 0;
	new->unmapped = 0;

	((union dp_rx_desc_list_elem_t *)new)->next = *head;
	*head = (union dp_rx_desc_list_elem_t *)new;
	if (*tail == NULL)
		*tail = *head;
}

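/*
 * Example (illustrative sketch): callers batch freed descriptors on a local
 * (head, tail) pair and hand the whole chain back in one call, instead of
 * touching the pool per descriptor:
 *
 *	union dp_rx_desc_list_elem_t *head = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *
 *	dp_rx_add_to_free_desc_list(&head, &tail, rx_desc);
 *	...
 *	dp_rx_add_desc_list_to_free_list(soc, &head, &tail, pool_id,
 *					 rx_desc_pool);
 *
 * dp_rx_process() can instead pass the same (head, tail) pair to
 * dp_rx_buffers_replenish() so the descriptors are recycled directly.
 */
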
/**
 * dp_rx_wds_srcport_learn() - Add or update the STA PEER which
 *				is behind the WDS repeater.
 *
 * @soc: core txrx main context
 * @rx_tlv_hdr: base address of RX TLV header
 * @ta_peer: WDS repeater peer
 * @nbuf: rx pkt
 *
 * Return: void
 */
#ifdef FEATURE_WDS
static inline void
dp_rx_wds_srcport_learn(struct dp_soc *soc,
			 uint8_t *rx_tlv_hdr,
			 struct dp_peer *ta_peer,
			 qdf_nbuf_t nbuf)
{
	uint16_t sa_sw_peer_id = hal_rx_msdu_end_sa_sw_peer_id_get(rx_tlv_hdr);
	uint32_t flags = IEEE80211_NODE_F_WDS_HM;
	uint32_t ret = 0;
	uint8_t wds_src_mac[IEEE80211_ADDR_LEN];
	struct dp_peer *sa_peer;
	struct dp_ast_entry *ast;
	uint16_t sa_idx;

	if (qdf_unlikely(!ta_peer))
		return;

	/* For AP mode: Do WDS source port learning only if it is a
	 * 4-address mpdu
	 *
	 * For STA mode: Frames from the RootAP backend will be in 3-address
	 * mode until the RootAP does the WDS source port learning; hence in
	 * repeater/STA mode, enable learning even in 3-address mode, to avoid
	 * the RootAP backbone getting wrongly learnt as MEC on the repeater
	 */
	if (ta_peer->vdev->opmode != wlan_op_mode_sta) {
		if (!(qdf_nbuf_is_rx_chfrag_start(nbuf) &&
		      hal_rx_get_mpdu_mac_ad4_valid(rx_tlv_hdr)))
			return;
	} else {
		/* For HKv2, source port learning is not needed in STA mode
		 * as the HW supports it
		 */
		if (soc->ast_override_support)
			return;
	}

	memcpy(wds_src_mac, (qdf_nbuf_data(nbuf) + IEEE80211_ADDR_LEN),
		IEEE80211_ADDR_LEN);

	if (qdf_unlikely(!hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr))) {
		ret = dp_peer_add_ast(soc,
					ta_peer,
					wds_src_mac,
					CDP_TXRX_AST_TYPE_WDS,
					flags);
		return;
	}

	/*
	 * Get the AST entry from HW SA index and mark it as active
	 */
	sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr);

	qdf_spin_lock_bh(&soc->ast_lock);
	ast = soc->ast_table[sa_idx];

	if (!ast) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return;
	}

	qdf_spin_unlock_bh(&soc->ast_lock);

	if ((ast->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
	    (ast->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
		return;

	/*
	 * Ensure we are updating the right AST entry by
	 * validating ast_idx.
	 * There is a possibility we might arrive here without
	 * an AST MAP event, so this check is mandatory
	 */
	if (ast->ast_idx == sa_idx)
		ast->is_active = TRUE;

	if (sa_sw_peer_id != ta_peer->peer_ids[0]) {
		sa_peer = ast->peer;

		if ((ast->type != CDP_TXRX_AST_TYPE_STATIC) &&
		    (ast->type != CDP_TXRX_AST_TYPE_SELF) &&
		    (ast->type != CDP_TXRX_AST_TYPE_STA_BSS)) {
			if (ast->pdev_id != ta_peer->vdev->pdev->pdev_id) {
				ret = dp_peer_add_ast(soc,
						      ta_peer, wds_src_mac,
						      CDP_TXRX_AST_TYPE_WDS,
						      flags);
			} else {
				qdf_spin_lock_bh(&soc->ast_lock);
				dp_peer_update_ast(soc, ta_peer, ast, flags);
				qdf_spin_unlock_bh(&soc->ast_lock);
				return;
			}
		}
		/*
		 * Do not kick out the STA if it belongs to a different radio.
		 * For a DBDC repeater, it is possible to arrive here for
		 * multicast loopback frames originated from connected
		 * clients and looped back (intrabss) by the Root AP
		 */
		if (ast->pdev_id != ta_peer->vdev->pdev->pdev_id)
			return;

		/*
		 * Kick out the peer when the directly associated peer (SA)
		 * roams to another AP and becomes reachable via the TA peer
		 */
		if ((sa_peer->vdev->opmode == wlan_op_mode_ap) &&
		    !sa_peer->delete_in_progress) {
			sa_peer->delete_in_progress = true;
			if (soc->cdp_soc.ol_ops->peer_sta_kickout) {
				soc->cdp_soc.ol_ops->peer_sta_kickout(
						sa_peer->vdev->pdev->ctrl_pdev,
						wds_src_mac);
			}
		}
	}
}
#else
static inline void
dp_rx_wds_srcport_learn(struct dp_soc *soc,
		uint8_t *rx_tlv_hdr,
		struct dp_peer *ta_peer,
		qdf_nbuf_t nbuf)
{
}
#endif

uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf);
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
		qdf_nbuf_t mpdu, bool mpdu_done);
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer);

#define DP_RX_LIST_APPEND(head, tail, elem) \
	do {                                                          \
		if (!(head)) {                                        \
			(head) = (elem);                              \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) = 1;\
		} else {                                              \
			qdf_nbuf_set_next((tail), (elem));            \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head)++;  \
		}                                                     \
		(tail) = (elem);                                      \
		qdf_nbuf_set_next((tail), NULL);                      \
	} while (0)

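/*
 * Example (illustrative sketch): building the delivery chain while walking
 * the ring, then handing the whole chain to the OS shim in one call; the
 * deliver_list_* names are hypothetical locals.
 *
 *	qdf_nbuf_t deliver_list_head = NULL;
 *	qdf_nbuf_t deliver_list_tail = NULL;
 *
 *	DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
 *	...
 *	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
 */
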
#ifndef BUILD_X86
static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
		qdf_dma_addr_t *paddr, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
#else
#define MAX_RETRY 100
static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
		qdf_dma_addr_t *paddr, struct dp_pdev *pdev)
{
	uint32_t nbuf_retry = 0;
	int32_t ret;
	const uint32_t x86_phy_addr = 0x50000000;
	/*
	 * in M2M emulation platforms (x86) the memory below 0x50000000
	 * is reserved for target use, so any memory allocated in this
	 * region should not be used by host
	 */
	do {
		if (qdf_likely(*paddr > x86_phy_addr))
			return QDF_STATUS_SUCCESS;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				"phy addr %pK is below 0x50000000, trying again",
				(void *)(uintptr_t)(*paddr));

		nbuf_retry++;
		if ((*rx_netbuf)) {
			qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
					QDF_DMA_BIDIRECTIONAL);
			/* Not freeing the buffer intentionally.
			 * We observed that the same buffer gets
			 * re-allocated, resulting in a longer load time
			 * and WMI init timeout.
			 * This buffer is anyway not useful, so skip it.
			 */
		}

		*rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
						RX_BUFFER_SIZE,
						RX_BUFFER_RESERVATION,
						RX_BUFFER_ALIGNMENT,
						FALSE);

		if (qdf_unlikely(!(*rx_netbuf)))
			return QDF_STATUS_E_FAILURE;

		ret = qdf_nbuf_map_single(dp_soc->osdev, *rx_netbuf,
						QDF_DMA_BIDIRECTIONAL);

		if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
			qdf_nbuf_free(*rx_netbuf);
			*rx_netbuf = NULL;
			continue;
		}

		*paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0);
	} while (nbuf_retry < MAX_RETRY);

	if ((*rx_netbuf)) {
		qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
					QDF_DMA_BIDIRECTIONAL);
		qdf_nbuf_free(*rx_netbuf);
	}

	return QDF_STATUS_E_FAILURE;
}
#endif

/**
 * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
 *				   the MSDU Link Descriptor
 * @soc: core txrx main context
 * @buf_info: buf_info includes the cookie used to look up the virtual
 * address of the link descriptor. Normally this is just an index into a
 * per-SOC array.
 *
 * This is the VA of the link descriptor, that the HAL layer later uses to
 * retrieve the list of MSDUs for a given MPDU.
 *
 * Return: void *: Virtual Address of the MSDU link descriptor
 */
static inline
void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
				  struct hal_buf_info *buf_info)
{
	void *link_desc_va;
	uint32_t bank_id = LINK_DESC_COOKIE_BANK_ID(buf_info->sw_cookie);

	/* TODO */
	/* Add sanity for cookie */

	link_desc_va = soc->link_desc_banks[bank_id].base_vaddr +
		(buf_info->paddr -
			soc->link_desc_banks[bank_id].base_paddr);

	return link_desc_va;
}

/**
 * dp_rx_cookie_2_mon_link_desc_va() - Converts cookie to a virtual address of
 *				   the MSDU Link Descriptor
 * @pdev: core txrx pdev context
 * @buf_info: buf_info includes the cookie used to look up the virtual
 * address of the link descriptor. Normally this is just an index into a
 * per-pdev array.
 * @mac_id: mac id of the ring the descriptor was reaped from
 *
 * This is the VA of the link descriptor in the monitor mode destination
 * ring, that the HAL layer later uses to retrieve the list of MSDUs for a
 * given MPDU.
 *
 * Return: void *: Virtual Address of the MSDU link descriptor
 */
static inline
void *dp_rx_cookie_2_mon_link_desc_va(struct dp_pdev *pdev,
				  struct hal_buf_info *buf_info,
				  int mac_id)
{
	void *link_desc_va;
	int mac_for_pdev = dp_get_mac_id_for_mac(pdev->soc, mac_id);

	/* TODO */
	/* Add sanity for cookie */

	link_desc_va =
	   pdev->link_desc_banks[mac_for_pdev][buf_info->sw_cookie].base_vaddr +
	   (buf_info->paddr -
	   pdev->link_desc_banks[mac_for_pdev][buf_info->sw_cookie].base_paddr);

	return link_desc_va;
}

/**
 * dp_rx_defrag_concat() - Concatenate the fragments
 *
 * @dst: destination pointer to the buffer
 * @src: source pointer from where the fragment payload is to be copied
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src)
{
	/*
	 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst
	 * to provide space for src, the headroom portion is copied from
	 * the original dst buffer to the larger new dst buffer.
	 * (This is needed, because the headroom of the dst buffer
	 * contains the rx desc.)
	 */
	if (qdf_nbuf_cat(dst, src))
		return QDF_STATUS_E_DEFRAG_ERROR;

	return QDF_STATUS_SUCCESS;
}

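/*
 * Example (illustrative sketch): the defrag path chains each subsequent
 * fragment onto the head fragment and aborts the whole sequence on
 * failure; head_nbuf and frag_nbuf are hypothetical locals.
 *
 *	if (dp_rx_defrag_concat(head_nbuf, frag_nbuf) !=
 *	    QDF_STATUS_SUCCESS) {
 *		qdf_nbuf_free(head_nbuf);
 *		return QDF_STATUS_E_DEFRAG_ERROR;
 *	}
 */
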
/*
 * dp_rx_ast_set_active() - set the active flag of the AST entry
 *			    corresponding to a hw index.
 * @soc: core txrx main context
 * @sa_idx: hw idx
 * @is_active: active flag
 *
 * Return: QDF_STATUS
 */
#ifdef FEATURE_WDS
static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc,
					      uint16_t sa_idx, bool is_active)
{
	struct dp_ast_entry *ast;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast = soc->ast_table[sa_idx];

	/*
	 * Ensure we are updating the right AST entry by
	 * validating ast_idx.
	 * There is a possibility we might arrive here without
	 * an AST MAP event, so this check is mandatory
	 */
	if (ast && (ast->ast_idx == sa_idx)) {
		ast->is_active = is_active;
		qdf_spin_unlock_bh(&soc->ast_lock);
		return QDF_STATUS_SUCCESS;
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
	return QDF_STATUS_E_FAILURE;
}
#else
static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc,
					      uint16_t sa_idx, bool is_active)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/*
 * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet.
 *					In qwrap mode, packets originated from
 *					any vdev should not loop back and
 *					should be dropped.
 * @vdev: vdev on which rx packet is received
 * @nbuf: rx pkt
 *
 * Return: true if the packet is a loopback packet and should be dropped
 */
#if ATH_SUPPORT_WRAP
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						qdf_nbuf_t nbuf)
{
	struct dp_vdev *psta_vdev;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	uint8_t *data = qdf_nbuf_data(nbuf);
	uint8_t i;

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		if (qdf_unlikely(vdev->proxysta_vdev)) {
			/* In qwrap isolation mode, allow loopback packets as
			 * all packets go to the RootAP and loop back on the
			 * mpsta.
			 */
			if (vdev->isolation_vdev)
				return false;
			TAILQ_FOREACH(psta_vdev, &pdev->vdev_list,
				      vdev_list_elem) {
				if (qdf_unlikely(psta_vdev->proxysta_vdev &&
					!qdf_mem_cmp(psta_vdev->mac_addr.raw,
					&data[DP_MAC_ADDR_LEN],
					DP_MAC_ADDR_LEN))) {
					/* Drop the packet if its source
					 * address matches any of the vdev
					 * addresses.
					 */
					return true;
				}
			}
		}
	}
	return false;
}
#else
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						qdf_nbuf_t nbuf)
{
	return false;
}
#endif

/*
 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *			       called during dp rx initialization
 *			       and at the end of dp_rx_process.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 *
 * Return: return success or failure
 */
QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				 struct dp_srng *dp_rxdma_srng,
				 struct rx_desc_pool *rx_desc_pool,
				 uint32_t num_req_buffers,
				 union dp_rx_desc_list_elem_t **desc_list,
				 union dp_rx_desc_list_elem_t **tail);

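/*
 * Example (illustrative sketch): the two common call patterns. At attach
 * time desc_list/tail start out NULL so the pool supplies fresh
 * descriptors; at the end of dp_rx_process() the locally freed list is
 * recycled back onto the ring. num_entries is a hypothetical local.
 *
 *	union dp_rx_desc_list_elem_t *desc_list = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *
 *	dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, rx_desc_pool,
 *				num_entries, &desc_list, &tail);
 */
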
/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor that
 *	       carries the buffer_addr_info
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action);

QDF_STATUS
dp_rx_link_desc_buf_return(struct dp_soc *soc, struct dp_srng *dp_rxdma_srng,
				void *buf_addr_info, uint8_t bm_action);

/**
 * dp_rx_link_desc_return_by_addr() - Return a MPDU link descriptor to HW
 *					(WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc, void *link_desc_addr,
					uint8_t bm_action);

uint32_t
dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id,
		     uint32_t quota);

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				uint8_t *rx_tlv_hdr, struct dp_peer *peer);
QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					uint8_t *rx_tlv_hdr);

int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
				struct dp_peer *peer, int rx_mcast);

qdf_nbuf_t
dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev);

#endif /* _DP_RX_H */