xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx.h (revision 8ddef7dd9a290d4a9b1efd5d3efacf51d78a1a0d)
1 /*
2  * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _DP_RX_H
20 #define _DP_RX_H
21 
22 #include "hal_rx.h"
23 #include "dp_tx.h"
24 #include "dp_peer.h"
25 #include "dp_internal.h"
26 
27 #ifdef RXDMA_OPTIMIZATION
28 #define RX_BUFFER_ALIGNMENT     128
29 #else /* RXDMA_OPTIMIZATION */
30 #define RX_BUFFER_ALIGNMENT     4
31 #endif /* RXDMA_OPTIMIZATION */
32 
33 #ifdef QCA_HOST2FW_RXBUF_RING
34 #define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW1_BM
35 
36 /**
37  * For MCL cases, allocate as many RX descriptors as buffers in the SW2RXDMA
38  * ring. This value may need to be tuned later.
39  */
40 #define DP_RX_DESC_ALLOC_MULTIPLIER 1
41 #else
42 #define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW3_BM
43 
44 /**
45  * AP use cases need to allocate more RX Descriptors than the number of
46  * entries available in the SW2RXDMA buffer replenish ring. This is to account
47  * for frames sitting in REO queues, HW-HW DMA rings etc. Hence using a
48  * multiplication factor of 3, to allocate three times as many RX descriptors
49  * as RX buffers.
50  */
51 #define DP_RX_DESC_ALLOC_MULTIPLIER 3
52 #endif /* QCA_HOST2FW_RXBUF_RING */
53 
54 #define RX_BUFFER_SIZE			2048
55 #define RX_BUFFER_RESERVATION   0
56 
57 #define DP_PEER_METADATA_PEER_ID_MASK	0x0000ffff
58 #define DP_PEER_METADATA_PEER_ID_SHIFT	0
59 #define DP_PEER_METADATA_VDEV_ID_MASK	0x00070000
60 #define DP_PEER_METADATA_VDEV_ID_SHIFT	16
61 
62 #define DP_PEER_METADATA_PEER_ID_GET(_peer_metadata)		\
63 	(((_peer_metadata) & DP_PEER_METADATA_PEER_ID_MASK)	\
64 			>> DP_PEER_METADATA_PEER_ID_SHIFT)
65 
66 #define DP_PEER_METADATA_ID_GET(_peer_metadata)			\
67 	(((_peer_metadata) & DP_PEER_METADATA_VDEV_ID_MASK)	\
68 			>> DP_PEER_METADATA_VDEV_ID_SHIFT)
69 
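/*
 * Illustrative sketch (not part of the driver): decomposing the peer
 * metadata word carried in the REO destination ring descriptor with the
 * accessor macros above. The helper name and out-parameters are
 * hypothetical.
 */
static inline void dp_rx_example_decode_peer_metadata(uint32_t peer_metadata,
						       uint16_t *peer_id,
						       uint8_t *vdev_id)
{
	/* Lower 16 bits carry the peer id */
	*peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_metadata);
	/* Bits 16..18 carry the vdev id */
	*vdev_id = DP_PEER_METADATA_ID_GET(peer_metadata);
}
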
70 #define DP_RX_DESC_MAGIC 0xdec0de
71 
72 /**
73  * struct dp_rx_desc
74  *
75  * @nbuf		: VA of the "skb" posted
76  * @rx_buf_start	: VA of the original Rx buffer, before
77  *			  movement of any skb->data pointer
78  * @cookie		: index into the sw array which holds
79  *			  the sw Rx descriptors
80  *			  Cookie space is 21 bits:
81  *			  lower 18 bits -- index
82  *			  upper  3 bits -- pool_id
83  * @pool_id		: pool ID from which this descriptor was allocated.
84  *			  Can only be used if there is no flow
85  *			  steering
86  * @in_use		: rx_desc is in use
87  * @unmapped		: used to mark rx_desc as unmapped once the
88  *			  corresponding nbuf has already been unmapped
89  */
90 struct dp_rx_desc {
91 	qdf_nbuf_t nbuf;
92 	uint8_t *rx_buf_start;
93 	uint32_t cookie;
94 	uint8_t	 pool_id;
95 #ifdef RX_DESC_DEBUG_CHECK
96 	uint32_t magic;
97 #endif
98 	uint8_t	in_use:1,
99 	unmapped:1;
100 };
101 
102 #define RX_DESC_COOKIE_INDEX_SHIFT		0
103 #define RX_DESC_COOKIE_INDEX_MASK		0x3ffff /* 18 bits */
104 #define RX_DESC_COOKIE_POOL_ID_SHIFT		18
105 #define RX_DESC_COOKIE_POOL_ID_MASK		0x1c0000
106 
107 #define DP_RX_DESC_COOKIE_MAX	\
108 	(RX_DESC_COOKIE_INDEX_MASK | RX_DESC_COOKIE_POOL_ID_MASK)
109 
110 #define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie)		\
111 	(((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >>	\
112 			RX_DESC_COOKIE_POOL_ID_SHIFT)
113 
114 #define DP_RX_DESC_COOKIE_INDEX_GET(_cookie)		\
115 	(((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >>	\
116 			RX_DESC_COOKIE_INDEX_SHIFT)
117 
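/*
 * Illustrative sketch (not part of the driver): composing and decomposing
 * an Rx descriptor cookie with the masks above. In the driver the cookies
 * are written once when the descriptor pools are initialised; the helper
 * name here is hypothetical.
 */
static inline uint32_t dp_rx_example_make_cookie(uint8_t pool_id,
						 uint32_t index)
{
	/* upper 3 bits: pool id, lower 18 bits: index within the pool */
	return ((pool_id << RX_DESC_COOKIE_POOL_ID_SHIFT) &
		RX_DESC_COOKIE_POOL_ID_MASK) |
	       ((index << RX_DESC_COOKIE_INDEX_SHIFT) &
		RX_DESC_COOKIE_INDEX_MASK);
}
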
118 /*
119  * dp_rx_xor_block() - xor block of data
120  * @b: destination data block
121  * @a: source data block
122  * @len: length of the data to process
123  *
124  * Returns: None
125  */
126 static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len)
127 {
128 	qdf_size_t i;
129 
130 	for (i = 0; i < len; i++)
131 		b[i] ^= a[i];
132 }
133 
134 /*
135  * dp_rx_rotl() - rotate the bits left
136  * @val: unsigned integer input value
137  * @bits: number of bits
138  *
139  * Returns: Input value rotated left by 'bits' bit positions
140  */
141 static inline uint32_t dp_rx_rotl(uint32_t val, int bits)
142 {
143 	return (val << bits) | (val >> (32 - bits));
144 }
145 
146 /*
147  * dp_rx_rotr() - rotate the bits right
148  * @val: unsigned integer input value
149  * @bits: number of bits
150  *
151  * Returns: Input value rotated right by 'bits' bit positions
152  */
153 static inline uint32_t dp_rx_rotr(uint32_t val, int bits)
154 {
155 	return (val >> bits) | (val << (32 - bits));
156 }
157 
158 /*
159  * dp_set_rx_queue() - set queue_mapping in skb
160  * @nbuf: skb
161  * @queue_id: rx queue_id
162  *
163  * Return: void
164  */
165 #ifdef QCA_OL_RX_MULTIQ_SUPPORT
166 static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
167 {
168 	qdf_nbuf_record_rx_queue(nbuf, queue_id);
169 	return;
170 }
171 #else
172 static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
173 {
174 }
175 #endif
176 
177 /*
178  * dp_rx_xswap() - swap adjacent bytes of the input word
179  * @val: unsigned integer input value
180  *
181  * Returns: Input value with the two bytes of each 16-bit half swapped
182  */
183 static inline uint32_t dp_rx_xswap(uint32_t val)
184 {
185 	return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
186 }
187 
188 /*
189  * dp_rx_get_le32_split() - assemble little endian 32 bits from split bytes
190  * @b0: byte 0 (least significant)
191  * @b1: byte 1
192  * @b2: byte 2
193  * @b3: byte 3 (most significant)
194  *
195  * Returns: 32-bit value composed of the four bytes in little endian order
196  */
197 static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2,
198 					uint8_t b3)
199 {
200 	return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
201 }
202 
203 /*
204  * dp_rx_get_le32() - read little endian 32 bits from a byte array
205  * @p: pointer to the source byte array (at least 4 bytes)
206  *
207  * Returns: 32-bit value assembled from p[0]..p[3] in little endian order
211  */
212 static inline uint32_t dp_rx_get_le32(const uint8_t *p)
213 {
214 	return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]);
215 }
216 
217 /*
218  * dp_rx_put_le32() - put little endian 32 bits
219  * @p: destination char array
220  * @v: source 32-bit integer
221  *
222  * Returns: None
223  */
224 static inline void dp_rx_put_le32(uint8_t *p, uint32_t v)
225 {
226 	p[0] = (v) & 0xff;
227 	p[1] = (v >> 8) & 0xff;
228 	p[2] = (v >> 16) & 0xff;
229 	p[3] = (v >> 24) & 0xff;
230 }
231 
232 /* Michael MIC block function - one mixing round of the Michael algorithm */
233 #define dp_rx_michael_block(l, r)	\
234 	do {					\
235 		r ^= dp_rx_rotl(l, 17);	\
236 		l += r;				\
237 		r ^= dp_rx_xswap(l);		\
238 		l += r;				\
239 		r ^= dp_rx_rotl(l, 3);	\
240 		l += r;				\
241 		r ^= dp_rx_rotr(l, 2);	\
242 		l += r;				\
243 	} while (0)
244 
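/*
 * Illustrative sketch (not part of the driver): how the byte/rotate helpers
 * and dp_rx_michael_block() above combine into a Michael MIC computation
 * over data whose length is already a multiple of 4 (i.e. padded). Key
 * layout, padding and the DA/SA/priority header used by the real TKIP
 * Michael MIC are omitted; the Rx defrag path is the actual consumer of
 * these helpers. All names below are hypothetical.
 */
static inline void dp_rx_example_michael_mic(const uint8_t *key,
					     const uint8_t *data,
					     qdf_size_t data_len,
					     uint8_t mic[8])
{
	uint32_t l = dp_rx_get_le32(key);
	uint32_t r = dp_rx_get_le32(key + 4);
	qdf_size_t i;

	/* Mix in every 32-bit little endian word of the padded message */
	for (i = 0; i < data_len; i += 4) {
		l ^= dp_rx_get_le32(data + i);
		dp_rx_michael_block(l, r);
	}

	/* Emit the 64-bit MIC as two little endian words */
	dp_rx_put_le32(mic, l);
	dp_rx_put_le32(mic + 4, r);
}
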
245 /**
246  * union dp_rx_desc_list_elem_t - element of the Rx descriptor free list
247  *
248  * @next		: Next pointer to form free list
249  * @rx_desc		: DP Rx descriptor
250  */
251 union dp_rx_desc_list_elem_t {
252 	union dp_rx_desc_list_elem_t *next;
253 	struct dp_rx_desc rx_desc;
254 };
255 
256 /**
257  * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
258  *			 the Rx descriptor on Rx DMA source ring buffer
259  * @soc: core txrx main context
260  * @cookie: cookie used to lookup virtual address
261  *
262  * Return: void *: Virtual Address of the Rx descriptor
263  */
264 static inline
265 void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie)
266 {
267 	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
268 	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
269 	struct rx_desc_pool *rx_desc_pool;
270 
271 	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
272 		return NULL;
273 
274 	rx_desc_pool = &soc->rx_desc_buf[pool_id];
275 
276 	if (qdf_unlikely(index >= rx_desc_pool->pool_size))
277 		return NULL;
278 
279 	return &(soc->rx_desc_buf[pool_id].array[index].rx_desc);
280 }
281 
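/*
 * Illustrative sketch (not part of the driver): converting a cookie reaped
 * from a ring entry back to its SW descriptor, with the same sanity checks
 * the fast path relies on. The function name is hypothetical.
 */
static inline qdf_nbuf_t dp_rx_example_cookie_to_nbuf(struct dp_soc *soc,
						      uint32_t rx_buf_cookie)
{
	struct dp_rx_desc *rx_desc;

	/* NULL is returned for an out-of-range pool id or index */
	rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
	if (qdf_unlikely(!rx_desc))
		return NULL;

#ifdef RX_DESC_DEBUG_CHECK
	/* A stale or corrupted cookie would not carry the magic pattern */
	qdf_assert_always(rx_desc->magic == DP_RX_DESC_MAGIC);
#endif

	return rx_desc->nbuf;
}
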
282 /**
283  * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
284  *			 the Rx descriptor on monitor ring buffer
285  * @soc: core txrx main context
286  * @cookie: cookie used to lookup virtual address
287  *
288  * Return: void *: Virtual Address of the Rx descriptor
289  */
290 static inline
291 void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie)
292 {
293 	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
294 	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
295 	/* TODO */
296 	/* Add sanity for pool_id & index */
297 	return &(soc->rx_desc_mon[pool_id].array[index].rx_desc);
298 }
299 
300 /**
301  * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
302  *			 the Rx descriptor on monitor status ring buffer
303  * @soc: core txrx main context
304  * @cookie: cookie used to lookup virtual address
305  *
306  * Return: void *: Virtual Address of the Rx descriptor
307  */
308 static inline
309 void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
310 {
311 	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
312 	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
313 	/* TODO */
314 	/* Add sanity for pool_id & index */
315 	return &(soc->rx_desc_status[pool_id].array[index].rx_desc);
316 }
317 
318 void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
319 				union dp_rx_desc_list_elem_t **local_desc_list,
320 				union dp_rx_desc_list_elem_t **tail,
321 				uint16_t pool_id,
322 				struct rx_desc_pool *rx_desc_pool);
323 
324 uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
325 				struct rx_desc_pool *rx_desc_pool,
326 				uint16_t num_descs,
327 				union dp_rx_desc_list_elem_t **desc_list,
328 				union dp_rx_desc_list_elem_t **tail);
329 
330 
331 QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);
332 
333 void dp_rx_pdev_detach(struct dp_pdev *pdev);
334 
335 
336 uint32_t
337 dp_rx_process(struct dp_intr *int_ctx, void *hal_ring, uint8_t reo_ring_num,
338 	      uint32_t quota);
339 
340 uint32_t dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota);
341 
342 uint32_t
343 dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota);
344 
345 /**
346  * dp_rx_sg_create() - create a frag_list for an MSDU which is spread across
347  *		     multiple nbufs.
348  * @nbuf: pointer to the first msdu of an amsdu.
349  * @rx_tlv_hdr: pointer to the start of RX TLV headers.
350  *
351  * This function implements the creation of RX frag_list for cases
352  * where an MSDU is spread across multiple nbufs.
353  *
354  * Return: returns the head nbuf which contains complete frag_list.
355  */
356 qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr);
357 
358 QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
359 				uint32_t pool_id,
360 				uint32_t pool_size,
361 				struct rx_desc_pool *rx_desc_pool);
362 
363 void dp_rx_desc_pool_free(struct dp_soc *soc,
364 				uint32_t pool_id,
365 				struct rx_desc_pool *rx_desc_pool);
366 
367 void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
368 				struct dp_peer *peer);
369 
370 /**
371  * dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list
372  *
373  * @head: pointer to the head of local free list
374  * @tail: pointer to the tail of local free list
375  * @new: new descriptor that is added to the free list
376  *
377  * Return: void:
378  */
379 static inline
380 void dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head,
381 				 union dp_rx_desc_list_elem_t **tail,
382 				 struct dp_rx_desc *new)
383 {
384 	qdf_assert(head && new);
385 
386 	new->nbuf = NULL;
387 	new->in_use = 0;
388 	new->unmapped = 0;
389 
390 	((union dp_rx_desc_list_elem_t *)new)->next = *head;
391 	*head = (union dp_rx_desc_list_elem_t *)new;
392 	if (*tail == NULL)
393 		*tail = *head;
394 
395 }
396 
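/*
 * Illustrative sketch (not part of the driver): chaining freed descriptors
 * on a local list with dp_rx_add_to_free_desc_list() and returning the
 * whole list to the pool in one call. The real fast path instead hands the
 * local list to dp_rx_buffers_replenish(); names below are hypothetical.
 */
static inline void dp_rx_example_recycle_descs(struct dp_soc *soc,
					       uint8_t pool_id,
					       struct rx_desc_pool *rx_desc_pool,
					       struct dp_rx_desc **freed,
					       uint16_t num_freed)
{
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint16_t i;

	/* Build a lock-free local list out of the freed descriptors */
	for (i = 0; i < num_freed; i++)
		dp_rx_add_to_free_desc_list(&head, &tail, freed[i]);

	/* Splice the local list back onto the per-pool free list */
	dp_rx_add_desc_list_to_free_list(soc, &head, &tail, pool_id,
					 rx_desc_pool);
}
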
397 /**
398  * dp_rx_wds_add_or_update_ast() - Add or update the ast entry.
399  *
400  * @soc: core txrx main context
401  * @ta_peer: WDS repeater peer
402  * @wds_src_mac: source MAC address of the received frame (WDS client)
403  * @is_ad4_valid: 4-address valid flag
404  * @is_sa_valid: source address valid flag
405  * @is_chfrag_start: frag start flag
406  * @sa_idx: source-address index for peer
407  * @sa_sw_peer_id: software source-address peer-id
408  *
409  * Return: void:
410  */
411 #ifdef FEATURE_WDS
412 static inline void
413 dp_rx_wds_add_or_update_ast(struct dp_soc *soc, struct dp_peer *ta_peer,
414 			    uint8_t *wds_src_mac, uint8_t is_ad4_valid,
415 			    uint8_t is_sa_valid, uint8_t is_chfrag_start,
416 			    uint16_t sa_idx, uint16_t sa_sw_peer_id)
417 {
418 	struct dp_peer *sa_peer;
419 	struct dp_ast_entry *ast;
420 	uint32_t flags = IEEE80211_NODE_F_WDS_HM;
421 	uint32_t ret = 0;
422 	struct dp_neighbour_peer *neighbour_peer = NULL;
423 	struct dp_pdev *pdev = ta_peer->vdev->pdev;
424 
425 	/* For AP mode : Do wds source port learning only if it is a
426 	 * 4-address mpdu
427 	 *
428 	 * For STA mode : Frames from RootAP backend will be in 3-address mode,
429 	 * till RootAP does the WDS source port learning; Hence in repeater/STA
430 	 * mode, we enable learning even in 3-address mode, to avoid RootAP
431 	 * backbone getting wrongly learnt as MEC on repeater
432 	 */
433 	if (ta_peer->vdev->opmode != wlan_op_mode_sta) {
434 		if (!(is_chfrag_start && is_ad4_valid))
435 			return;
436 	} else {
437 	/* For HKv2, source port learning is not needed in STA mode
438 		 * as we have support in HW
439 		 */
440 		if (soc->ast_override_support)
441 			return;
442 	}
443 
444 	if (qdf_unlikely(!is_sa_valid)) {
445 		ret = dp_peer_add_ast(soc,
446 					ta_peer,
447 					wds_src_mac,
448 					CDP_TXRX_AST_TYPE_WDS,
449 					flags);
450 		return;
451 	}
452 
453 	qdf_spin_lock_bh(&soc->ast_lock);
454 	ast = soc->ast_table[sa_idx];
455 	qdf_spin_unlock_bh(&soc->ast_lock);
456 
457 	if (!ast) {
458 		/*
459 		 * In HKv1, it is possible that HW retains the AST entry in
460 		 * GSE cache on 1 radio, even after the AST entry is deleted
461 		 * (on another radio).
462 		 *
463 		 * Due to this, host might still get sa_is_valid indications
464 		 * for frames with SA not really present in AST table.
465 		 *
466 		 * So we go ahead and send an add_ast command to FW in such
467 		 * cases where sa is reported still as valid, so that FW will
468 		 * invalidate this GSE cache entry and new AST entry gets
469 		 * cached.
470 		 */
471 		if (!soc->ast_override_support) {
472 			ret = dp_peer_add_ast(soc,
473 					      ta_peer,
474 					      wds_src_mac,
475 					      CDP_TXRX_AST_TYPE_WDS,
476 					      flags);
477 			return;
478 		} else {
479 			/* In HKv2 smart monitor case, when NAC client is
480 			 * added first and this client roams within BSS to
481 			 * connect to RE, since we have an AST entry for
482 			 * NAC we get sa_is_valid bit set. So we check if
483 			 * smart monitor is enabled and send add_ast command
484 			 * to FW.
485 			 */
486 			if (pdev->neighbour_peers_added) {
487 				qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
488 				TAILQ_FOREACH(neighbour_peer,
489 					      &pdev->neighbour_peers_list,
490 					      neighbour_peer_list_elem) {
491 					if (!qdf_mem_cmp(&neighbour_peer->neighbour_peers_macaddr,
492 							 wds_src_mac,
493 							 DP_MAC_ADDR_LEN)) {
494 						ret = dp_peer_add_ast(soc,
495 								      ta_peer,
496 								      wds_src_mac,
497 								      CDP_TXRX_AST_TYPE_WDS,
498 								      flags);
499 						QDF_TRACE(QDF_MODULE_ID_DP,
500 							  QDF_TRACE_LEVEL_INFO,
501 							  "sa valid and nac roamed to wds");
502 						break;
503 					}
504 				}
505 				qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
506 			}
507 			return;
508 		}
509 	}
510 
511 
512 	if ((ast->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
513 	    (ast->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
514 		return;
515 
516 	/*
517 	 * Ensure we are updating the right AST entry by
518 	 * validating ast_idx.
519 	 * There is a possibility we might arrive here without
520 	 * AST MAP event, so this check is mandatory
521 	 */
522 	if (ast->is_mapped && (ast->ast_idx == sa_idx))
523 		ast->is_active = TRUE;
524 
525 	if (sa_sw_peer_id != ta_peer->peer_ids[0]) {
526 		sa_peer = ast->peer;
527 
528 		if ((ast->type != CDP_TXRX_AST_TYPE_STATIC) &&
529 		    (ast->type != CDP_TXRX_AST_TYPE_SELF) &&
530 			(ast->type != CDP_TXRX_AST_TYPE_STA_BSS)) {
531 			if (ast->pdev_id != ta_peer->vdev->pdev->pdev_id) {
532 				/* This case is when a STA roams from one
533 				 * repeater to another repeater, but these
534 				 * repeaters are connected to root AP on
535 				 * different radios.
536 				 * Ex: rptr1 connected to ROOT AP over 5G
537 				 * and rptr2 connected to ROOT AP over 2G
538 				 * radio
539 				 */
540 				qdf_spin_lock_bh(&soc->ast_lock);
541 				dp_peer_del_ast(soc, ast);
542 				qdf_spin_unlock_bh(&soc->ast_lock);
543 			} else {
544 				/* this case is when a STA roams from one
545 				 * repeater to another repeater, but within
546 				 * the same radio.
547 				 */
548 				qdf_spin_lock_bh(&soc->ast_lock);
549 				dp_peer_update_ast(soc, ta_peer, ast, flags);
550 				qdf_spin_unlock_bh(&soc->ast_lock);
551 				return;
552 			}
553 		}
554 		/*
555 		 * Do not kickout STA if it belongs to a different radio.
556 		 * For DBDC repeater, it is possible to arrive here
557 		 * for multicast loopback frames originated from connected
558 		 * clients and looped back (intrabss) by Root AP
559 		 */
560 		if (ast->pdev_id != ta_peer->vdev->pdev->pdev_id) {
561 			return;
562 		}
563 
564 		/*
565 		 * Kickout, when direct associated peer(SA) roams
566 		 * to another AP and reachable via TA peer
567 		 */
568 		if ((sa_peer->vdev->opmode == wlan_op_mode_ap) &&
569 		    !sa_peer->delete_in_progress) {
570 			sa_peer->delete_in_progress = true;
571 			if (soc->cdp_soc.ol_ops->peer_sta_kickout) {
572 				soc->cdp_soc.ol_ops->peer_sta_kickout(
573 						sa_peer->vdev->pdev->ctrl_pdev,
574 						wds_src_mac);
575 			}
576 		}
577 	}
578 }
579 
580 /**
581  * dp_rx_wds_srcport_learn() - Add or update the STA PEER which
582  *				is behind the WDS repeater.
583  *
584  * @soc: core txrx main context
585  * @rx_tlv_hdr: base address of RX TLV header
586  * @ta_peer: WDS repeater peer
587  * @nbuf: rx pkt
588  *
589  * Return: void:
590  */
591 static inline void
592 dp_rx_wds_srcport_learn(struct dp_soc *soc,
593 			uint8_t *rx_tlv_hdr,
594 			struct dp_peer *ta_peer,
595 			qdf_nbuf_t nbuf)
596 {
597 	uint16_t sa_sw_peer_id = hal_rx_msdu_end_sa_sw_peer_id_get(rx_tlv_hdr);
598 	uint8_t sa_is_valid = hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr);
599 	uint8_t wds_src_mac[IEEE80211_ADDR_LEN];
600 	uint16_t sa_idx;
601 	uint8_t is_chfrag_start = 0;
602 	uint8_t is_ad4_valid = 0;
603 
604 	if (qdf_unlikely(!ta_peer))
605 		return;
606 
607 	is_chfrag_start = qdf_nbuf_is_rx_chfrag_start(nbuf);
608 	if (is_chfrag_start)
609 		is_ad4_valid = hal_rx_get_mpdu_mac_ad4_valid(rx_tlv_hdr);
610 
611 	memcpy(wds_src_mac, (qdf_nbuf_data(nbuf) + IEEE80211_ADDR_LEN),
612 	       IEEE80211_ADDR_LEN);
613 
614 	/*
615 	 * Get the AST entry from HW SA index and mark it as active
616 	 */
617 	sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr);
618 
619 	dp_rx_wds_add_or_update_ast(soc, ta_peer, wds_src_mac, is_ad4_valid,
620 				    sa_is_valid, is_chfrag_start,
621 				    sa_idx, sa_sw_peer_id);
622 
623 	return;
624 }
625 #else
626 static inline void
627 dp_rx_wds_srcport_learn(struct dp_soc *soc,
628 		uint8_t *rx_tlv_hdr,
629 		struct dp_peer *ta_peer,
630 		qdf_nbuf_t nbuf)
631 {
632 }
633 #endif
634 
635 uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf);
636 void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
637 		qdf_nbuf_t mpdu, bool mpdu_done);
638 void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
639 			     uint8_t *rx_tlv_hdr, struct dp_peer *peer);
640 void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
641 		       uint16_t peer_id, uint8_t tid);
642 
643 
644 #define DP_RX_LIST_APPEND(head, tail, elem) \
645 	do {                                                          \
646 		if (!(head)) {                                        \
647 			(head) = (elem);                              \
648 			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) = 1;\
649 		} else {                                              \
650 			qdf_nbuf_set_next((tail), (elem));            \
651 			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head)++;  \
652 		}                                                     \
653 		(tail) = (elem);                                      \
654 		qdf_nbuf_set_next((tail), NULL);                      \
655 	} while (0)
656 
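/*
 * Illustrative sketch (not part of the driver): accumulating MSDUs into a
 * singly linked delivery list with DP_RX_LIST_APPEND() before handing them
 * to the stack (e.g. via dp_rx_deliver_raw()). Names are hypothetical.
 */
static inline qdf_nbuf_t dp_rx_example_build_list(qdf_nbuf_t *msdus,
						  uint32_t num_msdus)
{
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;
	uint32_t i;

	/* Each append links the nbuf at the tail and bumps the element
	 * count stored in the head nbuf's control block.
	 */
	for (i = 0; i < num_msdus; i++)
		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail,
				  msdus[i]);

	return deliver_list_head;
}
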
657 #ifndef BUILD_X86
658 static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
659 		qdf_dma_addr_t *paddr, struct dp_pdev *pdev)
660 {
661 	return QDF_STATUS_SUCCESS;
662 }
663 #else
664 #define MAX_RETRY 100
665 static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
666 		qdf_dma_addr_t *paddr, struct dp_pdev *pdev)
667 {
668 	uint32_t nbuf_retry = 0;
669 	int32_t ret;
670 	const uint32_t x86_phy_addr = 0x50000000;
671 	/*
672 	 * in M2M emulation platforms (x86) the memory below 0x50000000
673 	 * is reserved for target use, so any memory allocated in this
674 	 * region should not be used by host
675 	 */
676 	do {
677 		if (qdf_likely(*paddr > x86_phy_addr))
678 			return QDF_STATUS_SUCCESS;
679 		else {
680 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
681 					"phy addr %pK below 0x50000000, trying again",
682 					paddr);
683 
684 			nbuf_retry++;
685 			if ((*rx_netbuf)) {
686 				qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
687 						QDF_DMA_BIDIRECTIONAL);
688 				/* Not freeing the buffer intentionally.
689 				 * Observed that the same buffer gets
690 				 * re-allocated, resulting in longer load time
691 				 * and WMI init timeout.
692 				 * This buffer is not useful anyway, so skip it.
693 				 */
694 			}
695 
696 			*rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
697 							RX_BUFFER_SIZE,
698 							RX_BUFFER_RESERVATION,
699 							RX_BUFFER_ALIGNMENT,
700 							FALSE);
701 
702 			if (qdf_unlikely(!(*rx_netbuf)))
703 				return QDF_STATUS_E_FAILURE;
704 
705 			ret = qdf_nbuf_map_single(dp_soc->osdev, *rx_netbuf,
706 							QDF_DMA_BIDIRECTIONAL);
707 
708 			if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
709 				qdf_nbuf_free(*rx_netbuf);
710 				*rx_netbuf = NULL;
711 				continue;
712 			}
713 
714 			*paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0);
715 		}
716 	} while (nbuf_retry < MAX_RETRY);
717 
718 	if ((*rx_netbuf)) {
719 		qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
720 					QDF_DMA_BIDIRECTIONAL);
721 		qdf_nbuf_free(*rx_netbuf);
722 	}
723 
724 	return QDF_STATUS_E_FAILURE;
725 }
726 #endif
727 
728 /**
729  * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
730  *				   the MSDU Link Descriptor
731  * @soc: core txrx main context
732  * @buf_info: includes the cookie used to look up the virtual address of the
733  * link descriptor. Normally this is just an index into a per-SOC array.
734  *
735  * This is the VA of the link descriptor, that HAL layer later uses to
736  * retrieve the list of MSDU's for a given MPDU.
737  *
738  * Return: void *: Virtual Address of the Rx descriptor
739  */
740 static inline
741 void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
742 				  struct hal_buf_info *buf_info)
743 {
744 	void *link_desc_va;
745 	uint32_t bank_id = LINK_DESC_COOKIE_BANK_ID(buf_info->sw_cookie);
746 
747 
748 	/* TODO */
749 	/* Add sanity for  cookie */
750 
751 	link_desc_va = soc->link_desc_banks[bank_id].base_vaddr +
752 		(buf_info->paddr -
753 			soc->link_desc_banks[bank_id].base_paddr);
754 
755 	return link_desc_va;
756 }
757 
758 /**
759  * dp_rx_cookie_2_mon_link_desc_va() - Converts cookie to a virtual address of
760  *				   the MSDU Link Descriptor
761  * @pdev: core txrx pdev context
762  * @buf_info: includes the cookie used to look up the virtual address of the
763  * link descriptor. Normally this is just an index into a per-pdev array.
764  * @mac_id: mac id of the RxDMA ring the descriptor came from
764  *
765  * This is the VA of the link descriptor in monitor mode destination ring,
766  * that HAL layer later uses to retrieve the list of MSDU's for a given MPDU.
767  *
768  * Return: void *: Virtual Address of the Rx descriptor
769  */
770 static inline
771 void *dp_rx_cookie_2_mon_link_desc_va(struct dp_pdev *pdev,
772 				  struct hal_buf_info *buf_info,
773 				  int mac_id)
774 {
775 	void *link_desc_va;
776 	int mac_for_pdev = dp_get_mac_id_for_mac(pdev->soc, mac_id);
777 
778 	/* TODO */
779 	/* Add sanity for  cookie */
780 
781 	link_desc_va =
782 	   pdev->link_desc_banks[mac_for_pdev][buf_info->sw_cookie].base_vaddr +
783 	   (buf_info->paddr -
784 	   pdev->link_desc_banks[mac_for_pdev][buf_info->sw_cookie].base_paddr);
785 
786 	return link_desc_va;
787 }
788 
789 /**
790  * dp_rx_defrag_concat() - Concatenate the fragments
791  *
792  * @dst: destination pointer to the buffer
793  * @src: source pointer from where the fragment payload is to be copied
794  *
795  * Return: QDF_STATUS
796  */
797 static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src)
798 {
799 	/*
800 	 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst
801 	 * to provide space for src, the headroom portion is copied from
802 	 * the original dst buffer to the larger new dst buffer.
803 	 * (This is needed, because the headroom of the dst buffer
804 	 * contains the rx desc.)
805 	 */
806 	if (qdf_nbuf_cat(dst, src))
807 		return QDF_STATUS_E_DEFRAG_ERROR;
808 
809 	return QDF_STATUS_SUCCESS;
810 }
811 
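/*
 * Illustrative sketch (not part of the driver): absorbing a chain of
 * fragment nbufs into the head fragment with dp_rx_defrag_concat(). The
 * real reassembly path additionally strips per-fragment headers and
 * verifies the MIC; only the concatenation step is shown. Names are
 * hypothetical.
 */
static inline QDF_STATUS dp_rx_example_defrag_concat_all(qdf_nbuf_t head,
							 qdf_nbuf_t frag_list)
{
	qdf_nbuf_t cur = frag_list;
	qdf_nbuf_t next;

	while (cur) {
		/* Remember the next fragment before 'cur' is absorbed */
		next = qdf_nbuf_next(cur);
		if (dp_rx_defrag_concat(head, cur) != QDF_STATUS_SUCCESS)
			return QDF_STATUS_E_DEFRAG_ERROR;
		cur = next;
	}

	return QDF_STATUS_SUCCESS;
}
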
812 /*
813  * dp_rx_ast_set_active() - set the active flag of the AST entry
814  *				    corresponding to a hw index.
815  * @soc: core txrx main context
816  * @sa_idx: hw idx
817  * @is_active: active flag
818  *
819  * Return: QDF_STATUS
820  */
820 #ifdef FEATURE_WDS
821 static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc, uint16_t sa_idx, bool is_active)
822 {
823 	struct dp_ast_entry *ast;
824 	qdf_spin_lock_bh(&soc->ast_lock);
825 	ast = soc->ast_table[sa_idx];
826 
827 	/*
828 	 * Ensure we are updating the right AST entry by
829 	 * validating ast_idx.
830 	 * There is a possibility we might arrive here without
831 	 * AST MAP event, so this check is mandatory
832 	 */
833 	if (ast && ast->is_mapped && (ast->ast_idx == sa_idx)) {
834 		ast->is_active = is_active;
835 		qdf_spin_unlock_bh(&soc->ast_lock);
836 		return QDF_STATUS_SUCCESS;
837 	}
838 
839 	qdf_spin_unlock_bh(&soc->ast_lock);
840 	return QDF_STATUS_E_FAILURE;
841 }
842 #else
843 static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc, uint16_t sa_idx, bool is_active)
844 {
845 	return QDF_STATUS_SUCCESS;
846 }
847 #endif
848 
849 /*
850  * dp_rx_desc_dump() - dump the sw rx descriptor
851  *
852  * @rx_desc: sw rx descriptor
853  */
854 static inline void dp_rx_desc_dump(struct dp_rx_desc *rx_desc)
855 {
856 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
857 		  "rx_desc->nbuf: %pK, rx_desc->cookie: %d, rx_desc->pool_id: %d, rx_desc->in_use: %d, rx_desc->unmapped: %d",
858 		  rx_desc->nbuf, rx_desc->cookie, rx_desc->pool_id,
859 		  rx_desc->in_use, rx_desc->unmapped);
860 }
861 
862 /*
863  * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet.
864  *					In qwrap mode, packets originating from
865  *					any vdev should not be looped back and
866  *					should be dropped.
867  * @vdev: vdev on which rx packet is received
868  * @nbuf: rx pkt
869  *
870  * Return: true if the packet is a qwrap multicast loopback to be dropped
871  */
871 #if ATH_SUPPORT_WRAP
872 static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
873 						qdf_nbuf_t nbuf)
874 {
875 	struct dp_vdev *psta_vdev;
876 	struct dp_pdev *pdev = vdev->pdev;
877 	uint8_t *data = qdf_nbuf_data(nbuf);
878 
879 	if (qdf_unlikely(vdev->proxysta_vdev)) {
880 		/* In qwrap isolation mode, allow loopback packets as all
881 		 * packets go to RootAP and Loopback on the mpsta.
882 		 */
883 		if (vdev->isolation_vdev)
884 			return false;
885 		TAILQ_FOREACH(psta_vdev, &pdev->vdev_list, vdev_list_elem) {
886 			if (qdf_unlikely(psta_vdev->proxysta_vdev &&
887 					 !qdf_mem_cmp(psta_vdev->mac_addr.raw,
888 						      &data[DP_MAC_ADDR_LEN],
889 						      DP_MAC_ADDR_LEN))) {
890 				/* Drop packet if source address is equal to
891 				 * any of the vdev addresses.
892 				 */
893 				return true;
894 			}
895 		}
896 	}
897 	return false;
898 }
899 #else
900 static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
901 						qdf_nbuf_t nbuf)
902 {
903 	return false;
904 }
905 #endif
906 
907 /*
908  * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
909  *			       called during dp rx initialization
910  *			       and at the end of dp_rx_process.
911  *
912  * @soc: core txrx main context
913  * @mac_id: mac_id which is one of 3 mac_ids
914  * @dp_rxdma_srng: dp rxdma circular ring
915  * @rx_desc_pool: Pointer to free Rx descriptor pool
916  * @num_req_buffers: number of buffers to be replenished
917  * @desc_list: list of descs if called from dp_rx_process
918  *	       or NULL during dp rx initialization or out of buffer
919  *	       interrupt.
920  * @tail: tail of descs list
921  * Return: return success or failure
922  */
923 QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
924 				 struct dp_srng *dp_rxdma_srng,
925 				 struct rx_desc_pool *rx_desc_pool,
926 				 uint32_t num_req_buffers,
927 				 union dp_rx_desc_list_elem_t **desc_list,
928 				 union dp_rx_desc_list_elem_t **tail);
929 
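/*
 * Illustrative sketch (not part of the driver): replenishing the RxDMA
 * refill ring with the descriptors freed while reaping a REO ring, roughly
 * the way the Rx fast path does it. The soc/pdev field names used here
 * (rx_desc_buf, rx_refill_buf_ring) are assumptions based on this header's
 * own references and may differ per target; the function name is
 * hypothetical.
 */
static inline void dp_rx_example_replenish(struct dp_soc *soc,
					   struct dp_pdev *pdev,
					   uint32_t mac_id,
					   uint32_t num_freed,
					   union dp_rx_desc_list_elem_t **head,
					   union dp_rx_desc_list_elem_t **tail)
{
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	struct dp_srng *rxdma_srng = &pdev->rx_refill_buf_ring;

	if (!num_freed)
		return;

	/* Post num_freed fresh buffers, reusing the freed descriptors */
	dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
				num_freed, head, tail);
}
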
930 /**
931  * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
932  *			      (WBM), following error handling
933  *
934  * @soc: core DP main context
935  * @ring_desc: opaque pointer to the REO error ring descriptor that carries
936  *	       the buffer_addr_info of the link descriptor being returned
937  * @bm_action: put to idle_list or release to msdu_list
938  * Return: QDF_STATUS
939  */
940 QDF_STATUS
941 dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action);
942 
943 QDF_STATUS
944 dp_rx_link_desc_buf_return(struct dp_soc *soc, struct dp_srng *dp_rxdma_srng,
945 				void *buf_addr_info, uint8_t bm_action);
946 /**
947  * dp_rx_link_desc_return_by_addr() - Return an MPDU link descriptor to
948  *					HW (WBM) by address
949  *
950  * @soc: core DP main context
951  * @link_desc_addr: link descriptor address
952  * @bm_action: put to idle_list or release to msdu_list
953  *
953  * Return: QDF_STATUS
954  */
955 QDF_STATUS
956 dp_rx_link_desc_return_by_addr(struct dp_soc *soc, void *link_desc_addr,
957 					uint8_t bm_action);
958 
959 uint32_t
960 dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id,
961 						uint32_t quota);
962 
963 void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
964 				uint8_t *rx_tlv_hdr, struct dp_peer *peer);
965 QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
966 					uint8_t *rx_tlv_hdr);
967 
968 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
969 				struct dp_peer *peer, int rx_mcast);
970 
971 qdf_nbuf_t
972 dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev);
973 
974 void dp_rx_dump_info_and_assert(struct dp_soc *soc, void *hal_ring,
975 				void *ring_desc, struct dp_rx_desc *rx_desc);
976 
977 #endif /* _DP_RX_H */
978