/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_RX_H
#define _DP_RX_H

#include "hal_rx.h"
#include "dp_tx.h"
#include "dp_peer.h"
#include "dp_internal.h"

#ifdef RXDMA_OPTIMIZATION
#ifdef NO_RX_PKT_HDR_TLV
#define RX_BUFFER_ALIGNMENT     0
#else
#define RX_BUFFER_ALIGNMENT     128
#endif /* NO_RX_PKT_HDR_TLV */
#else /* RXDMA_OPTIMIZATION */
#define RX_BUFFER_ALIGNMENT     4
#endif /* RXDMA_OPTIMIZATION */

#ifdef QCA_HOST2FW_RXBUF_RING
#define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW1_BM
/* RBM value used for re-injecting defragmented packets into REO */
#define DP_DEFRAG_RBM HAL_RX_BUF_RBM_SW3_BM
#else
#define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW3_BM
#define DP_DEFRAG_RBM DP_WBM2SW_RBM
#endif /* QCA_HOST2FW_RXBUF_RING */

#define RX_BUFFER_RESERVATION   0

#define DP_PEER_METADATA_PEER_ID_MASK	0x0000ffff
#define DP_PEER_METADATA_PEER_ID_SHIFT	0
#define DP_PEER_METADATA_VDEV_ID_MASK	0x003f0000
#define DP_PEER_METADATA_VDEV_ID_SHIFT	16

#define DP_PEER_METADATA_PEER_ID_GET(_peer_metadata)		\
	(((_peer_metadata) & DP_PEER_METADATA_PEER_ID_MASK)	\
			>> DP_PEER_METADATA_PEER_ID_SHIFT)

#define DP_PEER_METADATA_VDEV_ID_GET(_peer_metadata)		\
	(((_peer_metadata) & DP_PEER_METADATA_VDEV_ID_MASK)	\
			>> DP_PEER_METADATA_VDEV_ID_SHIFT)

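/*
 * Example (illustrative only; the metadata value below is made up):
 * decoding the peer and vdev fields from a 32-bit peer_metadata word
 * carried in the REO destination ring descriptor.
 *
 *	uint32_t peer_metadata = 0x0005002a;
 *	uint16_t peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_metadata);
 *	uint8_t vdev_id = DP_PEER_METADATA_VDEV_ID_GET(peer_metadata);
 *
 *	(peer_id == 0x002a, vdev_id == 0x05)
 */
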
#define DP_RX_DESC_MAGIC 0xdec0de

/**
 * struct dp_rx_desc
 *
 * @nbuf		: VA of the "skb" posted
 * @rx_buf_start	: VA of the original Rx buffer, before
 *			  movement of any skb->data pointer
 * @cookie		: index into the sw array which holds
 *			  the sw Rx descriptors
 *			  Cookie space is 21 bits:
 *			  lower 18 bits -- index
 *			  upper  3 bits -- pool_id
 * @pool_id		: pool ID for which this descriptor was allocated.
 *			  Can only be used if there is no flow
 *			  steering
 * @in_use		: rx_desc is in use
 * @unmapped		: used to mark rx_desc as unmapped if the
 *			  corresponding nbuf is already unmapped
 */
struct dp_rx_desc {
	qdf_nbuf_t nbuf;
	uint8_t *rx_buf_start;
	uint32_t cookie;
	uint8_t	 pool_id;
#ifdef RX_DESC_DEBUG_CHECK
	uint32_t magic;
#endif
	uint8_t	in_use:1,
	unmapped:1;
};

/* RX Descriptor Multi Page memory alloc related */
#define DP_RX_DESC_OFFSET_NUM_BITS 8
#define DP_RX_DESC_PAGE_ID_NUM_BITS 8
#define DP_RX_DESC_POOL_ID_NUM_BITS 4

#define DP_RX_DESC_PAGE_ID_SHIFT DP_RX_DESC_OFFSET_NUM_BITS
#define DP_RX_DESC_POOL_ID_SHIFT \
		(DP_RX_DESC_OFFSET_NUM_BITS + DP_RX_DESC_PAGE_ID_NUM_BITS)
#define RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK \
	(((1 << DP_RX_DESC_POOL_ID_NUM_BITS) - 1) << DP_RX_DESC_POOL_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK	\
			(((1 << DP_RX_DESC_PAGE_ID_NUM_BITS) - 1) << \
			 DP_RX_DESC_PAGE_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK \
			((1 << DP_RX_DESC_OFFSET_NUM_BITS) - 1)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(_cookie)		\
	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK) >>	\
			DP_RX_DESC_POOL_ID_SHIFT)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(_cookie)		\
	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK) >>	\
			DP_RX_DESC_PAGE_ID_SHIFT)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(_cookie)		\
	((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK)

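/*
 * Example (illustrative; the cookie value is made up): a multi-page
 * cookie packs [pool_id:4][page_id:8][offset:8], MSB to LSB.
 *
 *	uint32_t cookie = 0x00021a07;
 *	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
 *	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
 *	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
 *
 *	(pool_id == 0x2, page_id == 0x1a, offset == 0x07)
 */
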
#define RX_DESC_COOKIE_INDEX_SHIFT		0
#define RX_DESC_COOKIE_INDEX_MASK		0x3ffff /* 18 bits */
#define RX_DESC_COOKIE_POOL_ID_SHIFT		18
#define RX_DESC_COOKIE_POOL_ID_MASK		0x1c0000

#define DP_RX_DESC_COOKIE_MAX	\
	(RX_DESC_COOKIE_INDEX_MASK | RX_DESC_COOKIE_POOL_ID_MASK)

#define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >>	\
			RX_DESC_COOKIE_POOL_ID_SHIFT)

#define DP_RX_DESC_COOKIE_INDEX_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >>	\
			RX_DESC_COOKIE_INDEX_SHIFT)

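/*
 * Example (illustrative; the cookie value is made up): the legacy
 * single-allocation cookie packs [pool_id:3][index:18].
 *
 *	uint32_t cookie = (1 << RX_DESC_COOKIE_POOL_ID_SHIFT) | 0x2a;
 *	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie); // 1
 *	uint32_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);    // 0x2a
 */
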
/* DOC: Offset to obtain LLC hdr
 *
 * In the case of a WiFi parse error,
 * to reach the LLC header from the beginning
 * of the VLAN tag we need to skip 8 bytes:
 * VLAN tag (4) + length (2) + length added
 * by HW (2) = 8 bytes.
 */
#define DP_SKIP_VLAN		8

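/*
 * Example (illustrative; vlan_start is a hypothetical pointer at the
 * start of the VLAN tag): the LLC header is reached by skipping
 * DP_SKIP_VLAN bytes.
 *
 *	uint8_t *llc_hdr = vlan_start + DP_SKIP_VLAN;
 */
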
/**
 * struct dp_rx_cached_buf - rx cached buffer
 * @node: linked list node
 * @buf: skb buffer
 */
struct dp_rx_cached_buf {
	qdf_list_node_t node;
	qdf_nbuf_t buf;
};

/*
 * dp_rx_xor_block() - XOR a block of data
 * @b: destination data block
 * @a: source data block
 * @len: length of the data to process
 *
 * Returns: None
 */
static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len)
{
	qdf_size_t i;

	for (i = 0; i < len; i++)
		b[i] ^= a[i];
}

/*
 * dp_rx_rotl() - rotate the bits left
 * @val: unsigned integer input value
 * @bits: number of bits
 *
 * Returns: Integer rotated left by 'bits'
 */
static inline uint32_t dp_rx_rotl(uint32_t val, int bits)
{
	return (val << bits) | (val >> (32 - bits));
}

/*
 * dp_rx_rotr() - rotate the bits right
 * @val: unsigned integer input value
 * @bits: number of bits
 *
 * Returns: Integer rotated right by 'bits'
 */
static inline uint32_t dp_rx_rotr(uint32_t val, int bits)
{
	return (val >> bits) | (val << (32 - bits));
}

/*
 * dp_set_rx_queue() - set queue_mapping in skb
 * @nbuf: skb
 * @queue_id: rx queue_id
 *
 * Return: void
 */
#ifdef QCA_OL_RX_MULTIQ_SUPPORT
static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
{
	qdf_nbuf_record_rx_queue(nbuf, queue_id);
}
#else
static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
{
}
#endif

/*
 * dp_rx_xswap() - swap the adjacent bytes in each 16-bit half
 * @val: unsigned integer input value
 *
 * Returns: Integer with bytes swapped
 */
static inline uint32_t dp_rx_xswap(uint32_t val)
{
	return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
}

/*
 * dp_rx_get_le32_split() - assemble 32 bits from little endian bytes
 * @b0: byte 0
 * @b1: byte 1
 * @b2: byte 2
 * @b3: byte 3
 *
 * Returns: Integer assembled from the four bytes, little endian
 */
static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2,
					uint8_t b3)
{
	return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
}

/*
 * dp_rx_get_le32() - get little endian 32 bits
 * @p: source byte array
 *
 * Returns: Integer assembled from p[0]..p[3], little endian
 */
static inline uint32_t dp_rx_get_le32(const uint8_t *p)
{
	return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]);
}

/*
 * dp_rx_put_le32() - put little endian 32 bits
 * @p: destination char array
 * @v: source 32-bit integer
 *
 * Returns: None
 */
static inline void dp_rx_put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (v) & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

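/*
 * Example (illustrative): dp_rx_put_le32()/dp_rx_get_le32() round-trip,
 * e.g. when (de)serializing the Michael key or MIC from a byte stream.
 *
 *	uint8_t buf[4];
 *
 *	dp_rx_put_le32(buf, 0x12345678);  // buf = {0x78, 0x56, 0x34, 0x12}
 *	uint32_t v = dp_rx_get_le32(buf); // v == 0x12345678
 */
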
/* Compute one Michael MIC block round */
#define dp_rx_michael_block(l, r)	\
	do {					\
		r ^= dp_rx_rotl(l, 17);	\
		l += r;				\
		r ^= dp_rx_xswap(l);		\
		l += r;				\
		r ^= dp_rx_rotl(l, 3);	\
		l += r;				\
		r ^= dp_rx_rotr(l, 2);	\
		l += r;				\
	} while (0)

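/*
 * Example (illustrative sketch of how the block round is driven; 'key'
 * and 'data' are hypothetical byte pointers, and the full MIC
 * computation lives in the defrag path, not here): the Michael state
 * (l, r) is seeded from the 64-bit key, and one round is applied per
 * 32-bit word of the message.
 *
 *	uint32_t l = dp_rx_get_le32(key);     // key[0..3]
 *	uint32_t r = dp_rx_get_le32(key + 4); // key[4..7]
 *
 *	l ^= dp_rx_get_le32(data);            // mix in one message word
 *	dp_rx_michael_block(l, r);
 */
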
/**
 * union dp_rx_desc_list_elem_t
 *
 * @next		: Next pointer to form free list
 * @rx_desc		: DP Rx descriptor
 */
union dp_rx_desc_list_elem_t {
	union dp_rx_desc_list_elem_t *next;
	struct dp_rx_desc rx_desc;
};

#ifdef RX_DESC_MULTI_PAGE_ALLOC
/**
 * dp_rx_desc_find() - find dp rx descriptor from page ID and offset
 * @page_id: Page ID
 * @offset: Offset of the descriptor element
 * @rx_pool: RX descriptor pool to search in
 *
 * Return: RX descriptor element
 */
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_pool);

static inline
struct dp_rx_desc *dp_get_rx_desc_from_cookie(struct dp_soc *soc,
					      struct rx_desc_pool *pool,
					      uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
	struct rx_desc_pool *rx_desc_pool;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
		return NULL;

	rx_desc_pool = &pool[pool_id];
	rx_desc_elem = (union dp_rx_desc_list_elem_t *)
		(rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		rx_desc_pool->elem_size * offset);

	return &rx_desc_elem->rx_desc;
}

/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc,
					       uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_buf[0], cookie);
}

/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc,
					     uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_mon[0], cookie);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor status ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc,
						uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_status[0], cookie);
}
#else
/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	struct rx_desc_pool *rx_desc_pool;

	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
		return NULL;

	rx_desc_pool = &soc->rx_desc_buf[pool_id];

	if (qdf_unlikely(index >= rx_desc_pool->pool_size))
		return NULL;

	return &(soc->rx_desc_buf[pool_id].array[index].rx_desc);
}

/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_mon[pool_id].array[index].rx_desc);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor status ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_status[pool_id].array[index].rx_desc);
}
#endif /* RX_DESC_MULTI_PAGE_ALLOC */

void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				union dp_rx_desc_list_elem_t **local_desc_list,
				union dp_rx_desc_list_elem_t **tail,
				uint16_t pool_id,
				struct rx_desc_pool *rx_desc_pool);

uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				struct rx_desc_pool *rx_desc_pool,
				uint16_t num_descs,
				union dp_rx_desc_list_elem_t **desc_list,
				union dp_rx_desc_list_elem_t **tail);

QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);

void dp_rx_pdev_detach(struct dp_pdev *pdev);

void dp_print_napi_stats(struct dp_soc *soc);

/**
 * dp_rx_vdev_detach() - detach vdev from dp rx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev);

uint32_t
dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
	      uint8_t reo_ring_num,
	      uint32_t quota);

/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be
 *		  serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
			   hal_ring_handle_t hal_ring_hdl, uint32_t quota);

/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be
 *		  serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to WBM2HOST sw release ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl, uint32_t quota);

/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *		     multiple nbufs.
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 * This function implements the creation of RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: returns the head nbuf which contains complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr);

/*
 * dp_rx_desc_pool_alloc() - create a pool of software rx_descs
 *			     at the time of dp rx initialization
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: number of Rx descriptors in the pool
 * @pool: rx descriptor pool pointer
 *
 * Return: QDF status
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id,
				 uint32_t pool_size, struct rx_desc_pool *pool);

/*
 * dp_rx_desc_nbuf_and_pool_free() - free the sw rx desc pool called during
 *				     de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool);

/*
 * dp_rx_desc_nbuf_free() - free the sw rx desc nbufs called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);

/*
 * dp_rx_desc_pool_free() - free the sw rx desc array called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);

void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
				struct dp_peer *peer);

/**
 * dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list
 *
 * @head: pointer to the head of local free list
 * @tail: pointer to the tail of local free list
 * @new: new descriptor that is added to the free list
 *
 * Return: void
 */
static inline
void dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head,
				 union dp_rx_desc_list_elem_t **tail,
				 struct dp_rx_desc *new)
{
	qdf_assert(head && new);

	new->nbuf = NULL;
	new->in_use = 0;

	((union dp_rx_desc_list_elem_t *)new)->next = *head;
	*head = (union dp_rx_desc_list_elem_t *)new;
	if (!*tail)
		*tail = *head;
}

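/*
 * Example (illustrative; rx_desc_a/rx_desc_b are hypothetical): the
 * head/tail pair starts out NULL and is threaded through repeated
 * calls, building a local free list that is later handed back in one
 * shot (e.g. to dp_rx_add_desc_list_to_free_list).
 *
 *	union dp_rx_desc_list_elem_t *head = NULL, *tail = NULL;
 *
 *	dp_rx_add_to_free_desc_list(&head, &tail, rx_desc_a);
 *	dp_rx_add_to_free_desc_list(&head, &tail, rx_desc_b);
 *	// head -> rx_desc_b -> rx_desc_a, tail -> rx_desc_a
 */
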
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
				   uint8_t mac_id);
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
		qdf_nbuf_t mpdu, bool mpdu_done, uint8_t mac_id);
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer);
void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		       uint16_t peer_id, uint8_t tid);

#define DP_RX_LIST_APPEND(head, tail, elem) \
	do {                                                          \
		if (!(head)) {                                        \
			(head) = (elem);                              \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) = 1;\
		} else {                                              \
			qdf_nbuf_set_next((tail), (elem));            \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head)++;  \
		}                                                     \
		(tail) = (elem);                                      \
		qdf_nbuf_set_next((tail), NULL);                      \
	} while (0)

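/*
 * Example (illustrative): accumulating nbufs into a delivery list while
 * reaping a ring, before handing the whole chain to the stack.
 *
 *	qdf_nbuf_t deliver_list_head = NULL;
 *	qdf_nbuf_t deliver_list_tail = NULL;
 *
 *	DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
 *	// the head nbuf's cb tracks the element count; tail->next stays NULL
 */
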
/* For qcn9000 emulation the PCIe is a complete PHY with no address
 * restrictions
 */
#if !defined(BUILD_X86) || defined(QCA_WIFI_QCN9000)
static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
		qdf_dma_addr_t *paddr, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
#else
#define MAX_RETRY 100
static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
		qdf_dma_addr_t *paddr, struct dp_pdev *pdev)
{
	uint32_t nbuf_retry = 0;
	int32_t ret;
	const uint32_t x86_phy_addr = 0x50000000;
	/*
	 * in M2M emulation platforms (x86) the memory below 0x50000000
	 * is reserved for target use, so any memory allocated in this
	 * region should not be used by host
	 */
	do {
		if (qdf_likely(*paddr > x86_phy_addr))
			return QDF_STATUS_SUCCESS;
		else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
					"phy addr %pK is below 0x50000000, trying again",
					paddr);

			nbuf_retry++;
			if ((*rx_netbuf)) {
				qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
						QDF_DMA_FROM_DEVICE);
				/* Not freeing buffer intentionally.
				 * Observed that the same buffer gets
				 * re-allocated, resulting in a longer load
				 * time and a WMI init timeout.
				 * This buffer is anyway not useful, so skip
				 * it.
				 */
			}

			*rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
							RX_BUFFER_SIZE,
							RX_BUFFER_RESERVATION,
							RX_BUFFER_ALIGNMENT,
							FALSE);

			if (qdf_unlikely(!(*rx_netbuf)))
				return QDF_STATUS_E_FAILURE;

			ret = qdf_nbuf_map_single(dp_soc->osdev, *rx_netbuf,
							QDF_DMA_FROM_DEVICE);

			if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
				qdf_nbuf_free(*rx_netbuf);
				*rx_netbuf = NULL;
				continue;
			}

			*paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0);
		}
	} while (nbuf_retry < MAX_RETRY);

	if ((*rx_netbuf)) {
		qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
					QDF_DMA_FROM_DEVICE);
		qdf_nbuf_free(*rx_netbuf);
	}

	return QDF_STATUS_E_FAILURE;
}
#endif

/**
 * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
 *				   the MSDU Link Descriptor
 * @soc: core txrx main context
 * @buf_info: buf_info includes the cookie that is used to look up the
 * virtual address of the link descriptor. Normally this is just an index
 * into a per SOC array.
 *
 * This is the VA of the link descriptor, that HAL layer later uses to
 * retrieve the list of MSDU's for a given MPDU.
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
				  struct hal_buf_info *buf_info)
{
	void *link_desc_va;
	uint32_t bank_id = LINK_DESC_COOKIE_BANK_ID(buf_info->sw_cookie);

	/* TODO */
	/* Add sanity for cookie */

	link_desc_va = soc->link_desc_banks[bank_id].base_vaddr +
		(buf_info->paddr -
			soc->link_desc_banks[bank_id].base_paddr);

	return link_desc_va;
}

/**
 * dp_rx_cookie_2_mon_link_desc_va() - Converts cookie to a virtual address of
 *				   the MSDU Link Descriptor
 * @pdev: core txrx pdev context
 * @buf_info: buf_info includes the cookie that is used to look up the
 * virtual address of the link descriptor. Normally this is just an index
 * into a per pdev array.
 * @mac_id: mac id of the ring the descriptor came from
 *
 * This is the VA of the link descriptor in monitor mode destination ring,
 * that HAL layer later uses to retrieve the list of MSDU's for a given MPDU.
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_mon_link_desc_va(struct dp_pdev *pdev,
				  struct hal_buf_info *buf_info,
				  int mac_id)
{
	void *link_desc_va;
	int mac_for_pdev = dp_get_mac_id_for_mac(pdev->soc, mac_id);

	/* TODO */
	/* Add sanity for cookie */

	link_desc_va =
	   pdev->link_desc_banks[mac_for_pdev][buf_info->sw_cookie].base_vaddr +
	   (buf_info->paddr -
	   pdev->link_desc_banks[mac_for_pdev][buf_info->sw_cookie].base_paddr);

	return link_desc_va;
}

/**
 * dp_rx_defrag_concat() - Concatenate the fragments
 *
 * @dst: destination pointer to the buffer
 * @src: source pointer from where the fragment payload is to be copied
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src)
{
	/*
	 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst
	 * to provide space for src, the headroom portion is copied from
	 * the original dst buffer to the larger new dst buffer.
	 * (This is needed, because the headroom of the dst buffer
	 * contains the rx desc.)
	 */
	if (!qdf_nbuf_cat(dst, src)) {
		/*
		 * qdf_nbuf_cat does not free the src memory.
		 * Free src nbuf before returning.
		 * In the failure case the caller takes care of freeing
		 * the nbuf.
		 */
		qdf_nbuf_free(src);
		return QDF_STATUS_SUCCESS;
	}

	return QDF_STATUS_E_DEFRAG_ERROR;
}

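/*
 * Example (illustrative; head_frag/cur_frag are hypothetical): gluing
 * the payload of each subsequent fragment onto the head fragment while
 * rebuilding a fragmented MPDU.
 *
 *	if (dp_rx_defrag_concat(head_frag, cur_frag) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_DEFRAG_ERROR; // caller frees cur_frag
 */
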
#ifndef FEATURE_WDS
static inline QDF_STATUS
dp_rx_ast_set_active(struct dp_soc *soc, uint16_t sa_idx, bool is_active)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_rx_wds_srcport_learn(struct dp_soc *soc,
			uint8_t *rx_tlv_hdr,
			struct dp_peer *ta_peer,
			qdf_nbuf_t nbuf)
{
}
#endif

/*
 * dp_rx_desc_dump() - dump the sw rx descriptor
 *
 * @rx_desc: sw rx descriptor
 */
static inline void dp_rx_desc_dump(struct dp_rx_desc *rx_desc)
{
	dp_info("rx_desc->nbuf: %pK, rx_desc->cookie: %d, rx_desc->pool_id: %d, rx_desc->in_use: %d, rx_desc->unmapped: %d",
		rx_desc->nbuf, rx_desc->cookie, rx_desc->pool_id,
		rx_desc->in_use, rx_desc->unmapped);
}

/*
 * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet.
 *					In qwrap mode, packets originating
 *					from any vdev should not loop back
 *					and should be dropped.
 * @vdev: vdev on which rx packet is received
 * @nbuf: rx pkt
 *
 * Return: true if the packet is a loopback packet and should be dropped
 */
#if ATH_SUPPORT_WRAP
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						qdf_nbuf_t nbuf)
{
	struct dp_vdev *psta_vdev;
	struct dp_pdev *pdev = vdev->pdev;
	uint8_t *data = qdf_nbuf_data(nbuf);

	if (qdf_unlikely(vdev->proxysta_vdev)) {
		/* In qwrap isolation mode, allow loopback packets as all
		 * packets go to RootAP and Loopback on the mpsta.
		 */
		if (vdev->isolation_vdev)
			return false;
		TAILQ_FOREACH(psta_vdev, &pdev->vdev_list, vdev_list_elem) {
			if (qdf_unlikely(psta_vdev->proxysta_vdev &&
					 !qdf_mem_cmp(psta_vdev->mac_addr.raw,
						      &data[QDF_MAC_ADDR_SIZE],
						      QDF_MAC_ADDR_SIZE))) {
				/* Drop packet if source address is equal to
				 * any of the vdev addresses.
				 */
				return true;
			}
		}
	}
	return false;
}
#else
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						qdf_nbuf_t nbuf)
{
	return false;
}
#endif

#if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\
	defined(WLAN_SUPPORT_RX_TAG_STATISTICS) ||\
	defined(WLAN_SUPPORT_RX_FLOW_TAG)
#include "dp_rx_tag.h"
#endif

#ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
/**
 * dp_rx_update_protocol_tag() - Reads CCE metadata from the RX MSDU end TLV
 *                              and set the corresponding tag in QDF packet
 * @soc: core txrx main context
 * @vdev: vdev on which the packet is received
 * @nbuf: QDF pkt buffer on which the protocol tag should be set
 * @rx_tlv_hdr: base address where the RX TLVs start
 * @ring_index: REO ring number, not used for error & monitor ring
 * @is_reo_exception: flag to indicate if rx from REO ring or exception ring
 * @is_update_stats: flag to indicate whether to update stats or not
 *
 * Return: void
 */
static inline void
dp_rx_update_protocol_tag(struct dp_soc *soc, struct dp_vdev *vdev,
			  qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
			  uint16_t ring_index,
			  bool is_reo_exception, bool is_update_stats)
{
}
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */

#ifndef WLAN_SUPPORT_RX_FLOW_TAG
/**
 * dp_rx_update_flow_tag() - Reads FSE metadata from the RX MSDU end TLV
 *                           and set the corresponding tag in QDF packet
 * @soc: core txrx main context
 * @vdev: vdev on which the packet is received
 * @nbuf: QDF pkt buffer on which the flow tag should be set
 * @rx_tlv_hdr: base address where the RX TLVs start
 * @update_stats: flag to indicate whether to update stats or not
 *
 * Return: void
 */
static inline void
dp_rx_update_flow_tag(struct dp_soc *soc, struct dp_vdev *vdev,
		      qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, bool update_stats)
{
}
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */

#if !defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) &&\
	!defined(WLAN_SUPPORT_RX_FLOW_TAG)
/**
 * dp_rx_mon_update_protocol_flow_tag() - Performs necessary checks for monitor
 *                                       mode and then tags appropriate packets
 * @soc: core txrx main context
 * @dp_pdev: pdev on which the packet is received
 * @msdu: QDF packet buffer on which the protocol tag should be set
 * @rx_desc: base address where the RX TLVs start
 *
 * Return: void
 */
static inline
void dp_rx_mon_update_protocol_flow_tag(struct dp_soc *soc,
					struct dp_pdev *dp_pdev,
					qdf_nbuf_t msdu, void *rx_desc)
{
}
#endif /* !WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG && !WLAN_SUPPORT_RX_FLOW_TAG */

/*
 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *			       called during dp rx initialization
 *			       and at the end of dp_rx_process.
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 *
 * Return: return success or failure
 */
QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				 struct dp_srng *dp_rxdma_srng,
				 struct rx_desc_pool *rx_desc_pool,
				 uint32_t num_req_buffers,
				 union dp_rx_desc_list_elem_t **desc_list,
				 union dp_rx_desc_list_elem_t **tail);

/*
 * dp_pdev_rx_buffers_attach() - replenish rxdma ring with rx nbufs
 *                               called during dp rx initialization
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 *
 * Return: return success or failure
 */
QDF_STATUS
dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
			  struct dp_srng *dp_rxdma_srng,
			  struct rx_desc_pool *rx_desc_pool,
			  uint32_t num_req_buffers);

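/*
 * Example (illustrative sketch; rxdma_srng, head, tail and rx_bufs_used
 * are hypothetical locals of a reap loop): consumed descriptors are
 * collected on a local free list and replenished in one call at the end
 * of processing.
 *
 *	if (rx_bufs_used)
 *		dp_rx_buffers_replenish(soc, mac_id, rxdma_srng,
 *					rx_desc_pool, rx_bufs_used,
 *					&head, &tail);
 */
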
/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor that
 *	       carries the buffer_addr_info
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		       uint8_t bm_action);

/**
 * dp_rx_link_desc_return_by_addr - Return a MPDU link descriptor to
 *					(WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
			       hal_buff_addrinfo_t link_desc_addr,
			       uint8_t bm_action);

/**
 * dp_rxdma_err_process() - RxDMA error processing functionality
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Return: num of buffers processed
 */
uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		     uint32_t mac_id, uint32_t quota);

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				uint8_t *rx_tlv_hdr, struct dp_peer *peer);
QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					uint8_t *rx_tlv_hdr);

int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
			   struct dp_peer *peer);

qdf_nbuf_t
dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev);

/*
 * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
 *
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
 * @ring_desc: opaque pointer to the RX ring descriptor
 * @rx_desc: host rx descriptor
 *
 * Return: void
 */
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
				hal_ring_handle_t hal_ring_hdl,
				hal_ring_desc_t ring_desc,
				struct dp_rx_desc *rx_desc);

void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
#ifdef RX_DESC_DEBUG_CHECK
/**
 * dp_rx_desc_check_magic() - check the magic value in dp_rx_desc
 * @rx_desc: rx descriptor pointer
 *
 * Return: true, if magic is correct, else false.
 */
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
		return false;

	rx_desc->magic = 0;
	return true;
}

/**
 * dp_rx_desc_prep() - prepare rx desc
 * @rx_desc: rx descriptor pointer to be prepared
 * @nbuf: nbuf to be associated with rx_desc
 *
 * Note: assumption is that we are associating a nbuf which is mapped
 *
 * Return: none
 */
static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->magic = DP_RX_DESC_MAGIC;
	rx_desc->nbuf = nbuf;
	rx_desc->unmapped = 0;
}

#else

static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	return true;
}

static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->nbuf = nbuf;
	rx_desc->unmapped = 0;
}
#endif /* RX_DESC_DEBUG_CHECK */

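/*
 * Example (illustrative sketch of how the two helpers pair up in a
 * replenish/reap cycle; the surrounding variables are hypothetical):
 *
 *	dp_rx_desc_prep(rx_desc, nbuf);         // when posting the buffer
 *	...
 *	if (!dp_rx_desc_check_magic(rx_desc))   // when reaping it back
 *		dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
 *					   ring_desc, rx_desc);
 */
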
void dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer,
			     uint8_t err_code, uint8_t mac_id);

#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif

#ifndef QCA_MULTIPASS_SUPPORT
static inline
bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf, uint8_t tid)
{
	return false;
}
#else
bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf,
			     uint8_t tid);
#endif

#endif /* _DP_RX_H */