/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_RX_H
#define _DP_RX_H

#include "hal_rx.h"
#include "dp_tx.h"
#include "dp_peer.h"
#include "dp_internal.h"

#ifdef RXDMA_OPTIMIZATION
#ifdef NO_RX_PKT_HDR_TLV
#define RX_BUFFER_ALIGNMENT     0
#else
#define RX_BUFFER_ALIGNMENT     128
#endif /* NO_RX_PKT_HDR_TLV */
#else /* RXDMA_OPTIMIZATION */
#define RX_BUFFER_ALIGNMENT     4
#endif /* RXDMA_OPTIMIZATION */

#ifdef QCA_HOST2FW_RXBUF_RING
#define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW1_BM
#else
#define DP_WBM2SW_RBM HAL_RX_BUF_RBM_SW3_BM
#endif /* QCA_HOST2FW_RXBUF_RING */

#define RX_BUFFER_RESERVATION   0

#define DP_PEER_METADATA_PEER_ID_MASK	0x0000ffff
#define DP_PEER_METADATA_PEER_ID_SHIFT	0
#define DP_PEER_METADATA_VDEV_ID_MASK	0x00070000
#define DP_PEER_METADATA_VDEV_ID_SHIFT	16

#define DP_PEER_METADATA_PEER_ID_GET(_peer_metadata)		\
	(((_peer_metadata) & DP_PEER_METADATA_PEER_ID_MASK)	\
			>> DP_PEER_METADATA_PEER_ID_SHIFT)

#define DP_PEER_METADATA_ID_GET(_peer_metadata)			\
	(((_peer_metadata) & DP_PEER_METADATA_VDEV_ID_MASK)	\
			>> DP_PEER_METADATA_VDEV_ID_SHIFT)

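/*
 * Example (illustrative): decoding the peer metadata word carried in the
 * REO destination ring descriptor. Where 'peer_metadata' comes from is
 * assumed here; in the rx path it is read from the ring/TLVs by the HAL
 * layer, and 'soc' is the usual DP soc handle.
 *
 *	uint16_t peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_metadata);
 *	uint8_t vdev_id = DP_PEER_METADATA_ID_GET(peer_metadata);
 *	struct dp_peer *peer = dp_peer_find_by_id(soc, peer_id);
 */
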
#define DP_RX_DESC_MAGIC 0xdec0de

/**
 * struct dp_rx_desc
 *
 * @nbuf		: VA of the "skb" posted
 * @rx_buf_start	: VA of the original Rx buffer, before
 *			  movement of any skb->data pointer
 * @cookie		: index into the sw array which holds
 *			  the sw Rx descriptors
 *			  Cookie space is 21 bits:
 *			  lower 18 bits -- index
 *			  upper  3 bits -- pool_id
 * @pool_id		: pool ID from which this descriptor was allocated.
 *			  Can only be used if there is no flow
 *			  steering
 * @magic		: magic value for sanity checks
 *			  (only with RX_DESC_DEBUG_CHECK)
 * @in_use		: rx_desc is in use
 * @unmapped		: used to mark rx_desc as unmapped when the
 *			  corresponding nbuf has already been unmapped
 */
struct dp_rx_desc {
	qdf_nbuf_t nbuf;
	uint8_t *rx_buf_start;
	uint32_t cookie;
	uint8_t	 pool_id;
#ifdef RX_DESC_DEBUG_CHECK
	uint32_t magic;
#endif
	uint8_t	in_use:1,
	unmapped:1;
};

/* RX Descriptor Multi Page memory alloc related */
#define DP_RX_DESC_OFFSET_NUM_BITS 8
#define DP_RX_DESC_PAGE_ID_NUM_BITS 8
#define DP_RX_DESC_POOL_ID_NUM_BITS 4

#define DP_RX_DESC_PAGE_ID_SHIFT DP_RX_DESC_OFFSET_NUM_BITS
#define DP_RX_DESC_POOL_ID_SHIFT \
		(DP_RX_DESC_OFFSET_NUM_BITS + DP_RX_DESC_PAGE_ID_NUM_BITS)
#define RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK \
	(((1 << DP_RX_DESC_POOL_ID_NUM_BITS) - 1) << DP_RX_DESC_POOL_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK	\
			(((1 << DP_RX_DESC_PAGE_ID_NUM_BITS) - 1) << \
			 DP_RX_DESC_PAGE_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK \
			((1 << DP_RX_DESC_OFFSET_NUM_BITS) - 1)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(_cookie)		\
	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK) >>	\
			DP_RX_DESC_POOL_ID_SHIFT)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(_cookie)		\
	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK) >>	\
			DP_RX_DESC_PAGE_ID_SHIFT)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(_cookie)		\
	((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK)

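/*
 * Example (illustrative): a multi-page cookie packs pool_id, page_id and
 * offset into one 32-bit word. A sketch of composing and decomposing such
 * a cookie (variable names are hypothetical):
 *
 *	uint32_t cookie = (pool_id << DP_RX_DESC_POOL_ID_SHIFT) |
 *			  (page_id << DP_RX_DESC_PAGE_ID_SHIFT) |
 *			  offset;
 *	...
 *	pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
 *	page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
 *	offset  = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
 */
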
#define RX_DESC_COOKIE_INDEX_SHIFT		0
#define RX_DESC_COOKIE_INDEX_MASK		0x3ffff /* 18 bits */
#define RX_DESC_COOKIE_POOL_ID_SHIFT		18
#define RX_DESC_COOKIE_POOL_ID_MASK		0x1c0000

#define DP_RX_DESC_COOKIE_MAX	\
	(RX_DESC_COOKIE_INDEX_MASK | RX_DESC_COOKIE_POOL_ID_MASK)

#define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >>	\
			RX_DESC_COOKIE_POOL_ID_SHIFT)

#define DP_RX_DESC_COOKIE_INDEX_GET(_cookie)		\
	(((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >>	\
			RX_DESC_COOKIE_INDEX_SHIFT)

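/*
 * Example (illustrative): the legacy (single allocation) cookie keeps the
 * pool_id in bits 20:18 and the descriptor index in bits 17:0, so e.g.
 * cookie 0x40005 decodes to pool_id 1, index 5:
 *
 *	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(0x40005); // 1
 *	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(0x40005);    // 5
 */
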
/* DOC: Offset to obtain LLC hdr
 *
 * In the case of a Wi-Fi parse error, to reach the LLC header from the
 * beginning of the VLAN tag we need to skip 8 bytes:
 * Vlan_tag(4) + length(2) + length added by HW(2) = 8 bytes.
 */
#define DP_SKIP_VLAN		8

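/*
 * Example (illustrative): a hypothetical sketch of using the offset above;
 * the real handling lives in the rx error path. The LLC header of a VLAN
 * tagged frame is reached by pulling DP_SKIP_VLAN bytes:
 *
 *	qdf_nbuf_pull_head(nbuf, DP_SKIP_VLAN);
 */
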
/**
 * struct dp_rx_cached_buf - rx cached buffer
 * @node: linked list node
 * @buf: skb buffer
 */
struct dp_rx_cached_buf {
	qdf_list_node_t node;
	qdf_nbuf_t buf;
};

/*
 * dp_rx_xor_block() - xor block of data
 * @b: destination data block
 * @a: source data block
 * @len: length of the data to process
 *
 * Returns: None
 */
static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len)
{
	qdf_size_t i;

	for (i = 0; i < len; i++)
		b[i] ^= a[i];
}

/*
 * dp_rx_rotl() - rotate the bits left
 * @val: unsigned integer input value
 * @bits: number of bits
 *
 * Returns: Input value rotated left by 'bits'
 */
static inline uint32_t dp_rx_rotl(uint32_t val, int bits)
{
	return (val << bits) | (val >> (32 - bits));
}

/*
 * dp_rx_rotr() - rotate the bits right
 * @val: unsigned integer input value
 * @bits: number of bits
 *
 * Returns: Input value rotated right by 'bits'
 */
static inline uint32_t dp_rx_rotr(uint32_t val, int bits)
{
	return (val >> bits) | (val << (32 - bits));
}

/*
 * dp_set_rx_queue() - set queue_mapping in skb
 * @nbuf: skb
 * @queue_id: rx queue_id
 *
 * Return: void
 */
#ifdef QCA_OL_RX_MULTIQ_SUPPORT
static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
{
	qdf_nbuf_record_rx_queue(nbuf, queue_id);
}
#else
static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
{
}
#endif

/*
 * dp_rx_xswap() - swap the two bytes within each 16-bit half of a word
 * @val: unsigned integer input value
 *
 * Returns: Input with adjacent bytes swapped
 */
static inline uint32_t dp_rx_xswap(uint32_t val)
{
	return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
}

/*
 * dp_rx_get_le32_split() - assemble 32 bits from four little-endian bytes
 * @b0: byte 0
 * @b1: byte 1
 * @b2: byte 2
 * @b3: byte 3
 *
 * Returns: 32-bit value assembled in little-endian order
 */
static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2,
					uint8_t b3)
{
	return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
}

/*
 * dp_rx_get_le32() - get little endian 32 bits
 * @p: source byte array
 *
 * Returns: 32-bit value read from 'p' in little-endian order
 */
static inline uint32_t dp_rx_get_le32(const uint8_t *p)
{
	return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]);
}

/*
 * dp_rx_put_le32() - put little endian 32 bits
 * @p: destination char array
 * @v: source 32-bit integer
 *
 * Returns: None
 */
static inline void dp_rx_put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (v) & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

/* Extract Michael MIC block of data */
#define dp_rx_michael_block(l, r)	\
	do {					\
		r ^= dp_rx_rotl(l, 17);	\
		l += r;				\
		r ^= dp_rx_xswap(l);		\
		l += r;				\
		r ^= dp_rx_rotl(l, 3);	\
		l += r;				\
		r ^= dp_rx_rotr(l, 2);	\
		l += r;				\
	} while (0)

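/*
 * Example (illustrative): the helpers above combine into the Michael MIC
 * inner loop roughly as follows. This is a sketch of the classic algorithm
 * over already-padded data, not the driver's actual defrag implementation;
 * 'key', 'data', 'data_len' and 'mic' are hypothetical:
 *
 *	uint32_t l = dp_rx_get_le32(key);      // K0
 *	uint32_t r = dp_rx_get_le32(key + 4);  // K1
 *
 *	for (i = 0; i < data_len; i += 4) {
 *		l ^= dp_rx_get_le32(data + i); // next 32-bit LE word
 *		dp_rx_michael_block(l, r);
 *	}
 *	dp_rx_put_le32(mic, l);
 *	dp_rx_put_le32(mic + 4, r);
 */
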
/**
 * union dp_rx_desc_list_elem_t
 *
 * @next		: Next pointer to form free list
 * @rx_desc		: DP Rx descriptor
 */
union dp_rx_desc_list_elem_t {
	union dp_rx_desc_list_elem_t *next;
	struct dp_rx_desc rx_desc;
};

#ifdef RX_DESC_MULTI_PAGE_ALLOC
/**
 * dp_rx_desc_find() - find dp rx descriptor from page ID and offset
 * @page_id: Page ID
 * @offset: Offset of the descriptor element
 * @rx_pool: rx descriptor pool in which to look up the element
 *
 * Return: RX descriptor element
 */
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_pool);

static inline
struct dp_rx_desc *dp_get_rx_desc_from_cookie(struct dp_soc *soc,
					      struct rx_desc_pool *pool,
					      uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
	struct rx_desc_pool *rx_desc_pool;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
		return NULL;

	rx_desc_pool = &pool[pool_id];
	rx_desc_elem = (union dp_rx_desc_list_elem_t *)
		(rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		rx_desc_pool->elem_size * offset);

	return &rx_desc_elem->rx_desc;
}

/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc,
					       uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_buf[0], cookie);
}

/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc,
					     uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_mon[0], cookie);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor status ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc,
						uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_status[0], cookie);
}
#else
/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	struct rx_desc_pool *rx_desc_pool;

	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
		return NULL;

	rx_desc_pool = &soc->rx_desc_buf[pool_id];

	if (qdf_unlikely(index >= rx_desc_pool->pool_size))
		return NULL;

	return &(soc->rx_desc_buf[pool_id].array[index].rx_desc);
}

/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_mon[pool_id].array[index].rx_desc);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *			 the Rx descriptor on monitor status ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	/* TODO */
	/* Add sanity for pool_id & index */
	return &(soc->rx_desc_status[pool_id].array[index].rx_desc);
}
#endif /* RX_DESC_MULTI_PAGE_ALLOC */

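/*
 * Example (illustrative): typical reap-side usage of the converters above.
 * The cookie is assumed to have been extracted from the ring descriptor by
 * the HAL layer; the HAL_RX_BUF_COOKIE_GET helper name is an assumption:
 *
 *	uint32_t rx_buf_cookie = HAL_RX_BUF_COOKIE_GET(ring_desc);
 *	struct dp_rx_desc *rx_desc =
 *		dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
 *
 *	if (qdf_unlikely(!rx_desc))
 *		// bad cookie: dump ring state and assert
 *		dp_rx_dump_info_and_assert(soc, hal_ring_hdl, ring_desc,
 *					   rx_desc);
 */
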
void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				union dp_rx_desc_list_elem_t **local_desc_list,
				union dp_rx_desc_list_elem_t **tail,
				uint16_t pool_id,
				struct rx_desc_pool *rx_desc_pool);

uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				struct rx_desc_pool *rx_desc_pool,
				uint16_t num_descs,
				union dp_rx_desc_list_elem_t **desc_list,
				union dp_rx_desc_list_elem_t **tail);

QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);

void dp_rx_pdev_detach(struct dp_pdev *pdev);

void dp_print_napi_stats(struct dp_soc *soc);

/**
 * dp_rx_vdev_detach() - detach vdev from dp rx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev);

uint32_t
dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
	      uint8_t reo_ring_num,
	      uint32_t quota);

/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be
 *		  serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
			   hal_ring_handle_t hal_ring_hdl, uint32_t quota);

/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be
 *		  serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to WBM2HOST sw release ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl, uint32_t quota);

/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *		     multiple nbufs.
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 * This function implements the creation of RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: returns the head nbuf which contains complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr);

/*
 * dp_rx_desc_pool_alloc() - create a pool of software rx_descs
 *			     at the time of dp rx initialization
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: number of Rx descriptors in the pool
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF status
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id,
				 uint32_t pool_size,
				 struct rx_desc_pool *rx_desc_pool);

/*
 * dp_rx_desc_nbuf_and_pool_free() - free the sw rx desc pool called during
 *				     de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool);

/*
 * dp_rx_desc_nbuf_free() - free the sw rx desc nbufs called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);

/*
 * dp_rx_desc_pool_free() - free the sw rx desc array called during
 *			    de-initialization of wifi module.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);

void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
				struct dp_peer *peer);

/**
 * dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list
 *
 * @head: pointer to the head of local free list
 * @tail: pointer to the tail of local free list
 * @new: new descriptor that is added to the free list
 *
 * Return: void
 */
static inline
void dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head,
				 union dp_rx_desc_list_elem_t **tail,
				 struct dp_rx_desc *new)
{
	qdf_assert(head && new);

	new->nbuf = NULL;
	new->in_use = 0;

	((union dp_rx_desc_list_elem_t *)new)->next = *head;
	*head = (union dp_rx_desc_list_elem_t *)new;
	if (!*tail)
		*tail = *head;
}

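/*
 * Example (illustrative): after a descriptor's nbuf has been consumed, the
 * descriptor is parked on a local free list, and the whole list is handed
 * back to the pool in one call. Surrounding variable names ('rx_desc',
 * 'pool_id', 'rx_desc_pool') are hypothetical:
 *
 *	union dp_rx_desc_list_elem_t *head = NULL, *tail = NULL;
 *
 *	dp_rx_add_to_free_desc_list(&head, &tail, rx_desc);
 *	...
 *	dp_rx_add_desc_list_to_free_list(soc, &head, &tail, pool_id,
 *					 rx_desc_pool);
 */
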
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
				   uint8_t mac_id);
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
		qdf_nbuf_t mpdu, bool mpdu_done, uint8_t mac_id);
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer);
void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		       uint16_t peer_id, uint8_t tid);

#define DP_RX_LIST_APPEND(head, tail, elem) \
	do {                                                          \
		if (!(head)) {                                        \
			(head) = (elem);                              \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) = 1;\
		} else {                                              \
			qdf_nbuf_set_next((tail), (elem));            \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head)++;  \
		}                                                     \
		(tail) = (elem);                                      \
		qdf_nbuf_set_next((tail), NULL);                      \
	} while (0)

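/*
 * Example (illustrative): building a delivery list while reaping, then
 * handing it to the stack in one shot. The next_reaped_nbuf() helper and
 * the surrounding names are hypothetical:
 *
 *	qdf_nbuf_t deliver_list_head = NULL;
 *	qdf_nbuf_t deliver_list_tail = NULL;
 *
 *	while ((nbuf = next_reaped_nbuf()) != NULL)
 *		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
 *
 *	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
 */
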
/* For QCN9000 emulation the PCIe is a complete PHY and has no address
 * restrictions.
 */
#if !defined(BUILD_X86) || defined(QCA_WIFI_QCN9000)
static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
		qdf_dma_addr_t *paddr, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
#else
#define MAX_RETRY 100
static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
		qdf_dma_addr_t *paddr, struct dp_pdev *pdev)
{
	uint32_t nbuf_retry = 0;
	int32_t ret;
	const uint32_t x86_phy_addr = 0x50000000;
	/*
	 * In M2M emulation platforms (x86) the memory below 0x50000000
	 * is reserved for target use, so any memory allocated in this
	 * region should not be used by the host.
	 */
	do {
		if (qdf_likely(*paddr > x86_phy_addr))
			return QDF_STATUS_SUCCESS;
		else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
					"phy addr %pK is below 0x50000000, trying again",
					paddr);

			nbuf_retry++;
			if (*rx_netbuf) {
				qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
						QDF_DMA_FROM_DEVICE);
				/* Not freeing buffer intentionally.
				 * Observed that the same buffer keeps getting
				 * re-allocated, resulting in longer load time
				 * and WMI init timeout.
				 * This buffer is anyway not useful, so skip it.
				 */
			}

			*rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
							RX_BUFFER_SIZE,
							RX_BUFFER_RESERVATION,
							RX_BUFFER_ALIGNMENT,
							FALSE);

			if (qdf_unlikely(!(*rx_netbuf)))
				return QDF_STATUS_E_FAILURE;

			ret = qdf_nbuf_map_single(dp_soc->osdev, *rx_netbuf,
							QDF_DMA_FROM_DEVICE);

			if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
				qdf_nbuf_free(*rx_netbuf);
				*rx_netbuf = NULL;
				continue;
			}

			*paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0);
		}
	} while (nbuf_retry < MAX_RETRY);

	if (*rx_netbuf) {
		qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
					QDF_DMA_FROM_DEVICE);
		qdf_nbuf_free(*rx_netbuf);
	}

	return QDF_STATUS_E_FAILURE;
}
#endif

/**
 * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
 *				   the MSDU Link Descriptor
 * @soc: core txrx main context
 * @buf_info: buf_info includes the cookie that is used to look up the
 * virtual address of the link descriptor. Normally this is just an index
 * into a per SOC array.
 *
 * This is the VA of the link descriptor, that HAL layer later uses to
 * retrieve the list of MSDUs for a given MPDU.
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
				  struct hal_buf_info *buf_info)
{
	void *link_desc_va;
	uint32_t bank_id = LINK_DESC_COOKIE_BANK_ID(buf_info->sw_cookie);

	/* TODO */
	/* Add sanity for cookie */

	link_desc_va = soc->link_desc_banks[bank_id].base_vaddr +
		(buf_info->paddr -
			soc->link_desc_banks[bank_id].base_paddr);

	return link_desc_va;
}

/**
 * dp_rx_cookie_2_mon_link_desc_va() - Converts cookie to a virtual address of
 *				   the MSDU Link Descriptor
 * @pdev: core txrx pdev context
 * @buf_info: buf_info includes the cookie that is used to look up the
 * virtual address of the link descriptor. Normally this is just an index
 * into a per pdev array.
 * @mac_id: mac id for which the link descriptor bank is looked up
 *
 * This is the VA of the link descriptor in monitor mode destination ring,
 * that HAL layer later uses to retrieve the list of MSDUs for a given MPDU.
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_mon_link_desc_va(struct dp_pdev *pdev,
				  struct hal_buf_info *buf_info,
				  int mac_id)
{
	void *link_desc_va;
	int mac_for_pdev = dp_get_mac_id_for_mac(pdev->soc, mac_id);

	/* TODO */
	/* Add sanity for cookie */

	link_desc_va =
	   pdev->link_desc_banks[mac_for_pdev][buf_info->sw_cookie].base_vaddr +
	   (buf_info->paddr -
	   pdev->link_desc_banks[mac_for_pdev][buf_info->sw_cookie].base_paddr);

	return link_desc_va;
}

/**
 * dp_rx_defrag_concat() - Concatenate the fragments
 *
 * @dst: destination pointer to the buffer
 * @src: source pointer from where the fragment payload is to be copied
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src)
{
	/*
	 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst
	 * to provide space for src, the headroom portion is copied from
	 * the original dst buffer to the larger new dst buffer.
	 * (This is needed, because the headroom of the dst buffer
	 * contains the rx desc.)
	 */
	if (!qdf_nbuf_cat(dst, src)) {
		/*
		 * qdf_nbuf_cat does not free the src memory.
		 * Free src nbuf before returning.
		 * For the failure case, the caller takes care of freeing
		 * the nbuf.
		 */
		qdf_nbuf_free(src);
		return QDF_STATUS_SUCCESS;
	}

	return QDF_STATUS_E_DEFRAG_ERROR;
}

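/*
 * Example (illustrative): stitching fragments into the head fragment. On
 * success the source nbuf has been freed by the helper; on failure the
 * caller still owns it. The loop variables are hypothetical:
 *
 *	for (frag = frag_list_head; frag; frag = next) {
 *		next = qdf_nbuf_next(frag);
 *		if (dp_rx_defrag_concat(head, frag) != QDF_STATUS_SUCCESS)
 *			return QDF_STATUS_E_DEFRAG_ERROR;
 *	}
 */
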
#ifndef FEATURE_WDS
static inline QDF_STATUS
dp_rx_ast_set_active(struct dp_soc *soc, uint16_t sa_idx, bool is_active)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_rx_wds_srcport_learn(struct dp_soc *soc,
			uint8_t *rx_tlv_hdr,
			struct dp_peer *ta_peer,
			qdf_nbuf_t nbuf)
{
}
#endif

/*
 * dp_rx_desc_dump() - dump the sw rx descriptor
 *
 * @rx_desc: sw rx descriptor
 */
static inline void dp_rx_desc_dump(struct dp_rx_desc *rx_desc)
{
	dp_info("rx_desc->nbuf: %pK, rx_desc->cookie: %d, rx_desc->pool_id: %d, rx_desc->in_use: %d, rx_desc->unmapped: %d",
		rx_desc->nbuf, rx_desc->cookie, rx_desc->pool_id,
		rx_desc->in_use, rx_desc->unmapped);
}

/*
 * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet.
 *					In qwrap mode, packets originating from
 *					any vdev should not loop back and
 *					should be dropped.
 * @vdev: vdev on which rx packet is received
 * @nbuf: rx pkt
 *
 * Return: true if the packet is a loopback packet that must be dropped
 */
#if ATH_SUPPORT_WRAP
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						qdf_nbuf_t nbuf)
{
	struct dp_vdev *psta_vdev;
	struct dp_pdev *pdev = vdev->pdev;
	uint8_t *data = qdf_nbuf_data(nbuf);

	if (qdf_unlikely(vdev->proxysta_vdev)) {
		/* In qwrap isolation mode, allow loopback packets as all
		 * packets go to RootAP and Loopback on the mpsta.
		 */
		if (vdev->isolation_vdev)
			return false;
		TAILQ_FOREACH(psta_vdev, &pdev->vdev_list, vdev_list_elem) {
			if (qdf_unlikely(psta_vdev->proxysta_vdev &&
					 !qdf_mem_cmp(psta_vdev->mac_addr.raw,
						      &data[QDF_MAC_ADDR_SIZE],
						      QDF_MAC_ADDR_SIZE))) {
				/* Drop packet if source address is equal to
				 * any of the vdev addresses.
				 */
				return true;
			}
		}
	}
	return false;
}
#else
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						qdf_nbuf_t nbuf)
{
	return false;
}
#endif

#if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\
	defined(WLAN_SUPPORT_RX_TAG_STATISTICS) ||\
	defined(WLAN_SUPPORT_RX_FLOW_TAG)
#include "dp_rx_tag.h"
#endif

#ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
/**
 * dp_rx_update_protocol_tag() - Reads CCE metadata from the RX MSDU end TLV
 *                              and set the corresponding tag in QDF packet
 * @soc: core txrx main context
 * @vdev: vdev on which the packet is received
 * @nbuf: QDF pkt buffer on which the protocol tag should be set
 * @rx_tlv_hdr: base address where the RX TLVs start
 * @ring_index: REO ring number, not used for error & monitor ring
 * @is_reo_exception: flag to indicate if rx from REO ring or exception ring
 * @is_update_stats: flag to indicate whether to update stats or not
 *
 * Return: void
 */
static inline void
dp_rx_update_protocol_tag(struct dp_soc *soc, struct dp_vdev *vdev,
			  qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
			  uint16_t ring_index,
			  bool is_reo_exception, bool is_update_stats)
{
}
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */

#ifndef WLAN_SUPPORT_RX_FLOW_TAG
/**
 * dp_rx_update_flow_tag() - Reads FSE metadata from the RX MSDU end TLV
 *                           and set the corresponding tag in QDF packet
 * @soc: core txrx main context
 * @vdev: vdev on which the packet is received
 * @nbuf: QDF pkt buffer on which the flow tag should be set
 * @rx_tlv_hdr: base address where the RX TLVs start
 * @update_stats: flag to indicate whether to update stats or not
 *
 * Return: void
 */
static inline void
dp_rx_update_flow_tag(struct dp_soc *soc, struct dp_vdev *vdev,
		      qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, bool update_stats)
{
}
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */

#if !defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) &&\
	!defined(WLAN_SUPPORT_RX_FLOW_TAG)
/**
 * dp_rx_mon_update_protocol_flow_tag() - Performs necessary checks for monitor
 *                                       mode and then tags appropriate packets
 * @soc: core txrx main context
 * @dp_pdev: pdev on which the packet is received
 * @msdu: QDF packet buffer on which the protocol tag should be set
 * @rx_desc: base address where the RX TLVs start
 *
 * Return: void
 */
static inline
void dp_rx_mon_update_protocol_flow_tag(struct dp_soc *soc,
					struct dp_pdev *dp_pdev,
					qdf_nbuf_t msdu, void *rx_desc)
{
}
#endif /* !WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG && !WLAN_SUPPORT_RX_FLOW_TAG */

/*
 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *			       called during dp rx initialization
 *			       and at the end of dp_rx_process.
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 *
 * Return: success or failure
 */
QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				 struct dp_srng *dp_rxdma_srng,
				 struct rx_desc_pool *rx_desc_pool,
				 uint32_t num_req_buffers,
				 union dp_rx_desc_list_elem_t **desc_list,
				 union dp_rx_desc_list_elem_t **tail);

/*
 * dp_pdev_rx_buffers_attach() - replenish rxdma ring with rx nbufs
 *                               called during dp rx initialization
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 *
 * Return: success or failure
 */
QDF_STATUS
dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
			  struct dp_srng *dp_rxdma_srng,
			  struct rx_desc_pool *rx_desc_pool,
			  uint32_t num_req_buffers);

/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor that carries
 *	       the buffer_addr_info
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		       uint8_t bm_action);

/**
 * dp_rx_link_desc_return_by_addr() - Return a MPDU link descriptor to HW
 *					(WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
			       hal_link_desc_t link_desc_addr,
			       uint8_t bm_action);

/**
 * dp_rxdma_err_process() - RxDMA error processing functionality
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Return: num of buffers processed
 */
uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		     uint32_t mac_id, uint32_t quota);

void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				uint8_t *rx_tlv_hdr, struct dp_peer *peer);
QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					uint8_t *rx_tlv_hdr);

int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
			   struct dp_peer *peer);

qdf_nbuf_t
dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev);

/*
 * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
 *
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
 * @ring_desc: opaque pointer to the RX ring descriptor
 * @rx_desc: host rx descriptor
 *
 * Return: void
 */
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
				hal_ring_handle_t hal_ring_hdl,
				hal_ring_desc_t ring_desc,
				struct dp_rx_desc *rx_desc);

void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
#ifdef RX_DESC_DEBUG_CHECK
/**
 * dp_rx_desc_check_magic() - check the magic value in dp_rx_desc
 * @rx_desc: rx descriptor pointer
 *
 * The magic is cleared on a successful check, so a stale descriptor
 * cannot pass the check twice.
 *
 * Return: true, if magic is correct, else false.
 */
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
		return false;

	rx_desc->magic = 0;
	return true;
}

/**
 * dp_rx_desc_prep() - prepare rx desc
 * @rx_desc: rx descriptor pointer to be prepared
 * @nbuf: nbuf to be associated with rx_desc
 *
 * Note: assumption is that we are associating a nbuf which is mapped
 *
 * Return: none
 */
static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->magic = DP_RX_DESC_MAGIC;
	rx_desc->nbuf = nbuf;
	rx_desc->unmapped = 0;
}

#else

static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	return true;
}

static inline void dp_rx_desc_prep(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->nbuf = nbuf;
	rx_desc->unmapped = 0;
}
#endif /* RX_DESC_DEBUG_CHECK */

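/*
 * Example (illustrative): the two helpers above pair across the buffer
 * lifetime. At replenish time the descriptor is stamped; at reap time the
 * stamp is verified (and consumed) before the nbuf is trusted. Surrounding
 * names are hypothetical:
 *
 *	// replenish path
 *	dp_rx_desc_prep(rx_desc, nbuf);
 *
 *	// reap path
 *	if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc)))
 *		dp_rx_dump_info_and_assert(soc, hal_ring_hdl, ring_desc,
 *					   rx_desc);
 */
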
void dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer,
			     uint8_t err_code, uint8_t mac_id);

#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif

#ifndef QCA_MULTIPASS_SUPPORT
static inline
bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf, uint8_t tid)
{
	return false;
}
#else
bool dp_rx_multipass_process(struct dp_peer *peer, qdf_nbuf_t nbuf,
			     uint8_t tid);
#endif

#endif /* _DP_RX_H */