xref: /wlan-dirver/qcacld-3.0/core/dp/htt/htt_rx.c (revision 99923a8330d524fd331c8d65ac1e713b4b8086f9)
1 /*
2  * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * @file htt_rx.c
21  * @brief Implement receive aspects of HTT.
22  * @details
23  *  This file contains three categories of HTT rx code:
24  *  1.  An abstraction of the rx descriptor, to hide the
25  *      differences between the HL vs. LL rx descriptor.
26  *  2.  Functions for providing access to the (series of)
27  *      rx descriptor(s) and rx frame(s) associated with
28  *      an rx indication message.
29  *  3.  Functions for setting up and using the MAC DMA
30  *      rx ring (applies to LL only).
31  */
32 
33 #include <qdf_mem.h>         /* qdf_mem_malloc,free, etc. */
34 #include <qdf_types.h>          /* qdf_print, bool */
35 #include <qdf_nbuf.h>           /* qdf_nbuf_t, etc. */
36 #include <qdf_timer.h>		/* qdf_timer_free */
37 
38 #include <htt.h>                /* HTT_HL_RX_DESC_SIZE */
39 #include <ol_cfg.h>
40 #include <ol_rx.h>
41 #include <ol_htt_rx_api.h>
42 #include <htt_internal.h>       /* HTT_ASSERT, htt_pdev_t, HTT_RX_BUF_SIZE */
43 #include "regtable.h"
44 
45 #include <cds_ieee80211_common.h>   /* ieee80211_frame, ieee80211_qoscntl */
46 #include <cds_ieee80211_defines.h>  /* ieee80211_rx_status */
47 #include <cds_utils.h>
48 #include <wlan_policy_mgr_api.h>
49 #include "ol_txrx_types.h"
50 #ifdef DEBUG_DMA_DONE
51 #include <asm/barrier.h>
52 #include <wma_api.h>
53 #endif
54 #include <pktlog_ac_fmt.h>
55 
56 /* AR9888v1 WORKAROUND for EV#112367 */
57 /* FIX THIS - remove this WAR when the bug is fixed */
58 #define PEREGRINE_1_0_ZERO_LEN_PHY_ERR_WAR
59 
60 /*--- setup / tear-down functions -------------------------------------------*/
61 
62 #ifndef HTT_RX_RING_SIZE_MIN
63 #define HTT_RX_RING_SIZE_MIN 128        /* slightly larger than one large A-MPDU */
64 #endif
65 
66 #ifndef HTT_RX_RING_SIZE_MAX
67 #define HTT_RX_RING_SIZE_MAX 2048       /* ~20 ms @ 1 Gbps of 1500B MSDUs */
68 #endif
69 
70 #ifndef HTT_RX_AVG_FRM_BYTES
71 #define HTT_RX_AVG_FRM_BYTES 1000
72 #endif
73 
74 #ifndef HTT_RX_HOST_LATENCY_MAX_MS
75 #define HTT_RX_HOST_LATENCY_MAX_MS 20 /* ms */	/* very conservative */
76 #endif
77 
78  /* very conservative to ensure enough buffers are allocated */
79 #ifndef HTT_RX_HOST_LATENCY_WORST_LIKELY_MS
80 #ifdef QCA_WIFI_3_0
81 #define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 20
82 #else
83 #define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10
84 #endif
85 #endif
86 
87 #ifndef HTT_RX_RING_REFILL_RETRY_TIME_MS
88 #define HTT_RX_RING_REFILL_RETRY_TIME_MS    50
89 #endif
90 
91 /*--- RX In Order Definitions ------------------------------------------------*/
92 
93 /* Number of buckets in the hash table */
94 #define RX_NUM_HASH_BUCKETS 1024        /* This should always be a power of 2 */
95 #define RX_NUM_HASH_BUCKETS_MASK (RX_NUM_HASH_BUCKETS - 1)
96 
97 /* Number of hash entries allocated per bucket */
98 #define RX_ENTRIES_SIZE 10
99 
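/*
 * Hash a rx buffer physical address into a bucket index: fold two shifted
 * copies of the address together (dropping the low-order alignment bits)
 * and mask the result down to the power-of-2 bucket count.
 */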
100 #define RX_HASH_FUNCTION(a) (((a >> 14) ^ (a >> 4)) & RX_NUM_HASH_BUCKETS_MASK)
101 
102 #ifdef RX_HASH_DEBUG_LOG
103 #define RX_HASH_LOG(x) x
104 #else
105 #define RX_HASH_LOG(x)          /* no-op */
106 #endif
107 
108 #ifndef CONFIG_HL_SUPPORT
109 /**
110  * htt_get_first_packet_after_wow_wakeup() - flag the first packet after WoW wakeup
111  * @msg_word: pointer to rx indication message word
112  * @buf: rx netbuf to mark if it is the first packet received after wakeup
113  *
114  * Return: None
115  */
116 static void
117 htt_get_first_packet_after_wow_wakeup(uint32_t *msg_word, qdf_nbuf_t buf)
118 {
119 	if (HTT_RX_IN_ORD_PADDR_IND_MSDU_INFO_GET(*msg_word) &
120 			FW_MSDU_INFO_FIRST_WAKEUP_M) {
121 		qdf_nbuf_mark_wakeup_frame(buf);
122 		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
123 			  "%s: First packet after WOW Wakeup rcvd", __func__);
124 	}
125 }
126 
127 /* De-initialization function for the rx buffer hash table. This function
128  * frees up the hash table, which includes freeing all the pending rx buffers.
129  */
130 static void htt_rx_hash_deinit(struct htt_pdev_t *pdev)
131 {
132 
133 	uint32_t i;
134 	struct htt_rx_hash_entry *hash_entry;
135 	struct htt_rx_hash_bucket **hash_table;
136 	struct htt_list_node *list_iter = NULL;
137 	qdf_mem_info_t mem_map_table = {0};
138 	bool ipa_smmu = false;
139 
140 	if (NULL == pdev->rx_ring.hash_table)
141 		return;
142 
143 	if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled &&
144 	    pdev->rx_ring.smmu_map)
145 		ipa_smmu = true;
146 
147 	qdf_spin_lock_bh(&(pdev->rx_ring.rx_hash_lock));
148 	hash_table = pdev->rx_ring.hash_table;
149 	pdev->rx_ring.hash_table = NULL;
150 	qdf_spin_unlock_bh(&(pdev->rx_ring.rx_hash_lock));
151 
152 	for (i = 0; i < RX_NUM_HASH_BUCKETS; i++) {
153 		/* Free the hash entries in hash bucket i */
154 		list_iter = hash_table[i]->listhead.next;
155 		while (list_iter != &hash_table[i]->listhead) {
156 			hash_entry =
157 				(struct htt_rx_hash_entry *)((char *)list_iter -
158 							     pdev->rx_ring.
159 							     listnode_offset);
160 			if (hash_entry->netbuf) {
161 				if (ipa_smmu) {
162 					qdf_update_mem_map_table(pdev->osdev,
163 						&mem_map_table,
164 						QDF_NBUF_CB_PADDR(
165 							hash_entry->netbuf),
166 						HTT_RX_BUF_SIZE);
167 
168 					cds_smmu_map_unmap(false, 1,
169 							   &mem_map_table);
170 				}
171 #ifdef DEBUG_DMA_DONE
172 				qdf_nbuf_unmap(pdev->osdev, hash_entry->netbuf,
173 					       QDF_DMA_BIDIRECTIONAL);
174 #else
175 				qdf_nbuf_unmap(pdev->osdev, hash_entry->netbuf,
176 					       QDF_DMA_FROM_DEVICE);
177 #endif
178 				qdf_nbuf_free(hash_entry->netbuf);
179 				hash_entry->paddr = 0;
180 			}
181 			list_iter = list_iter->next;
182 
183 			if (!hash_entry->fromlist)
184 				qdf_mem_free(hash_entry);
185 		}
186 
187 		qdf_mem_free(hash_table[i]);
188 
189 	}
190 	qdf_mem_free(hash_table);
191 
192 	qdf_spinlock_destroy(&(pdev->rx_ring.rx_hash_lock));
193 }
194 #endif
195 
196 /*
197  * This function is used both below within this file (which the compiler
198  * will hopefully inline), and out-of-line from other files via the
199  * htt_rx_msdu_first_msdu_flag function pointer.
200  */
201 
202 static inline bool
203 htt_rx_msdu_first_msdu_flag_hl(htt_pdev_handle pdev, void *msdu_desc)
204 {
205 	return ((u_int8_t *)msdu_desc - sizeof(struct hl_htt_rx_ind_base))
206 		[HTT_ENDIAN_BYTE_IDX_SWAP(HTT_RX_IND_HL_FLAG_OFFSET)] &
207 		HTT_RX_IND_HL_FLAG_FIRST_MSDU ? true : false;
208 }
209 
210 u_int16_t
211 htt_rx_msdu_rx_desc_size_hl(
212 	htt_pdev_handle pdev,
213 	void *msdu_desc
214 		)
215 {
216 	return ((u_int8_t *)(msdu_desc) - HTT_RX_IND_HL_BYTES)
217 		[HTT_ENDIAN_BYTE_IDX_SWAP(HTT_RX_IND_HL_RX_DESC_LEN_OFFSET)];
218 }
219 
220 /**
221  * htt_rx_mpdu_desc_retry_hl() - Returns the retry bit from the Rx descriptor
222  *                               for the High Latency driver
223  * @pdev: Handle (pointer) to HTT pdev.
224  * @mpdu_desc: Void pointer to the Rx descriptor for MPDU
225  *             before the beginning of the payload.
226  *
227  *  This function returns the retry bit of the 802.11 header for the
228  *  provided rx MPDU descriptor. For the high latency driver, this function
229  *  reports the retry bit as never being set, so that the mcast duplicate
230  *  detection never fails.
231  *
232  * Return:        boolean -- false always for HL
233  */
234 static inline bool
235 htt_rx_mpdu_desc_retry_hl(htt_pdev_handle pdev, void *mpdu_desc)
236 {
237 	return false;
238 }
239 
240 #ifdef CONFIG_HL_SUPPORT
241 static uint16_t
242 htt_rx_mpdu_desc_seq_num_hl(htt_pdev_handle pdev, void *mpdu_desc)
243 {
244 	if (pdev->rx_desc_size_hl) {
245 		return pdev->cur_seq_num_hl =
246 			(u_int16_t)(HTT_WORD_GET(*(u_int32_t *)mpdu_desc,
247 						HTT_HL_RX_DESC_MPDU_SEQ_NUM));
248 	} else {
249 		return (u_int16_t)(pdev->cur_seq_num_hl);
250 	}
251 }
252 
253 static void
254 htt_rx_mpdu_desc_pn_hl(
255 	htt_pdev_handle pdev,
256 	void *mpdu_desc,
257 	union htt_rx_pn_t *pn,
258 	int pn_len_bits)
259 {
260 	if (htt_rx_msdu_first_msdu_flag_hl(pdev, mpdu_desc) == true) {
261 		/* Fix Me: only for little endian */
262 		struct hl_htt_rx_desc_base *rx_desc =
263 			(struct hl_htt_rx_desc_base *)mpdu_desc;
264 		u_int32_t *word_ptr = (u_int32_t *)pn->pn128;
265 
266 		/* TODO: for Host of big endian */
267 		switch (pn_len_bits) {
268 		case 128:
269 			/* bits 127:96 */
270 			*(word_ptr + 3) = rx_desc->pn_127_96;
271 			/* bits 95:64 */
272 			*(word_ptr + 2) = rx_desc->pn_95_64;
273 		case 48:
274 			/* bits 63:32
275 			 * (bits 31:0 are copied by the fall-through
276 			 *  to case 24 below) */
277 			*(word_ptr + 1) = rx_desc->u0.pn_63_32;
278 		case 24:
279 			/* bits 31:0
280 			 * copy 32 bits
281 			 */
282 			*(word_ptr + 0) = rx_desc->pn_31_0;
283 			break;
284 		default:
285 			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
286 				  "Error: invalid length spec (%d bits) for PN",
287 				  pn_len_bits);
288 			qdf_assert(0);
289 			break;
290 		}
291 	} else {
292 		/* not the first msdu, so no PN info is available */
293 		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
294 			  "Error: cannot get PN from a non-first msdu.");
295 		qdf_assert(0);
296 	}
297 }
298 #endif
299 
300 /**
301  * htt_rx_mpdu_desc_tid_hl() - Returns the TID value from the Rx descriptor
302  *                             for High Latency driver
303  * @pdev:                        Handle (pointer) to HTT pdev.
304  * @mpdu_desc:                   Void pointer to the Rx descriptor for the MPDU
305  *                               before the beginning of the payload.
306  *
307  * This function returns the TID set in the 802.11 QoS Control for the MPDU
308  * in the packet header, by looking at the mpdu_start of the Rx descriptor.
309  * Rx descriptor gets a copy of the TID from the MAC.
310  * For the HL driver, this is currently unimplemented and always returns
311  * an invalid TID. It is the caller's responsibility to check that the
312  * return value is within the valid range.
313  *
314  * Return:        Invalid TID value (0xff) for HL driver.
315  */
316 static inline uint8_t
317 htt_rx_mpdu_desc_tid_hl(htt_pdev_handle pdev, void *mpdu_desc)
318 {
319 	return 0xff;  /* Invalid TID */
320 }
321 
322 static inline bool
323 htt_rx_msdu_desc_completes_mpdu_hl(htt_pdev_handle pdev, void *msdu_desc)
324 {
325 	return (
326 		((u_int8_t *)(msdu_desc) - sizeof(struct hl_htt_rx_ind_base))
327 		[HTT_ENDIAN_BYTE_IDX_SWAP(HTT_RX_IND_HL_FLAG_OFFSET)]
328 		& HTT_RX_IND_HL_FLAG_LAST_MSDU)
329 		? true : false;
330 }
331 
332 static inline int
333 htt_rx_msdu_has_wlan_mcast_flag_hl(htt_pdev_handle pdev, void *msdu_desc)
334 {
335 	/* currently, only first msdu has hl rx_desc */
336 	return htt_rx_msdu_first_msdu_flag_hl(pdev, msdu_desc) == true;
337 }
338 
339 static inline bool
340 htt_rx_msdu_is_wlan_mcast_hl(htt_pdev_handle pdev, void *msdu_desc)
341 {
342 	struct hl_htt_rx_desc_base *rx_desc =
343 		(struct hl_htt_rx_desc_base *)msdu_desc;
344 
345 	return
346 		HTT_WORD_GET(*(u_int32_t *)rx_desc, HTT_HL_RX_DESC_MCAST_BCAST);
347 }
348 
349 static inline int
350 htt_rx_msdu_is_frag_hl(htt_pdev_handle pdev, void *msdu_desc)
351 {
352 	struct hl_htt_rx_desc_base *rx_desc =
353 		(struct hl_htt_rx_desc_base *)msdu_desc;
354 
355 	return
356 		HTT_WORD_GET(*(u_int32_t *)rx_desc, HTT_HL_RX_DESC_MCAST_BCAST);
357 }
358 
359 #ifdef ENABLE_DEBUG_ADDRESS_MARKING
360 static qdf_dma_addr_t
361 htt_rx_paddr_mark_high_bits(qdf_dma_addr_t paddr)
362 {
363 	if (sizeof(qdf_dma_addr_t) > 4) {
364 		/* clear high bits, leave lower 37 bits (paddr) */
365 		paddr &= 0x01FFFFFFFFF;
366 		/* mark upper 16 bits of paddr */
367 		paddr |= (((uint64_t)RX_PADDR_MAGIC_PATTERN) << 32);
368 	}
369 	return paddr;
370 }
371 #else
372 static qdf_dma_addr_t
373 htt_rx_paddr_mark_high_bits(qdf_dma_addr_t paddr)
374 {
375 	return paddr;
376 }
377 #endif
378 
379 #ifndef CONFIG_HL_SUPPORT
380 static bool
381 htt_rx_msdu_first_msdu_flag_ll(htt_pdev_handle pdev, void *msdu_desc)
382 {
383 	struct htt_host_rx_desc_base *rx_desc =
384 		(struct htt_host_rx_desc_base *)msdu_desc;
385 	return (bool)
386 		(((*(((uint32_t *)&rx_desc->msdu_end) + 4)) &
387 		  RX_MSDU_END_4_FIRST_MSDU_MASK) >>
388 		 RX_MSDU_END_4_FIRST_MSDU_LSB);
389 }
390 
391 #endif /* CONFIG_HL_SUPPORT*/
392 
393 /* full_reorder_offload case: this function is called with lock held */
394 static int htt_rx_ring_fill_n(struct htt_pdev_t *pdev, int num)
395 {
396 	int idx;
397 	QDF_STATUS status;
398 	struct htt_host_rx_desc_base *rx_desc;
399 	int filled = 0;
400 	int debt_served = 0;
401 	qdf_mem_info_t mem_map_table = {0};
402 	bool ipa_smmu = false;
403 
404 	idx = *(pdev->rx_ring.alloc_idx.vaddr);
405 
406 	if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled &&
407 	    pdev->rx_ring.smmu_map)
408 		ipa_smmu = true;
409 
410 	if ((idx < 0) || (idx > pdev->rx_ring.size_mask) ||
411 	    (num > pdev->rx_ring.size))  {
412 		QDF_TRACE(QDF_MODULE_ID_HTT,
413 			  QDF_TRACE_LEVEL_ERROR,
414 			  "%s:rx refill failed!", __func__);
415 		return filled;
416 	}
417 
418 moretofill:
419 	while (num > 0) {
420 		qdf_dma_addr_t paddr, paddr_marked;
421 		qdf_nbuf_t rx_netbuf;
422 		int headroom;
423 
424 		rx_netbuf =
425 			qdf_nbuf_alloc(pdev->osdev, HTT_RX_BUF_SIZE,
426 				       0, 4, false);
427 		if (!rx_netbuf) {
428 			qdf_timer_stop(&pdev->rx_ring.
429 						 refill_retry_timer);
430 			/*
431 			 * Failed to fill it to the desired level -
432 			 * we'll start a timer and try again next time.
433 			 * As long as enough buffers are left in the ring for
434 			 * another A-MPDU rx, no special recovery is needed.
435 			 */
436 #ifdef DEBUG_DMA_DONE
437 			pdev->rx_ring.dbg_refill_cnt++;
438 #endif
439 			pdev->refill_retry_timer_starts++;
440 			qdf_timer_start(
441 				&pdev->rx_ring.refill_retry_timer,
442 				HTT_RX_RING_REFILL_RETRY_TIME_MS);
443 			goto update_alloc_idx;
444 		}
445 
446 		/* Clear rx_desc attention word before posting to Rx ring */
447 		rx_desc = htt_rx_desc(rx_netbuf);
448 		*(uint32_t *) &rx_desc->attention = 0;
449 
450 #ifdef DEBUG_DMA_DONE
451 		*(uint32_t *) &rx_desc->msdu_end = 1;
452 
453 #define MAGIC_PATTERN 0xDEADBEEF
454 		*(uint32_t *) &rx_desc->msdu_start = MAGIC_PATTERN;
455 
456 		/*
457 		 * To ensure that attention bit is reset and msdu_end is set
458 		 * before calling dma_map
459 		 */
460 		smp_mb();
461 #endif
462 		/*
463 		 * Adjust qdf_nbuf_data to point to the location in the buffer
464 		 * where the rx descriptor will be filled in.
465 		 */
466 		headroom = qdf_nbuf_data(rx_netbuf) - (uint8_t *) rx_desc;
467 		qdf_nbuf_push_head(rx_netbuf, headroom);
468 
469 #ifdef DEBUG_DMA_DONE
470 		status =
471 			qdf_nbuf_map(pdev->osdev, rx_netbuf,
472 						QDF_DMA_BIDIRECTIONAL);
473 #else
474 		status =
475 			qdf_nbuf_map(pdev->osdev, rx_netbuf,
476 						QDF_DMA_FROM_DEVICE);
477 #endif
478 		if (status != QDF_STATUS_SUCCESS) {
479 			qdf_nbuf_free(rx_netbuf);
480 			goto update_alloc_idx;
481 		}
482 
483 		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);
484 		paddr_marked = htt_rx_paddr_mark_high_bits(paddr);
485 		if (pdev->cfg.is_full_reorder_offload) {
486 			if (qdf_unlikely(htt_rx_hash_list_insert(
487 					pdev, paddr_marked, rx_netbuf))) {
488 				QDF_TRACE(QDF_MODULE_ID_HTT,
489 					  QDF_TRACE_LEVEL_ERROR,
490 					  "%s: hash insert failed!", __func__);
491 #ifdef DEBUG_DMA_DONE
492 				qdf_nbuf_unmap(pdev->osdev, rx_netbuf,
493 					       QDF_DMA_BIDIRECTIONAL);
494 #else
495 				qdf_nbuf_unmap(pdev->osdev, rx_netbuf,
496 					       QDF_DMA_FROM_DEVICE);
497 #endif
498 				qdf_nbuf_free(rx_netbuf);
499 				goto update_alloc_idx;
500 			}
501 			htt_rx_dbg_rxbuf_set(pdev, paddr_marked, rx_netbuf);
502 		} else {
503 			pdev->rx_ring.buf.netbufs_ring[idx] = rx_netbuf;
504 		}
505 
506 		if (ipa_smmu) {
507 			qdf_update_mem_map_table(pdev->osdev, &mem_map_table,
508 						 paddr, HTT_RX_BUF_SIZE);
509 			cds_smmu_map_unmap(true, 1, &mem_map_table);
510 		}
511 
512 		pdev->rx_ring.buf.paddrs_ring[idx] = paddr_marked;
513 		pdev->rx_ring.fill_cnt++;
514 
515 		num--;
516 		idx++;
517 		filled++;
518 		idx &= pdev->rx_ring.size_mask;
519 	}
520 
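	/*
	 * If additional refill debt accumulated while this routine was filling
	 * (recorded elsewhere when a refill could not be performed right away),
	 * loop back and serve that debt as well before publishing the index.
	 */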
521 	if (debt_served <  qdf_atomic_read(&pdev->rx_ring.refill_debt)) {
522 		num = qdf_atomic_read(&pdev->rx_ring.refill_debt);
523 		debt_served += num;
524 		goto moretofill;
525 	}
526 
527 update_alloc_idx:
528 	/*
529 	 * Make sure alloc index write is reflected correctly before FW polls
530 	 * remote ring write index, as the compiler can reorder instructions
531 	 * during optimization.
532 	 */
533 	qdf_mb();
534 	*(pdev->rx_ring.alloc_idx.vaddr) = idx;
535 	htt_rx_dbg_rxbuf_indupd(pdev, idx);
536 
537 	return filled;
538 }
539 
540 #ifndef CONFIG_HL_SUPPORT
541 static int htt_rx_ring_size(struct htt_pdev_t *pdev)
542 {
543 	int size;
544 
545 	/*
546 	 * It is expected that the host CPU will typically be able to service
547 	 * the rx indication from one A-MPDU before the rx indication from
548 	 * the subsequent A-MPDU happens, roughly 1-2 ms later.
549 	 * However, the rx ring should be sized very conservatively, to
550 	 * accommodate the worst reasonable delay before the host CPU services
551 	 * a rx indication interrupt.
552 	 * The rx ring need not be kept full of empty buffers.  In theory,
553 	 * the htt host SW can dynamically track the low-water mark in the
554 	 * rx ring, and dynamically adjust the level to which the rx ring
555 	 * is filled with empty buffers, to dynamically meet the desired
556 	 * low-water mark.
557 	 * In contrast, it's difficult to resize the rx ring itself, once
558 	 * it's in use.
559 	 * Thus, the ring itself should be sized very conservatively, while
560 	 * the degree to which the ring is filled with empty buffers should
561 	 * be sized moderately conservatively.
562 	 */
563 	size =
564 		ol_cfg_max_thruput_mbps(pdev->ctrl_pdev) *
565 		1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */  /
566 		(8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;
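	/*
	 * e.g. with the defaults above (HTT_RX_AVG_FRM_BYTES = 1000,
	 * HTT_RX_HOST_LATENCY_MAX_MS = 20), a 400 Mbps target works out to
	 * 400 * 1000 / 8000 * 20 = 1000 entries before the clamping and
	 * power-of-2 rounding below.
	 */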
567 
568 	if (size < HTT_RX_RING_SIZE_MIN)
569 		size = HTT_RX_RING_SIZE_MIN;
570 	else if (size > HTT_RX_RING_SIZE_MAX)
571 		size = HTT_RX_RING_SIZE_MAX;
572 
573 	size = qdf_get_pwr2(size);
574 	return size;
575 }
576 
577 static int htt_rx_ring_fill_level(struct htt_pdev_t *pdev)
578 {
579 	int size;
580 
581 	size = ol_cfg_max_thruput_mbps(pdev->ctrl_pdev) *
582 		1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */  /
583 		(8 * HTT_RX_AVG_FRM_BYTES) *
584 		HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;
585 
586 	size = qdf_get_pwr2(size);
587 	/*
588 	 * Make sure the fill level is at least 1 less than the ring size.
589 	 * Leaving 1 element empty allows the SW to easily distinguish
590 	 * between a full ring vs. an empty ring.
591 	 */
592 	if (size >= pdev->rx_ring.size)
593 		size = pdev->rx_ring.size - 1;
594 
595 	return size;
596 }
597 
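/*
 * Timer callback for the rx ring refill retry timer: take the accumulated
 * refill debt, try to fill that many ring entries, and adjust the debt by
 * however much more or less than that was actually filled.
 */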
598 static void htt_rx_ring_refill_retry(void *arg)
599 {
600 	htt_pdev_handle pdev = (htt_pdev_handle) arg;
601 	int             filled = 0;
602 	int             num;
603 
604 	pdev->refill_retry_timer_calls++;
605 	qdf_spin_lock_bh(&(pdev->rx_ring.refill_lock));
606 
607 	num = qdf_atomic_read(&pdev->rx_ring.refill_debt);
608 	qdf_atomic_sub(num, &pdev->rx_ring.refill_debt);
609 	filled = htt_rx_ring_fill_n(pdev, num);
610 
611 	if (filled > num) {
612 		/* we served ourselves and some other debt */
613 		/* sub is safer than  = 0 */
614 		qdf_atomic_sub(filled - num, &pdev->rx_ring.refill_debt);
615 	} else if (num == filled) { /* nothing to be done */
616 	} else {
617 		qdf_atomic_add(num - filled, &pdev->rx_ring.refill_debt);
618 		/* we could not fill all, timer must have been started */
619 		pdev->refill_retry_timer_doubles++;
620 	}
621 	qdf_spin_unlock_bh(&(pdev->rx_ring.refill_lock));
622 }
623 #endif
624 
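/*
 * Number of filled rx ring entries not yet consumed by the host SW, computed
 * modulo the power-of-2 ring size via size_mask. The in-order variant below
 * measures against the index published by the target instead.
 */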
625 static inline unsigned int htt_rx_ring_elems(struct htt_pdev_t *pdev)
626 {
627 	return
628 		(*pdev->rx_ring.alloc_idx.vaddr -
629 		 pdev->rx_ring.sw_rd_idx.msdu_payld) & pdev->rx_ring.size_mask;
630 }
631 
632 static inline unsigned int htt_rx_in_order_ring_elems(struct htt_pdev_t *pdev)
633 {
634 	return
635 		(*pdev->rx_ring.alloc_idx.vaddr -
636 		 *pdev->rx_ring.target_idx.vaddr) &
637 		pdev->rx_ring.size_mask;
638 }
639 
640 #ifndef CONFIG_HL_SUPPORT
641 
642 void htt_rx_detach(struct htt_pdev_t *pdev)
643 {
644 	bool ipa_smmu = false;
645 	qdf_timer_stop(&pdev->rx_ring.refill_retry_timer);
646 	qdf_timer_free(&pdev->rx_ring.refill_retry_timer);
647 	htt_rx_dbg_rxbuf_deinit(pdev);
648 
649 	if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled &&
650 	    pdev->rx_ring.smmu_map)
651 		ipa_smmu = true;
652 
653 	if (pdev->cfg.is_full_reorder_offload) {
654 		qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
655 					   sizeof(uint32_t),
656 					   pdev->rx_ring.target_idx.vaddr,
657 					   pdev->rx_ring.target_idx.paddr,
658 					   qdf_get_dma_mem_context((&pdev->
659 								    rx_ring.
660 								    target_idx),
661 								   memctx));
662 		htt_rx_hash_deinit(pdev);
663 	} else {
664 		int sw_rd_idx = pdev->rx_ring.sw_rd_idx.msdu_payld;
665 		qdf_mem_info_t mem_map_table = {0};
666 
667 		while (sw_rd_idx != *(pdev->rx_ring.alloc_idx.vaddr)) {
668 			if (ipa_smmu) {
669 				qdf_update_mem_map_table(pdev->osdev,
670 					&mem_map_table,
671 					QDF_NBUF_CB_PADDR(
672 						pdev->rx_ring.buf.
673 						netbufs_ring[sw_rd_idx]),
674 					HTT_RX_BUF_SIZE);
675 				cds_smmu_map_unmap(false, 1,
676 						   &mem_map_table);
677 			}
678 #ifdef DEBUG_DMA_DONE
679 			qdf_nbuf_unmap(pdev->osdev,
680 				       pdev->rx_ring.buf.
681 				       netbufs_ring[sw_rd_idx],
682 				       QDF_DMA_BIDIRECTIONAL);
683 #else
684 			qdf_nbuf_unmap(pdev->osdev,
685 				       pdev->rx_ring.buf.
686 				       netbufs_ring[sw_rd_idx],
687 				       QDF_DMA_FROM_DEVICE);
688 #endif
689 			qdf_nbuf_free(pdev->rx_ring.buf.
690 				      netbufs_ring[sw_rd_idx]);
691 			sw_rd_idx++;
692 			sw_rd_idx &= pdev->rx_ring.size_mask;
693 		}
694 		qdf_mem_free(pdev->rx_ring.buf.netbufs_ring);
695 
696 	}
697 
698 	qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
699 				   sizeof(uint32_t),
700 				   pdev->rx_ring.alloc_idx.vaddr,
701 				   pdev->rx_ring.alloc_idx.paddr,
702 				   qdf_get_dma_mem_context((&pdev->rx_ring.
703 							    alloc_idx),
704 							   memctx));
705 
706 	qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
707 				   pdev->rx_ring.size * sizeof(target_paddr_t),
708 				   pdev->rx_ring.buf.paddrs_ring,
709 				   pdev->rx_ring.base_paddr,
710 				   qdf_get_dma_mem_context((&pdev->rx_ring.buf),
711 							   memctx));
712 
713 	/* destroy the rx-parallelization refill spinlock */
714 	qdf_spinlock_destroy(&(pdev->rx_ring.refill_lock));
715 }
716 #endif
717 
718 /**
719  * htt_rx_mpdu_wifi_hdr_retrieve() - retrieve 802.11 header
720  * @pdev: pdev handle
721  * @mpdu_desc: mpdu descriptor
722  *
723  * Return: pointer to 802.11 header
724  */
725 char *htt_rx_mpdu_wifi_hdr_retrieve(htt_pdev_handle pdev, void *mpdu_desc)
726 {
727 	struct htt_host_rx_desc_base *rx_desc =
728 		(struct htt_host_rx_desc_base *)mpdu_desc;
729 
730 	if (!rx_desc)
731 		return NULL;
732 	else
733 		return rx_desc->rx_hdr_status;
734 }
735 
736 /**
737  * htt_rx_mpdu_desc_tsf32() - Return the TSF timestamp indicating when
738  *                            an MPDU was received.
739  * @pdev: the HTT instance the rx data was received on
740  * @mpdu_desc: the abstract descriptor for the MPDU in question
741  *
742  * Return: 32 LSBs of the TSF at which the MPDU's PPDU was received
743  */
744 uint32_t htt_rx_mpdu_desc_tsf32(htt_pdev_handle pdev, void *mpdu_desc)
745 {
746 	return 0;
747 }
748 
749 /*--- rx descriptor field access functions ----------------------------------*/
750 /*
751  * These functions need to use bit masks and shifts to extract fields
752  * from the rx descriptors, rather than directly using the bitfields.
753  * For example, use
754  *     (desc & FIELD_MASK) >> FIELD_LSB
755  * rather than
756  *     desc.field
757  * This allows the functions to work correctly on either little-endian
758  * machines (no endianness conversion needed) or big-endian machines
759  * (endianness conversion provided automatically by the HW DMA's
760  * byte-swizzling).
761  */
762 /* FIX THIS: APPLIES TO LL ONLY */
763 
764 #ifndef CONFIG_HL_SUPPORT
765 /**
766  * htt_rx_mpdu_desc_retry_ll() - Returns the retry bit from the Rx descriptor
767  *                               for the Low Latency driver
768  * @pdev:                          Handle (pointer) to HTT pdev.
769  * @mpdu_desc:                     Void pointer to the Rx descriptor for MPDU
770  *                                 before the beginning of the payload.
771  *
772  *  This function returns the retry bit of the 802.11 header for the
773  *  provided rx MPDU descriptor.
774  *
775  * Return:        boolean -- true if retry is set, false otherwise
776  */
777 static bool
778 htt_rx_mpdu_desc_retry_ll(htt_pdev_handle pdev, void *mpdu_desc)
779 {
780 	struct htt_host_rx_desc_base *rx_desc =
781 		(struct htt_host_rx_desc_base *) mpdu_desc;
782 
783 	return
784 		(bool)(((*((uint32_t *) &rx_desc->mpdu_start)) &
785 		RX_MPDU_START_0_RETRY_MASK) >>
786 		RX_MPDU_START_0_RETRY_LSB);
787 }
788 
789 static uint16_t htt_rx_mpdu_desc_seq_num_ll(htt_pdev_handle pdev,
790 					    void *mpdu_desc)
791 {
792 	struct htt_host_rx_desc_base *rx_desc =
793 		(struct htt_host_rx_desc_base *)mpdu_desc;
794 
795 	return
796 		(uint16_t) (((*((uint32_t *) &rx_desc->mpdu_start)) &
797 			     RX_MPDU_START_0_SEQ_NUM_MASK) >>
798 			    RX_MPDU_START_0_SEQ_NUM_LSB);
799 }
800 
801 /* FIX THIS: APPLIES TO LL ONLY */
802 static void
803 htt_rx_mpdu_desc_pn_ll(htt_pdev_handle pdev,
804 		       void *mpdu_desc, union htt_rx_pn_t *pn, int pn_len_bits)
805 {
806 	struct htt_host_rx_desc_base *rx_desc =
807 		(struct htt_host_rx_desc_base *)mpdu_desc;
808 
809 	switch (pn_len_bits) {
810 	case 24:
811 		/* bits 23:0 */
812 		pn->pn24 = rx_desc->mpdu_start.pn_31_0 & 0xffffff;
813 		break;
814 	case 48:
815 		/* bits 31:0 */
816 		pn->pn48 = rx_desc->mpdu_start.pn_31_0;
817 		/* bits 47:32 */
818 		pn->pn48 |= ((uint64_t)
819 			     ((*(((uint32_t *) &rx_desc->mpdu_start) + 2))
820 			      & RX_MPDU_START_2_PN_47_32_MASK))
821 			<< (32 - RX_MPDU_START_2_PN_47_32_LSB);
822 		break;
823 	case 128:
824 		/* bits 31:0 */
825 		pn->pn128[0] = rx_desc->mpdu_start.pn_31_0;
826 		/* bits 47:32 */
827 		pn->pn128[0] |=
828 			((uint64_t) ((*(((uint32_t *)&rx_desc->mpdu_start) + 2))
829 				     & RX_MPDU_START_2_PN_47_32_MASK))
830 			<< (32 - RX_MPDU_START_2_PN_47_32_LSB);
831 		/* bits 63:48 */
832 		pn->pn128[0] |=
833 			((uint64_t) ((*(((uint32_t *) &rx_desc->msdu_end) + 2))
834 				     & RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK))
835 			<< (48 - RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB);
836 		/* bits 95:64 */
837 		pn->pn128[1] = rx_desc->msdu_end.ext_wapi_pn_95_64;
838 		/* bits 127:96 */
839 		pn->pn128[1] |=
840 			((uint64_t) rx_desc->msdu_end.ext_wapi_pn_127_96) << 32;
841 		break;
842 	default:
843 		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
844 			  "Error: invalid length spec (%d bits) for PN",
845 			  pn_len_bits);
846 	}
847 }
848 
849 /**
850  * htt_rx_mpdu_desc_tid_ll() - Returns the TID value from the Rx descriptor
851  *                             for Low Latency driver
852  * @pdev:                        Handle (pointer) to HTT pdev.
853  * @mpdu_desc:                   Void pointer to the Rx descriptor for the MPDU
854  *                               before the beginning of the payload.
855  *
856  * This function returns the TID set in the 802.11 QoS Control for the MPDU
857  * in the packet header, by looking at the mpdu_start of the Rx descriptor.
858  * Rx descriptor gets a copy of the TID from the MAC.
859  *
860  * Return:        Actual TID set in the packet header.
861  */
862 static uint8_t
863 htt_rx_mpdu_desc_tid_ll(htt_pdev_handle pdev, void *mpdu_desc)
864 {
865 	struct htt_host_rx_desc_base *rx_desc =
866 		(struct htt_host_rx_desc_base *) mpdu_desc;
867 
868 	return
869 		(uint8_t)(((*(((uint32_t *) &rx_desc->mpdu_start) + 2)) &
870 		RX_MPDU_START_2_TID_MASK) >>
871 		RX_MPDU_START_2_TID_LSB);
872 }
873 
874 /* FIX THIS: APPLIES TO LL ONLY */
875 static bool htt_rx_msdu_desc_completes_mpdu_ll(htt_pdev_handle pdev,
876 					       void *msdu_desc)
877 {
878 	struct htt_host_rx_desc_base *rx_desc =
879 		(struct htt_host_rx_desc_base *)msdu_desc;
880 	return (bool)
881 		(((*(((uint32_t *) &rx_desc->msdu_end) + 4)) &
882 		  RX_MSDU_END_4_LAST_MSDU_MASK) >> RX_MSDU_END_4_LAST_MSDU_LSB);
883 }
884 
885 /* FIX THIS: APPLIES TO LL ONLY */
886 static int htt_rx_msdu_has_wlan_mcast_flag_ll(htt_pdev_handle pdev,
887 					      void *msdu_desc)
888 {
889 	struct htt_host_rx_desc_base *rx_desc =
890 		(struct htt_host_rx_desc_base *)msdu_desc;
891 	/*
892 	 * HW rx desc: the mcast_bcast flag is only valid
893 	 * if first_msdu is set
894 	 */
895 	return
896 		((*(((uint32_t *) &rx_desc->msdu_end) + 4)) &
897 		 RX_MSDU_END_4_FIRST_MSDU_MASK) >> RX_MSDU_END_4_FIRST_MSDU_LSB;
898 }
899 
900 /* FIX THIS: APPLIES TO LL ONLY */
901 static bool htt_rx_msdu_is_wlan_mcast_ll(htt_pdev_handle pdev, void *msdu_desc)
902 {
903 	struct htt_host_rx_desc_base *rx_desc =
904 		(struct htt_host_rx_desc_base *)msdu_desc;
905 	return
906 		((*((uint32_t *) &rx_desc->attention)) &
907 		 RX_ATTENTION_0_MCAST_BCAST_MASK)
908 		>> RX_ATTENTION_0_MCAST_BCAST_LSB;
909 }
910 
911 /* FIX THIS: APPLIES TO LL ONLY */
912 static int htt_rx_msdu_is_frag_ll(htt_pdev_handle pdev, void *msdu_desc)
913 {
914 	struct htt_host_rx_desc_base *rx_desc =
915 		(struct htt_host_rx_desc_base *)msdu_desc;
916 	return
917 		((*((uint32_t *) &rx_desc->attention)) &
918 		 RX_ATTENTION_0_FRAGMENT_MASK) >> RX_ATTENTION_0_FRAGMENT_LSB;
919 }
920 #endif
921 
922 static inline
923 uint8_t htt_rx_msdu_fw_desc_get(htt_pdev_handle pdev, void *msdu_desc)
924 {
925 	/*
926 	 * HL and LL use the same format for FW rx desc, but have the FW rx desc
927 	 * in different locations.
928 	 * In LL, the FW rx descriptor has been copied into the same
929 	 * htt_host_rx_desc_base struct that holds the HW rx desc.
930 	 * In HL, the FW rx descriptor, along with the MSDU payload,
931 	 * is in the same buffer as the rx indication message.
932 	 *
933 	 * Use the FW rx desc offset configured during startup to account for
934 	 * this difference between HL vs. LL.
935 	 *
936 	 * An optimization would be to define the LL and HL msdu_desc pointer
937 	 * in such a way that they both use the same offset to the FW rx desc.
938 	 * Then the following functions could be converted to macros, without
939 	 * needing to expose the htt_pdev_t definition outside HTT.
940 	 */
941 	return *(((uint8_t *) msdu_desc) + pdev->rx_fw_desc_offset);
942 }
943 
944 int htt_rx_msdu_discard(htt_pdev_handle pdev, void *msdu_desc)
945 {
946 	return htt_rx_msdu_fw_desc_get(pdev, msdu_desc) & FW_RX_DESC_DISCARD_M;
947 }
948 
949 int htt_rx_msdu_forward(htt_pdev_handle pdev, void *msdu_desc)
950 {
951 	return htt_rx_msdu_fw_desc_get(pdev, msdu_desc) & FW_RX_DESC_FORWARD_M;
952 }
953 
954 int htt_rx_msdu_inspect(htt_pdev_handle pdev, void *msdu_desc)
955 {
956 	return htt_rx_msdu_fw_desc_get(pdev, msdu_desc) & FW_RX_DESC_INSPECT_M;
957 }
958 
959 void
960 htt_rx_msdu_actions(htt_pdev_handle pdev,
961 		    void *msdu_desc, int *discard, int *forward, int *inspect)
962 {
963 	uint8_t rx_msdu_fw_desc = htt_rx_msdu_fw_desc_get(pdev, msdu_desc);
964 #ifdef HTT_DEBUG_DATA
965 	HTT_PRINT("act:0x%x ", rx_msdu_fw_desc);
966 #endif
967 	*discard = rx_msdu_fw_desc & FW_RX_DESC_DISCARD_M;
968 	*forward = rx_msdu_fw_desc & FW_RX_DESC_FORWARD_M;
969 	*inspect = rx_msdu_fw_desc & FW_RX_DESC_INSPECT_M;
970 }
971 
972 static inline qdf_nbuf_t htt_rx_netbuf_pop(htt_pdev_handle pdev)
973 {
974 	int idx;
975 	qdf_nbuf_t msdu;
976 
977 	HTT_ASSERT1(htt_rx_ring_elems(pdev) != 0);
978 
979 #ifdef DEBUG_DMA_DONE
980 	pdev->rx_ring.dbg_ring_idx++;
981 	pdev->rx_ring.dbg_ring_idx &= pdev->rx_ring.size_mask;
982 #endif
983 
984 	idx = pdev->rx_ring.sw_rd_idx.msdu_payld;
985 	msdu = pdev->rx_ring.buf.netbufs_ring[idx];
986 	idx++;
987 	idx &= pdev->rx_ring.size_mask;
988 	pdev->rx_ring.sw_rd_idx.msdu_payld = idx;
989 	pdev->rx_ring.fill_cnt--;
990 	return msdu;
991 }
992 
993 /*
994  * FIX ME: this function applies only to LL rx descs.
995  * An equivalent for HL rx descs is needed.
996  */
997 #ifdef CHECKSUM_OFFLOAD
998 static inline
999 void
1000 htt_set_checksum_result_ll(htt_pdev_handle pdev, qdf_nbuf_t msdu,
1001 			   struct htt_host_rx_desc_base *rx_desc)
1002 {
1003 #define MAX_IP_VER          2
1004 #define MAX_PROTO_VAL       4
1005 	struct rx_msdu_start *rx_msdu = &rx_desc->msdu_start;
1006 	unsigned int proto = (rx_msdu->tcp_proto) | (rx_msdu->udp_proto << 1);
1007 
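	/* proto index: 0 = neither, 1 = TCP, 2 = UDP, 3 = both flags (invalid) */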
1008 	/*
1009 	 * HW supports TCP & UDP checksum offload for ipv4 and ipv6
1010 	 */
1011 	static const qdf_nbuf_l4_rx_cksum_type_t
1012 		cksum_table[][MAX_PROTO_VAL][MAX_IP_VER] = {
1013 		{
1014 			/* non-fragmented IP packet */
1015 			/* non TCP/UDP packet */
1016 			{QDF_NBUF_RX_CKSUM_ZERO, QDF_NBUF_RX_CKSUM_ZERO},
1017 			/* TCP packet */
1018 			{QDF_NBUF_RX_CKSUM_TCP, QDF_NBUF_RX_CKSUM_TCPIPV6},
1019 			/* UDP packet */
1020 			{QDF_NBUF_RX_CKSUM_UDP, QDF_NBUF_RX_CKSUM_UDPIPV6},
1021 			/* invalid packet type */
1022 			{QDF_NBUF_RX_CKSUM_ZERO, QDF_NBUF_RX_CKSUM_ZERO},
1023 		},
1024 		{
1025 			/* fragmented IP packet */
1026 			{QDF_NBUF_RX_CKSUM_ZERO, QDF_NBUF_RX_CKSUM_ZERO},
1027 			{QDF_NBUF_RX_CKSUM_ZERO, QDF_NBUF_RX_CKSUM_ZERO},
1028 			{QDF_NBUF_RX_CKSUM_ZERO, QDF_NBUF_RX_CKSUM_ZERO},
1029 			{QDF_NBUF_RX_CKSUM_ZERO, QDF_NBUF_RX_CKSUM_ZERO},
1030 		}
1031 	};
1032 
1033 	qdf_nbuf_rx_cksum_t cksum = {
1034 		cksum_table[rx_msdu->ip_frag][proto][rx_msdu->ipv6_proto],
1035 		QDF_NBUF_RX_CKSUM_NONE,
1036 		0
1037 	};
1038 
1039 	if (cksum.l4_type !=
1040 	    (qdf_nbuf_l4_rx_cksum_type_t) QDF_NBUF_RX_CKSUM_NONE) {
1041 		cksum.l4_result =
1042 			((*(uint32_t *) &rx_desc->attention) &
1043 			 RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK) ?
1044 			QDF_NBUF_RX_CKSUM_NONE :
1045 			QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
1046 	}
1047 	qdf_nbuf_set_rx_cksum(msdu, &cksum);
1048 #undef MAX_IP_VER
1049 #undef MAX_PROTO_VAL
1050 }
1051 
1052 #if defined(CONFIG_HL_SUPPORT)
1053 
1054 static void
1055 htt_set_checksum_result_hl(qdf_nbuf_t msdu,
1056 			   struct htt_host_rx_desc_base *rx_desc)
1057 {
1058 	u_int8_t flag = ((u_int8_t *)rx_desc -
1059 				sizeof(struct hl_htt_rx_ind_base))[
1060 					HTT_ENDIAN_BYTE_IDX_SWAP(
1061 						HTT_RX_IND_HL_FLAG_OFFSET)];
1062 
1063 	int is_ipv6 = flag & HTT_RX_IND_HL_FLAG_IPV6 ? 1 : 0;
1064 	int is_tcp = flag & HTT_RX_IND_HL_FLAG_TCP ? 1 : 0;
1065 	int is_udp = flag & HTT_RX_IND_HL_FLAG_UDP ? 1 : 0;
1066 
1067 	qdf_nbuf_rx_cksum_t cksum = {
1068 		QDF_NBUF_RX_CKSUM_NONE,
1069 		QDF_NBUF_RX_CKSUM_NONE,
1070 		0
1071 	};
1072 
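	/*
	 * The switch key packs the FW flags as bit 2 = UDP, bit 1 = TCP,
	 * bit 0 = IPv6, e.g. 0x4 is UDP over IPv4 and 0x3 is TCP over IPv6.
	 */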
1073 	switch ((is_udp << 2) | (is_tcp << 1) | (is_ipv6 << 0)) {
1074 	case 0x4:
1075 		cksum.l4_type = QDF_NBUF_RX_CKSUM_UDP;
1076 		break;
1077 	case 0x2:
1078 		cksum.l4_type = QDF_NBUF_RX_CKSUM_TCP;
1079 		break;
1080 	case 0x5:
1081 		cksum.l4_type = QDF_NBUF_RX_CKSUM_UDPIPV6;
1082 		break;
1083 	case 0x3:
1084 		cksum.l4_type = QDF_NBUF_RX_CKSUM_TCPIPV6;
1085 		break;
1086 	default:
1087 		cksum.l4_type = QDF_NBUF_RX_CKSUM_NONE;
1088 		break;
1089 	}
1090 	if (cksum.l4_type != (qdf_nbuf_l4_rx_cksum_type_t)
1091 				QDF_NBUF_RX_CKSUM_NONE) {
1092 		cksum.l4_result = flag & HTT_RX_IND_HL_FLAG_C4_FAILED ?
1093 			QDF_NBUF_RX_CKSUM_NONE :
1094 				QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
1095 	}
1096 	qdf_nbuf_set_rx_cksum(msdu, &cksum);
1097 }
1098 #endif
1099 
1100 #else
1101 
1102 static inline
1103 void htt_set_checksum_result_ll(htt_pdev_handle pdev, qdf_nbuf_t msdu,
1104 			   struct htt_host_rx_desc_base *rx_desc)
1105 {
1106 }
1107 
1108 #if defined(CONFIG_HL_SUPPORT)
1109 
1110 static inline
1111 void htt_set_checksum_result_hl(qdf_nbuf_t msdu,
1112 			   struct htt_host_rx_desc_base *rx_desc)
1113 {
1114 }
1115 #endif
1116 
1117 #endif
1118 
1119 #ifdef DEBUG_DMA_DONE
1120 #define MAX_DONE_BIT_CHECK_ITER 5
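/* max number of 1 ms polls of the MSDU-done attention bit before giving up */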
1121 #endif
1122 
1123 #ifndef CONFIG_HL_SUPPORT
1124 static int
1125 htt_rx_amsdu_pop_ll(htt_pdev_handle pdev,
1126 		    qdf_nbuf_t rx_ind_msg,
1127 		    qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu,
1128 		    uint32_t *msdu_count)
1129 {
1130 	int msdu_len, msdu_chaining = 0;
1131 	qdf_nbuf_t msdu;
1132 	struct htt_host_rx_desc_base *rx_desc;
1133 	uint8_t *rx_ind_data;
1134 	uint32_t *msg_word, num_msdu_bytes;
1135 	qdf_dma_addr_t rx_desc_paddr;
1136 	enum htt_t2h_msg_type msg_type;
1137 	uint8_t pad_bytes = 0;
1138 
1139 	HTT_ASSERT1(htt_rx_ring_elems(pdev) != 0);
1140 	rx_ind_data = qdf_nbuf_data(rx_ind_msg);
1141 	msg_word = (uint32_t *) rx_ind_data;
1142 
1143 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
1144 
1145 	if (qdf_unlikely(HTT_T2H_MSG_TYPE_RX_FRAG_IND == msg_type)) {
1146 		num_msdu_bytes = HTT_RX_FRAG_IND_FW_RX_DESC_BYTES_GET(
1147 			*(msg_word + HTT_RX_FRAG_IND_HDR_PREFIX_SIZE32));
1148 	} else {
1149 		num_msdu_bytes = HTT_RX_IND_FW_RX_DESC_BYTES_GET(
1150 			*(msg_word
1151 			  + HTT_RX_IND_HDR_PREFIX_SIZE32
1152 			  + HTT_RX_PPDU_DESC_SIZE32));
1153 	}
1154 	msdu = *head_msdu = htt_rx_netbuf_pop(pdev);
1155 	while (1) {
1156 		int last_msdu, msdu_len_invalid, msdu_chained;
1157 		int byte_offset;
1158 		qdf_nbuf_t next;
1159 
1160 		/*
1161 		 * Set the netbuf length to be the entire buffer length
1162 		 * initially, so the unmap will unmap the entire buffer.
1163 		 */
1164 		qdf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
1165 #ifdef DEBUG_DMA_DONE
1166 		qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_BIDIRECTIONAL);
1167 #else
1168 		qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_FROM_DEVICE);
1169 #endif
1170 
1171 		/* cache consistency has been taken care of by qdf_nbuf_unmap */
1172 
1173 		/*
1174 		 * Now read the rx descriptor.
1175 		 * Set the length to the appropriate value.
1176 		 * Check if this MSDU completes a MPDU.
1177 		 */
1178 		rx_desc = htt_rx_desc(msdu);
1179 #if defined(HELIUMPLUS)
1180 		if (HTT_WIFI_IP(pdev, 2, 0))
1181 			pad_bytes = rx_desc->msdu_end.l3_header_padding;
1182 #endif /* defined(HELIUMPLUS) */
1183 
1184 		/*
1185 		 * Save PADDR of descriptor and make the netbuf's data pointer
1186 		 * point to the payload rather than the descriptor.
1187 		 */
1188 		rx_desc_paddr = QDF_NBUF_CB_PADDR(msdu);
1189 		qdf_nbuf_pull_head(msdu, HTT_RX_STD_DESC_RESERVATION +
1190 					 pad_bytes);
1191 
1192 		/*
1193 		 * Sanity check - confirm the HW is finished filling in
1194 		 * the rx data.
1195 		 * If the HW and SW are working correctly, then it's guaranteed
1196 		 * that the HW's MAC DMA is done before this point in the SW.
1197 		 * To prevent the case that we handle a stale Rx descriptor,
1198 		 * just assert for now until we have a way to recover.
1199 		 */
1200 
1201 #ifdef DEBUG_DMA_DONE
1202 		if (qdf_unlikely(!((*(uint32_t *) &rx_desc->attention)
1203 				   & RX_ATTENTION_0_MSDU_DONE_MASK))) {
1204 
1205 			int dbg_iter = MAX_DONE_BIT_CHECK_ITER;
1206 
1207 			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
1208 				  "malformed frame");
1209 
1210 			while (dbg_iter &&
1211 			       (!((*(uint32_t *) &rx_desc->attention) &
1212 				  RX_ATTENTION_0_MSDU_DONE_MASK))) {
1213 				qdf_mdelay(1);
1214 				qdf_mem_dma_sync_single_for_cpu(
1215 					pdev->osdev,
1216 					rx_desc_paddr,
1217 					HTT_RX_STD_DESC_RESERVATION,
1218 					DMA_FROM_DEVICE);
1219 
1220 				QDF_TRACE(QDF_MODULE_ID_HTT,
1221 					  QDF_TRACE_LEVEL_INFO,
1222 					  "debug iter %d success %d", dbg_iter,
1223 					  pdev->rx_ring.dbg_sync_success);
1224 
1225 				dbg_iter--;
1226 			}
1227 
1228 			if (qdf_unlikely(!((*(uint32_t *) &rx_desc->attention)
1229 					   & RX_ATTENTION_0_MSDU_DONE_MASK))) {
1230 
1231 #ifdef HTT_RX_RESTORE
1232 				QDF_TRACE(QDF_MODULE_ID_HTT,
1233 					  QDF_TRACE_LEVEL_ERROR,
1234 					  "RX done bit error detected!");
1235 
1236 				qdf_nbuf_set_next(msdu, NULL);
1237 				*tail_msdu = msdu;
1238 				pdev->rx_ring.rx_reset = 1;
1239 				return msdu_chaining;
1240 #else
1241 				wma_cli_set_command(0, GEN_PARAM_CRASH_INJECT,
1242 						    0, GEN_CMD);
1243 				HTT_ASSERT_ALWAYS(0);
1244 #endif
1245 			}
1246 			pdev->rx_ring.dbg_sync_success++;
1247 			QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
1248 				  "debug iter %d success %d", dbg_iter,
1249 				  pdev->rx_ring.dbg_sync_success);
1250 		}
1251 #else
1252 		HTT_ASSERT_ALWAYS((*(uint32_t *) &rx_desc->attention) &
1253 				  RX_ATTENTION_0_MSDU_DONE_MASK);
1254 #endif
1255 		/*
1256 		 * Copy the FW rx descriptor for this MSDU from the rx
1257 		 * indication message into the MSDU's netbuf.
1258 		 * HL uses the same rx indication message definition as LL, and
1259 		 * simply appends new info (fields from the HW rx desc, and the
1260 		 * MSDU payload itself).
1261 		 * So, the offset into the rx indication message only has to
1262 		 * account for the standard offset of the per-MSDU FW rx
1263 		 * desc info within the message, and how many bytes of the
1264 		 * per-MSDU FW rx desc info have already been consumed.
1265 		 * (And the endianness of the host,
1266 		 * since for a big-endian host, the rx ind message contents,
1267 		 * including the per-MSDU rx desc bytes, were byteswapped during
1268 		 * upload.)
1269 		 */
1270 		if (pdev->rx_ind_msdu_byte_idx < num_msdu_bytes) {
1271 			if (qdf_unlikely
1272 				    (HTT_T2H_MSG_TYPE_RX_FRAG_IND == msg_type))
1273 				byte_offset =
1274 					HTT_ENDIAN_BYTE_IDX_SWAP
1275 					(HTT_RX_FRAG_IND_FW_DESC_BYTE_OFFSET);
1276 			else
1277 				byte_offset =
1278 					HTT_ENDIAN_BYTE_IDX_SWAP
1279 					(HTT_RX_IND_FW_RX_DESC_BYTE_OFFSET +
1280 						pdev->rx_ind_msdu_byte_idx);
1281 
1282 			*((uint8_t *) &rx_desc->fw_desc.u.val) =
1283 				rx_ind_data[byte_offset];
1284 			/*
1285 			 * The target is expected to only provide the basic
1286 			 * per-MSDU rx descriptors.  Just to be sure,
1287 			 * verify that the target has not attached
1288 			 * extension data (e.g. LRO flow ID).
1289 			 */
1290 			/*
1291 			 * The assertion below currently doesn't work for
1292 			 * RX_FRAG_IND messages, since their format differs
1293 			 * from the RX_IND format (no FW rx PPDU desc in
1294 			 * the current RX_FRAG_IND message).
1295 			 * If the RX_FRAG_IND message format is updated to match
1296 			 * the RX_IND message format, then the following
1297 			 * assertion can be restored.
1298 			 */
1299 			/*
1300 			 * qdf_assert((rx_ind_data[byte_offset] &
1301 			 * FW_RX_DESC_EXT_M) == 0);
1302 			 */
1303 			pdev->rx_ind_msdu_byte_idx += 1;
1304 			/* or more, if there's ext data */
1305 		} else {
1306 			/*
1307 			 * When an oversized A-MSDU is received, the FW loses some
1308 			 * of the MSDU status - in that case, fewer FW descriptors
1309 			 * are provided than there are MSDUs inside this MPDU.
1310 			 * Zero the FW descriptor so that the MSDU is still
1311 			 * delivered to the upper stack, provided the MPDU has
1312 			 * no CRC error.
1313 			 *
1314 			 * FIX THIS - the FW descriptors are actually for MSDUs
1315 			 * at the end of this A-MSDU instead of the beginning.
1316 			 */
1317 			*((uint8_t *) &rx_desc->fw_desc.u.val) = 0;
1318 		}
1319 
1320 		/*
1321 		 *  TCP/UDP checksum offload support
1322 		 */
1323 		htt_set_checksum_result_ll(pdev, msdu, rx_desc);
1324 
1325 		msdu_len_invalid = (*(uint32_t *) &rx_desc->attention) &
1326 				   RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK;
1327 		msdu_chained = (((*(uint32_t *) &rx_desc->frag_info) &
1328 				 RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK) >>
1329 				RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB);
1330 		msdu_len =
1331 			((*((uint32_t *) &rx_desc->msdu_start)) &
1332 			 RX_MSDU_START_0_MSDU_LENGTH_MASK) >>
1333 			RX_MSDU_START_0_MSDU_LENGTH_LSB;
1334 
1335 		do {
1336 			if (!msdu_len_invalid && !msdu_chained) {
1337 #if defined(PEREGRINE_1_0_ZERO_LEN_PHY_ERR_WAR)
1338 				if (msdu_len > 0x3000)
1339 					break;
1340 #endif
1341 				qdf_nbuf_trim_tail(msdu,
1342 						   HTT_RX_BUF_SIZE -
1343 						   (RX_STD_DESC_SIZE +
1344 						    msdu_len));
1345 			}
1346 		} while (0);
1347 
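		/*
		 * A chained MSDU spills over into additional rx buffers: pop
		 * and link each continuation buffer, then trim the final one
		 * to the remaining length.
		 */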
1348 		while (msdu_chained--) {
1349 			next = htt_rx_netbuf_pop(pdev);
1350 			qdf_nbuf_set_pktlen(next, HTT_RX_BUF_SIZE);
1351 			msdu_len -= HTT_RX_BUF_SIZE;
1352 			qdf_nbuf_set_next(msdu, next);
1353 			msdu = next;
1354 			msdu_chaining = 1;
1355 
1356 			if (msdu_chained == 0) {
1357 				/* Trim the last one to the correct size -
1358 				 * accounting for inconsistent HW lengths
1359 				 * causing length overflows and underflows
1360 				 */
1361 				if (((unsigned int)msdu_len) >
1362 				    ((unsigned int)
1363 				     (HTT_RX_BUF_SIZE - RX_STD_DESC_SIZE))) {
1364 					msdu_len =
1365 						(HTT_RX_BUF_SIZE -
1366 						 RX_STD_DESC_SIZE);
1367 				}
1368 
1369 				qdf_nbuf_trim_tail(next,
1370 						   HTT_RX_BUF_SIZE -
1371 						   (RX_STD_DESC_SIZE +
1372 						    msdu_len));
1373 			}
1374 		}
1375 
1376 		last_msdu =
1377 			((*(((uint32_t *) &rx_desc->msdu_end) + 4)) &
1378 			 RX_MSDU_END_4_LAST_MSDU_MASK) >>
1379 			RX_MSDU_END_4_LAST_MSDU_LSB;
1380 
1381 		if (last_msdu) {
1382 			qdf_nbuf_set_next(msdu, NULL);
1383 			break;
1384 		}
1385 
1386 		next = htt_rx_netbuf_pop(pdev);
1387 		qdf_nbuf_set_next(msdu, next);
1388 		msdu = next;
1389 	}
1390 	*tail_msdu = msdu;
1391 
1392 	/*
1393 	 * Don't refill the ring yet.
1394 	 * First, the elements popped here are still in use - it is
1395 	 * not safe to overwrite them until the matching call to
1396 	 * mpdu_desc_list_next.
1397 	 * Second, for efficiency it is preferable to refill the rx ring
1398 	 * with 1 PPDU's worth of rx buffers (something like 32 x 3 buffers),
1399 	 * rather than one MPDU's worth of rx buffers (something like 3 buffers).
1400 	 * Consequently, we'll rely on the txrx SW to tell us when it is done
1401 	 * pulling all the PPDU's rx buffers out of the rx ring, and then
1402 	 * refill it just once.
1403 	 */
1404 	return msdu_chaining;
1405 }
1406 #endif
1407 
1408 #if defined(CONFIG_HL_SUPPORT)
1409 
1410 static int
1411 htt_rx_amsdu_pop_hl(
1412 	htt_pdev_handle pdev,
1413 	qdf_nbuf_t rx_ind_msg,
1414 	qdf_nbuf_t *head_msdu,
1415 	qdf_nbuf_t *tail_msdu,
1416 	uint32_t *msdu_count)
1417 {
1418 	pdev->rx_desc_size_hl =
1419 		(qdf_nbuf_data(rx_ind_msg))
1420 		[HTT_ENDIAN_BYTE_IDX_SWAP(
1421 				HTT_RX_IND_HL_RX_DESC_LEN_OFFSET)];
1422 
1423 	/* point to the rx desc */
1424 	qdf_nbuf_pull_head(rx_ind_msg,
1425 			   sizeof(struct hl_htt_rx_ind_base));
1426 	*head_msdu = *tail_msdu = rx_ind_msg;
1427 
1428 	htt_set_checksum_result_hl(rx_ind_msg,
1429 				   (struct htt_host_rx_desc_base *)
1430 				   (qdf_nbuf_data(rx_ind_msg)));
1431 
1432 	qdf_nbuf_set_next(*tail_msdu, NULL);
1433 	return 0;
1434 }
1435 
1436 static int
1437 htt_rx_frag_pop_hl(
1438 	htt_pdev_handle pdev,
1439 	qdf_nbuf_t frag_msg,
1440 	qdf_nbuf_t *head_msdu,
1441 	qdf_nbuf_t *tail_msdu,
1442 	uint32_t *msdu_count)
1443 {
1444 	qdf_nbuf_pull_head(frag_msg, HTT_RX_FRAG_IND_BYTES);
1445 	pdev->rx_desc_size_hl =
1446 		(qdf_nbuf_data(frag_msg))
1447 		[HTT_ENDIAN_BYTE_IDX_SWAP(
1448 				HTT_RX_IND_HL_RX_DESC_LEN_OFFSET)];
1449 
1450 	/* point to the rx desc */
1451 	qdf_nbuf_pull_head(frag_msg,
1452 			   sizeof(struct hl_htt_rx_ind_base));
1453 	*head_msdu = *tail_msdu = frag_msg;
1454 
1455 	qdf_nbuf_set_next(*tail_msdu, NULL);
1456 	return 0;
1457 }
1458 
1459 static inline int
1460 htt_rx_offload_msdu_cnt_hl(
1461     htt_pdev_handle pdev)
1462 	htt_pdev_handle pdev)
1463 {
1464 	return 1;
1465 
1466 static inline int
1467 htt_rx_offload_msdu_pop_hl(htt_pdev_handle pdev,
1468 			   qdf_nbuf_t offload_deliver_msg,
1469 			   int *vdev_id,
1470 			   int *peer_id,
1471 			   int *tid,
1472 			   u_int8_t *fw_desc,
1473 			   qdf_nbuf_t *head_buf,
1474 			   qdf_nbuf_t *tail_buf)
1475 {
1476 	qdf_nbuf_t buf;
1477 	u_int32_t *msdu_hdr, msdu_len;
1478 	int ret = 0;
1479 
1480 	*head_buf = *tail_buf = buf = offload_deliver_msg;
1481 	msdu_hdr = (u_int32_t *)qdf_nbuf_data(buf);
1482 	/* First dword */
1483 
1484 	/* Second dword */
1485 	msdu_hdr++;
1486 	msdu_len = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_LEN_GET(*msdu_hdr);
1487 	*peer_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_PEER_ID_GET(*msdu_hdr);
1488 
1489 	/* Third dword */
1490 	msdu_hdr++;
1491 	*vdev_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_VDEV_ID_GET(*msdu_hdr);
1492 	*tid = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_TID_GET(*msdu_hdr);
1493 	*fw_desc = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_DESC_GET(*msdu_hdr);
1494 
1495 	qdf_nbuf_pull_head(buf, HTT_RX_OFFLOAD_DELIVER_IND_MSDU_HDR_BYTES
1496 			+ HTT_RX_OFFLOAD_DELIVER_IND_HDR_BYTES);
1497 
1498 	if (msdu_len <= qdf_nbuf_len(buf)) {
1499 		qdf_nbuf_set_pktlen(buf, msdu_len);
1500 	} else {
1501 		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
1502 			  "%s: drop frame with invalid msdu len %d %d",
1503 			  __func__, msdu_len, (int)qdf_nbuf_len(buf));
1504 		qdf_nbuf_free(offload_deliver_msg);
1505 		ret = -1;
1506 	}
1507 
1508 	return ret;
1509 }
1510 #endif
1511 
1512 static inline int
1513 htt_rx_offload_msdu_cnt_ll(
1514 	htt_pdev_handle pdev)
1515 {
1516 	return htt_rx_ring_elems(pdev);
1517 }
1518 
1519 #ifndef CONFIG_HL_SUPPORT
1520 static int
1521 htt_rx_offload_msdu_pop_ll(htt_pdev_handle pdev,
1522 			   qdf_nbuf_t offload_deliver_msg,
1523 			   int *vdev_id,
1524 			   int *peer_id,
1525 			   int *tid,
1526 			   uint8_t *fw_desc,
1527 			   qdf_nbuf_t *head_buf, qdf_nbuf_t *tail_buf)
1528 {
1529 	qdf_nbuf_t buf;
1530 	uint32_t *msdu_hdr, msdu_len;
1531 
1532 	*head_buf = *tail_buf = buf = htt_rx_netbuf_pop(pdev);
1533 
1534 	if (qdf_unlikely(NULL == buf)) {
1535 		qdf_print("%s: netbuf pop failed!\n", __func__);
1536 		return 1;
1537 	}
1538 
1539 	/* Fake read mpdu_desc to keep desc ptr in sync */
1540 	htt_rx_mpdu_desc_list_next(pdev, NULL);
1541 	qdf_nbuf_set_pktlen(buf, HTT_RX_BUF_SIZE);
1542 #ifdef DEBUG_DMA_DONE
1543 	qdf_nbuf_unmap(pdev->osdev, buf, QDF_DMA_BIDIRECTIONAL);
1544 #else
1545 	qdf_nbuf_unmap(pdev->osdev, buf, QDF_DMA_FROM_DEVICE);
1546 #endif
1547 	msdu_hdr = (uint32_t *) qdf_nbuf_data(buf);
1548 
1549 	/* First dword */
1550 	msdu_len = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_LEN_GET(*msdu_hdr);
1551 	*peer_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_PEER_ID_GET(*msdu_hdr);
1552 
1553 	/* Second dword */
1554 	msdu_hdr++;
1555 	*vdev_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_VDEV_ID_GET(*msdu_hdr);
1556 	*tid = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_TID_GET(*msdu_hdr);
1557 	*fw_desc = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_DESC_GET(*msdu_hdr);
1558 
1559 	qdf_nbuf_pull_head(buf, HTT_RX_OFFLOAD_DELIVER_IND_MSDU_HDR_BYTES);
1560 	qdf_nbuf_set_pktlen(buf, msdu_len);
1561 	return 0;
1562 }
1563 
1564 int
1565 htt_rx_offload_paddr_msdu_pop_ll(htt_pdev_handle pdev,
1566 				 uint32_t *msg_word,
1567 				 int msdu_iter,
1568 				 int *vdev_id,
1569 				 int *peer_id,
1570 				 int *tid,
1571 				 uint8_t *fw_desc,
1572 				 qdf_nbuf_t *head_buf, qdf_nbuf_t *tail_buf)
1573 {
1574 	qdf_nbuf_t buf;
1575 	uint32_t *msdu_hdr, msdu_len;
1576 	uint32_t *curr_msdu;
1577 	qdf_dma_addr_t paddr;
1578 
1579 	curr_msdu =
1580 		msg_word + (msdu_iter * HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS);
1581 	paddr = htt_rx_in_ord_paddr_get(curr_msdu);
1582 	*head_buf = *tail_buf = buf = htt_rx_in_order_netbuf_pop(pdev, paddr);
1583 
1584 	if (qdf_unlikely(NULL == buf)) {
1585 		qdf_print("%s: netbuf pop failed!\n", __func__);
1586 		return 1;
1587 	}
1588 	qdf_nbuf_set_pktlen(buf, HTT_RX_BUF_SIZE);
1589 #ifdef DEBUG_DMA_DONE
1590 	qdf_nbuf_unmap(pdev->osdev, buf, QDF_DMA_BIDIRECTIONAL);
1591 #else
1592 	qdf_nbuf_unmap(pdev->osdev, buf, QDF_DMA_FROM_DEVICE);
1593 #endif
1594 
1595 	if (pdev->cfg.is_first_wakeup_packet)
1596 		htt_get_first_packet_after_wow_wakeup(
1597 			msg_word + NEXT_FIELD_OFFSET_IN32, buf);
1598 
1599 	msdu_hdr = (uint32_t *) qdf_nbuf_data(buf);
1600 
1601 	/* First dword */
1602 	msdu_len = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_LEN_GET(*msdu_hdr);
1603 	*peer_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_PEER_ID_GET(*msdu_hdr);
1604 
1605 	/* Second dword */
1606 	msdu_hdr++;
1607 	*vdev_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_VDEV_ID_GET(*msdu_hdr);
1608 	*tid = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_TID_GET(*msdu_hdr);
1609 	*fw_desc = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_DESC_GET(*msdu_hdr);
1610 
1611 	qdf_nbuf_pull_head(buf, HTT_RX_OFFLOAD_DELIVER_IND_MSDU_HDR_BYTES);
1612 	qdf_nbuf_set_pktlen(buf, msdu_len);
1613 	return 0;
1614 }
1615 #endif
1616 
1617 uint32_t htt_rx_amsdu_rx_in_order_get_pktlog(qdf_nbuf_t rx_ind_msg)
1618 {
1619 	uint32_t *msg_word;
1620 
1621 	msg_word = (uint32_t *) qdf_nbuf_data(rx_ind_msg);
1622 	return HTT_RX_IN_ORD_PADDR_IND_PKTLOG_GET(*msg_word);
1623 }
1624 
1625 #ifndef CONFIG_HL_SUPPORT
1626 /* Return values: 1 - success, 0 - failure */
1627 #define RX_DESC_DISCARD_IS_SET ((*((u_int8_t *) &rx_desc->fw_desc.u.val)) & \
1628 							FW_RX_DESC_DISCARD_M)
1629 #define RX_DESC_MIC_ERR_IS_SET ((*((u_int8_t *) &rx_desc->fw_desc.u.val)) & \
1630 							FW_RX_DESC_ANY_ERR_M)
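/*
 * Note: despite its name, RX_DESC_MIC_ERR_IS_SET tests FW_RX_DESC_ANY_ERR_M,
 * i.e. any error flagged by the FW for this MSDU, not only MIC failures.
 */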
1631 
1632 static int
1633 htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
1634 				qdf_nbuf_t rx_ind_msg,
1635 				qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu,
1636 				uint32_t *replenish_cnt)
1637 {
1638 	qdf_nbuf_t msdu, next, prev = NULL;
1639 	uint8_t *rx_ind_data;
1640 	uint32_t *msg_word;
1641 	uint32_t rx_ctx_id;
1642 	unsigned int msdu_count = 0;
1643 	uint8_t offload_ind, frag_ind;
1644 	uint8_t peer_id;
1645 	struct htt_host_rx_desc_base *rx_desc;
1646 	enum rx_pkt_fate status = RX_PKT_FATE_SUCCESS;
1647 	qdf_dma_addr_t paddr;
1648 	qdf_mem_info_t mem_map_table = {0};
1649 	int ret = 1;
1650 	bool ipa_smmu = false;
1651 
1652 	HTT_ASSERT1(htt_rx_in_order_ring_elems(pdev) != 0);
1653 
1654 	rx_ind_data = qdf_nbuf_data(rx_ind_msg);
1655 	rx_ctx_id = QDF_NBUF_CB_RX_CTX_ID(rx_ind_msg);
1656 	msg_word = (uint32_t *) rx_ind_data;
1657 	peer_id = HTT_RX_IN_ORD_PADDR_IND_PEER_ID_GET(
1658 					*(u_int32_t *)rx_ind_data);
1659 
1660 	offload_ind = HTT_RX_IN_ORD_PADDR_IND_OFFLOAD_GET(*msg_word);
1661 	frag_ind = HTT_RX_IN_ORD_PADDR_IND_FRAG_GET(*msg_word);
1662 
1663 	/* Get the total number of MSDUs */
1664 	msdu_count = HTT_RX_IN_ORD_PADDR_IND_MSDU_CNT_GET(*(msg_word + 1));
1665 	HTT_RX_CHECK_MSDU_COUNT(msdu_count);
1666 
1667 	if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled &&
1668 	    pdev->rx_ring.smmu_map)
1669 		ipa_smmu = true;
1670 
1671 	ol_rx_update_histogram_stats(msdu_count, frag_ind, offload_ind);
1672 	htt_rx_dbg_rxbuf_httrxind(pdev, msdu_count);
1673 
1674 	msg_word =
1675 		(uint32_t *) (rx_ind_data + HTT_RX_IN_ORD_PADDR_IND_HDR_BYTES);
1676 	if (offload_ind) {
1677 		ol_rx_offload_paddr_deliver_ind_handler(pdev, msdu_count,
1678 							msg_word);
1679 		*head_msdu = *tail_msdu = NULL;
1680 		ret = 0;
1681 		goto end;
1682 	}
1683 
1684 	paddr = htt_rx_in_ord_paddr_get(msg_word);
1685 	(*head_msdu) = msdu = htt_rx_in_order_netbuf_pop(pdev, paddr);
1686 
1687 	if (qdf_unlikely(NULL == msdu)) {
1688 		qdf_print("%s: netbuf pop failed!\n", __func__);
1689 		*tail_msdu = NULL;
1690 		pdev->rx_ring.pop_fail_cnt++;
1691 		ret = 0;
1692 		goto end;
1693 	}
1694 
1695 	while (msdu_count > 0) {
1696 		if (ipa_smmu) {
1697 			qdf_update_mem_map_table(pdev->osdev, &mem_map_table,
1698 						 QDF_NBUF_CB_PADDR(msdu),
1699 						 HTT_RX_BUF_SIZE);
1700 			cds_smmu_map_unmap(false, 1, &mem_map_table);
1701 		}
1702 
1703 		/*
1704 		 * Set the netbuf length to be the entire buffer length
1705 		 * initially, so the unmap will unmap the entire buffer.
1706 		 */
1707 		qdf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
1708 #ifdef DEBUG_DMA_DONE
1709 		qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_BIDIRECTIONAL);
1710 #else
1711 		qdf_nbuf_unmap(pdev->osdev, msdu, QDF_DMA_FROM_DEVICE);
1712 #endif
1713 
1714 		/* cache consistency has been taken care of by qdf_nbuf_unmap */
1715 		rx_desc = htt_rx_desc(msdu);
1716 		htt_rx_extract_lro_info(msdu, rx_desc);
1717 
1718 		/*
1719 		 * Make the netbuf's data pointer point to the payload rather
1720 		 * than the descriptor.
1721 		 */
1722 		qdf_nbuf_pull_head(msdu, HTT_RX_STD_DESC_RESERVATION);
1723 
1724 		QDF_NBUF_CB_DP_TRACE_PRINT(msdu) = false;
1725 		qdf_dp_trace_set_track(msdu, QDF_RX);
1726 		QDF_NBUF_CB_TX_PACKET_TRACK(msdu) = QDF_NBUF_TX_PKT_DATA_TRACK;
1727 		QDF_NBUF_CB_RX_CTX_ID(msdu) = rx_ctx_id;
1728 		DPTRACE(qdf_dp_trace(msdu,
1729 			QDF_DP_TRACE_RX_HTT_PACKET_PTR_RECORD,
1730 			QDF_TRACE_DEFAULT_PDEV_ID,
1731 			qdf_nbuf_data_addr(msdu),
1732 			sizeof(qdf_nbuf_data(msdu)), QDF_RX));
1733 
1734 		qdf_nbuf_trim_tail(msdu,
1735 				   HTT_RX_BUF_SIZE -
1736 				   (RX_STD_DESC_SIZE +
1737 				    HTT_RX_IN_ORD_PADDR_IND_MSDU_LEN_GET(
1738 				    *(msg_word + NEXT_FIELD_OFFSET_IN32))));
1739 #if defined(HELIUMPLUS_DEBUG)
1740 		ol_txrx_dump_pkt(msdu, 0, 64);
1741 #endif
1742 		*((uint8_t *) &rx_desc->fw_desc.u.val) =
1743 			HTT_RX_IN_ORD_PADDR_IND_FW_DESC_GET(*(msg_word +
1744 						NEXT_FIELD_OFFSET_IN32));
1745 
1746 		msdu_count--;
1747 
1748 		/* calling callback function for packet logging */
1749 		if (pdev->rx_pkt_dump_cb) {
1750 			if (qdf_unlikely(RX_DESC_MIC_ERR_IS_SET &&
1751 					 !RX_DESC_DISCARD_IS_SET))
1752 				status = RX_PKT_FATE_FW_DROP_INVALID;
1753 			pdev->rx_pkt_dump_cb(msdu, peer_id, status);
1754 		}
1755 
1756 		if (pdev->cfg.is_first_wakeup_packet)
1757 			htt_get_first_packet_after_wow_wakeup(
1758 				msg_word + NEXT_FIELD_OFFSET_IN32, msdu);
1759 
		/* if the discard flag is set (SA is the self MAC), then
		 * don't check for a MIC failure.
		 */
1763 		if (qdf_unlikely(RX_DESC_MIC_ERR_IS_SET &&
1764 					!RX_DESC_DISCARD_IS_SET)) {
1765 			uint8_t tid =
1766 				HTT_RX_IN_ORD_PADDR_IND_EXT_TID_GET(
1767 					*(u_int32_t *)rx_ind_data);
1768 			ol_rx_mic_error_handler(pdev->txrx_pdev, tid, peer_id,
1769 						rx_desc, msdu);
1770 
1771 			htt_rx_desc_frame_free(pdev, msdu);
1772 			/* if this is the last msdu */
1773 			if (!msdu_count) {
1774 				/* if this is the only msdu */
1775 				if (!prev) {
1776 					*head_msdu = *tail_msdu = NULL;
1777 					ret = 0;
1778 					goto end;
1779 				}
1780 				*tail_msdu = prev;
1781 				qdf_nbuf_set_next(prev, NULL);
1782 				goto end;
1783 			} else { /* if this is not the last msdu */
1784 				/* get the next msdu */
1785 				msg_word += HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS;
1786 				paddr = htt_rx_in_ord_paddr_get(msg_word);
1787 				next = htt_rx_in_order_netbuf_pop(pdev, paddr);
1788 				if (qdf_unlikely(NULL == next)) {
1789 					qdf_print("%s: netbuf pop failed!\n",
1790 								 __func__);
1791 					*tail_msdu = NULL;
1792 					pdev->rx_ring.pop_fail_cnt++;
1793 					ret = 0;
1794 					goto end;
1795 				}
1796 
1797 				/* if this is not the first msdu, update the
1798 				 * next pointer of the preceding msdu
1799 				 */
1800 				if (prev) {
1801 					qdf_nbuf_set_next(prev, next);
1802 				} else {
1803 					/* if this is the first msdu, update the
1804 					 * head pointer
1805 					 */
1806 					*head_msdu = next;
1807 				}
1808 				msdu = next;
1809 				continue;
1810 			}
1811 		}
1812 
1813 		/* Update checksum result */
1814 		htt_set_checksum_result_ll(pdev, msdu, rx_desc);
1815 
1816 		/* check if this is the last msdu */
1817 		if (msdu_count) {
1818 			msg_word += HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS;
1819 			paddr = htt_rx_in_ord_paddr_get(msg_word);
1820 			next = htt_rx_in_order_netbuf_pop(pdev, paddr);
1821 			if (qdf_unlikely(NULL == next)) {
1822 				qdf_print("%s: netbuf pop failed!\n",
1823 					  __func__);
1824 				*tail_msdu = NULL;
1825 				pdev->rx_ring.pop_fail_cnt++;
1826 				ret = 0;
1827 				goto end;
1828 			}
1829 			qdf_nbuf_set_next(msdu, next);
1830 			prev = msdu;
1831 			msdu = next;
1832 		} else {
1833 			*tail_msdu = msdu;
1834 			qdf_nbuf_set_next(msdu, NULL);
1835 		}
1836 	}
1837 
1838 end:
1839 	return ret;
1840 }
1841 #endif
1842 
1843 int16_t htt_rx_mpdu_desc_rssi_dbm(htt_pdev_handle pdev, void *mpdu_desc)
1844 {
1845 	/*
1846 	 * Currently the RSSI is provided only as a field in the
1847 	 * HTT_T2H_RX_IND message, rather than in each rx descriptor.
1848 	 */
1849 	return HTT_RSSI_INVALID;
1850 }
1851 
/*
 * htt_rx_amsdu_pop -
 * global function pointer that is programmed during attach to point to
 * htt_rx_amsdu_pop_ll, htt_rx_amsdu_rx_in_order_pop_ll (full reorder
 * offload), htt_rx_mon_amsdu_rx_in_order_pop_ll (monitor mode) or
 * htt_rx_amsdu_pop_hl (HL).
 */
1857 int (*htt_rx_amsdu_pop)(htt_pdev_handle pdev,
1858 			qdf_nbuf_t rx_ind_msg,
1859 			qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu,
1860 			uint32_t *msdu_count);
1861 
/*
 * htt_rx_frag_pop -
 * global function pointer that is programmed during attach to point to
 * htt_rx_amsdu_pop_ll, htt_rx_amsdu_rx_in_order_pop_ll (full reorder
 * offload) or htt_rx_frag_pop_hl (HL).
 */
1867 int (*htt_rx_frag_pop)(htt_pdev_handle pdev,
1868 		       qdf_nbuf_t rx_ind_msg,
1869 		       qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu,
1870 		       uint32_t *msdu_count);
1871 
int (*htt_rx_offload_msdu_cnt)(htt_pdev_handle pdev);
1875 
1876 int
1877 (*htt_rx_offload_msdu_pop)(htt_pdev_handle pdev,
1878 			   qdf_nbuf_t offload_deliver_msg,
1879 			   int *vdev_id,
1880 			   int *peer_id,
1881 			   int *tid,
1882 			   uint8_t *fw_desc,
1883 			   qdf_nbuf_t *head_buf, qdf_nbuf_t *tail_buf);
1884 
1885 void * (*htt_rx_mpdu_desc_list_next)(htt_pdev_handle pdev,
1886 				    qdf_nbuf_t rx_ind_msg);
1887 
1888 bool (*htt_rx_mpdu_desc_retry)(htt_pdev_handle pdev, void *mpdu_desc);
1889 
1890 uint16_t (*htt_rx_mpdu_desc_seq_num)(htt_pdev_handle pdev, void *mpdu_desc);
1891 
1892 void (*htt_rx_mpdu_desc_pn)(htt_pdev_handle pdev,
1893 			    void *mpdu_desc,
1894 			    union htt_rx_pn_t *pn, int pn_len_bits);
1895 
1896 uint8_t (*htt_rx_mpdu_desc_tid)(htt_pdev_handle pdev, void *mpdu_desc);
1897 
1898 bool (*htt_rx_msdu_desc_completes_mpdu)(htt_pdev_handle pdev, void *msdu_desc);
1899 
1900 bool (*htt_rx_msdu_first_msdu_flag)(htt_pdev_handle pdev, void *msdu_desc);
1901 
1902 int (*htt_rx_msdu_has_wlan_mcast_flag)(htt_pdev_handle pdev, void *msdu_desc);
1903 
1904 bool (*htt_rx_msdu_is_wlan_mcast)(htt_pdev_handle pdev, void *msdu_desc);
1905 
1906 int (*htt_rx_msdu_is_frag)(htt_pdev_handle pdev, void *msdu_desc);
1907 
1908 void * (*htt_rx_msdu_desc_retrieve)(htt_pdev_handle pdev, qdf_nbuf_t msdu);
1909 
1910 bool (*htt_rx_mpdu_is_encrypted)(htt_pdev_handle pdev, void *mpdu_desc);
1911 
1912 bool (*htt_rx_msdu_desc_key_id)(htt_pdev_handle pdev,
1913 				void *mpdu_desc, uint8_t *key_id);
1914 
1915 #ifndef CONFIG_HL_SUPPORT
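/**
 * htt_rx_mpdu_desc_list_next_ll() - get the next MPDU descriptor (LL)
 * @pdev: the HTT instance the rx data was received on
 * @rx_ind_msg: the netbuf containing the rx indication message
 *
 * Returns the rx descriptor of the ring buffer at the current msdu_desc
 * software read index, then re-syncs that index with the msdu_payld index.
 *
 * Return: abstract rx descriptor for the next MPDU
 */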
1916 static
1917 void *htt_rx_mpdu_desc_list_next_ll(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
1918 {
1919 	int idx = pdev->rx_ring.sw_rd_idx.msdu_desc;
1920 	qdf_nbuf_t netbuf = pdev->rx_ring.buf.netbufs_ring[idx];
1921 
1922 	pdev->rx_ring.sw_rd_idx.msdu_desc = pdev->rx_ring.sw_rd_idx.msdu_payld;
1923 	return (void *)htt_rx_desc(netbuf);
1924 }
1925 #endif
1926 
1927 bool (*htt_rx_msdu_chan_info_present)(
1928 	htt_pdev_handle pdev,
1929 	void *mpdu_desc);
1930 
1931 bool (*htt_rx_msdu_center_freq)(
1932 	htt_pdev_handle pdev,
1933 	struct ol_txrx_peer_t *peer,
1934 	void *mpdu_desc,
1935 	uint16_t *primary_chan_center_freq_mhz,
1936 	uint16_t *contig_chan1_center_freq_mhz,
1937 	uint16_t *contig_chan2_center_freq_mhz,
1938 	uint8_t *phy_mode);
1939 
1940 #ifndef CONFIG_HL_SUPPORT
1941 static void *htt_rx_in_ord_mpdu_desc_list_next_ll(htt_pdev_handle pdev,
1942 						  qdf_nbuf_t netbuf)
1943 {
1944 	return (void *)htt_rx_desc(netbuf);
1945 }
1946 #endif
1947 
1948 #if defined(CONFIG_HL_SUPPORT)
1949 
1950 /**
1951  * htt_rx_mpdu_desc_list_next_hl() - provides an abstract way to obtain
1952  *				     the next MPDU descriptor
1953  * @pdev: the HTT instance the rx data was received on
1954  * @rx_ind_msg: the netbuf containing the rx indication message
1955  *
 * For HL, the returned value is not an mpdu_desc; it is the translated
 * hl_rx_desc located just after the hl_ind_msg. For an HL A-MSDU the
 * data pointer cannot be advanced to the payload yet, because the HL rx
 * descriptor is not a fixed size, so the descriptor could not be
 * recovered by subtracting rx_desc_size at release time. Keep pointing
 * at the HL rx descriptor for now.
1962  *
1963  * Return: next abstract rx descriptor from the series of MPDUs
1964  *		   referenced by an rx ind msg
1965  */
1966 static inline void *
1967 htt_rx_mpdu_desc_list_next_hl(htt_pdev_handle pdev, qdf_nbuf_t rx_ind_msg)
1968 {
1969 	void *mpdu_desc = (void *)qdf_nbuf_data(rx_ind_msg);
1970 	return mpdu_desc;
1971 }
1972 
1973 /**
1974  * htt_rx_msdu_desc_retrieve_hl() - Retrieve a previously-stored rx descriptor
1975  *				    from a MSDU buffer
1976  * @pdev: the HTT instance the rx data was received on
 * @msdu: the buffer containing the MSDU payload
 *
 * For an HL A-MSDU the data pointer does not yet point to the payload;
 * it is advanced to the payload later, in ol_rx_deliver.
1981  *
1982  * Return: the corresponding abstract rx MSDU descriptor
1983  */
1984 static inline void *
1985 htt_rx_msdu_desc_retrieve_hl(htt_pdev_handle pdev, qdf_nbuf_t msdu)
1986 {
1987 	return qdf_nbuf_data(msdu);
1988 }
1989 
1990 static
1991 bool htt_rx_mpdu_is_encrypted_hl(htt_pdev_handle pdev, void *mpdu_desc)
1992 {
1993 	if (htt_rx_msdu_first_msdu_flag_hl(pdev, mpdu_desc) == true) {
1994 		/* Fix Me: only for little endian */
1995 		struct hl_htt_rx_desc_base *rx_desc =
1996 			(struct hl_htt_rx_desc_base *)mpdu_desc;
1997 
1998 		return HTT_WORD_GET(*(u_int32_t *)rx_desc,
1999 					HTT_HL_RX_DESC_MPDU_ENC);
2000 	} else {
		/* not the first msdu, so no encryption info for HL */
		qdf_print(
			"Error: encryption info requested from a non-first msdu.\n");
2004 		qdf_assert(0);
2005 		return false;
2006 	}
2007 }
2008 
2009 static inline bool
2010 htt_rx_msdu_chan_info_present_hl(htt_pdev_handle pdev, void *mpdu_desc)
2011 {
2012 	if (htt_rx_msdu_first_msdu_flag_hl(pdev, mpdu_desc) == true &&
2013 	    HTT_WORD_GET(*(u_int32_t *)mpdu_desc,
2014 			 HTT_HL_RX_DESC_CHAN_INFO_PRESENT))
2015 		return true;
2016 
2017 	return false;
2018 }
2019 
2020 static bool
2021 htt_rx_msdu_center_freq_hl(htt_pdev_handle pdev,
2022 			   struct ol_txrx_peer_t *peer,
2023 			   void *mpdu_desc,
2024 			   uint16_t *primary_chan_center_freq_mhz,
2025 			   uint16_t *contig_chan1_center_freq_mhz,
2026 			   uint16_t *contig_chan2_center_freq_mhz,
2027 			   uint8_t *phy_mode)
2028 {
2029 	int pn_len, index;
2030 	uint32_t *chan_info;
2031 
2032 	index = htt_rx_msdu_is_wlan_mcast(pdev, mpdu_desc) ?
2033 		txrx_sec_mcast : txrx_sec_ucast;
2034 
2035 	pn_len = (peer ?
2036 			pdev->txrx_pdev->rx_pn[peer->security[index].sec_type].
2037 								len : 0);
2038 	chan_info = (uint32_t *)((uint8_t *)mpdu_desc +
2039 			HTT_HL_RX_DESC_PN_OFFSET + pn_len);
2040 
2041 	if (htt_rx_msdu_chan_info_present_hl(pdev, mpdu_desc)) {
2042 		if (primary_chan_center_freq_mhz)
2043 			*primary_chan_center_freq_mhz =
2044 				HTT_WORD_GET(
2045 					*chan_info,
2046 					HTT_CHAN_INFO_PRIMARY_CHAN_CENTER_FREQ);
2047 		if (contig_chan1_center_freq_mhz)
2048 			*contig_chan1_center_freq_mhz =
2049 				HTT_WORD_GET(
2050 					*chan_info,
2051 					HTT_CHAN_INFO_CONTIG_CHAN1_CENTER_FREQ);
2052 		chan_info++;
2053 		if (contig_chan2_center_freq_mhz)
2054 			*contig_chan2_center_freq_mhz =
2055 				HTT_WORD_GET(
2056 					*chan_info,
2057 					HTT_CHAN_INFO_CONTIG_CHAN2_CENTER_FREQ);
2058 		if (phy_mode)
2059 			*phy_mode =
2060 				HTT_WORD_GET(*chan_info,
2061 					     HTT_CHAN_INFO_PHY_MODE);
2062 		return true;
2063 	}
2064 
2065 	if (primary_chan_center_freq_mhz)
2066 		*primary_chan_center_freq_mhz = 0;
2067 	if (contig_chan1_center_freq_mhz)
2068 		*contig_chan1_center_freq_mhz = 0;
2069 	if (contig_chan2_center_freq_mhz)
2070 		*contig_chan2_center_freq_mhz = 0;
2071 	if (phy_mode)
2072 		*phy_mode = 0;
2073 	return false;
2074 }
2075 
2076 static bool
2077 htt_rx_msdu_desc_key_id_hl(htt_pdev_handle htt_pdev,
2078 			   void *mpdu_desc, u_int8_t *key_id)
2079 {
2080 	if (htt_rx_msdu_first_msdu_flag_hl(htt_pdev, mpdu_desc) == true) {
2081 		/* Fix Me: only for little endian */
2082 		struct hl_htt_rx_desc_base *rx_desc =
2083 			(struct hl_htt_rx_desc_base *)mpdu_desc;
2084 
2085 		*key_id = rx_desc->key_id_oct;
2086 		return true;
2087 	}
2088 
2089 	return false;
2090 }
2091 
2092 #endif
2093 
2094 #ifndef CONFIG_HL_SUPPORT
2095 static void *htt_rx_msdu_desc_retrieve_ll(htt_pdev_handle pdev, qdf_nbuf_t msdu)
2096 {
2097 	return htt_rx_desc(msdu);
2098 }
2099 
2100 static bool htt_rx_mpdu_is_encrypted_ll(htt_pdev_handle pdev, void *mpdu_desc)
2101 {
2102 	struct htt_host_rx_desc_base *rx_desc =
2103 		(struct htt_host_rx_desc_base *)mpdu_desc;
2104 
2105 	return (((*((uint32_t *) &rx_desc->mpdu_start)) &
2106 		 RX_MPDU_START_0_ENCRYPTED_MASK) >>
2107 		RX_MPDU_START_0_ENCRYPTED_LSB) ? true : false;
2108 }
2109 
2110 static
2111 bool htt_rx_msdu_chan_info_present_ll(htt_pdev_handle pdev, void *mpdu_desc)
2112 {
2113 	return false;
2114 }
2115 
2116 static bool htt_rx_msdu_center_freq_ll(htt_pdev_handle pdev,
2117 	struct ol_txrx_peer_t *peer,
2118 	void *mpdu_desc,
2119 	uint16_t *primary_chan_center_freq_mhz,
2120 	uint16_t *contig_chan1_center_freq_mhz,
2121 	uint16_t *contig_chan2_center_freq_mhz,
2122 	uint8_t *phy_mode)
2123 {
2124 	if (primary_chan_center_freq_mhz)
2125 		*primary_chan_center_freq_mhz = 0;
2126 	if (contig_chan1_center_freq_mhz)
2127 		*contig_chan1_center_freq_mhz = 0;
2128 	if (contig_chan2_center_freq_mhz)
2129 		*contig_chan2_center_freq_mhz = 0;
2130 	if (phy_mode)
2131 		*phy_mode = 0;
2132 	return false;
2133 }
2134 
2135 static bool
2136 htt_rx_msdu_desc_key_id_ll(htt_pdev_handle pdev, void *mpdu_desc,
2137 			   uint8_t *key_id)
2138 {
2139 	struct htt_host_rx_desc_base *rx_desc = (struct htt_host_rx_desc_base *)
2140 						mpdu_desc;
2141 
2142 	if (!htt_rx_msdu_first_msdu_flag_ll(pdev, mpdu_desc))
2143 		return false;
2144 
2145 	*key_id = ((*(((uint32_t *) &rx_desc->msdu_end) + 1)) &
2146 		   (RX_MSDU_END_1_KEY_ID_OCT_MASK >>
2147 		    RX_MSDU_END_1_KEY_ID_OCT_LSB));
2148 
2149 	return true;
2150 }
2151 #endif
2152 
2153 void htt_rx_desc_frame_free(htt_pdev_handle htt_pdev, qdf_nbuf_t msdu)
2154 {
2155 	qdf_nbuf_free(msdu);
2156 }
2157 
2158 void htt_rx_msdu_desc_free(htt_pdev_handle htt_pdev, qdf_nbuf_t msdu)
2159 {
2160 	/*
2161 	 * The rx descriptor is in the same buffer as the rx MSDU payload,
2162 	 * and does not need to be freed separately.
2163 	 */
2164 }
2165 
2166 #if defined(CONFIG_HL_SUPPORT)
2167 
2168 /**
2169  * htt_rx_fill_ring_count() - replenish rx msdu buffer
2170  * @pdev: Handle (pointer) to HTT pdev.
2171  *
 * This function replenishes the rx ring up to the number of buffers
 * it can hold. For HL this is a no-op, since the host does not manage
 * an rx buffer ring.
2174  *
2175  * Return: None
2176  */
2177 static inline void htt_rx_fill_ring_count(htt_pdev_handle pdev)
2178 {
2179 }
2180 #else
2181 
2182 static void htt_rx_fill_ring_count(htt_pdev_handle pdev)
2183 {
2184 	int num_to_fill;
2185 
2186 	num_to_fill = pdev->rx_ring.fill_level - pdev->rx_ring.fill_cnt;
2187 	htt_rx_ring_fill_n(pdev, num_to_fill /* okay if <= 0 */);
2188 }
2189 #endif
2190 
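/**
 * htt_rx_msdu_buff_replenish() - replenish the rx ring if no other thread
 *	is already doing so
 * @pdev: handle to the HTT instance
 *
 * The refill_ref_cnt counter ensures that only the thread which drops the
 * count to zero performs the refill; the count is restored on the way out.
 *
 * Return: None
 */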
2191 void htt_rx_msdu_buff_replenish(htt_pdev_handle pdev)
2192 {
2193 	if (qdf_atomic_dec_and_test(&pdev->rx_ring.refill_ref_cnt))
2194 		htt_rx_fill_ring_count(pdev);
2195 
2196 	qdf_atomic_inc(&pdev->rx_ring.refill_ref_cnt);
2197 }
2198 
2199 #define RX_RING_REFILL_DEBT_MAX 128
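/**
 * htt_rx_msdu_buff_in_order_replenish() - replenish the rx ring, deferring
 *	the work if the refill lock is contended
 * @pdev: handle to the HTT instance
 * @num: number of rx buffers requested
 *
 * If the refill lock cannot be taken immediately and the accumulated refill
 * debt is still below RX_RING_REFILL_DEBT_MAX, the request is recorded as
 * debt for the current lock holder to serve and 0 is returned. Otherwise
 * the caller blocks on the lock and fills the ring itself, adjusting the
 * debt by the difference between what was requested and what was filled.
 *
 * Return: number of buffers actually placed on the rx ring
 */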
2200 int htt_rx_msdu_buff_in_order_replenish(htt_pdev_handle pdev, uint32_t num)
2201 {
2202 	int filled = 0;
2203 
2204 	if (!qdf_spin_trylock_bh(&(pdev->rx_ring.refill_lock))) {
2205 		if (qdf_atomic_read(&pdev->rx_ring.refill_debt)
2206 			 < RX_RING_REFILL_DEBT_MAX) {
2207 			qdf_atomic_add(num, &pdev->rx_ring.refill_debt);
2208 			pdev->rx_buff_debt_invoked++;
2209 			return filled; /* 0 */
2210 		}
		/*
		 * else:
		 * The debt is already large, so rather than letting it grow
		 * further, wait for the current lock holder to finish and
		 * then take the lock and fill our own share.
		 */
2217 		qdf_spin_lock_bh(&(pdev->rx_ring.refill_lock));
2218 	}
2219 	pdev->rx_buff_fill_n_invoked++;
2220 
2221 	filled = htt_rx_ring_fill_n(pdev, num);
2222 
2223 	if (filled > num) {
		/* we served ourselves and some of the outstanding debt */
		/* subtracting is safer than resetting the debt to 0 */
2226 		qdf_atomic_sub(filled - num, &pdev->rx_ring.refill_debt);
2227 	} else {
2228 		qdf_atomic_add(num - filled, &pdev->rx_ring.refill_debt);
2229 	}
2230 	qdf_spin_unlock_bh(&(pdev->rx_ring.refill_lock));
2231 
2232 	return filled;
2233 }
2234 
2235 #ifndef CONFIG_HL_SUPPORT
2236 #define AR600P_ASSEMBLE_HW_RATECODE(_rate, _nss, _pream)     \
2237 	(((_pream) << 6) | ((_nss) << 4) | (_rate))
2238 
2239 enum AR600P_HW_RATECODE_PREAM_TYPE {
2240 	AR600P_HW_RATECODE_PREAM_OFDM,
2241 	AR600P_HW_RATECODE_PREAM_CCK,
2242 	AR600P_HW_RATECODE_PREAM_HT,
2243 	AR600P_HW_RATECODE_PREAM_VHT,
2244 };
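/*
 * Example: AR600P_ASSEMBLE_HW_RATECODE(7, 2, AR600P_HW_RATECODE_PREAM_VHT)
 * packs to (3 << 6) | (2 << 4) | 7 = 0xE7, i.e. a VHT preamble with an
 * _nss field of 2 and a rate index of 7.
 */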
2245 
2246 /*--- RX In Order Hash Code --------------------------------------------------*/
2247 
2248 /* Initializes the circular linked list */
2249 static inline void htt_list_init(struct htt_list_node *head)
2250 {
2251 	head->prev = head;
2252 	head->next = head;
2253 }
2254 
2255 /* Adds entry to the end of the linked list */
2256 static inline void htt_list_add_tail(struct htt_list_node *head,
2257 				     struct htt_list_node *node)
2258 {
2259 	head->prev->next = node;
2260 	node->prev = head->prev;
2261 	node->next = head;
2262 	head->prev = node;
2263 }
2264 
2265 /* Removes the entry corresponding to the input node from the linked list */
2266 static inline void htt_list_remove(struct htt_list_node *node)
2267 {
2268 	node->prev->next = node->next;
2269 	node->next->prev = node->prev;
2270 }
2271 
2272 /* Helper macro to iterate through the linked list */
2273 #define HTT_LIST_ITER_FWD(iter, head) for (iter = (head)->next;		\
2274 					   (iter) != (head);		\
2275 					   (iter) = (iter)->next)	\
2276 
2277 #ifdef RX_HASH_DEBUG
2278 /* Hash cookie related macros */
2279 #define HTT_RX_HASH_COOKIE 0xDEED
2280 
2281 #define HTT_RX_HASH_COOKIE_SET(hash_element) \
2282 	((hash_element)->cookie = HTT_RX_HASH_COOKIE)
2283 
2284 #define HTT_RX_HASH_COOKIE_CHECK(hash_element) \
2285 	HTT_ASSERT_ALWAYS((hash_element)->cookie == HTT_RX_HASH_COOKIE)
2286 
2287 /* Hash count related macros */
2288 #define HTT_RX_HASH_COUNT_INCR(hash_bucket) \
2289 	((hash_bucket)->count++)
2290 
2291 #define HTT_RX_HASH_COUNT_DECR(hash_bucket) \
2292 	((hash_bucket)->count--)
2293 
2294 #define HTT_RX_HASH_COUNT_RESET(hash_bucket) ((hash_bucket)->count = 0)
2295 
2296 #define HTT_RX_HASH_COUNT_PRINT(hash_bucket) \
2297 	RX_HASH_LOG(qdf_print(" count %d\n", (hash_bucket)->count))
2298 #else                           /* RX_HASH_DEBUG */
2299 /* Hash cookie related macros */
2300 #define HTT_RX_HASH_COOKIE_SET(hash_element)    /* no-op */
2301 #define HTT_RX_HASH_COOKIE_CHECK(hash_element)  /* no-op */
2302 /* Hash count related macros */
2303 #define HTT_RX_HASH_COUNT_INCR(hash_bucket)     /* no-op */
2304 #define HTT_RX_HASH_COUNT_DECR(hash_bucket)     /* no-op */
2305 #define HTT_RX_HASH_COUNT_PRINT(hash_bucket)    /* no-op */
2306 #define HTT_RX_HASH_COUNT_RESET(hash_bucket)    /* no-op */
2307 #endif /* RX_HASH_DEBUG */
2308 
2309 /*
2310  * Inserts the given "physical address - network buffer" pair into the
2311  * hash table for the given pdev. This function will do the following:
2312  * 1. Determine which bucket to insert the pair into
2313  * 2. First try to allocate the hash entry for this pair from the pre-allocated
2314  *    entries list
2315  * 3. If there are no more entries in the pre-allocated entries list, allocate
2316  *    the hash entry from the hash memory pool
2317  * Note: this function is not thread-safe
2318  * Returns 0 - success, 1 - failure
2319  */
2320 int
2321 htt_rx_hash_list_insert(struct htt_pdev_t *pdev,
2322 			qdf_dma_addr_t paddr,
2323 			qdf_nbuf_t netbuf)
2324 {
2325 	int i;
2326 	int rc = 0;
2327 	struct htt_rx_hash_entry *hash_element = NULL;
2328 
2329 	qdf_spin_lock_bh(&(pdev->rx_ring.rx_hash_lock));
2330 
2331 	/* get rid of the marking bits if they are available */
2332 	paddr = htt_paddr_trim_to_37(paddr);
2333 
2334 	i = RX_HASH_FUNCTION(paddr);
2335 
2336 	/* Check if there are any entries in the pre-allocated free list */
2337 	if (pdev->rx_ring.hash_table[i]->freepool.next !=
2338 	    &pdev->rx_ring.hash_table[i]->freepool) {
2339 
2340 		hash_element =
2341 			(struct htt_rx_hash_entry *)(
2342 				(char *)
2343 				pdev->rx_ring.hash_table[i]->freepool.next -
2344 				pdev->rx_ring.listnode_offset);
2345 		if (qdf_unlikely(NULL == hash_element)) {
2346 			HTT_ASSERT_ALWAYS(0);
2347 			rc = 1;
2348 			goto hli_end;
2349 		}
2350 
2351 		htt_list_remove(pdev->rx_ring.hash_table[i]->freepool.next);
2352 	} else {
2353 		hash_element = qdf_mem_malloc(sizeof(struct htt_rx_hash_entry));
2354 		if (qdf_unlikely(NULL == hash_element)) {
2355 			HTT_ASSERT_ALWAYS(0);
2356 			rc = 1;
2357 			goto hli_end;
2358 		}
2359 		hash_element->fromlist = 0;
2360 	}
2361 
2362 	hash_element->netbuf = netbuf;
2363 	hash_element->paddr = paddr;
2364 	HTT_RX_HASH_COOKIE_SET(hash_element);
2365 
2366 	htt_list_add_tail(&pdev->rx_ring.hash_table[i]->listhead,
2367 			  &hash_element->listnode);
2368 
2369 	RX_HASH_LOG(qdf_print("rx hash: %s: paddr 0x%x netbuf %pK bucket %d\n",
2370 			      __func__, paddr, netbuf, (int)i));
2371 
2372 	HTT_RX_HASH_COUNT_INCR(pdev->rx_ring.hash_table[i]);
2373 	HTT_RX_HASH_COUNT_PRINT(pdev->rx_ring.hash_table[i]);
2374 
2375 hli_end:
2376 	qdf_spin_unlock_bh(&(pdev->rx_ring.rx_hash_lock));
2377 	return rc;
2378 }
2379 #endif
2380 
2381 #ifndef CONFIG_HL_SUPPORT
2382 /*
 * Given a physical address, this function finds the corresponding network
 * buffer in the hash table.
 * The caller must have already stripped the higher marking bits off paddr.
2386  */
2387 qdf_nbuf_t htt_rx_hash_list_lookup(struct htt_pdev_t *pdev,
2388 				   qdf_dma_addr_t     paddr)
2389 {
2390 	uint32_t i;
2391 	struct htt_list_node *list_iter = NULL;
2392 	qdf_nbuf_t netbuf = NULL;
2393 	struct htt_rx_hash_entry *hash_entry;
2394 
2395 	qdf_spin_lock_bh(&(pdev->rx_ring.rx_hash_lock));
2396 
2397 	if (!pdev->rx_ring.hash_table) {
2398 		qdf_spin_unlock_bh(&(pdev->rx_ring.rx_hash_lock));
2399 		return NULL;
2400 	}
2401 
2402 	i = RX_HASH_FUNCTION(paddr);
2403 
2404 	HTT_LIST_ITER_FWD(list_iter, &pdev->rx_ring.hash_table[i]->listhead) {
2405 		hash_entry = (struct htt_rx_hash_entry *)
2406 			     ((char *)list_iter -
2407 			      pdev->rx_ring.listnode_offset);
2408 
2409 		HTT_RX_HASH_COOKIE_CHECK(hash_entry);
2410 
2411 		if (hash_entry->paddr == paddr) {
2412 			/* Found the entry corresponding to paddr */
2413 			netbuf = hash_entry->netbuf;
2414 			/* set netbuf to NULL to trace if freed entry
2415 			 * is getting unmapped in hash deinit.
2416 			 */
2417 			hash_entry->netbuf = NULL;
2418 			htt_list_remove(&hash_entry->listnode);
2419 			HTT_RX_HASH_COUNT_DECR(pdev->rx_ring.hash_table[i]);
2420 			/*
2421 			 * if the rx entry is from the pre-allocated list,
2422 			 * return it
2423 			 */
2424 			if (hash_entry->fromlist)
2425 				htt_list_add_tail(
2426 					&pdev->rx_ring.hash_table[i]->freepool,
2427 					&hash_entry->listnode);
2428 			else
2429 				qdf_mem_free(hash_entry);
2430 
2431 			htt_rx_dbg_rxbuf_reset(pdev, netbuf);
2432 			break;
2433 		}
2434 	}
2435 
2436 	RX_HASH_LOG(qdf_print("rx hash: %s: paddr 0x%x, netbuf %pK, bucket %d\n",
2437 			      __func__, paddr, netbuf, (int)i));
2438 	HTT_RX_HASH_COUNT_PRINT(pdev->rx_ring.hash_table[i]);
2439 
2440 	qdf_spin_unlock_bh(&(pdev->rx_ring.rx_hash_lock));
2441 
2442 	if (netbuf == NULL) {
2443 		qdf_print("rx hash: %s: no entry found for %pK!\n",
2444 			  __func__, (void *)paddr);
2445 		if (cds_is_self_recovery_enabled())
2446 			cds_trigger_recovery(QDF_RX_HASH_NO_ENTRY_FOUND);
2447 		else
2448 			HTT_ASSERT_ALWAYS(0);
2449 	}
2450 
2451 	return netbuf;
2452 }
2453 
2454 /*
 * Initialization function for the rx buffer hash table. It allocates a hash
 * table of RX_NUM_HASH_BUCKETS buckets and initializes each bucket's list
 * head, free pool and pre-allocated entries.
2458  */
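/*
 * Each bucket is a single allocation laid out as
 *   struct htt_rx_hash_bucket | RX_ENTRIES_SIZE x struct htt_rx_hash_entry
 * The pre-allocated entries are strung onto the bucket's freepool list, so
 * hash insertions normally avoid calling qdf_mem_malloc().
 */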
2459 static int htt_rx_hash_init(struct htt_pdev_t *pdev)
2460 {
2461 	int i, j;
2462 	int rc = 0;
2463 	void *allocation;
2464 
2465 	HTT_ASSERT2(QDF_IS_PWR2(RX_NUM_HASH_BUCKETS));
2466 
2467 	/* hash table is array of bucket pointers */
2468 	pdev->rx_ring.hash_table =
2469 		qdf_mem_malloc(RX_NUM_HASH_BUCKETS *
2470 			       sizeof(struct htt_rx_hash_bucket *));
2471 
2472 	if (NULL == pdev->rx_ring.hash_table) {
2473 		qdf_print("rx hash table allocation failed!\n");
2474 		return 1;
2475 	}
2476 
2477 	qdf_spinlock_create(&(pdev->rx_ring.rx_hash_lock));
2478 	qdf_spin_lock_bh(&(pdev->rx_ring.rx_hash_lock));
2479 
2480 	for (i = 0; i < RX_NUM_HASH_BUCKETS; i++) {
2481 
2482 		qdf_spin_unlock_bh(&(pdev->rx_ring.rx_hash_lock));
2483 		/* pre-allocate bucket and pool of entries for this bucket */
2484 		allocation = qdf_mem_malloc((sizeof(struct htt_rx_hash_bucket) +
2485 			(RX_ENTRIES_SIZE * sizeof(struct htt_rx_hash_entry))));
		qdf_spin_lock_bh(&(pdev->rx_ring.rx_hash_lock));
		pdev->rx_ring.hash_table[i] = allocation;
		if (qdf_unlikely(!pdev->rx_ring.hash_table[i])) {
			qdf_print("rx hash bucket %d allocation failed\n",
				  (int)i);
			while (i) {
				i--;
				qdf_mem_free(pdev->rx_ring.hash_table[i]);
			}
			qdf_mem_free(pdev->rx_ring.hash_table);
			pdev->rx_ring.hash_table = NULL;
			rc = 1;
			goto hi_end;
		}

		HTT_RX_HASH_COUNT_RESET(pdev->rx_ring.hash_table[i]);
2491 
2492 		/* initialize the hash table buckets */
2493 		htt_list_init(&pdev->rx_ring.hash_table[i]->listhead);
2494 
2495 		/* initialize the hash table free pool per bucket */
2496 		htt_list_init(&pdev->rx_ring.hash_table[i]->freepool);
2497 
2498 		/* pre-allocate a pool of entries for this bucket */
2499 		pdev->rx_ring.hash_table[i]->entries =
2500 			(struct htt_rx_hash_entry *)
2501 			((uint8_t *)pdev->rx_ring.hash_table[i] +
2502 			sizeof(struct htt_rx_hash_bucket));
2503 
2504 		if (NULL == pdev->rx_ring.hash_table[i]->entries) {
2505 			qdf_print("rx hash bucket %d entries alloc failed\n",
2506 				(int)i);
2507 			while (i) {
2508 				i--;
2509 				qdf_mem_free(pdev->rx_ring.hash_table[i]);
2510 			}
2511 			qdf_mem_free(pdev->rx_ring.hash_table);
2512 			pdev->rx_ring.hash_table = NULL;
2513 			rc = 1;
2514 			goto hi_end;
2515 		}
2516 
2517 		/* initialize the free list with pre-allocated entries */
2518 		for (j = 0; j < RX_ENTRIES_SIZE; j++) {
2519 			pdev->rx_ring.hash_table[i]->entries[j].fromlist = 1;
2520 			htt_list_add_tail(
2521 				&pdev->rx_ring.hash_table[i]->freepool,
2522 				&pdev->rx_ring.hash_table[i]->entries[j].
2523 				listnode);
2524 		}
2525 	}
2526 
2527 	pdev->rx_ring.listnode_offset =
2528 		qdf_offsetof(struct htt_rx_hash_entry, listnode);
2529 hi_end:
2530 	qdf_spin_unlock_bh(&(pdev->rx_ring.rx_hash_lock));
2531 
2532 	return rc;
2533 }
2534 #endif
2535 
2536 /*--- RX In Order Hash Code --------------------------------------------------*/
2537 
/* htt_rx_attach() is placed at the end of the file so that the ll/hl
 * helper functions it references do not need forward declarations.
 */
2541 
2542 #if defined(CONFIG_HL_SUPPORT)
2543 
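/**
 * htt_rx_attach() - HL rx attach
 * @pdev: handle to the HTT instance
 *
 * For HL the host does not manage a MAC DMA rx ring, so no ring memory is
 * allocated here; this just initializes the minimal ring bookkeeping and
 * points the rx accessor function pointers at their _hl variants.
 *
 * Return: 0 - success
 */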
2544 int htt_rx_attach(struct htt_pdev_t *pdev)
2545 {
2546 	pdev->rx_ring.size = HTT_RX_RING_SIZE_MIN;
2547 	HTT_ASSERT2(IS_PWR2(pdev->rx_ring.size));
2548 	pdev->rx_ring.size_mask = pdev->rx_ring.size - 1;
	/* the host can force the ring base address if it wishes to do so */
2550 	pdev->rx_ring.base_paddr = 0;
2551 	htt_rx_amsdu_pop = htt_rx_amsdu_pop_hl;
2552 	htt_rx_frag_pop = htt_rx_frag_pop_hl;
2553 	htt_rx_offload_msdu_cnt = htt_rx_offload_msdu_cnt_hl;
2554 	htt_rx_offload_msdu_pop = htt_rx_offload_msdu_pop_hl;
2555 	htt_rx_mpdu_desc_list_next = htt_rx_mpdu_desc_list_next_hl;
2556 	htt_rx_mpdu_desc_retry = htt_rx_mpdu_desc_retry_hl;
2557 	htt_rx_mpdu_desc_seq_num = htt_rx_mpdu_desc_seq_num_hl;
2558 	htt_rx_mpdu_desc_pn = htt_rx_mpdu_desc_pn_hl;
2559 	htt_rx_mpdu_desc_tid = htt_rx_mpdu_desc_tid_hl;
2560 	htt_rx_msdu_desc_completes_mpdu = htt_rx_msdu_desc_completes_mpdu_hl;
2561 	htt_rx_msdu_first_msdu_flag = htt_rx_msdu_first_msdu_flag_hl;
2562 	htt_rx_msdu_has_wlan_mcast_flag = htt_rx_msdu_has_wlan_mcast_flag_hl;
2563 	htt_rx_msdu_is_wlan_mcast = htt_rx_msdu_is_wlan_mcast_hl;
2564 	htt_rx_msdu_is_frag = htt_rx_msdu_is_frag_hl;
2565 	htt_rx_msdu_desc_retrieve = htt_rx_msdu_desc_retrieve_hl;
2566 	htt_rx_mpdu_is_encrypted = htt_rx_mpdu_is_encrypted_hl;
2567 	htt_rx_msdu_desc_key_id = htt_rx_msdu_desc_key_id_hl;
2568 	htt_rx_msdu_chan_info_present = htt_rx_msdu_chan_info_present_hl;
2569 	htt_rx_msdu_center_freq = htt_rx_msdu_center_freq_hl;
2570 
2571 	/*
	 * In the HL case, the rx descriptor can have different sizes for
2573 	 * different sub-types of RX_IND messages, e.g. for the
2574 	 * initial vs. interior vs. final MSDUs within a PPDU.
2575 	 * The size of each RX_IND message's rx desc is read from
2576 	 * a field within the RX_IND message itself.
2577 	 * In the meantime, until the rx_desc_size_hl variable is
2578 	 * set to its real value based on the RX_IND message,
2579 	 * initialize it to a reasonable value (zero).
2580 	 */
2581 	pdev->rx_desc_size_hl = 0;
2582 	return 0;	/* success */
2583 }
2584 
2585 #else
2586 
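/**
 * htt_rx_attach() - LL rx attach
 * @pdev: handle to the HTT instance
 *
 * Allocates the rx ring resources: the physical-address ring, the alloc
 * index, and either the paddr->netbuf hash table plus target index (full
 * reorder offload) or the netbuf shadow ring. Initializes the refill
 * bookkeeping (reference count, lock, debt, retry timer), performs the
 * initial ring fill, and points the rx accessor function pointers at the
 * appropriate _ll variants (including the in-order and monitor-mode pop
 * functions where applicable).
 *
 * Return: 0 - success, 1 - failure
 */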
2587 int htt_rx_attach(struct htt_pdev_t *pdev)
2588 {
2589 	qdf_dma_addr_t paddr;
2590 	uint32_t ring_elem_size = sizeof(target_paddr_t);
2591 
2592 	pdev->rx_ring.size = htt_rx_ring_size(pdev);
2593 	HTT_ASSERT2(QDF_IS_PWR2(pdev->rx_ring.size));
2594 	pdev->rx_ring.size_mask = pdev->rx_ring.size - 1;
2595 
2596 	/*
2597 	 * Set the initial value for the level to which the rx ring
2598 	 * should be filled, based on the max throughput and the worst
2599 	 * likely latency for the host to fill the rx ring.
2600 	 * In theory, this fill level can be dynamically adjusted from
2601 	 * the initial value set here to reflect the actual host latency
2602 	 * rather than a conservative assumption.
2603 	 */
2604 	pdev->rx_ring.fill_level = htt_rx_ring_fill_level(pdev);
2605 
2606 	if (pdev->cfg.is_full_reorder_offload) {
2607 		if (htt_rx_hash_init(pdev))
2608 			goto fail1;
2609 
2610 		/* allocate the target index */
2611 		pdev->rx_ring.target_idx.vaddr =
2612 			 qdf_mem_alloc_consistent(pdev->osdev, pdev->osdev->dev,
2613 				 sizeof(uint32_t),
2614 				 &paddr);
2615 
2616 		if (!pdev->rx_ring.target_idx.vaddr)
2617 			goto fail2;
2618 
2619 		pdev->rx_ring.target_idx.paddr = paddr;
2620 		*pdev->rx_ring.target_idx.vaddr = 0;
2621 	} else {
2622 		pdev->rx_ring.buf.netbufs_ring =
2623 			qdf_mem_malloc(pdev->rx_ring.size * sizeof(qdf_nbuf_t));
2624 		if (!pdev->rx_ring.buf.netbufs_ring)
2625 			goto fail1;
2626 
2627 		pdev->rx_ring.sw_rd_idx.msdu_payld = 0;
2628 		pdev->rx_ring.sw_rd_idx.msdu_desc = 0;
2629 	}
2630 
2631 	pdev->rx_ring.buf.paddrs_ring =
2632 		qdf_mem_alloc_consistent(
2633 			pdev->osdev, pdev->osdev->dev,
2634 			 pdev->rx_ring.size * ring_elem_size,
2635 			 &paddr);
2636 	if (!pdev->rx_ring.buf.paddrs_ring)
2637 		goto fail3;
2638 
2639 	pdev->rx_ring.base_paddr = paddr;
2640 	pdev->rx_ring.alloc_idx.vaddr =
2641 		 qdf_mem_alloc_consistent(
2642 			pdev->osdev, pdev->osdev->dev,
2643 			 sizeof(uint32_t), &paddr);
2644 
2645 	if (!pdev->rx_ring.alloc_idx.vaddr)
2646 		goto fail4;
2647 
2648 	pdev->rx_ring.alloc_idx.paddr = paddr;
2649 	*pdev->rx_ring.alloc_idx.vaddr = 0;
2650 
2651 	/*
2652 	 * Initialize the Rx refill reference counter to be one so that
2653 	 * only one thread is allowed to refill the Rx ring.
2654 	 */
2655 	qdf_atomic_init(&pdev->rx_ring.refill_ref_cnt);
2656 	qdf_atomic_inc(&pdev->rx_ring.refill_ref_cnt);
2657 
2658 	/* Initialize the refill_lock and debt (for rx-parallelization) */
2659 	qdf_spinlock_create(&(pdev->rx_ring.refill_lock));
2660 	qdf_atomic_init(&pdev->rx_ring.refill_debt);
2661 
2662 
2663 	/* Initialize the Rx refill retry timer */
2664 	qdf_timer_init(pdev->osdev,
2665 		 &pdev->rx_ring.refill_retry_timer,
2666 		 htt_rx_ring_refill_retry, (void *)pdev,
2667 		 QDF_TIMER_TYPE_SW);
2668 
2669 	pdev->rx_ring.fill_cnt = 0;
2670 	pdev->rx_ring.pop_fail_cnt = 0;
2671 #ifdef DEBUG_DMA_DONE
2672 	pdev->rx_ring.dbg_ring_idx = 0;
2673 	pdev->rx_ring.dbg_refill_cnt = 0;
2674 	pdev->rx_ring.dbg_sync_success = 0;
2675 #endif
2676 #ifdef HTT_RX_RESTORE
2677 	pdev->rx_ring.rx_reset = 0;
2678 	pdev->rx_ring.htt_rx_restore = 0;
2679 #endif
2680 	htt_rx_dbg_rxbuf_init(pdev);
2681 	htt_rx_ring_fill_n(pdev, pdev->rx_ring.fill_level);
2682 
2683 	if (pdev->cfg.is_full_reorder_offload) {
2684 		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
2685 			"HTT: full reorder offload enabled");
2686 		htt_rx_amsdu_pop = htt_rx_amsdu_rx_in_order_pop_ll;
2687 		htt_rx_frag_pop = htt_rx_amsdu_rx_in_order_pop_ll;
2688 		htt_rx_mpdu_desc_list_next =
2689 			 htt_rx_in_ord_mpdu_desc_list_next_ll;
2690 	} else {
2691 		htt_rx_amsdu_pop = htt_rx_amsdu_pop_ll;
2692 		htt_rx_frag_pop = htt_rx_amsdu_pop_ll;
2693 		htt_rx_mpdu_desc_list_next = htt_rx_mpdu_desc_list_next_ll;
2694 	}
2695 
2696 	if (cds_get_conparam() == QDF_GLOBAL_MONITOR_MODE)
2697 		htt_rx_amsdu_pop = htt_rx_mon_amsdu_rx_in_order_pop_ll;
2698 
2699 	htt_rx_offload_msdu_cnt = htt_rx_offload_msdu_cnt_ll;
2700 	htt_rx_offload_msdu_pop = htt_rx_offload_msdu_pop_ll;
2701 	htt_rx_mpdu_desc_retry = htt_rx_mpdu_desc_retry_ll;
2702 	htt_rx_mpdu_desc_seq_num = htt_rx_mpdu_desc_seq_num_ll;
2703 	htt_rx_mpdu_desc_pn = htt_rx_mpdu_desc_pn_ll;
2704 	htt_rx_mpdu_desc_tid = htt_rx_mpdu_desc_tid_ll;
2705 	htt_rx_msdu_desc_completes_mpdu = htt_rx_msdu_desc_completes_mpdu_ll;
2706 	htt_rx_msdu_first_msdu_flag = htt_rx_msdu_first_msdu_flag_ll;
2707 	htt_rx_msdu_has_wlan_mcast_flag = htt_rx_msdu_has_wlan_mcast_flag_ll;
2708 	htt_rx_msdu_is_wlan_mcast = htt_rx_msdu_is_wlan_mcast_ll;
2709 	htt_rx_msdu_is_frag = htt_rx_msdu_is_frag_ll;
2710 	htt_rx_msdu_desc_retrieve = htt_rx_msdu_desc_retrieve_ll;
2711 	htt_rx_mpdu_is_encrypted = htt_rx_mpdu_is_encrypted_ll;
2712 	htt_rx_msdu_desc_key_id = htt_rx_msdu_desc_key_id_ll;
2713 	htt_rx_msdu_chan_info_present = htt_rx_msdu_chan_info_present_ll;
2714 	htt_rx_msdu_center_freq = htt_rx_msdu_center_freq_ll;
2715 
2716 	return 0;               /* success */
2717 
2718 fail4:
2719 	qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
2720 				   pdev->rx_ring.size * sizeof(target_paddr_t),
2721 				   pdev->rx_ring.buf.paddrs_ring,
2722 				   pdev->rx_ring.base_paddr,
2723 				   qdf_get_dma_mem_context((&pdev->rx_ring.buf),
2724 							   memctx));
2725 
2726 fail3:
2727 	if (pdev->cfg.is_full_reorder_offload)
2728 		qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
2729 					   sizeof(uint32_t),
2730 					   pdev->rx_ring.target_idx.vaddr,
2731 					   pdev->rx_ring.target_idx.paddr,
2732 					   qdf_get_dma_mem_context((&pdev->
2733 								    rx_ring.
2734 								    target_idx),
2735 								   memctx));
2736 	else
2737 		qdf_mem_free(pdev->rx_ring.buf.netbufs_ring);
2738 
2739 fail2:
2740 	if (pdev->cfg.is_full_reorder_offload)
2741 		htt_rx_hash_deinit(pdev);
2742 
2743 fail1:
2744 	return 1;               /* failure */
2745 }
2746 #endif
2747 
2748 #ifdef IPA_OFFLOAD
2749 #ifdef QCA_WIFI_3_0
2750 /**
2751  * htt_rx_ipa_uc_alloc_wdi2_rsc() - Allocate WDI2.0 resources
2752  * @pdev: htt context
2753  * @rx_ind_ring_elements: rx ring elements
2754  *
2755  * Return: 0 success
2756  */
2757 static int htt_rx_ipa_uc_alloc_wdi2_rsc(struct htt_pdev_t *pdev,
2758 			 unsigned int rx_ind_ring_elements)
2759 {
	/*
	 * Allocate the RX2 indication ring.
	 * RX2 IND ring element layout:
	 *   4 bytes: pointer
	 *   2 bytes: VDEV ID
	 *   2 bytes: length
	 *
	 * The ring size passed to the allocator is in bytes.
	 */
2769 	pdev->ipa_uc_rx_rsc.rx2_ind_ring =
2770 		qdf_mem_shared_mem_alloc(pdev->osdev,
2771 					 rx_ind_ring_elements *
2772 					 sizeof(qdf_dma_addr_t));
2773 	if (!pdev->ipa_uc_rx_rsc.rx2_ind_ring) {
2774 		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
2775 			  "%s: Unable to allocate memory for IPA rx2 ind ring",
2776 			  __func__);
2777 		return 1;
2778 	}
2779 
2780 	pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx =
2781 		qdf_mem_shared_mem_alloc(pdev->osdev, 4);
2782 	if (!pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx) {
2783 		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
2784 			  "%s: Unable to allocate memory for IPA rx proc done index",
2785 			  __func__);
2786 		qdf_mem_shared_mem_free(pdev->osdev,
2787 					pdev->ipa_uc_rx_rsc.rx2_ind_ring);
2788 		return 1;
2789 	}
2790 
2791 	return 0;
2792 }
2793 
2794 /**
2795  * htt_rx_ipa_uc_free_wdi2_rsc() - Free WDI2.0 resources
2796  * @pdev: htt context
2797  *
2798  * Return: None
2799  */
2800 static void htt_rx_ipa_uc_free_wdi2_rsc(struct htt_pdev_t *pdev)
2801 {
2802 	qdf_mem_shared_mem_free(pdev->osdev, pdev->ipa_uc_rx_rsc.rx2_ind_ring);
2803 	qdf_mem_shared_mem_free(pdev->osdev,
2804 				pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx);
2805 }
2806 #else
2807 static int htt_rx_ipa_uc_alloc_wdi2_rsc(struct htt_pdev_t *pdev,
2808 			 unsigned int rx_ind_ring_elements)
2809 {
2810 	return 0;
2811 }
2812 
2813 static void htt_rx_ipa_uc_free_wdi2_rsc(struct htt_pdev_t *pdev)
2814 {
2815 }
2816 #endif
2817 
2818 /**
2819  * htt_rx_ipa_uc_attach() - attach htt ipa uc rx resource
2820  * @pdev: htt context
 * @rx_ind_ring_elements: number of rx indication ring elements
2822  *
2823  * Return: 0 success
2824  */
2825 int htt_rx_ipa_uc_attach(struct htt_pdev_t *pdev,
2826 			 unsigned int rx_ind_ring_elements)
2827 {
2828 	int ret = 0;
2829 
2830 	/*
2831 	 * Allocate RX indication ring
2832 	 * RX IND ring element
2833 	 *   4bytes: pointer
2834 	 *   2bytes: VDEV ID
2835 	 *   2bytes: length
2836 	 */
2837 	pdev->ipa_uc_rx_rsc.rx_ind_ring =
2838 		qdf_mem_shared_mem_alloc(pdev->osdev,
2839 					 rx_ind_ring_elements *
2840 					 sizeof(struct ipa_uc_rx_ring_elem_t));
2841 	if (!pdev->ipa_uc_rx_rsc.rx_ind_ring) {
2842 		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
2843 			  "%s: Unable to allocate memory for IPA rx ind ring",
2844 			  __func__);
2845 		return 1;
2846 	}
2847 
2848 	pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx =
2849 		qdf_mem_shared_mem_alloc(pdev->osdev, 4);
2850 	if (!pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx) {
2851 		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
2852 			  "%s: Unable to allocate memory for IPA rx proc done index",
2853 			  __func__);
2854 		qdf_mem_shared_mem_free(pdev->osdev,
2855 					pdev->ipa_uc_rx_rsc.rx_ind_ring);
2856 		return 1;
2857 	}
2858 
2859 	ret = htt_rx_ipa_uc_alloc_wdi2_rsc(pdev, rx_ind_ring_elements);
2860 	if (ret) {
		qdf_mem_shared_mem_free(pdev->osdev,
					pdev->ipa_uc_rx_rsc.rx_ind_ring);
2862 		qdf_mem_shared_mem_free(pdev->osdev,
2863 					pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx);
2864 	}
2865 	return ret;
2866 }
2867 
2868 int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev)
2869 {
2870 	qdf_mem_shared_mem_free(pdev->osdev, pdev->ipa_uc_rx_rsc.rx_ind_ring);
2871 	qdf_mem_shared_mem_free(pdev->osdev,
2872 				pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx);
2873 
2874 	htt_rx_ipa_uc_free_wdi2_rsc(pdev);
2875 	return 0;
2876 }
2877 #endif /* IPA_OFFLOAD */
2878 
2879 #ifndef REMOVE_PKT_LOG
2880 /**
2881  * htt_register_rx_pkt_dump_callback() - registers callback to
2882  *   get rx pkt status and call callback to do rx packet dump
2883  *
2884  * @pdev: htt pdev handle
2885  * @callback: callback to get rx pkt status and
2886  *     call callback to do rx packet dump
2887  *
2888  * This function is used to register the callback to get
2889  * rx pkt status and call callback to do rx packet dump
2890  *
2891  * Return: None
2892  *
2893  */
2894 void htt_register_rx_pkt_dump_callback(struct htt_pdev_t *pdev,
2895 				tp_rx_pkt_dump_cb callback)
2896 {
2897 	if (!pdev) {
2898 		qdf_print("%s: %s, %s",
2899 			__func__,
2900 			"htt pdev is NULL",
2901 			"rx packet status callback register unsuccessful\n");
2902 		return;
2903 	}
2904 	pdev->rx_pkt_dump_cb = callback;
2905 }
2906 
2907 /**
2908  * htt_deregister_rx_pkt_dump_callback() - deregisters callback to
2909  *   get rx pkt status and call callback to do rx packet dump
2910  *
2911  * @pdev: htt pdev handle
2912  *
2913  * This function is used to deregister the callback to get
2914  * rx pkt status and call callback to do rx packet dump
2915  *
2916  * Return: None
2917  *
2918  */
2919 void htt_deregister_rx_pkt_dump_callback(struct htt_pdev_t *pdev)
2920 {
2921 	if (!pdev) {
2922 		qdf_print("%s: %s, %s",
2923 			__func__,
2924 			"htt pdev is NULL",
2925 			"rx packet status callback deregister unsuccessful\n");
2926 		return;
2927 	}
2928 	pdev->rx_pkt_dump_cb = NULL;
2929 }
2930 
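/**
 * htt_rx_hash_smmu_map() - create or release SMMU S1 mappings for every
 *	network buffer currently held in the rx hash table
 * @map: true to map, false to unmap
 * @pdev: handle to the HTT instance
 *
 * Return: QDF_STATUS_SUCCESS on success; QDF_STATUS_E_FAILURE (after
 *	   releasing the hash lock) if any cds_smmu_map_unmap() call fails
 */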
2931 static QDF_STATUS htt_rx_hash_smmu_map(bool map, struct htt_pdev_t *pdev)
2932 {
2933 	uint32_t i;
2934 	struct htt_rx_hash_entry *hash_entry;
2935 	struct htt_rx_hash_bucket **hash_table;
2936 	struct htt_list_node *list_iter = NULL;
2937 	qdf_mem_info_t mem_map_table = {0};
2938 	int ret;
2939 
2940 	qdf_spin_lock_bh(&pdev->rx_ring.rx_hash_lock);
2941 	hash_table = pdev->rx_ring.hash_table;
2942 
2943 	for (i = 0; i < RX_NUM_HASH_BUCKETS; i++) {
		/* Map/unmap the buffers held in hash bucket i */
2945 		list_iter = hash_table[i]->listhead.next;
2946 		while (list_iter != &hash_table[i]->listhead) {
2947 			hash_entry =
2948 				(struct htt_rx_hash_entry *)((char *)list_iter -
2949 							     pdev->rx_ring.
2950 							     listnode_offset);
2951 			if (hash_entry->netbuf) {
2952 				qdf_update_mem_map_table(pdev->osdev,
2953 						&mem_map_table,
2954 						QDF_NBUF_CB_PADDR(
2955 							hash_entry->netbuf),
2956 						HTT_RX_BUF_SIZE);
				ret = cds_smmu_map_unmap(map, 1,
							 &mem_map_table);
				if (ret) {
					qdf_spin_unlock_bh(
						&pdev->rx_ring.rx_hash_lock);
					return QDF_STATUS_E_FAILURE;
				}
2961 			}
2962 			list_iter = list_iter->next;
2963 		}
2964 	}
2965 	qdf_spin_unlock_bh(&pdev->rx_ring.rx_hash_lock);
2966 
2967 	return QDF_STATUS_SUCCESS;
2968 }
2969 
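/**
 * htt_rx_update_smmu_map() - update SMMU S1 mappings for rx ring buffers
 * @pdev: handle to the HTT instance
 * @map: true to map, false to unmap
 *
 * A no-op unless the rx hash table exists and both SMMU S1 translation and
 * the IPA uC are enabled. Records the new state in rx_ring.smmu_map and
 * remaps/unmaps the hashed rx buffers under the refill lock.
 *
 * Return: QDF_STATUS
 */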
2970 QDF_STATUS htt_rx_update_smmu_map(struct htt_pdev_t *pdev, bool map)
2971 {
2972 	QDF_STATUS status;
2973 
2974 	if (NULL == pdev->rx_ring.hash_table)
2975 		return QDF_STATUS_SUCCESS;
2976 
2977 	if (!qdf_mem_smmu_s1_enabled(pdev->osdev) || !pdev->is_ipa_uc_enabled)
2978 		return QDF_STATUS_SUCCESS;
2979 
2980 	qdf_spin_lock_bh(&pdev->rx_ring.refill_lock);
2981 	pdev->rx_ring.smmu_map = map;
2982 	status = htt_rx_hash_smmu_map(map, pdev);
2983 	qdf_spin_unlock_bh(&pdev->rx_ring.refill_lock);
2984 
2985 	return status;
2986 }
2987 #endif
2988