/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "qdf_module.h"
#include "hal_li_hw_headers.h"
#include "hal_reo.h"
#include "hal_li_reo.h"
#include "hal_li_api.h"

uint32_t hal_get_reo_reg_base_offset_li(void)
{
	return SEQ_WCSS_UMAC_REO_REG_OFFSET;
}

/**
 * hal_reo_qdesc_setup_li() - Setup HW REO queue descriptor
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @tid: TID
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
 * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
 * @pn_type: PN type (HAL_PN_*)
 * @vdev_stats_id: vdev stats id
 */
void hal_reo_qdesc_setup_li(hal_soc_handle_t hal_soc_hdl, int tid,
			    uint32_t ba_window_size,
			    uint32_t start_seq, void *hw_qdesc_vaddr,
			    qdf_dma_addr_t hw_qdesc_paddr,
			    int pn_type, uint8_t vdev_stats_id)
{
	uint32_t *reo_queue_desc = (uint32_t *)hw_qdesc_vaddr;
	uint32_t *reo_queue_ext_desc;
	uint32_t reg_val;
	uint32_t pn_enable;
	uint32_t pn_size = 0;

	qdf_mem_zero(hw_qdesc_vaddr, sizeof(struct rx_reo_queue));

	hal_uniform_desc_hdr_setup(reo_queue_desc, HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_desc, UNIFORM_DESCRIPTOR_HEADER_0,
			   RESERVED_0A, 0xDDBEEF);
	/* This is just SW metadata that will be copied to the REO destination
	 * descriptors indicated by hardware.
	 * TODO: Setting TID in this field. See if we should set something else.
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_1,
			   RECEIVE_QUEUE_NUMBER, tid);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
			   VLD, 1);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
			   ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
			   HAL_RX_LINK_DESC_CNTR);

	/*
	 * Fields DISABLE_DUPLICATE_DETECTION and SOFT_REORDER_ENABLE will be 0
	 */

	reg_val = TID_TO_WME_AC(tid);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, AC, reg_val);

	if (ba_window_size < 1)
		ba_window_size = 1;

	/* WAR to get 2k exception in Non BA case.
	 * Setting window size to 2 to get 2k jump exception
	 * when we receive aggregates in Non BA case
	 */
	ba_window_size = hal_update_non_ba_win_size(tid, ba_window_size);

	/* Set RTY bit for non-BA case. Duplicate detection is currently not
	 * done by HW in non-BA case if RTY bit is not set.
	 * TODO: This is a temporary WAR and should be removed once HW fix is
	 * made to check and discard duplicates even if RTY bit is not set.
	 */
	if (ba_window_size == 1)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, RTY, 1);

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, BA_WINDOW_SIZE,
			   ba_window_size - 1);

	switch (pn_type) {
	case HAL_PN_WPA:
		pn_enable = 1;
		pn_size = PN_SIZE_48;
		break;
	case HAL_PN_WAPI_EVEN:
	case HAL_PN_WAPI_UNEVEN:
		pn_enable = 1;
		pn_size = PN_SIZE_128;
		break;
	default:
		pn_enable = 0;
		break;
	}

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_CHECK_NEEDED,
			   pn_enable);

	if (pn_type == HAL_PN_WAPI_EVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
				   PN_SHALL_BE_EVEN, 1);
	else if (pn_type == HAL_PN_WAPI_UNEVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
				   PN_SHALL_BE_UNEVEN, 1);

	/*
	 * TODO: Need to check if PN handling in SW needs to be enabled
	 * So far this is not a requirement
	 */

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_SIZE,
			   pn_size);

	/* TODO: Check if RX_REO_QUEUE_2_IGNORE_AMPDU_FLAG need to be set
	 * based on BA window size and/or AMPDU capabilities
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
			   IGNORE_AMPDU_FLAG, 1);

	if (start_seq <= 0xfff)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SSN,
				   start_seq);

	/* TODO: SVLD should be set to 1 if a valid SSN is received in ADDBA,
	 * but REO is not delivering packets if we set it to 1. Need to enable
	 * this once the issue is resolved
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SVLD, 0);

	/* TODO: Check if we should set start PN for WAPI */

	/* TODO: HW queue descriptors are currently allocated for max BA
	 * window size for all QOS TIDs so that same descriptor can be used
	 * later when ADDBA request is received. This should be changed to
	 * allocate HW queue descriptors based on BA window size being
	 * negotiated (0 for non BA cases), and reallocate when BA window
	 * size changes and also send WMI message to FW to change the REO
	 * queue descriptor in Rx peer entry as part of dp_rx_tid_update.
	 */
	if (tid == HAL_NON_QOS_TID)
		return;

	reo_queue_ext_desc = (uint32_t *)
		(((struct rx_reo_queue *)reo_queue_desc) + 1);
	qdf_mem_zero(reo_queue_ext_desc, 3 *
		sizeof(struct rx_reo_queue_ext));
	/* Initialize first reo queue extension descriptor */
	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
				   HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_EXT_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			   UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A,
			   0xADBEEF);
	/* Initialize second reo queue extension descriptor */
	reo_queue_ext_desc = (uint32_t *)
		(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
				   HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_EXT_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			   UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A,
			   0xBDBEEF);
	/* Initialize third reo queue extension descriptor */
	reo_queue_ext_desc = (uint32_t *)
		(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
				   HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_EXT_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			   UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A,
			   0xCDBEEF);
}

qdf_export_symbol(hal_reo_qdesc_setup_li);
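
/*
 * Illustrative usage sketch (an assumption about the caller, not code in
 * this file): the DP layer allocates one rx_reo_queue followed by the
 * extension descriptors before invoking the setup routine. The sizing
 * below mirrors the qdf_mem_zero() calls above:
 *
 *	uint32_t qdesc_size = sizeof(struct rx_reo_queue) +
 *			      3 * sizeof(struct rx_reo_queue_ext);
 *	void *vaddr = qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
 *					       qdesc_size, &paddr);
 *
 *	if (vaddr)
 *		hal_reo_qdesc_setup_li(hal_soc_hdl, tid, ba_window_size,
 *				       start_seq, vaddr, paddr,
 *				       HAL_PN_NONE, vdev_stats_id);
 */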

/**
 * hal_get_ba_aging_timeout_li() - Get BA Aging timeout
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @ac: Access category
 * @value: Pointer to return the BA aging timeout
 */
void hal_get_ba_aging_timeout_li(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
				 uint32_t *value)
{
	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;

	switch (ac) {
	case WME_AC_BE:
		*value = HAL_REG_READ(soc,
				      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
		break;
	case WME_AC_BK:
		*value = HAL_REG_READ(soc,
				      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
		break;
	case WME_AC_VI:
		*value = HAL_REG_READ(soc,
				      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
		break;
	case WME_AC_VO:
		*value = HAL_REG_READ(soc,
				      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid AC: %d\n", ac);
	}
}
qdf_export_symbol(hal_get_ba_aging_timeout_li);

/**
 * hal_set_ba_aging_timeout_li() - Set BA Aging timeout
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @ac: Access category (WME_AC_BE/BK/VI/VO)
 * @value: BA aging timeout to set
 */
void hal_set_ba_aging_timeout_li(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
				 uint32_t value)
{
	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;

	switch (ac) {
	case WME_AC_BE:
		HAL_REG_WRITE(soc,
			      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
			      value * 1000);
		break;
	case WME_AC_BK:
		HAL_REG_WRITE(soc,
			      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
			      value * 1000);
		break;
	case WME_AC_VI:
		HAL_REG_WRITE(soc,
			      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
			      value * 1000);
		break;
	case WME_AC_VO:
		HAL_REG_WRITE(soc,
			      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
			      value * 1000);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid AC: %d\n", ac);
	}
}
qdf_export_symbol(hal_set_ba_aging_timeout_li);
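
/*
 * Illustrative round trip (a sketch; the caller context is assumed): the
 * getter divides the register value by 1000 and the setter multiplies by
 * 1000, so a value written for an AC reads back unchanged:
 *
 *	uint32_t ba_timeout;
 *
 *	hal_set_ba_aging_timeout_li(hal_soc_hdl, WME_AC_VO, 100);
 *	hal_get_ba_aging_timeout_li(hal_soc_hdl, WME_AC_VO, &ba_timeout);
 *	ba_timeout now reads back as 100.
 */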

static inline void
hal_reo_cmd_set_descr_addr_li(uint32_t *reo_desc, enum hal_reo_cmd_type type,
			      uint32_t paddr_lo, uint8_t paddr_hi)
{
	switch (type) {
	case CMD_GET_QUEUE_STATS:
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_1,
				   RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2,
				   RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_1,
				   FLUSH_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
				   FLUSH_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_CACHE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_1,
				   FLUSH_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
				   FLUSH_ADDR_39_32, paddr_hi);
		break;
	case CMD_UPDATE_RX_REO_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_1,
				   RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
				   RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Invalid REO command type", __func__);
		break;
	}
}
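
/*
 * The REO queue/cache descriptor address consumed above is 40 bits wide:
 * a 32-bit low word plus an 8-bit high byte. A minimal sketch of how a
 * caller might split a qdf_dma_addr_t (variable names are illustrative):
 *
 *	cmd.std.addr_lo = qdesc_paddr & 0xffffffff;
 *	cmd.std.addr_hi = (uint8_t)((qdesc_paddr >> 32) & 0xff);
 */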

static inline int
hal_reo_cmd_queue_stats_li(hal_ring_handle_t hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E,
			sizeof(struct reo_get_queue_stats));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_get_queue_stats) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr_li(reo_desc, CMD_GET_QUEUE_STATS,
				      cmd->std.addr_lo,
				      cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2, CLEAR_STATS,
			   cmd->u.stats_params.clear);

	if (hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_HAL_REO_CMD) == 0) {
		if (hif_system_pm_state_check(hal_soc->hif_handle)) {
			hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
			hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
			hal_srng_inc_flush_cnt(hal_ring_hdl);
		} else {
			hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		}

		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_HAL_REO_CMD);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}

static inline int
hal_reo_cmd_flush_queue_li(hal_ring_handle_t hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E,
			sizeof(struct reo_flush_queue));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_queue) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr_li(reo_desc, CMD_FLUSH_QUEUE,
				      cmd->std.addr_lo, cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
			   BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH,
			   cmd->u.fl_queue_params.block_use_after_flush);

	if (cmd->u.fl_queue_params.block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
				   BLOCK_RESOURCE_INDEX,
				   cmd->u.fl_queue_params.index);
	}

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}

static inline int
hal_reo_cmd_flush_cache_li(hal_ring_handle_t hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_flush_cache_params *cp;
	uint8_t index = 0;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	cp = &cmd->u.fl_cache_params;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);

	/* We need a cache block resource for this operation, and REO HW has
	 * only 4 such blocking resources. These resources are managed using
	 * reo_res_bitmap, and we return failure if none is available.
	 */
	if (cp->block_use_after_flush) {
		index = hal_find_zero_bit(hal_soc->reo_res_bitmap);
		if (index > 3) {
			qdf_print("No blocking resource available!");
			hal_srng_access_end(hal_soc, hal_ring_hdl);
			return -EBUSY;
		}
		hal_soc->index = index;
	}

	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		hal_srng_dump(hal_ring_handle_to_hal_srng(hal_ring_hdl));
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E,
			sizeof(struct reo_flush_cache));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr_li(reo_desc, CMD_FLUSH_CACHE,
				      cmd->std.addr_lo, cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   FORWARD_ALL_MPDUS_IN_QUEUE, cp->fwd_mpdus_in_queue);

	/* set it to 0 for now */
	cp->rel_block_index = 0;
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   RELEASE_CACHE_BLOCK_INDEX, cp->rel_block_index);

	if (cp->block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
				   CACHE_BLOCK_RESOURCE_INDEX, index);
	}

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   FLUSH_WITHOUT_INVALIDATE, cp->flush_no_inval);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   BLOCK_CACHE_USAGE_AFTER_FLUSH,
			   cp->block_use_after_flush);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, FLUSH_ENTIRE_CACHE,
			   cp->flush_entire_cache);

	if (hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_HAL_REO_CMD) == 0) {
		if (hif_system_pm_state_check(hal_soc->hif_handle)) {
			hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
			hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
			hal_srng_inc_flush_cnt(hal_ring_hdl);
		} else {
			hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		}

		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_HAL_REO_CMD);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}

static inline int
hal_reo_cmd_unblock_cache_li(hal_ring_handle_t hal_ring_hdl,
			     hal_soc_handle_t hal_soc_hdl,
			     struct hal_reo_cmd_params *cmd)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;
	uint8_t index = 0;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		index = hal_find_one_bit(hal_soc->reo_res_bitmap);
		if (index > 3) {
			hal_srng_access_end(hal_soc, hal_ring_hdl);
			qdf_print("No blocking resource to unblock!");
			return -EBUSY;
		}
	}

	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E,
			sizeof(struct reo_unblock_cache));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_unblock_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
			   UNBLOCK_TYPE, cmd->u.unblk_cache_params.type);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
				   CACHE_BLOCK_RESOURCE_INDEX,
				   cmd->u.unblk_cache_params.index);
	}

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}
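
/*
 * Blocking-resource lifecycle sketch (hedged; the ring handle name is an
 * assumption): a CMD_FLUSH_CACHE issued with block_use_after_flush set
 * claims one of the four REO block resources tracked in reo_res_bitmap,
 * and a later CMD_UNBLOCK_CACHE of type UNBLOCK_RES_INDEX releases it:
 *
 *	struct hal_reo_cmd_params params = {0};
 *
 *	params.u.fl_cache_params.block_use_after_flush = true;
 *	hal_reo_send_cmd_li(hal_soc_hdl, reo_cmd_ring, CMD_FLUSH_CACHE,
 *			    &params);
 *	(wait for the flush status on the REO status ring)
 *	params.u.unblk_cache_params.type = UNBLOCK_RES_INDEX;
 *	hal_reo_send_cmd_li(hal_soc_hdl, reo_cmd_ring, CMD_UNBLOCK_CACHE,
 *			    &params);
 */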

static inline int
hal_reo_cmd_flush_timeout_list_li(hal_ring_handle_t hal_ring_hdl,
				  hal_soc_handle_t hal_soc_hdl,
				  struct hal_reo_cmd_params *cmd)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E,
			sizeof(struct reo_flush_timeout_list));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_timeout_list) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_1, AC_TIMOUT_LIST,
			   cmd->u.fl_tim_list_params.ac_list);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
			   MINIMUM_RELEASE_DESC_COUNT,
			   cmd->u.fl_tim_list_params.min_rel_desc);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
			   MINIMUM_FORWARD_BUF_COUNT,
			   cmd->u.fl_tim_list_params.min_fwd_buf);

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}

static inline int
hal_reo_cmd_update_rx_queue_li(hal_ring_handle_t hal_ring_hdl,
			       hal_soc_handle_t hal_soc_hdl,
			       struct hal_reo_cmd_params *cmd)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_update_queue_params *p;

	p = &cmd->u.upd_queue_params;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E,
			sizeof(struct reo_update_rx_reo_queue));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_update_rx_reo_queue) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr_li(reo_desc, CMD_UPDATE_RX_REO_QUEUE,
				      cmd->std.addr_lo, cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_RECEIVE_QUEUE_NUMBER, p->update_rx_queue_num);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, UPDATE_VLD,
			   p->update_vld);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
			   p->update_assoc_link_desc);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_DISABLE_DUPLICATE_DETECTION,
			   p->update_disable_dup_detect);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_SOFT_REORDER_ENABLE,
			   p->update_soft_reorder_enab);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_AC, p->update_ac);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_BAR, p->update_bar);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_RTY, p->update_rty);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_CHK_2K_MODE, p->update_chk_2k_mode);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_OOR_MODE, p->update_oor_mode);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_PN_CHECK_NEEDED, p->update_pn_check_needed);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_PN_HANDLING_ENABLE, p->update_pn_hand_enab);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_PN_SIZE, p->update_pn_size);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_SVLD, p->update_svld);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_SSN, p->update_ssn);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_SEQ_2K_ERROR_DETECTED_FLAG,
			   p->update_seq_2k_err_detect);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_PN_VALID, p->update_pn_valid);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_PN, p->update_pn);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   RECEIVE_QUEUE_NUMBER, p->rx_queue_num);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   VLD, p->vld);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
			   p->assoc_link_desc);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   DISABLE_DUPLICATE_DETECTION, p->disable_dup_detect);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   SOFT_REORDER_ENABLE, p->soft_reorder_enab);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, AC, p->ac);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   BAR, p->bar);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   CHK_2K_MODE, p->chk_2k_mode);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   RTY, p->rty);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   OOR_MODE, p->oor_mode);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   PN_CHECK_NEEDED, p->pn_check_needed);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   PN_SHALL_BE_EVEN, p->pn_even);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   PN_SHALL_BE_UNEVEN, p->pn_uneven);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   PN_HANDLING_ENABLE, p->pn_hand_enab);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   IGNORE_AMPDU_FLAG, p->ignore_ampdu);

	if (p->ba_window_size < 1)
		p->ba_window_size = 1;
	/*
	 * WAR to get 2k exception in Non BA case.
	 * Setting window size to 2 to get 2k jump exception
	 * when we receive aggregates in Non BA case
	 */
	if (p->ba_window_size == 1)
		p->ba_window_size++;
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
			   BA_WINDOW_SIZE, p->ba_window_size - 1);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
			   PN_SIZE, p->pn_size);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
			   SVLD, p->svld);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
			   SSN, p->ssn);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
			   SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
			   PN_ERROR_DETECTED_FLAG, p->pn_err_detect);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_5,
			   PN_31_0, p->pn_31_0);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_6,
			   PN_63_32, p->pn_63_32);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_7,
			   PN_95_64, p->pn_95_64);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_8,
			   PN_127_96, p->pn_127_96);

	if (hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_HAL_REO_CMD) == 0) {
		if (hif_system_pm_state_check(hal_soc->hif_handle)) {
			hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
			hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
			hal_srng_inc_flush_cnt(hal_ring_hdl);
		} else {
			hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		}

		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_HAL_REO_CMD);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}

int hal_reo_send_cmd_li(hal_soc_handle_t hal_soc_hdl,
			hal_ring_handle_t hal_ring_hdl,
			enum hal_reo_cmd_type cmd,
			void *params)
{
	struct hal_reo_cmd_params *cmd_params =
			(struct hal_reo_cmd_params *)params;
	int num = 0;

	switch (cmd) {
	case CMD_GET_QUEUE_STATS:
		num = hal_reo_cmd_queue_stats_li(hal_ring_hdl,
						 hal_soc_hdl, cmd_params);
		break;
	case CMD_FLUSH_QUEUE:
		num = hal_reo_cmd_flush_queue_li(hal_ring_hdl,
						 hal_soc_hdl, cmd_params);
		break;
	case CMD_FLUSH_CACHE:
		num = hal_reo_cmd_flush_cache_li(hal_ring_hdl,
						 hal_soc_hdl, cmd_params);
		break;
	case CMD_UNBLOCK_CACHE:
		num = hal_reo_cmd_unblock_cache_li(hal_ring_hdl,
						   hal_soc_hdl, cmd_params);
		break;
	case CMD_FLUSH_TIMEOUT_LIST:
		num = hal_reo_cmd_flush_timeout_list_li(hal_ring_hdl,
							hal_soc_hdl,
							cmd_params);
		break;
	case CMD_UPDATE_RX_REO_QUEUE:
		num = hal_reo_cmd_update_rx_queue_li(hal_ring_hdl,
						     hal_soc_hdl, cmd_params);
		break;
	default:
		hal_err("Invalid REO command type: %d", cmd);
		return -EINVAL;
	}

	return num;
}
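
/*
 * Dispatch sketch (illustrative; the ring handle and paddr variables are
 * assumptions): a GET_QUEUE_STATS request returns the REO command number
 * on success, or -EBUSY when the command ring is full; the number lets
 * callers match the status TLV later delivered on the REO status ring:
 *
 *	struct hal_reo_cmd_params params = {0};
 *	int cmd_num;
 *
 *	params.std.need_status = true;
 *	params.std.addr_lo = qdesc_paddr & 0xffffffff;
 *	params.std.addr_hi = (qdesc_paddr >> 32) & 0xff;
 *	cmd_num = hal_reo_send_cmd_li(hal_soc_hdl, reo_cmd_ring,
 *				      CMD_GET_QUEUE_STATS, &params);
 */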

void
hal_reo_queue_stats_status_li(hal_ring_desc_t ring_desc,
			      void *st_handle,
			      hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_queue_status *st =
		(struct hal_reo_queue_status *)st_handle;
	uint32_t *reo_desc = (uint32_t *)ring_desc;
	uint32_t val;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	hal_reo_status_get_header(ring_desc, HAL_REO_QUEUE_STATS_STATUS_TLV,
				  &(st->header), hal_soc);

	/* SSN */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2, SSN)];
	st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2, SSN, val);

	/* current index */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2,
				     CURRENT_INDEX)];
	st->curr_idx =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2,
			      CURRENT_INDEX, val);

	/* PN bits */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_3,
				     PN_31_0)];
	st->pn_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_3,
			      PN_31_0, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_4,
				     PN_63_32)];
	st->pn_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_4,
			      PN_63_32, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_5,
				     PN_95_64)];
	st->pn_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_5,
			      PN_95_64, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_6,
				     PN_127_96)];
	st->pn_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_6,
			      PN_127_96, val);

	/* timestamps */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_7,
				     LAST_RX_ENQUEUE_TIMESTAMP)];
	st->last_rx_enq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_7,
			      LAST_RX_ENQUEUE_TIMESTAMP, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_8,
				     LAST_RX_DEQUEUE_TIMESTAMP)];
	st->last_rx_deq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_8,
			      LAST_RX_DEQUEUE_TIMESTAMP, val);

	/* rx bitmap */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_9,
				     RX_BITMAP_31_0)];
	st->rx_bitmap_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_9,
			      RX_BITMAP_31_0, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_10,
				     RX_BITMAP_63_32)];
	st->rx_bitmap_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_10,
			      RX_BITMAP_63_32, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_11,
				     RX_BITMAP_95_64)];
	st->rx_bitmap_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_11,
			      RX_BITMAP_95_64, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_12,
				     RX_BITMAP_127_96)];
	st->rx_bitmap_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_12,
			      RX_BITMAP_127_96, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_13,
				     RX_BITMAP_159_128)];
	st->rx_bitmap_159_128 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_13,
			      RX_BITMAP_159_128, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_14,
				     RX_BITMAP_191_160)];
	st->rx_bitmap_191_160 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_14,
			      RX_BITMAP_191_160, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_15,
				     RX_BITMAP_223_192)];
	st->rx_bitmap_223_192 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_15,
			      RX_BITMAP_223_192, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_16,
				     RX_BITMAP_255_224)];
	st->rx_bitmap_255_224 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_16,
			      RX_BITMAP_255_224, val);

	/* various counts */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
				     CURRENT_MPDU_COUNT)];
	st->curr_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
			      CURRENT_MPDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
				     CURRENT_MSDU_COUNT)];
	st->curr_msdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
			      CURRENT_MSDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
				     TIMEOUT_COUNT)];
	st->fwd_timeout_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      TIMEOUT_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
				     FORWARD_DUE_TO_BAR_COUNT)];
	st->fwd_bar_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      FORWARD_DUE_TO_BAR_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
				     DUPLICATE_COUNT)];
	st->dup_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      DUPLICATE_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
				     FRAMES_IN_ORDER_COUNT)];
	st->frms_in_order_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
			      FRAMES_IN_ORDER_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
				     BAR_RECEIVED_COUNT)];
	st->bar_rcvd_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
			      BAR_RECEIVED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_20,
				     MPDU_FRAMES_PROCESSED_COUNT)];
	st->mpdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_20,
			      MPDU_FRAMES_PROCESSED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_21,
				     MSDU_FRAMES_PROCESSED_COUNT)];
	st->msdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_21,
			      MSDU_FRAMES_PROCESSED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_22,
				     TOTAL_PROCESSED_BYTE_COUNT)];
	st->total_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_22,
			      TOTAL_PROCESSED_BYTE_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
				     LATE_RECEIVE_MPDU_COUNT)];
	st->late_recv_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      LATE_RECEIVE_MPDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
				     WINDOW_JUMP_2K)];
	st->win_jump_2k =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      WINDOW_JUMP_2K, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
				     HOLE_COUNT)];
	st->hole_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      HOLE_COUNT, val);
}

void
hal_reo_flush_queue_status_li(hal_ring_desc_t ring_desc,
			      void *st_handle,
			      hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_flush_queue_status *st =
			(struct hal_reo_flush_queue_status *)st_handle;
	uint32_t *reo_desc = (uint32_t *)ring_desc;
	uint32_t val;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	hal_reo_status_get_header(ring_desc, HAL_REO_FLUSH_QUEUE_STATUS_TLV,
				  &(st->header), hal_soc);

	/* error bit */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_QUEUE_STATUS_2,
				     ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
				  val);
}

void
hal_reo_flush_cache_status_li(hal_ring_desc_t ring_desc,
			      void *st_handle,
			      hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_flush_cache_status *st =
			(struct hal_reo_flush_cache_status *)st_handle;
	uint32_t *reo_desc = (uint32_t *)ring_desc;
	uint32_t val;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	hal_reo_status_get_header(ring_desc, HAL_REO_FLUSH_CACHE_STATUS_TLV,
				  &(st->header), hal_soc);

	/* error bit */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
				     ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2, ERROR_DETECTED,
				  val);

	/* block error */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
				     BLOCK_ERROR_DETAILS)];
	st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
					BLOCK_ERROR_DETAILS,
					val);
	if (!st->block_error)
		qdf_set_bit(hal_soc->index,
			    (unsigned long *)&hal_soc->reo_res_bitmap);

	/* cache flush status */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
				     CACHE_CONTROLLER_FLUSH_STATUS_HIT)];
	st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
					CACHE_CONTROLLER_FLUSH_STATUS_HIT,
					val);

	/* cache flush descriptor type */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
				  CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)];
	st->cache_flush_status_desc_type =
		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
			      CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE,
			      val);

	/* cache flush count */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
				     CACHE_CONTROLLER_FLUSH_COUNT)];
	st->cache_flush_cnt =
		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
			      CACHE_CONTROLLER_FLUSH_COUNT,
			      val);
}

void
hal_reo_unblock_cache_status_li(hal_ring_desc_t ring_desc,
				hal_soc_handle_t hal_soc_hdl,
				void *st_handle)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_unblk_cache_status *st =
			(struct hal_reo_unblk_cache_status *)st_handle;
	uint32_t *reo_desc = (uint32_t *)ring_desc;
	uint32_t val;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	hal_reo_status_get_header(ring_desc, HAL_REO_UNBLK_CACHE_STATUS_TLV,
				  &st->header, hal_soc);

	/* error bit */
	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
				     ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
				  ERROR_DETECTED,
				  val);

	/* unblock type */
	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
				     UNBLOCK_TYPE)];
	st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
					 UNBLOCK_TYPE,
					 val);

	if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX))
		qdf_clear_bit(hal_soc->index,
			      (unsigned long *)&hal_soc->reo_res_bitmap);
}

void hal_reo_flush_timeout_list_status_li(hal_ring_desc_t ring_desc,
					  void *st_handle,
					  hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_flush_timeout_list_status *st =
			(struct hal_reo_flush_timeout_list_status *)st_handle;
	uint32_t *reo_desc = (uint32_t *)ring_desc;
	uint32_t val;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	hal_reo_status_get_header(ring_desc, HAL_REO_TIMOUT_LIST_STATUS_TLV,
				  &(st->header), hal_soc);

	/* error bit */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
				     ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
				  ERROR_DETECTED,
				  val);

	/* list empty */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
				     TIMOUT_LIST_EMPTY)];
	st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
				       TIMOUT_LIST_EMPTY,
				       val);

	/* release descriptor count */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
				     RELEASE_DESC_COUNT)];
	st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
					 RELEASE_DESC_COUNT,
					 val);

	/* forward buf count */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
				     FORWARD_BUF_COUNT)];
	st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
					FORWARD_BUF_COUNT,
					val);
}

void hal_reo_desc_thres_reached_status_li(hal_ring_desc_t ring_desc,
					  void *st_handle,
					  hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_desc_thres_reached_status *st =
			(struct hal_reo_desc_thres_reached_status *)st_handle;
	uint32_t *reo_desc = (uint32_t *)ring_desc;
	uint32_t val;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	hal_reo_status_get_header(ring_desc,
				  HAL_REO_DESC_THRES_STATUS_TLV,
				  &(st->header), hal_soc);

	/* threshold index */
	val = reo_desc[HAL_OFFSET_DW(
				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
				 THRESHOLD_INDEX)];
	st->thres_index = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
				THRESHOLD_INDEX,
				val);

	/* link desc counters */
	val = reo_desc[HAL_OFFSET_DW(
				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
				 LINK_DESCRIPTOR_COUNTER0)];
	st->link_desc_counter0 = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
				LINK_DESCRIPTOR_COUNTER0,
				val);

	val = reo_desc[HAL_OFFSET_DW(
				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
				 LINK_DESCRIPTOR_COUNTER1)];
	st->link_desc_counter1 = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
				LINK_DESCRIPTOR_COUNTER1,
				val);

	val = reo_desc[HAL_OFFSET_DW(
				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
				 LINK_DESCRIPTOR_COUNTER2)];
	st->link_desc_counter2 = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
				LINK_DESCRIPTOR_COUNTER2,
				val);

	val = reo_desc[HAL_OFFSET_DW(
				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
				 LINK_DESCRIPTOR_COUNTER_SUM)];
	st->link_desc_counter_sum = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
				LINK_DESCRIPTOR_COUNTER_SUM,
				val);
}

void
hal_reo_rx_update_queue_status_li(hal_ring_desc_t ring_desc,
				  void *st_handle,
				  hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_update_rx_queue_status *st =
			(struct hal_reo_update_rx_queue_status *)st_handle;
	uint32_t *reo_desc = (uint32_t *)ring_desc;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	hal_reo_status_get_header(ring_desc,
				  HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV,
				  &(st->header), hal_soc);
}

uint8_t hal_get_tlv_hdr_size_li(void)
{
	return sizeof(struct tlv_32_hdr);
}

uint64_t hal_rx_get_qdesc_addr_li(uint8_t *dst_ring_desc, uint8_t *buf)
{
	uint8_t *dst_qdesc_addr = dst_ring_desc +
		REO_DESTINATION_RING_6_RX_REO_QUEUE_DESC_ADDR_31_0_OFFSET;

	return *(uint64_t *)dst_qdesc_addr;
}
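
/*
 * Note (an assumption about the destination ring layout): the 64-bit load
 * above also captures bits beyond RX_REO_QUEUE_DESC_ADDR_39_32, so callers
 * needing only the 40-bit queue descriptor address may mask the result:
 *
 *	uint64_t qdesc_addr =
 *		hal_rx_get_qdesc_addr_li(dst_ring_desc, buf) & 0xffffffffffULL;
 */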