xref: /wlan-dirver/qca-wifi-host-cmn/hal/wifi3.0/hal_reo.c (revision bea437e2293c3d4fb1b5704fcf633aedac996962)
1 /*
2  * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "hal_api.h"
20 #include "hal_hw_headers.h"
21 #include "hal_reo.h"
22 #include "hal_tx.h"
23 #include "hal_rx.h"
24 #include "qdf_module.h"
25 
26 /* TODO: See if the following definition is available in HW headers */
27 #define HAL_REO_OWNED 4
28 #define HAL_REO_QUEUE_DESC 8
29 #define HAL_REO_QUEUE_EXT_DESC 9
30 
31 /* TODO: Using associated link desc counter 1 for Rx. Check with FW on
32  * how these counters are assigned
33  */
34 #define HAL_RX_LINK_DESC_CNTR 1
35 /* TODO: Following definition should be from HW headers */
36 #define HAL_DESC_REO_OWNED 4
37 
38 /**
39  * hal_uniform_desc_hdr_setup - setup reo_queue_ext descritpro
40  * @owner - owner info
41  * @buffer_type - buffer type
42  */
43 static inline void hal_uniform_desc_hdr_setup(uint32_t *desc, uint32_t owner,
44 	uint32_t buffer_type)
45 {
46 	HAL_DESC_SET_FIELD(desc, UNIFORM_DESCRIPTOR_HEADER_0, OWNER,
47 		owner);
48 	HAL_DESC_SET_FIELD(desc, UNIFORM_DESCRIPTOR_HEADER_0, BUFFER_TYPE,
49 		buffer_type);
50 }
51 
52 #ifndef TID_TO_WME_AC
53 #define WME_AC_BE 0 /* best effort */
54 #define WME_AC_BK 1 /* background */
55 #define WME_AC_VI 2 /* video */
56 #define WME_AC_VO 3 /* voice */
57 
58 #define TID_TO_WME_AC(_tid) ( \
59 	(((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
60 	(((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \
61 	(((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
62 	WME_AC_VO)
63 #endif
#define HAL_NON_QOS_TID 16

#ifdef HAL_DISABLE_NON_BA_2K_JUMP_ERROR
/**
 * hal_update_non_ba_win_size - pass the BA window size through unchanged
 * @tid: TID of the REO queue
 * @ba_window_size: negotiated BlockAck window size
 *
 * Return: @ba_window_size as supplied (2k-jump WAR disabled at build time)
 */
static inline uint32_t hal_update_non_ba_win_size(int tid,
						  uint32_t ba_window_size)
{
	return ba_window_size;
}
#else
/**
 * hal_update_non_ba_win_size - apply the non-BA 2k-jump WAR to window size
 * @tid: TID of the REO queue
 * @ba_window_size: negotiated BlockAck window size
 *
 * For QoS TIDs a window of 1 is bumped to 2 so that REO still raises the
 * 2k-jump exception when aggregates arrive in the non-BA case. The non-QoS
 * TID is left untouched.
 *
 * Return: possibly adjusted BA window size
 */
static inline uint32_t hal_update_non_ba_win_size(int tid,
						  uint32_t ba_window_size)
{
	uint32_t win_size = ba_window_size;

	if (win_size == 1 && tid != HAL_NON_QOS_TID)
		win_size = 2;

	return win_size;
}
#endif
82 
83 /**
84  * hal_reo_qdesc_setup - Setup HW REO queue descriptor
85  *
86  * @hal_soc: Opaque HAL SOC handle
87  * @ba_window_size: BlockAck window size
88  * @start_seq: Starting sequence number
89  * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
90  * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
91  * @tid: TID
92  *
93  */
94 void hal_reo_qdesc_setup(hal_soc_handle_t hal_soc_hdl, int tid,
95 			 uint32_t ba_window_size,
96 			 uint32_t start_seq, void *hw_qdesc_vaddr,
97 			 qdf_dma_addr_t hw_qdesc_paddr,
98 			 int pn_type)
99 {
100 	uint32_t *reo_queue_desc = (uint32_t *)hw_qdesc_vaddr;
101 	uint32_t *reo_queue_ext_desc;
102 	uint32_t reg_val;
103 	uint32_t pn_enable;
104 	uint32_t pn_size = 0;
105 
106 	qdf_mem_zero(hw_qdesc_vaddr, sizeof(struct rx_reo_queue));
107 
108 	hal_uniform_desc_hdr_setup(reo_queue_desc, HAL_DESC_REO_OWNED,
109 		HAL_REO_QUEUE_DESC);
110 	/* Fixed pattern in reserved bits for debugging */
111 	HAL_DESC_SET_FIELD(reo_queue_desc, UNIFORM_DESCRIPTOR_HEADER_0,
112 		RESERVED_0A, 0xDDBEEF);
113 
114 	/* This a just a SW meta data and will be copied to REO destination
115 	 * descriptors indicated by hardware.
116 	 * TODO: Setting TID in this field. See if we should set something else.
117 	 */
118 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_1,
119 		RECEIVE_QUEUE_NUMBER, tid);
120 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
121 		VLD, 1);
122 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
123 		ASSOCIATED_LINK_DESCRIPTOR_COUNTER, HAL_RX_LINK_DESC_CNTR);
124 
125 	/*
126 	 * Fields DISABLE_DUPLICATE_DETECTION and SOFT_REORDER_ENABLE will be 0
127 	 */
128 
129 	reg_val = TID_TO_WME_AC(tid);
130 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, AC, reg_val);
131 
132 	if (ba_window_size < 1)
133 		ba_window_size = 1;
134 
135 	/* WAR to get 2k exception in Non BA case.
136 	 * Setting window size to 2 to get 2k jump exception
137 	 * when we receive aggregates in Non BA case
138 	 */
139 	ba_window_size = hal_update_non_ba_win_size(tid, ba_window_size);
140 
141 	/* Set RTY bit for non-BA case. Duplicate detection is currently not
142 	 * done by HW in non-BA case if RTY bit is not set.
143 	 * TODO: This is a temporary War and should be removed once HW fix is
144 	 * made to check and discard duplicates even if RTY bit is not set.
145 	 */
146 	if (ba_window_size == 1)
147 		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, RTY, 1);
148 
149 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, BA_WINDOW_SIZE,
150 		ba_window_size - 1);
151 
152 	switch (pn_type) {
153 	case HAL_PN_WPA:
154 		pn_enable = 1;
155 		pn_size = PN_SIZE_48;
156 		break;
157 	case HAL_PN_WAPI_EVEN:
158 	case HAL_PN_WAPI_UNEVEN:
159 		pn_enable = 1;
160 		pn_size = PN_SIZE_128;
161 		break;
162 	default:
163 		pn_enable = 0;
164 		break;
165 	}
166 
167 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_CHECK_NEEDED,
168 		pn_enable);
169 
170 	if (pn_type == HAL_PN_WAPI_EVEN)
171 		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
172 			PN_SHALL_BE_EVEN, 1);
173 	else if (pn_type == HAL_PN_WAPI_UNEVEN)
174 		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
175 			PN_SHALL_BE_UNEVEN, 1);
176 
177 	/*
178 	 *  TODO: Need to check if PN handling in SW needs to be enabled
179 	 *  So far this is not a requirement
180 	 */
181 
182 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_SIZE,
183 		pn_size);
184 
185 	/* TODO: Check if RX_REO_QUEUE_2_IGNORE_AMPDU_FLAG need to be set
186 	 * based on BA window size and/or AMPDU capabilities
187 	 */
188 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
189 		IGNORE_AMPDU_FLAG, 1);
190 
191 	if (start_seq <= 0xfff)
192 		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SSN,
193 			start_seq);
194 
195 	/* TODO: SVLD should be set to 1 if a valid SSN is received in ADDBA,
196 	 * but REO is not delivering packets if we set it to 1. Need to enable
197 	 * this once the issue is resolved
198 	 */
199 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SVLD, 0);
200 
201 	/* TODO: Check if we should set start PN for WAPI */
202 
203 #ifdef notyet
204 	/* Setup first queue extension if BA window size is more than 1 */
205 	if (ba_window_size > 1) {
206 		reo_queue_ext_desc =
207 			(uint32_t *)(((struct rx_reo_queue *)reo_queue_desc) +
208 			1);
209 		qdf_mem_zero(reo_queue_ext_desc,
210 			sizeof(struct rx_reo_queue_ext));
211 		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
212 			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
213 	}
214 	/* Setup second queue extension if BA window size is more than 105 */
215 	if (ba_window_size > 105) {
216 		reo_queue_ext_desc = (uint32_t *)
217 			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
218 		qdf_mem_zero(reo_queue_ext_desc,
219 			sizeof(struct rx_reo_queue_ext));
220 		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
221 			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
222 	}
223 	/* Setup third queue extension if BA window size is more than 210 */
224 	if (ba_window_size > 210) {
225 		reo_queue_ext_desc = (uint32_t *)
226 			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
227 		qdf_mem_zero(reo_queue_ext_desc,
228 			sizeof(struct rx_reo_queue_ext));
229 		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
230 			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
231 	}
232 #else
233 	/* TODO: HW queue descriptors are currently allocated for max BA
234 	 * window size for all QOS TIDs so that same descriptor can be used
235 	 * later when ADDBA request is recevied. This should be changed to
236 	 * allocate HW queue descriptors based on BA window size being
237 	 * negotiated (0 for non BA cases), and reallocate when BA window
238 	 * size changes and also send WMI message to FW to change the REO
239 	 * queue descriptor in Rx peer entry as part of dp_rx_tid_update.
240 	 */
241 	if (tid != HAL_NON_QOS_TID) {
242 		reo_queue_ext_desc = (uint32_t *)
243 			(((struct rx_reo_queue *)reo_queue_desc) + 1);
244 		qdf_mem_zero(reo_queue_ext_desc, 3 *
245 			sizeof(struct rx_reo_queue_ext));
246 		/* Initialize first reo queue extension descriptor */
247 		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
248 			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
249 		/* Fixed pattern in reserved bits for debugging */
250 		HAL_DESC_SET_FIELD(reo_queue_ext_desc,
251 			UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xADBEEF);
252 		/* Initialize second reo queue extension descriptor */
253 		reo_queue_ext_desc = (uint32_t *)
254 			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
255 		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
256 			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
257 		/* Fixed pattern in reserved bits for debugging */
258 		HAL_DESC_SET_FIELD(reo_queue_ext_desc,
259 			UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xBDBEEF);
260 		/* Initialize third reo queue extension descriptor */
261 		reo_queue_ext_desc = (uint32_t *)
262 			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
263 		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
264 			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
265 		/* Fixed pattern in reserved bits for debugging */
266 		HAL_DESC_SET_FIELD(reo_queue_ext_desc,
267 			UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xCDBEEF);
268 	}
269 #endif
270 }
271 qdf_export_symbol(hal_reo_qdesc_setup);
272 
273 /**
274  * hal_get_ba_aging_timeout - Get BA Aging timeout
275  *
276  * @hal_soc: Opaque HAL SOC handle
277  * @ac: Access category
278  * @value: window size to get
279  */
280 void hal_get_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
281 			      uint32_t *value)
282 {
283 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
284 
285 	switch (ac) {
286 	case WME_AC_BE:
287 		*value = HAL_REG_READ(soc,
288 				      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
289 				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
290 		break;
291 	case WME_AC_BK:
292 		*value = HAL_REG_READ(soc,
293 				      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
294 				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
295 		break;
296 	case WME_AC_VI:
297 		*value = HAL_REG_READ(soc,
298 				      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
299 				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
300 		break;
301 	case WME_AC_VO:
302 		*value = HAL_REG_READ(soc,
303 				      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
304 				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
305 		break;
306 	default:
307 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
308 			  "Invalid AC: %d\n", ac);
309 	}
310 }
311 
312 qdf_export_symbol(hal_get_ba_aging_timeout);
313 
314 /**
315  * hal_set_ba_aging_timeout - Set BA Aging timeout
316  *
317  * @hal_soc: Opaque HAL SOC handle
318  * @ac: Access category
319  * ac: 0 - Background, 1 - Best Effort, 2 - Video, 3 - Voice
320  * @value: Input value to set
321  */
322 void hal_set_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
323 			      uint32_t value)
324 {
325 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
326 
327 	switch (ac) {
328 	case WME_AC_BE:
329 		HAL_REG_WRITE(soc,
330 			      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
331 			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
332 			      value * 1000);
333 		break;
334 	case WME_AC_BK:
335 		HAL_REG_WRITE(soc,
336 			      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
337 			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
338 			      value * 1000);
339 		break;
340 	case WME_AC_VI:
341 		HAL_REG_WRITE(soc,
342 			      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
343 			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
344 			      value * 1000);
345 		break;
346 	case WME_AC_VO:
347 		HAL_REG_WRITE(soc,
348 			      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
349 			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
350 			      value * 1000);
351 		break;
352 	default:
353 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
354 			  "Invalid AC: %d\n", ac);
355 	}
356 }
357 
358 qdf_export_symbol(hal_set_ba_aging_timeout);
359 
#define BLOCK_RES_MASK		0xF

/**
 * hal_find_one_bit - find the lowest set bit of x within BLOCK_RES_MASK
 * @x: bitmap to scan
 *
 * Return: zero-based index of the least-significant set bit among the low
 *	   four bits, or 0xFF (uint8_t wrap of -1) if none is set
 */
static inline uint8_t hal_find_one_bit(uint8_t x)
{
	uint8_t lsb = (x & (~x + 1)) & BLOCK_RES_MASK;
	uint8_t idx = 0;

	while (lsb) {
		idx++;
		lsb >>= 1;
	}

	return idx - 1;
}

/**
 * hal_find_zero_bit - find the lowest clear bit of x within BLOCK_RES_MASK
 * @x: bitmap to scan
 *
 * Return: zero-based index of the least-significant clear bit among the low
 *	   four bits, or 0xFF (uint8_t wrap of -1) if all four are set
 */
static inline uint8_t hal_find_zero_bit(uint8_t x)
{
	uint8_t lzb = (~x & (x + 1)) & BLOCK_RES_MASK;
	uint8_t idx = 0;

	while (lzb) {
		idx++;
		lzb >>= 1;
	}

	return idx - 1;
}
382 
/**
 * hal_reo_cmd_set_descr_addr - Program the REO queue descriptor address
 *	fields of a REO command descriptor
 * @reo_desc: pointer to the command descriptor body (past the TLV header)
 * @type: REO command type, selects which command's address fields to set
 * @paddr_lo: lower 32 bits of the queue/flush descriptor physical address
 * @paddr_hi: bits 39:32 of the queue/flush descriptor physical address
 */
inline void hal_reo_cmd_set_descr_addr(uint32_t *reo_desc,
				       enum hal_reo_cmd_type type,
				       uint32_t paddr_lo,
				       uint8_t paddr_hi)
{
	switch (type) {
	case CMD_GET_QUEUE_STATS:
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_1,
			RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2,
				    RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_1,
					FLUSH_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
					FLUSH_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_CACHE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_1,
					FLUSH_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
					FLUSH_ADDR_39_32, paddr_hi);
		break;
	case CMD_UPDATE_RX_REO_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_1,
					RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
					RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	default:
		/* Other command types carry no descriptor address fields */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Invalid REO command type", __func__);
		break;
	}
}
419 
/**
 * hal_reo_cmd_queue_stats - Submit a GET_QUEUE_STATS command to the REO
 *	command ring
 * @hal_ring_hdl: Opaque handle to the REO command ring
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters; cmd->std carries the queue descriptor address
 *	 and status requirement, cmd->u.stats_params.clear requests that
 *	 the stats be cleared after reading
 *
 * Return: REO command number assigned to the queued command, or -EBUSY
 *	   if no command ring entry is available
 */
inline int hal_reo_cmd_queue_stats(hal_ring_handle_t  hal_ring_hdl,
				   hal_soc_handle_t hal_soc_hdl,
				   struct hal_reo_cmd_params *cmd)

{
	uint32_t *reo_desc, val;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E,
			     sizeof(struct reo_get_queue_stats));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear the command body beyond the uniform REO command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_get_queue_stats) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_GET_QUEUE_STATS,
				   cmd->std.addr_lo,
				   cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2, CLEAR_STATS,
			      cmd->u.stats_params.clear);

	/* Commit the ring update now when runtime PM get succeeds;
	 * otherwise end the access in reap mode and mark the ring for a
	 * deferred flush.
	 */
	if (hif_pm_runtime_get(hal_soc->hif_handle) == 0) {
		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		hif_pm_runtime_put(hal_soc->hif_handle);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}

	/* reo_desc already points past the TLV header, so this indexes
	 * the uniform REO command header.
	 */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
470 qdf_export_symbol(hal_reo_cmd_queue_stats);
471 
/**
 * hal_reo_cmd_flush_queue - Submit a FLUSH_QUEUE command to the REO
 *	command ring
 * @hal_ring_hdl: Opaque handle to the REO command ring
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters; cmd->std carries the flush descriptor address
 *	 and status requirement, cmd->u.fl_queue_params controls blocking
 *	 of the descriptor address after flush
 *
 * Return: REO command number assigned to the queued command, or -EBUSY
 *	   if no command ring entry is available
 */
inline int hal_reo_cmd_flush_queue(hal_ring_handle_t hal_ring_hdl,
				   hal_soc_handle_t hal_soc_hdl,
				   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E,
			     sizeof(struct reo_flush_queue));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear the command body beyond the uniform REO command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_queue) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_QUEUE, cmd->std.addr_lo,
		cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
		BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH,
		cmd->u.fl_queue_params.block_use_after_flush);

	if (cmd->u.fl_queue_params.block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
			BLOCK_RESOURCE_INDEX, cmd->u.fl_queue_params.index);
	}

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	/* reo_desc already points past the TLV header, so this indexes
	 * the uniform REO command header.
	 */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
518 qdf_export_symbol(hal_reo_cmd_flush_queue);
519 
/**
 * hal_reo_cmd_flush_cache - Submit a FLUSH_CACHE command to the REO
 *	command ring
 * @hal_ring_hdl: Opaque handle to the REO command ring
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters; cmd->std carries the flush address and status
 *	 requirement, cmd->u.fl_cache_params controls blocking, forwarding,
 *	 invalidate and flush-all behavior
 *
 * When block_use_after_flush is requested, one of the four HW cache block
 * resources is selected from hal_soc->reo_res_bitmap and its index is
 * remembered in hal_soc->index.
 *
 * Return: REO command number assigned to the queued command, or -EBUSY
 *	   if no blocking resource or command ring entry is available
 */
inline int hal_reo_cmd_flush_cache(hal_ring_handle_t hal_ring_hdl,
				   hal_soc_handle_t hal_soc_hdl,
				   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_flush_cache_params *cp;
	uint8_t index = 0;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	cp = &cmd->u.fl_cache_params;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);

	/* We need a cache block resource for this operation, and REO HW has
	 * only 4 such blocking resources. These resources are managed using
	 * reo_res_bitmap, and we return failure if none is available.
	 */
	if (cp->block_use_after_flush) {
		/* NOTE(review): the bitmap bit for 'index' is not set here;
		 * presumably ownership is recorded via hal_soc->index and the
		 * bitmap is updated elsewhere — confirm against the status
		 * handling path.
		 */
		index = hal_find_zero_bit(hal_soc->reo_res_bitmap);
		if (index > 3) {
			qdf_print("%s, No blocking resource available!",
				  __func__);
			hal_srng_access_end(hal_soc, hal_ring_hdl);
			return -EBUSY;
		}
		hal_soc->index = index;
	}

	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		hal_srng_dump(hal_ring_handle_to_hal_srng(hal_ring_hdl));
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E,
			     sizeof(struct reo_flush_cache));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear the command body beyond the uniform REO command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_CACHE, cmd->std.addr_lo,
				   cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		FORWARD_ALL_MPDUS_IN_QUEUE, cp->fwd_mpdus_in_queue);

	/* set it to 0 for now */
	cp->rel_block_index = 0;
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		RELEASE_CACHE_BLOCK_INDEX, cp->rel_block_index);

	if (cp->block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			CACHE_BLOCK_RESOURCE_INDEX, index);
	}

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		FLUSH_WITHOUT_INVALIDATE, cp->flush_no_inval);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		BLOCK_CACHE_USAGE_AFTER_FLUSH, cp->block_use_after_flush);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, FLUSH_ENTIRE_CACHE,
		cp->flush_all);

	/* Commit the ring update now when runtime PM get succeeds;
	 * otherwise end the access in reap mode and mark the ring for a
	 * deferred flush.
	 */
	if (hif_pm_runtime_get(hal_soc->hif_handle) == 0) {
		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		hif_pm_runtime_put(hal_soc->hif_handle);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
606 qdf_export_symbol(hal_reo_cmd_flush_cache);
607 
/**
 * hal_reo_cmd_unblock_cache - Submit an UNBLOCK_CACHE command to the REO
 *	command ring
 * @hal_ring_hdl: Opaque handle to the REO command ring
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters; cmd->u.unblk_cache_params gives the unblock
 *	 type and, for UNBLOCK_RES_INDEX, the block resource index
 *
 * Return: REO command number assigned to the queued command, or -EBUSY
 *	   if no blocking resource is held or no ring entry is available
 */
inline int hal_reo_cmd_unblock_cache(hal_ring_handle_t hal_ring_hdl,
				     hal_soc_handle_t hal_soc_hdl,
				     struct hal_reo_cmd_params *cmd)

{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;
	uint8_t index = 0;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		/* NOTE(review): the locally computed 'index' is only used to
		 * verify that some blocking resource is in use; the command
		 * below is programmed with the caller-supplied
		 * cmd->u.unblk_cache_params.index — confirm this is
		 * intentional.
		 */
		index = hal_find_one_bit(hal_soc->reo_res_bitmap);
		if (index > 3) {
			hal_srng_access_end(hal_soc, hal_ring_hdl);
			qdf_print("%s: No blocking resource to unblock!",
				  __func__);
			return -EBUSY;
		}
	}

	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E,
			     sizeof(struct reo_unblock_cache));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear the command body beyond the uniform REO command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_unblock_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
		UNBLOCK_TYPE, cmd->u.unblk_cache_params.type);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
			CACHE_BLOCK_RESOURCE_INDEX,
			cmd->u.unblk_cache_params.index);
	}

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
664 qdf_export_symbol(hal_reo_cmd_unblock_cache);
665 
/**
 * hal_reo_cmd_flush_timeout_list - Submit a FLUSH_TIMEOUT_LIST command to
 *	the REO command ring
 * @hal_ring_hdl: Opaque handle to the REO command ring
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters; cmd->u.fl_tim_list_params gives the AC timeout
 *	 list to flush and the minimum descriptor-release / buffer-forward
 *	 counts
 *
 * Return: REO command number assigned to the queued command, or -EBUSY
 *	   if no command ring entry is available
 */
inline int hal_reo_cmd_flush_timeout_list(hal_ring_handle_t hal_ring_hdl,
					  hal_soc_handle_t hal_soc_hdl,
					  struct hal_reo_cmd_params *cmd)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E,
			     sizeof(struct reo_flush_timeout_list));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear the command body beyond the uniform REO command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_timeout_list) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	/* AC_TIMOUT_LIST is the HW-header field name (sic) */
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_1, AC_TIMOUT_LIST,
		cmd->u.fl_tim_list_params.ac_list);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
		MINIMUM_RELEASE_DESC_COUNT,
		cmd->u.fl_tim_list_params.min_rel_desc);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
		MINIMUM_FORWARD_BUF_COUNT,
		cmd->u.fl_tim_list_params.min_fwd_buf);

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
711 qdf_export_symbol(hal_reo_cmd_flush_timeout_list);
712 
713 inline int hal_reo_cmd_update_rx_queue(hal_ring_handle_t hal_ring_hdl,
714 				       hal_soc_handle_t hal_soc_hdl,
715 				       struct hal_reo_cmd_params *cmd)
716 {
717 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
718 	uint32_t *reo_desc, val;
719 	struct hal_reo_cmd_update_queue_params *p;
720 
721 	p = &cmd->u.upd_queue_params;
722 
723 	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
724 	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
725 	if (!reo_desc) {
726 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
727 			"%s: Out of cmd ring entries", __func__);
728 		hal_srng_access_end(hal_soc, hal_ring_hdl);
729 		return -EBUSY;
730 	}
731 
732 	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E,
733 			     sizeof(struct reo_update_rx_reo_queue));
734 
735 	/* Offsets of descriptor fields defined in HW headers start from
736 	 * the field after TLV header */
737 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
738 	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
739 		     sizeof(struct reo_update_rx_reo_queue) -
740 		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));
741 
742 	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
743 		REO_STATUS_REQUIRED, cmd->std.need_status);
744 
745 	hal_reo_cmd_set_descr_addr(reo_desc, CMD_UPDATE_RX_REO_QUEUE,
746 		cmd->std.addr_lo, cmd->std.addr_hi);
747 
748 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
749 		UPDATE_RECEIVE_QUEUE_NUMBER, p->update_rx_queue_num);
750 
751 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, UPDATE_VLD,
752 			      p->update_vld);
753 
754 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
755 		UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
756 		p->update_assoc_link_desc);
757 
758 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
759 		UPDATE_DISABLE_DUPLICATE_DETECTION,
760 		p->update_disable_dup_detect);
761 
762 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
763 		UPDATE_DISABLE_DUPLICATE_DETECTION,
764 		p->update_disable_dup_detect);
765 
766 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
767 		UPDATE_SOFT_REORDER_ENABLE,
768 		p->update_soft_reorder_enab);
769 
770 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
771 		UPDATE_AC, p->update_ac);
772 
773 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
774 		UPDATE_BAR, p->update_bar);
775 
776 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
777 		UPDATE_BAR, p->update_bar);
778 
779 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
780 		UPDATE_RTY, p->update_rty);
781 
782 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
783 		UPDATE_CHK_2K_MODE, p->update_chk_2k_mode);
784 
785 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
786 		UPDATE_OOR_MODE, p->update_oor_mode);
787 
788 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
789 		UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size);
790 
791 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
792 		UPDATE_PN_CHECK_NEEDED, p->update_pn_check_needed);
793 
794 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
795 		UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even);
796 
797 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
798 		UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven);
799 
800 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
801 		UPDATE_PN_HANDLING_ENABLE, p->update_pn_hand_enab);
802 
803 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
804 		UPDATE_PN_SIZE, p->update_pn_size);
805 
806 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
807 		UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu);
808 
809 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
810 		UPDATE_SVLD, p->update_svld);
811 
812 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
813 		UPDATE_SSN, p->update_ssn);
814 
815 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
816 		UPDATE_SEQ_2K_ERROR_DETECTED_FLAG,
817 		p->update_seq_2k_err_detect);
818 
819 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
820 		UPDATE_PN_VALID, p->update_pn_valid);
821 
822 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
823 		UPDATE_PN, p->update_pn);
824 
825 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
826 		RECEIVE_QUEUE_NUMBER, p->rx_queue_num);
827 
828 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
829 		VLD, p->vld);
830 
831 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
832 		ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
833 		p->assoc_link_desc);
834 
835 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
836 		DISABLE_DUPLICATE_DETECTION, p->disable_dup_detect);
837 
838 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
839 		SOFT_REORDER_ENABLE, p->soft_reorder_enab);
840 
841 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, AC, p->ac);
842 
843 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
844 		BAR, p->bar);
845 
846 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
847 		CHK_2K_MODE, p->chk_2k_mode);
848 
849 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
850 		RTY, p->rty);
851 
852 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
853 		OOR_MODE, p->oor_mode);
854 
855 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
856 		PN_CHECK_NEEDED, p->pn_check_needed);
857 
858 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
859 		PN_SHALL_BE_EVEN, p->pn_even);
860 
861 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
862 		PN_SHALL_BE_UNEVEN, p->pn_uneven);
863 
864 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
865 		PN_HANDLING_ENABLE, p->pn_hand_enab);
866 
867 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
868 		IGNORE_AMPDU_FLAG, p->ignore_ampdu);
869 
870 	if (p->ba_window_size < 1)
871 		p->ba_window_size = 1;
872 	/*
873 	 * WAR to get 2k exception in Non BA case.
874 	 * Setting window size to 2 to get 2k jump exception
875 	 * when we receive aggregates in Non BA case
876 	 */
877 	if (p->ba_window_size == 1)
878 		p->ba_window_size++;
879 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
880 		BA_WINDOW_SIZE, p->ba_window_size - 1);
881 
882 	if (p->pn_size == 24)
883 		p->pn_size = PN_SIZE_24;
884 	else if (p->pn_size == 48)
885 		p->pn_size = PN_SIZE_48;
886 	else if (p->pn_size == 128)
887 		p->pn_size = PN_SIZE_128;
888 
889 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
890 		PN_SIZE, p->pn_size);
891 
892 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
893 		SVLD, p->svld);
894 
895 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
896 		SSN, p->ssn);
897 
898 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
899 		SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect);
900 
901 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
902 		PN_ERROR_DETECTED_FLAG, p->pn_err_detect);
903 
904 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_5,
905 		PN_31_0, p->pn_31_0);
906 
907 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_6,
908 		PN_63_32, p->pn_63_32);
909 
910 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_7,
911 		PN_95_64, p->pn_95_64);
912 
913 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_8,
914 		PN_127_96, p->pn_127_96);
915 
916 	if (hif_pm_runtime_get(hal_soc->hif_handle) == 0) {
917 		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
918 		hif_pm_runtime_put(hal_soc->hif_handle);
919 	} else {
920 		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
921 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
922 		hal_srng_inc_flush_cnt(hal_ring_hdl);
923 	}
924 
925 	val = reo_desc[CMD_HEADER_DW_OFFSET];
926 	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
927 				     val);
928 }
929 qdf_export_symbol(hal_reo_cmd_update_rx_queue);
930 
931 inline void
932 hal_reo_queue_stats_status(uint32_t *reo_desc,
933 			   struct hal_reo_queue_status *st,
934 			   hal_soc_handle_t hal_soc_hdl)
935 {
936 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
937 	uint32_t val;
938 
939 	/* Offsets of descriptor fields defined in HW headers start
940 	 * from the field after TLV header */
941 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
942 
943 	/* header */
944 	hal_reo_status_get_header(reo_desc, HAL_REO_QUEUE_STATS_STATUS_TLV,
945 					&(st->header), hal_soc);
946 
947 	/* SSN */
948 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2, SSN)];
949 	st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2, SSN, val);
950 
951 	/* current index */
952 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2,
953 					 CURRENT_INDEX)];
954 	st->curr_idx =
955 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2,
956 			      CURRENT_INDEX, val);
957 
958 	/* PN bits */
959 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_3,
960 					 PN_31_0)];
961 	st->pn_31_0 =
962 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_3,
963 			      PN_31_0, val);
964 
965 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_4,
966 					 PN_63_32)];
967 	st->pn_63_32 =
968 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_4,
969 			      PN_63_32, val);
970 
971 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_5,
972 					 PN_95_64)];
973 	st->pn_95_64 =
974 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_5,
975 			      PN_95_64, val);
976 
977 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_6,
978 					 PN_127_96)];
979 	st->pn_127_96 =
980 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_6,
981 			      PN_127_96, val);
982 
983 	/* timestamps */
984 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_7,
985 					 LAST_RX_ENQUEUE_TIMESTAMP)];
986 	st->last_rx_enq_tstamp =
987 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_7,
988 			      LAST_RX_ENQUEUE_TIMESTAMP, val);
989 
990 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_8,
991 					 LAST_RX_DEQUEUE_TIMESTAMP)];
992 	st->last_rx_deq_tstamp =
993 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_8,
994 			      LAST_RX_DEQUEUE_TIMESTAMP, val);
995 
996 	/* rx bitmap */
997 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_9,
998 					 RX_BITMAP_31_0)];
999 	st->rx_bitmap_31_0 =
1000 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_9,
1001 			      RX_BITMAP_31_0, val);
1002 
1003 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_10,
1004 					 RX_BITMAP_63_32)];
1005 	st->rx_bitmap_63_32 =
1006 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_10,
1007 			      RX_BITMAP_63_32, val);
1008 
1009 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_11,
1010 					 RX_BITMAP_95_64)];
1011 	st->rx_bitmap_95_64 =
1012 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_11,
1013 			      RX_BITMAP_95_64, val);
1014 
1015 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_12,
1016 					 RX_BITMAP_127_96)];
1017 	st->rx_bitmap_127_96 =
1018 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_12,
1019 			      RX_BITMAP_127_96, val);
1020 
1021 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_13,
1022 					 RX_BITMAP_159_128)];
1023 	st->rx_bitmap_159_128 =
1024 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_13,
1025 			      RX_BITMAP_159_128, val);
1026 
1027 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_14,
1028 					 RX_BITMAP_191_160)];
1029 	st->rx_bitmap_191_160 =
1030 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_14,
1031 			      RX_BITMAP_191_160, val);
1032 
1033 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_15,
1034 					 RX_BITMAP_223_192)];
1035 	st->rx_bitmap_223_192 =
1036 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_15,
1037 			      RX_BITMAP_223_192, val);
1038 
1039 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_16,
1040 					 RX_BITMAP_255_224)];
1041 	st->rx_bitmap_255_224 =
1042 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_16,
1043 			      RX_BITMAP_255_224, val);
1044 
1045 	/* various counts */
1046 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
1047 					 CURRENT_MPDU_COUNT)];
1048 	st->curr_mpdu_cnt =
1049 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
1050 			      CURRENT_MPDU_COUNT, val);
1051 
1052 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
1053 					 CURRENT_MSDU_COUNT)];
1054 	st->curr_msdu_cnt =
1055 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
1056 			      CURRENT_MSDU_COUNT, val);
1057 
1058 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
1059 					 TIMEOUT_COUNT)];
1060 	st->fwd_timeout_cnt =
1061 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
1062 			      TIMEOUT_COUNT, val);
1063 
1064 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
1065 					 FORWARD_DUE_TO_BAR_COUNT)];
1066 	st->fwd_bar_cnt =
1067 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
1068 			      FORWARD_DUE_TO_BAR_COUNT, val);
1069 
1070 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
1071 					 DUPLICATE_COUNT)];
1072 	st->dup_cnt =
1073 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
1074 			      DUPLICATE_COUNT, val);
1075 
1076 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
1077 					 FRAMES_IN_ORDER_COUNT)];
1078 	st->frms_in_order_cnt =
1079 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
1080 			      FRAMES_IN_ORDER_COUNT, val);
1081 
1082 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
1083 					 BAR_RECEIVED_COUNT)];
1084 	st->bar_rcvd_cnt =
1085 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
1086 			      BAR_RECEIVED_COUNT, val);
1087 
1088 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_20,
1089 					 MPDU_FRAMES_PROCESSED_COUNT)];
1090 	st->mpdu_frms_cnt =
1091 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_20,
1092 			      MPDU_FRAMES_PROCESSED_COUNT, val);
1093 
1094 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_21,
1095 					 MSDU_FRAMES_PROCESSED_COUNT)];
1096 	st->msdu_frms_cnt =
1097 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_21,
1098 			      MSDU_FRAMES_PROCESSED_COUNT, val);
1099 
1100 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_22,
1101 					 TOTAL_PROCESSED_BYTE_COUNT)];
1102 	st->total_cnt =
1103 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_22,
1104 			      TOTAL_PROCESSED_BYTE_COUNT, val);
1105 
1106 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
1107 					 LATE_RECEIVE_MPDU_COUNT)];
1108 	st->late_recv_mpdu_cnt =
1109 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
1110 			      LATE_RECEIVE_MPDU_COUNT, val);
1111 
1112 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
1113 					 WINDOW_JUMP_2K)];
1114 	st->win_jump_2k =
1115 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
1116 			      WINDOW_JUMP_2K, val);
1117 
1118 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
1119 					 HOLE_COUNT)];
1120 	st->hole_cnt =
1121 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
1122 			      HOLE_COUNT, val);
1123 }
1124 qdf_export_symbol(hal_reo_queue_stats_status);
1125 
1126 inline void
1127 hal_reo_flush_queue_status(uint32_t *reo_desc,
1128 			   struct hal_reo_flush_queue_status *st,
1129 			   hal_soc_handle_t hal_soc_hdl)
1130 {
1131 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1132 	uint32_t val;
1133 
1134 	/* Offsets of descriptor fields defined in HW headers start
1135 	 * from the field after TLV header */
1136 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1137 
1138 	/* header */
1139 	hal_reo_status_get_header(reo_desc, HAL_REO_FLUSH_QUEUE_STATUS_TLV,
1140 					&(st->header), hal_soc);
1141 
1142 	/* error bit */
1143 	val = reo_desc[HAL_OFFSET(REO_FLUSH_QUEUE_STATUS_2,
1144 					 ERROR_DETECTED)];
1145 	st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
1146 				  val);
1147 }
1148 qdf_export_symbol(hal_reo_flush_queue_status);
1149 
1150 inline void
1151 hal_reo_flush_cache_status(uint32_t *reo_desc,
1152 			   struct hal_reo_flush_cache_status *st,
1153 			   hal_soc_handle_t hal_soc_hdl)
1154 {
1155 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1156 	uint32_t val;
1157 
1158 	/* Offsets of descriptor fields defined in HW headers start
1159 	 * from the field after TLV header */
1160 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1161 
1162 	/* header */
1163 	hal_reo_status_get_header(reo_desc, HAL_REO_FLUSH_CACHE_STATUS_TLV,
1164 					&(st->header), hal_soc);
1165 
1166 	/* error bit */
1167 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1168 					 ERROR_DETECTED)];
1169 	st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
1170 				  val);
1171 
1172 	/* block error */
1173 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1174 					 BLOCK_ERROR_DETAILS)];
1175 	st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1176 					BLOCK_ERROR_DETAILS,
1177 					val);
1178 	if (!st->block_error)
1179 		qdf_set_bit(hal_soc->index,
1180 			    (unsigned long *)&hal_soc->reo_res_bitmap);
1181 
1182 	/* cache flush status */
1183 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1184 					 CACHE_CONTROLLER_FLUSH_STATUS_HIT)];
1185 	st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1186 					CACHE_CONTROLLER_FLUSH_STATUS_HIT,
1187 					val);
1188 
1189 	/* cache flush descriptor type */
1190 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1191 				  CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)];
1192 	st->cache_flush_status_desc_type =
1193 		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1194 			      CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE,
1195 			      val);
1196 
1197 	/* cache flush count */
1198 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1199 				  CACHE_CONTROLLER_FLUSH_COUNT)];
1200 	st->cache_flush_cnt =
1201 		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1202 			      CACHE_CONTROLLER_FLUSH_COUNT,
1203 			      val);
1204 
1205 }
1206 qdf_export_symbol(hal_reo_flush_cache_status);
1207 
1208 inline void hal_reo_unblock_cache_status(uint32_t *reo_desc,
1209 					 hal_soc_handle_t hal_soc_hdl,
1210 					 struct hal_reo_unblk_cache_status *st)
1211 {
1212 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1213 	uint32_t val;
1214 
1215 	/* Offsets of descriptor fields defined in HW headers start
1216 	 * from the field after TLV header */
1217 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1218 
1219 	/* header */
1220 	hal_reo_status_get_header(reo_desc, HAL_REO_UNBLK_CACHE_STATUS_TLV,
1221 					&st->header, hal_soc);
1222 
1223 	/* error bit */
1224 	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
1225 				  ERROR_DETECTED)];
1226 	st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
1227 				  ERROR_DETECTED,
1228 				  val);
1229 
1230 	/* unblock type */
1231 	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
1232 				  UNBLOCK_TYPE)];
1233 	st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
1234 					 UNBLOCK_TYPE,
1235 					 val);
1236 
1237 	if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX))
1238 		qdf_clear_bit(hal_soc->index,
1239 			      (unsigned long *)&hal_soc->reo_res_bitmap);
1240 }
1241 qdf_export_symbol(hal_reo_unblock_cache_status);
1242 
1243 inline void hal_reo_flush_timeout_list_status(
1244 			 uint32_t *reo_desc,
1245 			 struct hal_reo_flush_timeout_list_status *st,
1246 			 hal_soc_handle_t hal_soc_hdl)
1247 
1248 {
1249 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1250 	uint32_t val;
1251 
1252 	/* Offsets of descriptor fields defined in HW headers start
1253 	 * from the field after TLV header */
1254 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1255 
1256 	/* header */
1257 	hal_reo_status_get_header(reo_desc, HAL_REO_TIMOUT_LIST_STATUS_TLV,
1258 					&(st->header), hal_soc);
1259 
1260 	/* error bit */
1261 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1262 					 ERROR_DETECTED)];
1263 	st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1264 				  ERROR_DETECTED,
1265 				  val);
1266 
1267 	/* list empty */
1268 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1269 					 TIMOUT_LIST_EMPTY)];
1270 	st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1271 					TIMOUT_LIST_EMPTY,
1272 					val);
1273 
1274 	/* release descriptor count */
1275 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1276 					 RELEASE_DESC_COUNT)];
1277 	st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1278 				       RELEASE_DESC_COUNT,
1279 				       val);
1280 
1281 	/* forward buf count */
1282 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1283 					 FORWARD_BUF_COUNT)];
1284 	st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1285 				       FORWARD_BUF_COUNT,
1286 				       val);
1287 }
1288 qdf_export_symbol(hal_reo_flush_timeout_list_status);
1289 
1290 inline void hal_reo_desc_thres_reached_status(
1291 			 uint32_t *reo_desc,
1292 			 struct hal_reo_desc_thres_reached_status *st,
1293 			 hal_soc_handle_t hal_soc_hdl)
1294 {
1295 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1296 	uint32_t val;
1297 
1298 	/* Offsets of descriptor fields defined in HW headers start
1299 	 * from the field after TLV header */
1300 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1301 
1302 	/* header */
1303 	hal_reo_status_get_header(reo_desc,
1304 			      HAL_REO_DESC_THRES_STATUS_TLV,
1305 			      &(st->header), hal_soc);
1306 
1307 	/* threshold index */
1308 	val = reo_desc[HAL_OFFSET_DW(
1309 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
1310 				 THRESHOLD_INDEX)];
1311 	st->thres_index = HAL_GET_FIELD(
1312 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
1313 				THRESHOLD_INDEX,
1314 				val);
1315 
1316 	/* link desc counters */
1317 	val = reo_desc[HAL_OFFSET_DW(
1318 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
1319 				 LINK_DESCRIPTOR_COUNTER0)];
1320 	st->link_desc_counter0 = HAL_GET_FIELD(
1321 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
1322 				LINK_DESCRIPTOR_COUNTER0,
1323 				val);
1324 
1325 	val = reo_desc[HAL_OFFSET_DW(
1326 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
1327 				 LINK_DESCRIPTOR_COUNTER1)];
1328 	st->link_desc_counter1 = HAL_GET_FIELD(
1329 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
1330 				LINK_DESCRIPTOR_COUNTER1,
1331 				val);
1332 
1333 	val = reo_desc[HAL_OFFSET_DW(
1334 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
1335 				 LINK_DESCRIPTOR_COUNTER2)];
1336 	st->link_desc_counter2 = HAL_GET_FIELD(
1337 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
1338 				LINK_DESCRIPTOR_COUNTER2,
1339 				val);
1340 
1341 	val = reo_desc[HAL_OFFSET_DW(
1342 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
1343 				 LINK_DESCRIPTOR_COUNTER_SUM)];
1344 	st->link_desc_counter_sum = HAL_GET_FIELD(
1345 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
1346 				LINK_DESCRIPTOR_COUNTER_SUM,
1347 				val);
1348 }
1349 qdf_export_symbol(hal_reo_desc_thres_reached_status);
1350 
1351 inline void
1352 hal_reo_rx_update_queue_status(uint32_t *reo_desc,
1353 			       struct hal_reo_update_rx_queue_status *st,
1354 			       hal_soc_handle_t hal_soc_hdl)
1355 {
1356 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1357 
1358 	/* Offsets of descriptor fields defined in HW headers start
1359 	 * from the field after TLV header */
1360 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1361 
1362 	/* header */
1363 	hal_reo_status_get_header(reo_desc,
1364 			      HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV,
1365 			      &(st->header), hal_soc);
1366 }
1367 qdf_export_symbol(hal_reo_rx_update_queue_status);
1368 
1369 /**
1370  * hal_reo_init_cmd_ring() - Initialize descriptors of REO command SRNG
1371  * with command number
1372  * @hal_soc: Handle to HAL SoC structure
1373  * @hal_ring: Handle to HAL SRNG structure
1374  *
1375  * Return: none
1376  */
1377 inline void hal_reo_init_cmd_ring(hal_soc_handle_t hal_soc_hdl,
1378 				  hal_ring_handle_t hal_ring_hdl)
1379 {
1380 	int cmd_num;
1381 	uint32_t *desc_addr;
1382 	struct hal_srng_params srng_params;
1383 	uint32_t desc_size;
1384 	uint32_t num_desc;
1385 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
1386 
1387 	hal_get_srng_params(hal_soc_hdl, hal_ring_hdl, &srng_params);
1388 
1389 	desc_addr = (uint32_t *)(srng_params.ring_base_vaddr);
1390 	desc_addr += (sizeof(struct tlv_32_hdr) >> 2);
1391 	desc_size = hal_srng_get_entrysize(soc, REO_CMD) >> 2;
1392 	num_desc = srng_params.num_entries;
1393 	cmd_num = 1;
1394 	while (num_desc) {
1395 		/* Offsets of descriptor fields defined in HW headers start
1396 		 * from the field after TLV header */
1397 		HAL_DESC_SET_FIELD(desc_addr, UNIFORM_REO_CMD_HEADER_0,
1398 			REO_CMD_NUMBER, cmd_num);
1399 		desc_addr += desc_size;
1400 		num_desc--; cmd_num++;
1401 	}
1402 
1403 	soc->reo_res_bitmap = 0;
1404 }
1405 qdf_export_symbol(hal_reo_init_cmd_ring);
1406