xref: /wlan-dirver/qca-wifi-host-cmn/hal/wifi3.0/hal_reo.c (revision 11f5a63a6cbdda84849a730de22f0a71e635d58c)
1 /*
2  * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "hal_api.h"
20 #include "hal_hw_headers.h"
21 #include "hal_reo.h"
22 #include "hal_tx.h"
23 #include "hal_rx.h"
24 #include "qdf_module.h"
25 
/* TODO: See if the following definition is available in HW headers */
/* Owner / buffer-type values programmed into the uniform descriptor header */
#define HAL_REO_OWNED 4
#define HAL_REO_QUEUE_DESC 8
#define HAL_REO_QUEUE_EXT_DESC 9

/* TODO: Using associated link desc counter 1 for Rx. Check with FW on
 * how these counters are assigned
 */
#define HAL_RX_LINK_DESC_CNTR 1
/* TODO: Following definition should be from HW headers */
#define HAL_DESC_REO_OWNED 4
38 /**
39  * hal_uniform_desc_hdr_setup - setup reo_queue_ext descritpro
40  * @owner - owner info
41  * @buffer_type - buffer type
42  */
43 static inline void hal_uniform_desc_hdr_setup(uint32_t *desc, uint32_t owner,
44 	uint32_t buffer_type)
45 {
46 	HAL_DESC_SET_FIELD(desc, UNIFORM_DESCRIPTOR_HEADER_0, OWNER,
47 		owner);
48 	HAL_DESC_SET_FIELD(desc, UNIFORM_DESCRIPTOR_HEADER_0, BUFFER_TYPE,
49 		buffer_type);
50 }
51 
#ifndef TID_TO_WME_AC
#define WME_AC_BE 0 /* best effort */
#define WME_AC_BK 1 /* background */
#define WME_AC_VI 2 /* video */
#define WME_AC_VO 3 /* voice */

/* Map an 802.11 TID to its WME access category:
 * TIDs 0 and 3 -> best effort, 1 and 2 -> background,
 * 4 and 5 -> video, anything else (6, 7) -> voice.
 */
#define TID_TO_WME_AC(_tid) ( \
	(((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
	(((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \
	(((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
	WME_AC_VO)
#endif
/* TID used for non-QoS frames (QoS TIDs are 0-15) */
#define HAL_NON_QOS_TID 16
65 
/**
 * hal_reo_qdesc_setup() - Setup HW REO queue descriptor
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @tid: TID of the queue (also written as SW metadata into the
 *	RECEIVE_QUEUE_NUMBER field)
 * @ba_window_size: BlockAck window size (adjusted internally, see WARs below)
 * @start_seq: Starting sequence number (programmed as SSN only when <= 0xfff)
 * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
 * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
 *	(NOTE(review): not referenced in this function body)
 * @pn_type: PN type (HAL_PN_WPA / HAL_PN_WAPI_EVEN / HAL_PN_WAPI_UNEVEN /
 *	other = PN check disabled)
 */
void hal_reo_qdesc_setup(hal_soc_handle_t hal_soc_hdl, int tid,
			 uint32_t ba_window_size,
			 uint32_t start_seq, void *hw_qdesc_vaddr,
			 qdf_dma_addr_t hw_qdesc_paddr,
			 int pn_type)
{
	uint32_t *reo_queue_desc = (uint32_t *)hw_qdesc_vaddr;
	uint32_t *reo_queue_ext_desc;
	uint32_t reg_val;
	uint32_t pn_enable;
	uint32_t pn_size = 0;

	qdf_mem_zero(hw_qdesc_vaddr, sizeof(struct rx_reo_queue));

	hal_uniform_desc_hdr_setup(reo_queue_desc, HAL_DESC_REO_OWNED,
		HAL_REO_QUEUE_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_desc, UNIFORM_DESCRIPTOR_HEADER_0,
		RESERVED_0A, 0xDDBEEF);

	/* This a just a SW meta data and will be copied to REO destination
	 * descriptors indicated by hardware.
	 * TODO: Setting TID in this field. See if we should set something else.
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_1,
		RECEIVE_QUEUE_NUMBER, tid);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
		VLD, 1);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
		ASSOCIATED_LINK_DESCRIPTOR_COUNTER, HAL_RX_LINK_DESC_CNTR);

	/*
	 * Fields DISABLE_DUPLICATE_DETECTION and SOFT_REORDER_ENABLE will be 0
	 */

	reg_val = TID_TO_WME_AC(tid);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, AC, reg_val);

	if (ba_window_size < 1)
		ba_window_size = 1;
	/* WAR to get 2k exception in Non BA case.
	 * Setting window size to 2 to get 2k jump exception
	 * when we receive aggregates in Non BA case
	 */
	if ((ba_window_size == 1) && (tid != HAL_NON_QOS_TID))
		ba_window_size++;
	/* Set RTY bit for non-BA case. Duplicate detection is currently not
	 * done by HW in non-BA case if RTY bit is not set.
	 * TODO: This is a temporary War and should be removed once HW fix is
	 * made to check and discard duplicates even if RTY bit is not set.
	 */
	if (ba_window_size == 1)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, RTY, 1);

	/* HW expects window size minus one in this field */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, BA_WINDOW_SIZE,
		ba_window_size - 1);

	/* Select PN check/size based on the cipher's PN type */
	switch (pn_type) {
	case HAL_PN_WPA:
		pn_enable = 1;
		pn_size = PN_SIZE_48;
		break;
	case HAL_PN_WAPI_EVEN:
	case HAL_PN_WAPI_UNEVEN:
		pn_enable = 1;
		pn_size = PN_SIZE_128;
		break;
	default:
		pn_enable = 0;
		break;
	}

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_CHECK_NEEDED,
		pn_enable);

	if (pn_type == HAL_PN_WAPI_EVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
			PN_SHALL_BE_EVEN, 1);
	else if (pn_type == HAL_PN_WAPI_UNEVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
			PN_SHALL_BE_UNEVEN, 1);

	/*
	 *  TODO: Need to check if PN handling in SW needs to be enabled
	 *  So far this is not a requirement
	 */

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_SIZE,
		pn_size);

	/* TODO: Check if RX_REO_QUEUE_2_IGNORE_AMPDU_FLAG need to be set
	 * based on BA window size and/or AMPDU capabilities
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
		IGNORE_AMPDU_FLAG, 1);

	/* SSN is a 12-bit field; larger start_seq values are not programmed */
	if (start_seq <= 0xfff)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SSN,
			start_seq);

	/* TODO: SVLD should be set to 1 if a valid SSN is received in ADDBA,
	 * but REO is not delivering packets if we set it to 1. Need to enable
	 * this once the issue is resolved
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SVLD, 0);

	/* TODO: Check if we should set start PN for WAPI */

#ifdef notyet
	/* Setup first queue extension if BA window size is more than 1 */
	if (ba_window_size > 1) {
		reo_queue_ext_desc =
			(uint32_t *)(((struct rx_reo_queue *)reo_queue_desc) +
			1);
		qdf_mem_zero(reo_queue_ext_desc,
			sizeof(struct rx_reo_queue_ext));
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
	}
	/* Setup second queue extension if BA window size is more than 105 */
	if (ba_window_size > 105) {
		reo_queue_ext_desc = (uint32_t *)
			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
		qdf_mem_zero(reo_queue_ext_desc,
			sizeof(struct rx_reo_queue_ext));
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
	}
	/* Setup third queue extension if BA window size is more than 210 */
	if (ba_window_size > 210) {
		reo_queue_ext_desc = (uint32_t *)
			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
		qdf_mem_zero(reo_queue_ext_desc,
			sizeof(struct rx_reo_queue_ext));
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
	}
#else
	/* TODO: HW queue descriptors are currently allocated for max BA
	 * window size for all QOS TIDs so that same descriptor can be used
	 * later when ADDBA request is recevied. This should be changed to
	 * allocate HW queue descriptors based on BA window size being
	 * negotiated (0 for non BA cases), and reallocate when BA window
	 * size changes and also send WMI message to FW to change the REO
	 * queue descriptor in Rx peer entry as part of dp_rx_tid_update.
	 */
	if (tid != HAL_NON_QOS_TID) {
		/* Extension descriptors live contiguously after the base
		 * rx_reo_queue structure; all three are always initialized.
		 */
		reo_queue_ext_desc = (uint32_t *)
			(((struct rx_reo_queue *)reo_queue_desc) + 1);
		qdf_mem_zero(reo_queue_ext_desc, 3 *
			sizeof(struct rx_reo_queue_ext));
		/* Initialize first reo queue extension descriptor */
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
		/* Fixed pattern in reserved bits for debugging */
		HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xADBEEF);
		/* Initialize second reo queue extension descriptor */
		reo_queue_ext_desc = (uint32_t *)
			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
		/* Fixed pattern in reserved bits for debugging */
		HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xBDBEEF);
		/* Initialize third reo queue extension descriptor */
		reo_queue_ext_desc = (uint32_t *)
			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
		/* Fixed pattern in reserved bits for debugging */
		HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xCDBEEF);
	}
#endif
}
253 qdf_export_symbol(hal_reo_qdesc_setup);
254 
255 /**
256  * hal_get_ba_aging_timeout - Get BA Aging timeout
257  *
258  * @hal_soc: Opaque HAL SOC handle
259  * @ac: Access category
260  * @value: window size to get
261  */
262 void hal_get_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
263 			      uint32_t *value)
264 {
265 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
266 
267 	switch (ac) {
268 	case WME_AC_BE:
269 		*value = HAL_REG_READ(soc,
270 				      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
271 				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
272 		break;
273 	case WME_AC_BK:
274 		*value = HAL_REG_READ(soc,
275 				      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
276 				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
277 		break;
278 	case WME_AC_VI:
279 		*value = HAL_REG_READ(soc,
280 				      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
281 				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
282 		break;
283 	case WME_AC_VO:
284 		*value = HAL_REG_READ(soc,
285 				      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
286 				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
287 		break;
288 	default:
289 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
290 			  "Invalid AC: %d\n", ac);
291 	}
292 }
293 
294 qdf_export_symbol(hal_get_ba_aging_timeout);
295 
/**
 * hal_set_ba_aging_timeout() - Set BA aging timeout
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @ac: WME access category — per the definitions above:
 *	0 - Best Effort, 1 - Background, 2 - Video, 3 - Voice
 * @value: timeout to program; multiplied by 1000 before writing (presumably
 *	milliseconds converted to the register's microsecond unit — mirror of
 *	the /1000 in hal_get_ba_aging_timeout(); confirm against HW docs)
 */
void hal_set_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
			      uint32_t value)
{
	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;

	switch (ac) {
	case WME_AC_BE:
		HAL_REG_WRITE(soc,
			      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
			      value * 1000);
		break;
	case WME_AC_BK:
		HAL_REG_WRITE(soc,
			      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
			      value * 1000);
		break;
	case WME_AC_VI:
		HAL_REG_WRITE(soc,
			      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
			      value * 1000);
		break;
	case WME_AC_VO:
		HAL_REG_WRITE(soc,
			      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
			      value * 1000);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid AC: %d\n", ac);
	}
}
339 
340 qdf_export_symbol(hal_set_ba_aging_timeout);
341 
#define BLOCK_RES_MASK		0xF
/**
 * hal_find_one_bit() - Find the least significant set bit of @x
 * @x: bitmap of the four REO blocking resources
 *
 * Return: position (0-3) of the lowest set bit within BLOCK_RES_MASK,
 * or 0xFF (uint8_t wrap of -1) when no bit inside the mask is set —
 * callers treat any result > 3 as "no resource".
 */
static inline uint8_t hal_find_one_bit(uint8_t x)
{
	/* x & -x isolates the lowest set bit */
	uint8_t lowest_set = (x & (~x + 1)) & BLOCK_RES_MASK;
	uint8_t bit = 0;

	while (lowest_set) {
		lowest_set >>= 1;
		bit++;
	}

	return bit - 1;
}
353 
/**
 * hal_find_zero_bit() - Find the least significant clear bit of @x
 * @x: bitmap of the four REO blocking resources
 *
 * Return: position (0-3) of the lowest clear bit among the four
 * block-resource bits, or 0xFF (uint8_t wrap of -1) when bits 0-3 of @x
 * are all set — callers treat any result > 3 as "no resource free".
 */
static inline uint8_t hal_find_zero_bit(uint8_t x)
{
	/* ~x & (x + 1) isolates the lowest clear bit; 0xF restricts the
	 * search to the four blocking resources (same as BLOCK_RES_MASK).
	 */
	uint8_t lowest_clear = (~x & (x + 1)) & 0xF;
	uint8_t bit = 0;

	while (lowest_clear) {
		lowest_clear >>= 1;
		bit++;
	}

	return bit - 1;
}
364 
/**
 * hal_reo_cmd_set_descr_addr() - Program the target descriptor physical
 *	address fields of a REO command descriptor
 * @reo_desc: pointer to the command descriptor body (past the TLV header)
 * @type: REO command type being built
 * @paddr_lo: bits 31:0 of the target descriptor physical address
 * @paddr_hi: bits 39:32 of the target descriptor physical address
 *
 * Only the address fields specific to @type are written; unknown command
 * types are rejected with an error trace and the descriptor is untouched.
 */
inline void hal_reo_cmd_set_descr_addr(uint32_t *reo_desc,
				       enum hal_reo_cmd_type type,
				       uint32_t paddr_lo,
				       uint8_t paddr_hi)
{
	switch (type) {
	case CMD_GET_QUEUE_STATS:
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_1,
			RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2,
				    RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_1,
					FLUSH_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
					FLUSH_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_CACHE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_1,
					FLUSH_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
					FLUSH_ADDR_39_32, paddr_hi);
		break;
	case CMD_UPDATE_RX_REO_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_1,
					RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
					RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Invalid REO command type", __func__);
		break;
	}
}
401 
/**
 * hal_reo_cmd_queue_stats() - Post a GET_QUEUE_STATS command to the REO
 *	command ring
 * @hal_ring_hdl: Opaque handle of the REO command ring
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters (queue descriptor address and clear-stats flag)
 *
 * Return: the REO command number read back from the command header, or
 * -EBUSY when no command ring entry is available.
 */
inline int hal_reo_cmd_queue_stats(hal_ring_handle_t  hal_ring_hdl,
				   hal_soc_handle_t hal_soc_hdl,
				   struct hal_reo_cmd_params *cmd)

{
	uint32_t *reo_desc, val;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E,
			     sizeof(struct reo_get_queue_stats));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear everything past the uniform command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_get_queue_stats) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_GET_QUEUE_STATS,
				   cmd->std.addr_lo,
				   cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2, CLEAR_STATS,
			      cmd->u.stats_params.clear);

	/* NOTE(review): when a runtime-PM reference is obtainable the ring
	 * access is completed normally; otherwise the entry is only reaped
	 * and the ring flagged for a deferred flush — inferred from the
	 * hif_pm_runtime_*/hal_srng_* naming, confirm against hif/hal docs.
	 */
	if (hif_pm_runtime_get(hal_soc->hif_handle) == 0) {
		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		hif_pm_runtime_put(hal_soc->hif_handle);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
452 qdf_export_symbol(hal_reo_cmd_queue_stats);
453 
/**
 * hal_reo_cmd_flush_queue() - Post a FLUSH_QUEUE command to the REO
 *	command ring
 * @hal_ring_hdl: Opaque handle of the REO command ring
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters (descriptor address, block-after-flush settings)
 *
 * NOTE(review): unlike hal_reo_cmd_queue_stats()/hal_reo_cmd_flush_cache(),
 * this function ends ring access unconditionally without the runtime-PM
 * get/put dance — confirm whether that asymmetry is intentional.
 *
 * Return: the REO command number read back from the command header, or
 * -EBUSY when no command ring entry is available.
 */
inline int hal_reo_cmd_flush_queue(hal_ring_handle_t hal_ring_hdl,
				   hal_soc_handle_t hal_soc_hdl,
				   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E,
			     sizeof(struct reo_flush_queue));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear everything past the uniform command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_queue) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_QUEUE, cmd->std.addr_lo,
		cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
		BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH,
		cmd->u.fl_queue_params.block_use_after_flush);

	/* The blocking resource index is only meaningful when the queue is
	 * to remain blocked after the flush */
	if (cmd->u.fl_queue_params.block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
			BLOCK_RESOURCE_INDEX, cmd->u.fl_queue_params.index);
	}

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
500 qdf_export_symbol(hal_reo_cmd_flush_queue);
501 
/**
 * hal_reo_cmd_flush_cache() - Post a FLUSH_CACHE command to the REO
 *	command ring
 * @hal_ring_hdl: Opaque handle of the REO command ring
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters (flush address, block/invalidate/flush-all flags)
 *
 * When @cmd requests blocking after the flush, one of the four HW cache
 * blocking resources is picked from hal_soc->reo_res_bitmap and its index
 * is programmed into the descriptor and remembered in hal_soc->index.
 *
 * Return: the REO command number read back from the command header, or
 * -EBUSY when no blocking resource or command ring entry is available.
 */
inline int hal_reo_cmd_flush_cache(hal_ring_handle_t hal_ring_hdl,
				   hal_soc_handle_t hal_soc_hdl,
				   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_flush_cache_params *cp;
	uint8_t index = 0;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	cp = &cmd->u.fl_cache_params;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);

	/* We need a cache block resource for this operation, and REO HW has
	 * only 4 such blocking resources. These resources are managed using
	 * reo_res_bitmap, and we return failure if none is available.
	 */
	if (cp->block_use_after_flush) {
		/* hal_find_zero_bit() returns 0xFF when all 4 bits are set */
		index = hal_find_zero_bit(hal_soc->reo_res_bitmap);
		if (index > 3) {
			qdf_print("%s, No blocking resource available!",
				  __func__);
			hal_srng_access_end(hal_soc, hal_ring_hdl);
			return -EBUSY;
		}
		hal_soc->index = index;
	}

	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		hal_srng_dump(hal_ring_handle_to_hal_srng(hal_ring_hdl));
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E,
			     sizeof(struct reo_flush_cache));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear everything past the uniform command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_CACHE, cmd->std.addr_lo,
				   cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		FORWARD_ALL_MPDUS_IN_QUEUE, cp->fwd_mpdus_in_queue);

	/* set it to 0 for now */
	cp->rel_block_index = 0;
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		RELEASE_CACHE_BLOCK_INDEX, cp->rel_block_index);

	if (cp->block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			CACHE_BLOCK_RESOURCE_INDEX, index);
	}

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		FLUSH_WITHOUT_INVALIDATE, cp->flush_no_inval);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		BLOCK_CACHE_USAGE_AFTER_FLUSH, cp->block_use_after_flush);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, FLUSH_ENTIRE_CACHE,
		cp->flush_all);

	/* NOTE(review): runtime-PM gated completion — see the identical
	 * pattern in hal_reo_cmd_queue_stats().
	 */
	if (hif_pm_runtime_get(hal_soc->hif_handle) == 0) {
		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		hif_pm_runtime_put(hal_soc->hif_handle);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
590 qdf_export_symbol(hal_reo_cmd_flush_cache);
591 
/**
 * hal_reo_cmd_unblock_cache() - Post an UNBLOCK_CACHE command to the REO
 *	command ring
 * @hal_ring_hdl: Opaque handle of the REO command ring
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters (unblock type and blocking resource index)
 *
 * Return: the REO command number read back from the command header, or
 * -EBUSY when no blocked resource exists or the command ring is full.
 */
inline int hal_reo_cmd_unblock_cache(hal_ring_handle_t hal_ring_hdl,
				     hal_soc_handle_t hal_soc_hdl,
				     struct hal_reo_cmd_params *cmd)

{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;
	uint8_t index = 0;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		/* NOTE(review): the index found here is only used to verify
		 * that some blocking resource is in use; the descriptor below
		 * is programmed with the caller-supplied index — confirm this
		 * is intentional.
		 */
		index = hal_find_one_bit(hal_soc->reo_res_bitmap);
		if (index > 3) {
			hal_srng_access_end(hal_soc, hal_ring_hdl);
			qdf_print("%s: No blocking resource to unblock!",
				  __func__);
			return -EBUSY;
		}
	}

	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E,
			     sizeof(struct reo_unblock_cache));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear everything past the uniform command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_unblock_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
		UNBLOCK_TYPE, cmd->u.unblk_cache_params.type);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
			CACHE_BLOCK_RESOURCE_INDEX,
			cmd->u.unblk_cache_params.index);
	}

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
648 qdf_export_symbol(hal_reo_cmd_unblock_cache);
649 
/**
 * hal_reo_cmd_flush_timeout_list() - Post a FLUSH_TIMEOUT_LIST command to
 *	the REO command ring
 * @hal_ring_hdl: Opaque handle of the REO command ring
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters (AC list, minimum release/forward counts)
 *
 * Return: the REO command number read back from the command header, or
 * -EBUSY when no command ring entry is available.
 */
inline int hal_reo_cmd_flush_timeout_list(hal_ring_handle_t hal_ring_hdl,
					  hal_soc_handle_t hal_soc_hdl,
					  struct hal_reo_cmd_params *cmd)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E,
			     sizeof(struct reo_flush_timeout_list));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear everything past the uniform command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_timeout_list) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	/* AC_TIMOUT_LIST spelling comes from the HW header definition */
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_1, AC_TIMOUT_LIST,
		cmd->u.fl_tim_list_params.ac_list);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
		MINIMUM_RELEASE_DESC_COUNT,
		cmd->u.fl_tim_list_params.min_rel_desc);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
		MINIMUM_FORWARD_BUF_COUNT,
		cmd->u.fl_tim_list_params.min_fwd_buf);

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
695 qdf_export_symbol(hal_reo_cmd_flush_timeout_list);
696 
697 inline int hal_reo_cmd_update_rx_queue(hal_ring_handle_t hal_ring_hdl,
698 				       hal_soc_handle_t hal_soc_hdl,
699 				       struct hal_reo_cmd_params *cmd)
700 {
701 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
702 	uint32_t *reo_desc, val;
703 	struct hal_reo_cmd_update_queue_params *p;
704 
705 	p = &cmd->u.upd_queue_params;
706 
707 	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
708 	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
709 	if (!reo_desc) {
710 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
711 			"%s: Out of cmd ring entries", __func__);
712 		hal_srng_access_end(hal_soc, hal_ring_hdl);
713 		return -EBUSY;
714 	}
715 
716 	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E,
717 			     sizeof(struct reo_update_rx_reo_queue));
718 
719 	/* Offsets of descriptor fields defined in HW headers start from
720 	 * the field after TLV header */
721 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
722 	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
723 		     sizeof(struct reo_update_rx_reo_queue) -
724 		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));
725 
726 	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
727 		REO_STATUS_REQUIRED, cmd->std.need_status);
728 
729 	hal_reo_cmd_set_descr_addr(reo_desc, CMD_UPDATE_RX_REO_QUEUE,
730 		cmd->std.addr_lo, cmd->std.addr_hi);
731 
732 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
733 		UPDATE_RECEIVE_QUEUE_NUMBER, p->update_rx_queue_num);
734 
735 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, UPDATE_VLD,
736 			      p->update_vld);
737 
738 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
739 		UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
740 		p->update_assoc_link_desc);
741 
742 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
743 		UPDATE_DISABLE_DUPLICATE_DETECTION,
744 		p->update_disable_dup_detect);
745 
746 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
747 		UPDATE_DISABLE_DUPLICATE_DETECTION,
748 		p->update_disable_dup_detect);
749 
750 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
751 		UPDATE_SOFT_REORDER_ENABLE,
752 		p->update_soft_reorder_enab);
753 
754 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
755 		UPDATE_AC, p->update_ac);
756 
757 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
758 		UPDATE_BAR, p->update_bar);
759 
760 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
761 		UPDATE_BAR, p->update_bar);
762 
763 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
764 		UPDATE_RTY, p->update_rty);
765 
766 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
767 		UPDATE_CHK_2K_MODE, p->update_chk_2k_mode);
768 
769 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
770 		UPDATE_OOR_MODE, p->update_oor_mode);
771 
772 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
773 		UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size);
774 
775 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
776 		UPDATE_PN_CHECK_NEEDED, p->update_pn_check_needed);
777 
778 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
779 		UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even);
780 
781 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
782 		UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven);
783 
784 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
785 		UPDATE_PN_HANDLING_ENABLE, p->update_pn_hand_enab);
786 
787 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
788 		UPDATE_PN_SIZE, p->update_pn_size);
789 
790 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
791 		UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu);
792 
793 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
794 		UPDATE_SVLD, p->update_svld);
795 
796 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
797 		UPDATE_SSN, p->update_ssn);
798 
799 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
800 		UPDATE_SEQ_2K_ERROR_DETECTED_FLAG,
801 		p->update_seq_2k_err_detect);
802 
803 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
804 		UPDATE_PN_VALID, p->update_pn_valid);
805 
806 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
807 		UPDATE_PN, p->update_pn);
808 
809 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
810 		RECEIVE_QUEUE_NUMBER, p->rx_queue_num);
811 
812 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
813 		VLD, p->vld);
814 
815 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
816 		ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
817 		p->assoc_link_desc);
818 
819 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
820 		DISABLE_DUPLICATE_DETECTION, p->disable_dup_detect);
821 
822 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
823 		SOFT_REORDER_ENABLE, p->soft_reorder_enab);
824 
825 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, AC, p->ac);
826 
827 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
828 		BAR, p->bar);
829 
830 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
831 		CHK_2K_MODE, p->chk_2k_mode);
832 
833 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
834 		RTY, p->rty);
835 
836 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
837 		OOR_MODE, p->oor_mode);
838 
839 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
840 		PN_CHECK_NEEDED, p->pn_check_needed);
841 
842 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
843 		PN_SHALL_BE_EVEN, p->pn_even);
844 
845 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
846 		PN_SHALL_BE_UNEVEN, p->pn_uneven);
847 
848 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
849 		PN_HANDLING_ENABLE, p->pn_hand_enab);
850 
851 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
852 		IGNORE_AMPDU_FLAG, p->ignore_ampdu);
853 
854 	if (p->ba_window_size < 1)
855 		p->ba_window_size = 1;
856 	/*
857 	 * WAR to get 2k exception in Non BA case.
858 	 * Setting window size to 2 to get 2k jump exception
859 	 * when we receive aggregates in Non BA case
860 	 */
861 	if (p->ba_window_size == 1)
862 		p->ba_window_size++;
863 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
864 		BA_WINDOW_SIZE, p->ba_window_size - 1);
865 
866 	if (p->pn_size == 24)
867 		p->pn_size = PN_SIZE_24;
868 	else if (p->pn_size == 48)
869 		p->pn_size = PN_SIZE_48;
870 	else if (p->pn_size == 128)
871 		p->pn_size = PN_SIZE_128;
872 
873 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
874 		PN_SIZE, p->pn_size);
875 
876 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
877 		SVLD, p->svld);
878 
879 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
880 		SSN, p->ssn);
881 
882 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
883 		SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect);
884 
885 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
886 		PN_ERROR_DETECTED_FLAG, p->pn_err_detect);
887 
888 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_5,
889 		PN_31_0, p->pn_31_0);
890 
891 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_6,
892 		PN_63_32, p->pn_63_32);
893 
894 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_7,
895 		PN_95_64, p->pn_95_64);
896 
897 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_8,
898 		PN_127_96, p->pn_127_96);
899 
900 	if (hif_pm_runtime_get(hal_soc->hif_handle) == 0) {
901 		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
902 		hif_pm_runtime_put(hal_soc->hif_handle);
903 	} else {
904 		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
905 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
906 		hal_srng_inc_flush_cnt(hal_ring_hdl);
907 	}
908 
909 	val = reo_desc[CMD_HEADER_DW_OFFSET];
910 	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
911 				     val);
912 }
913 qdf_export_symbol(hal_reo_cmd_update_rx_queue);
914 
/**
 * hal_reo_queue_stats_status() - Parse a REO_GET_QUEUE_STATS status TLV
 * @reo_desc: pointer to the status descriptor ring entry (TLV header first)
 * @st: output structure populated with the parsed queue statistics
 * @hal_soc_hdl: opaque HAL SoC handle
 *
 * Extracts the command status header, SSN, current index, 128-bit PN,
 * enqueue/dequeue timestamps, the 256-bit RX reorder bitmap and the
 * MPDU/MSDU/error counters from a REO_GET_QUEUE_STATS_STATUS descriptor
 * into @st.
 */
inline void
hal_reo_queue_stats_status(uint32_t *reo_desc,
			   struct hal_reo_queue_status *st,
			   hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	hal_reo_status_get_header(reo_desc, HAL_REO_QUEUE_STATS_STATUS_TLV,
					&(st->header), hal_soc);

	/* SSN */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2, SSN)];
	st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2, SSN, val);

	/* current index */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2,
					 CURRENT_INDEX)];
	st->curr_idx =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2,
			      CURRENT_INDEX, val);

	/* PN bits */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_3,
					 PN_31_0)];
	st->pn_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_3,
			      PN_31_0, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_4,
					 PN_63_32)];
	st->pn_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_4,
			      PN_63_32, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_5,
					 PN_95_64)];
	st->pn_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_5,
			      PN_95_64, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_6,
					 PN_127_96)];
	st->pn_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_6,
			      PN_127_96, val);

	/* timestamps */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_7,
					 LAST_RX_ENQUEUE_TIMESTAMP)];
	st->last_rx_enq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_7,
			      LAST_RX_ENQUEUE_TIMESTAMP, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_8,
					 LAST_RX_DEQUEUE_TIMESTAMP)];
	st->last_rx_deq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_8,
			      LAST_RX_DEQUEUE_TIMESTAMP, val);

	/* rx bitmap */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_9,
					 RX_BITMAP_31_0)];
	st->rx_bitmap_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_9,
			      RX_BITMAP_31_0, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_10,
					 RX_BITMAP_63_32)];
	st->rx_bitmap_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_10,
			      RX_BITMAP_63_32, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_11,
					 RX_BITMAP_95_64)];
	st->rx_bitmap_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_11,
			      RX_BITMAP_95_64, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_12,
					 RX_BITMAP_127_96)];
	st->rx_bitmap_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_12,
			      RX_BITMAP_127_96, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_13,
					 RX_BITMAP_159_128)];
	st->rx_bitmap_159_128 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_13,
			      RX_BITMAP_159_128, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_14,
					 RX_BITMAP_191_160)];
	st->rx_bitmap_191_160 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_14,
			      RX_BITMAP_191_160, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_15,
					 RX_BITMAP_223_192)];
	st->rx_bitmap_223_192 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_15,
			      RX_BITMAP_223_192, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_16,
					 RX_BITMAP_255_224)];
	st->rx_bitmap_255_224 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_16,
			      RX_BITMAP_255_224, val);

	/* various counts */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
					 CURRENT_MPDU_COUNT)];
	st->curr_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
			      CURRENT_MPDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
					 CURRENT_MSDU_COUNT)];
	st->curr_msdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
			      CURRENT_MSDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
					 TIMEOUT_COUNT)];
	st->fwd_timeout_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      TIMEOUT_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
					 FORWARD_DUE_TO_BAR_COUNT)];
	st->fwd_bar_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      FORWARD_DUE_TO_BAR_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
					 DUPLICATE_COUNT)];
	st->dup_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      DUPLICATE_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
					 FRAMES_IN_ORDER_COUNT)];
	st->frms_in_order_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
			      FRAMES_IN_ORDER_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
					 BAR_RECEIVED_COUNT)];
	st->bar_rcvd_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
			      BAR_RECEIVED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_20,
					 MPDU_FRAMES_PROCESSED_COUNT)];
	st->mpdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_20,
			      MPDU_FRAMES_PROCESSED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_21,
					 MSDU_FRAMES_PROCESSED_COUNT)];
	st->msdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_21,
			      MSDU_FRAMES_PROCESSED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_22,
					 TOTAL_PROCESSED_BYTE_COUNT)];
	st->total_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_22,
			      TOTAL_PROCESSED_BYTE_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
					 LATE_RECEIVE_MPDU_COUNT)];
	st->late_recv_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      LATE_RECEIVE_MPDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
					 WINDOW_JUMP_2K)];
	st->win_jump_2k =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      WINDOW_JUMP_2K, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
					 HOLE_COUNT)];
	st->hole_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      HOLE_COUNT, val);
}
qdf_export_symbol(hal_reo_queue_stats_status);
1109 
1110 inline void
1111 hal_reo_flush_queue_status(uint32_t *reo_desc,
1112 			   struct hal_reo_flush_queue_status *st,
1113 			   hal_soc_handle_t hal_soc_hdl)
1114 {
1115 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1116 	uint32_t val;
1117 
1118 	/* Offsets of descriptor fields defined in HW headers start
1119 	 * from the field after TLV header */
1120 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1121 
1122 	/* header */
1123 	hal_reo_status_get_header(reo_desc, HAL_REO_FLUSH_QUEUE_STATUS_TLV,
1124 					&(st->header), hal_soc);
1125 
1126 	/* error bit */
1127 	val = reo_desc[HAL_OFFSET(REO_FLUSH_QUEUE_STATUS_2,
1128 					 ERROR_DETECTED)];
1129 	st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
1130 				  val);
1131 }
1132 qdf_export_symbol(hal_reo_flush_queue_status);
1133 
1134 inline void
1135 hal_reo_flush_cache_status(uint32_t *reo_desc,
1136 			   struct hal_reo_flush_cache_status *st,
1137 			   hal_soc_handle_t hal_soc_hdl)
1138 {
1139 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1140 	uint32_t val;
1141 
1142 	/* Offsets of descriptor fields defined in HW headers start
1143 	 * from the field after TLV header */
1144 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1145 
1146 	/* header */
1147 	hal_reo_status_get_header(reo_desc, HAL_REO_FLUSH_CACHE_STATUS_TLV,
1148 					&(st->header), hal_soc);
1149 
1150 	/* error bit */
1151 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1152 					 ERROR_DETECTED)];
1153 	st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
1154 				  val);
1155 
1156 	/* block error */
1157 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1158 					 BLOCK_ERROR_DETAILS)];
1159 	st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1160 					BLOCK_ERROR_DETAILS,
1161 					val);
1162 	if (!st->block_error)
1163 		qdf_set_bit(hal_soc->index,
1164 			    (unsigned long *)&hal_soc->reo_res_bitmap);
1165 
1166 	/* cache flush status */
1167 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1168 					 CACHE_CONTROLLER_FLUSH_STATUS_HIT)];
1169 	st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1170 					CACHE_CONTROLLER_FLUSH_STATUS_HIT,
1171 					val);
1172 
1173 	/* cache flush descriptor type */
1174 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1175 				  CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)];
1176 	st->cache_flush_status_desc_type =
1177 		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1178 			      CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE,
1179 			      val);
1180 
1181 	/* cache flush count */
1182 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1183 				  CACHE_CONTROLLER_FLUSH_COUNT)];
1184 	st->cache_flush_cnt =
1185 		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1186 			      CACHE_CONTROLLER_FLUSH_COUNT,
1187 			      val);
1188 
1189 }
1190 qdf_export_symbol(hal_reo_flush_cache_status);
1191 
1192 inline void hal_reo_unblock_cache_status(uint32_t *reo_desc,
1193 					 hal_soc_handle_t hal_soc_hdl,
1194 					 struct hal_reo_unblk_cache_status *st)
1195 {
1196 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1197 	uint32_t val;
1198 
1199 	/* Offsets of descriptor fields defined in HW headers start
1200 	 * from the field after TLV header */
1201 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1202 
1203 	/* header */
1204 	hal_reo_status_get_header(reo_desc, HAL_REO_UNBLK_CACHE_STATUS_TLV,
1205 					&st->header, hal_soc);
1206 
1207 	/* error bit */
1208 	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
1209 				  ERROR_DETECTED)];
1210 	st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
1211 				  ERROR_DETECTED,
1212 				  val);
1213 
1214 	/* unblock type */
1215 	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
1216 				  UNBLOCK_TYPE)];
1217 	st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
1218 					 UNBLOCK_TYPE,
1219 					 val);
1220 
1221 	if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX))
1222 		qdf_clear_bit(hal_soc->index,
1223 			      (unsigned long *)&hal_soc->reo_res_bitmap);
1224 }
1225 qdf_export_symbol(hal_reo_unblock_cache_status);
1226 
1227 inline void hal_reo_flush_timeout_list_status(
1228 			 uint32_t *reo_desc,
1229 			 struct hal_reo_flush_timeout_list_status *st,
1230 			 hal_soc_handle_t hal_soc_hdl)
1231 
1232 {
1233 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1234 	uint32_t val;
1235 
1236 	/* Offsets of descriptor fields defined in HW headers start
1237 	 * from the field after TLV header */
1238 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1239 
1240 	/* header */
1241 	hal_reo_status_get_header(reo_desc, HAL_REO_TIMOUT_LIST_STATUS_TLV,
1242 					&(st->header), hal_soc);
1243 
1244 	/* error bit */
1245 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1246 					 ERROR_DETECTED)];
1247 	st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1248 				  ERROR_DETECTED,
1249 				  val);
1250 
1251 	/* list empty */
1252 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1253 					 TIMOUT_LIST_EMPTY)];
1254 	st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1255 					TIMOUT_LIST_EMPTY,
1256 					val);
1257 
1258 	/* release descriptor count */
1259 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1260 					 RELEASE_DESC_COUNT)];
1261 	st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1262 				       RELEASE_DESC_COUNT,
1263 				       val);
1264 
1265 	/* forward buf count */
1266 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1267 					 FORWARD_BUF_COUNT)];
1268 	st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1269 				       FORWARD_BUF_COUNT,
1270 				       val);
1271 }
1272 qdf_export_symbol(hal_reo_flush_timeout_list_status);
1273 
1274 inline void hal_reo_desc_thres_reached_status(
1275 			 uint32_t *reo_desc,
1276 			 struct hal_reo_desc_thres_reached_status *st,
1277 			 hal_soc_handle_t hal_soc_hdl)
1278 {
1279 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1280 	uint32_t val;
1281 
1282 	/* Offsets of descriptor fields defined in HW headers start
1283 	 * from the field after TLV header */
1284 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1285 
1286 	/* header */
1287 	hal_reo_status_get_header(reo_desc,
1288 			      HAL_REO_DESC_THRES_STATUS_TLV,
1289 			      &(st->header), hal_soc);
1290 
1291 	/* threshold index */
1292 	val = reo_desc[HAL_OFFSET_DW(
1293 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
1294 				 THRESHOLD_INDEX)];
1295 	st->thres_index = HAL_GET_FIELD(
1296 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
1297 				THRESHOLD_INDEX,
1298 				val);
1299 
1300 	/* link desc counters */
1301 	val = reo_desc[HAL_OFFSET_DW(
1302 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
1303 				 LINK_DESCRIPTOR_COUNTER0)];
1304 	st->link_desc_counter0 = HAL_GET_FIELD(
1305 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
1306 				LINK_DESCRIPTOR_COUNTER0,
1307 				val);
1308 
1309 	val = reo_desc[HAL_OFFSET_DW(
1310 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
1311 				 LINK_DESCRIPTOR_COUNTER1)];
1312 	st->link_desc_counter1 = HAL_GET_FIELD(
1313 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
1314 				LINK_DESCRIPTOR_COUNTER1,
1315 				val);
1316 
1317 	val = reo_desc[HAL_OFFSET_DW(
1318 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
1319 				 LINK_DESCRIPTOR_COUNTER2)];
1320 	st->link_desc_counter2 = HAL_GET_FIELD(
1321 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
1322 				LINK_DESCRIPTOR_COUNTER2,
1323 				val);
1324 
1325 	val = reo_desc[HAL_OFFSET_DW(
1326 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
1327 				 LINK_DESCRIPTOR_COUNTER_SUM)];
1328 	st->link_desc_counter_sum = HAL_GET_FIELD(
1329 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
1330 				LINK_DESCRIPTOR_COUNTER_SUM,
1331 				val);
1332 }
1333 qdf_export_symbol(hal_reo_desc_thres_reached_status);
1334 
1335 inline void
1336 hal_reo_rx_update_queue_status(uint32_t *reo_desc,
1337 			       struct hal_reo_update_rx_queue_status *st,
1338 			       hal_soc_handle_t hal_soc_hdl)
1339 {
1340 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1341 
1342 	/* Offsets of descriptor fields defined in HW headers start
1343 	 * from the field after TLV header */
1344 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1345 
1346 	/* header */
1347 	hal_reo_status_get_header(reo_desc,
1348 			      HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV,
1349 			      &(st->header), hal_soc);
1350 }
1351 qdf_export_symbol(hal_reo_rx_update_queue_status);
1352 
1353 /**
1354  * hal_reo_init_cmd_ring() - Initialize descriptors of REO command SRNG
1355  * with command number
1356  * @hal_soc: Handle to HAL SoC structure
1357  * @hal_ring: Handle to HAL SRNG structure
1358  *
1359  * Return: none
1360  */
1361 inline void hal_reo_init_cmd_ring(hal_soc_handle_t hal_soc_hdl,
1362 				  hal_ring_handle_t hal_ring_hdl)
1363 {
1364 	int cmd_num;
1365 	uint32_t *desc_addr;
1366 	struct hal_srng_params srng_params;
1367 	uint32_t desc_size;
1368 	uint32_t num_desc;
1369 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
1370 
1371 	hal_get_srng_params(hal_soc_hdl, hal_ring_hdl, &srng_params);
1372 
1373 	desc_addr = (uint32_t *)(srng_params.ring_base_vaddr);
1374 	desc_addr += (sizeof(struct tlv_32_hdr) >> 2);
1375 	desc_size = hal_srng_get_entrysize(soc, REO_CMD) >> 2;
1376 	num_desc = srng_params.num_entries;
1377 	cmd_num = 1;
1378 	while (num_desc) {
1379 		/* Offsets of descriptor fields defined in HW headers start
1380 		 * from the field after TLV header */
1381 		HAL_DESC_SET_FIELD(desc_addr, UNIFORM_REO_CMD_HEADER_0,
1382 			REO_CMD_NUMBER, cmd_num);
1383 		desc_addr += desc_size;
1384 		num_desc--; cmd_num++;
1385 	}
1386 
1387 	soc->reo_res_bitmap = 0;
1388 }
1389 qdf_export_symbol(hal_reo_init_cmd_ring);
1390