xref: /wlan-dirver/qca-wifi-host-cmn/hal/wifi3.0/hal_reo.c (revision 97f44cd39e4ff816eaa1710279d28cf6b9e65ad9)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "hal_api.h"
20 #include "hal_hw_headers.h"
21 #include "hal_reo.h"
22 #include "hal_tx.h"
23 #include "hal_rx.h"
24 #include "qdf_module.h"
25 
26 /* TODO: See if the following definition is available in HW headers */
27 #define HAL_REO_OWNED 4
28 #define HAL_REO_QUEUE_DESC 8
29 #define HAL_REO_QUEUE_EXT_DESC 9
30 
31 /* TODO: Using associated link desc counter 1 for Rx. Check with FW on
32  * how these counters are assigned
33  */
34 #define HAL_RX_LINK_DESC_CNTR 1
35 /* TODO: Following definition should be from HW headers */
36 #define HAL_DESC_REO_OWNED 4
37 
/**
 * hal_uniform_desc_hdr_setup() - Populate the uniform descriptor header
 *	(owner and buffer type) at the start of a REO queue descriptor
 * @desc: pointer to the descriptor memory (first dword of the header)
 * @owner: owner info to program (e.g. HAL_DESC_REO_OWNED)
 * @buffer_type: descriptor type (HAL_REO_QUEUE_DESC or
 *	HAL_REO_QUEUE_EXT_DESC)
 */
static inline void hal_uniform_desc_hdr_setup(uint32_t *desc, uint32_t owner,
	uint32_t buffer_type)
{
	HAL_DESC_SET_FIELD(desc, UNIFORM_DESCRIPTOR_HEADER_0, OWNER,
		owner);
	HAL_DESC_SET_FIELD(desc, UNIFORM_DESCRIPTOR_HEADER_0, BUFFER_TYPE,
		buffer_type);
}
51 
#ifndef TID_TO_WME_AC
/* WMM access categories */
#define WME_AC_BE 0 /* best effort */
#define WME_AC_BK 1 /* background */
#define WME_AC_VI 2 /* video */
#define WME_AC_VO 3 /* voice */

/* Map a TID to its WMM access category (802.11 UP-to-AC mapping) */
#define TID_TO_WME_AC(_tid) ( \
	(((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
	(((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \
	(((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
	WME_AC_VO)
#endif
/* TID value used for non-QoS frames */
#define HAL_NON_QOS_TID 16

#ifdef HAL_DISABLE_NON_BA_2K_JUMP_ERROR
static inline uint32_t hal_update_non_ba_win_size(int tid,
						  uint32_t ba_window_size)
{
	/* 2k-jump WAR disabled: keep the negotiated window size */
	return ba_window_size;
}
#else
static inline uint32_t hal_update_non_ba_win_size(int tid,
						  uint32_t ba_window_size)
{
	/*
	 * Bump a window of 1 to 2 for QoS TIDs so that REO raises a
	 * 2k-jump exception when aggregates arrive in the non-BA case.
	 */
	if (ba_window_size != 1 || tid == HAL_NON_QOS_TID)
		return ba_window_size;

	return ba_window_size + 1;
}
#endif
82 
/**
 * hal_reo_qdesc_setup - Setup HW REO queue descriptor
 *
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @tid: TID this queue serves (HAL_NON_QOS_TID for non-QoS traffic)
 * @ba_window_size: BlockAck window size (clamped below to at least 1)
 * @start_seq: Starting sequence number; programmed as SSN only when it
 *	fits in 12 bits (<= 0xfff)
 * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
 * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
 *	(not referenced in this function)
 * @pn_type: PN type (HAL_PN_WPA / HAL_PN_WAPI_EVEN / HAL_PN_WAPI_UNEVEN,
 *	anything else disables the PN check) selecting PN check and size
 */
void hal_reo_qdesc_setup(hal_soc_handle_t hal_soc_hdl, int tid,
			 uint32_t ba_window_size,
			 uint32_t start_seq, void *hw_qdesc_vaddr,
			 qdf_dma_addr_t hw_qdesc_paddr,
			 int pn_type)
{
	uint32_t *reo_queue_desc = (uint32_t *)hw_qdesc_vaddr;
	uint32_t *reo_queue_ext_desc;
	uint32_t reg_val;
	uint32_t pn_enable;
	uint32_t pn_size = 0;

	qdf_mem_zero(hw_qdesc_vaddr, sizeof(struct rx_reo_queue));

	/* Mark the descriptor REO-owned and of REO-queue type */
	hal_uniform_desc_hdr_setup(reo_queue_desc, HAL_DESC_REO_OWNED,
		HAL_REO_QUEUE_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_desc, UNIFORM_DESCRIPTOR_HEADER_0,
		RESERVED_0A, 0xDDBEEF);

	/* This a just a SW meta data and will be copied to REO destination
	 * descriptors indicated by hardware.
	 * TODO: Setting TID in this field. See if we should set something else.
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_1,
		RECEIVE_QUEUE_NUMBER, tid);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
		VLD, 1);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
		ASSOCIATED_LINK_DESCRIPTOR_COUNTER, HAL_RX_LINK_DESC_CNTR);

	/*
	 * Fields DISABLE_DUPLICATE_DETECTION and SOFT_REORDER_ENABLE will be 0
	 */

	/* Program the WMM access category derived from the TID */
	reg_val = TID_TO_WME_AC(tid);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, AC, reg_val);

	if (ba_window_size < 1)
		ba_window_size = 1;

	/* WAR to get 2k exception in Non BA case.
	 * Setting window size to 2 to get 2k jump exception
	 * when we receive aggregates in Non BA case
	 */
	ba_window_size = hal_update_non_ba_win_size(tid, ba_window_size);

	/* Set RTY bit for non-BA case. Duplicate detection is currently not
	 * done by HW in non-BA case if RTY bit is not set.
	 * TODO: This is a temporary War and should be removed once HW fix is
	 * made to check and discard duplicates even if RTY bit is not set.
	 */
	if (ba_window_size == 1)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, RTY, 1);

	/* HW field is the window size minus one */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, BA_WINDOW_SIZE,
		ba_window_size - 1);

	/* Select PN check enablement and PN size from the security type */
	switch (pn_type) {
	case HAL_PN_WPA:
		pn_enable = 1;
		pn_size = PN_SIZE_48;
		break;
	case HAL_PN_WAPI_EVEN:
	case HAL_PN_WAPI_UNEVEN:
		pn_enable = 1;
		pn_size = PN_SIZE_128;
		break;
	default:
		pn_enable = 0;
		break;
	}

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_CHECK_NEEDED,
		pn_enable);

	if (pn_type == HAL_PN_WAPI_EVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
			PN_SHALL_BE_EVEN, 1);
	else if (pn_type == HAL_PN_WAPI_UNEVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
			PN_SHALL_BE_UNEVEN, 1);

	/*
	 *  TODO: Need to check if PN handling in SW needs to be enabled
	 *  So far this is not a requirement
	 */

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_SIZE,
		pn_size);

	/* TODO: Check if RX_REO_QUEUE_2_IGNORE_AMPDU_FLAG need to be set
	 * based on BA window size and/or AMPDU capabilities
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
		IGNORE_AMPDU_FLAG, 1);

	/* SSN is a 12-bit field; skip programming out-of-range values */
	if (start_seq <= 0xfff)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SSN,
			start_seq);

	/* TODO: SVLD should be set to 1 if a valid SSN is received in ADDBA,
	 * but REO is not delivering packets if we set it to 1. Need to enable
	 * this once the issue is resolved
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SVLD, 0);

	/* TODO: Check if we should set start PN for WAPI */

#ifdef notyet
	/* Setup first queue extension if BA window size is more than 1 */
	if (ba_window_size > 1) {
		reo_queue_ext_desc =
			(uint32_t *)(((struct rx_reo_queue *)reo_queue_desc) +
			1);
		qdf_mem_zero(reo_queue_ext_desc,
			sizeof(struct rx_reo_queue_ext));
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
	}
	/* Setup second queue extension if BA window size is more than 105 */
	if (ba_window_size > 105) {
		reo_queue_ext_desc = (uint32_t *)
			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
		qdf_mem_zero(reo_queue_ext_desc,
			sizeof(struct rx_reo_queue_ext));
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
	}
	/* Setup third queue extension if BA window size is more than 210 */
	if (ba_window_size > 210) {
		reo_queue_ext_desc = (uint32_t *)
			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
		qdf_mem_zero(reo_queue_ext_desc,
			sizeof(struct rx_reo_queue_ext));
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
	}
#else
	/* TODO: HW queue descriptors are currently allocated for max BA
	 * window size for all QOS TIDs so that same descriptor can be used
	 * later when ADDBA request is recevied. This should be changed to
	 * allocate HW queue descriptors based on BA window size being
	 * negotiated (0 for non BA cases), and reallocate when BA window
	 * size changes and also send WMI message to FW to change the REO
	 * queue descriptor in Rx peer entry as part of dp_rx_tid_update.
	 */
	if (tid != HAL_NON_QOS_TID) {
		/* Three extension descriptors follow the base descriptor
		 * contiguously in memory; zero and initialize all of them.
		 */
		reo_queue_ext_desc = (uint32_t *)
			(((struct rx_reo_queue *)reo_queue_desc) + 1);
		qdf_mem_zero(reo_queue_ext_desc, 3 *
			sizeof(struct rx_reo_queue_ext));
		/* Initialize first reo queue extension descriptor */
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
		/* Fixed pattern in reserved bits for debugging */
		HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xADBEEF);
		/* Initialize second reo queue extension descriptor */
		reo_queue_ext_desc = (uint32_t *)
			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
		/* Fixed pattern in reserved bits for debugging */
		HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xBDBEEF);
		/* Initialize third reo queue extension descriptor */
		reo_queue_ext_desc = (uint32_t *)
			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
		/* Fixed pattern in reserved bits for debugging */
		HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xCDBEEF);
	}
#endif
}
271 qdf_export_symbol(hal_reo_qdesc_setup);
272 
273 /**
274  * hal_get_ba_aging_timeout - Get BA Aging timeout
275  *
276  * @hal_soc: Opaque HAL SOC handle
277  * @ac: Access category
278  * @value: window size to get
279  */
280 void hal_get_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
281 			      uint32_t *value)
282 {
283 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
284 
285 	switch (ac) {
286 	case WME_AC_BE:
287 		*value = HAL_REG_READ(soc,
288 				      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
289 				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
290 		break;
291 	case WME_AC_BK:
292 		*value = HAL_REG_READ(soc,
293 				      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
294 				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
295 		break;
296 	case WME_AC_VI:
297 		*value = HAL_REG_READ(soc,
298 				      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
299 				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
300 		break;
301 	case WME_AC_VO:
302 		*value = HAL_REG_READ(soc,
303 				      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
304 				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
305 		break;
306 	default:
307 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
308 			  "Invalid AC: %d\n", ac);
309 	}
310 }
311 
312 qdf_export_symbol(hal_get_ba_aging_timeout);
313 
314 /**
315  * hal_set_ba_aging_timeout - Set BA Aging timeout
316  *
317  * @hal_soc: Opaque HAL SOC handle
318  * @ac: Access category
319  * ac: 0 - Background, 1 - Best Effort, 2 - Video, 3 - Voice
320  * @value: Input value to set
321  */
322 void hal_set_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
323 			      uint32_t value)
324 {
325 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
326 
327 	switch (ac) {
328 	case WME_AC_BE:
329 		HAL_REG_WRITE(soc,
330 			      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
331 			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
332 			      value * 1000);
333 		break;
334 	case WME_AC_BK:
335 		HAL_REG_WRITE(soc,
336 			      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
337 			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
338 			      value * 1000);
339 		break;
340 	case WME_AC_VI:
341 		HAL_REG_WRITE(soc,
342 			      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
343 			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
344 			      value * 1000);
345 		break;
346 	case WME_AC_VO:
347 		HAL_REG_WRITE(soc,
348 			      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
349 			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
350 			      value * 1000);
351 		break;
352 	default:
353 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
354 			  "Invalid AC: %d\n", ac);
355 	}
356 }
357 
358 qdf_export_symbol(hal_set_ba_aging_timeout);
359 
/* Mask covering the 4 REO cache blocking resources */
#define BLOCK_RES_MASK		0xF

/* Return the 0-based position of the least significant 1-bit of @x within
 * BLOCK_RES_MASK; wraps to 0xFF when none of the four bits is set.
 */
static inline uint8_t hal_find_one_bit(uint8_t x)
{
	uint8_t lsb_set = (x & (~x + 1)) & BLOCK_RES_MASK;
	uint8_t idx = 0;

	while (lsb_set) {
		idx++;
		lsb_set >>= 1;
	}

	return idx - 1;
}

/* Return the 0-based position of the least significant 0-bit of @x within
 * BLOCK_RES_MASK; wraps to 0xFF when all four resource bits are set.
 */
static inline uint8_t hal_find_zero_bit(uint8_t x)
{
	uint8_t lsb_clear = (~x & (x + 1)) & BLOCK_RES_MASK;
	uint8_t idx = 0;

	while (lsb_clear) {
		idx++;
		lsb_clear >>= 1;
	}

	return idx - 1;
}
382 
/**
 * hal_reo_cmd_set_descr_addr() - Program a descriptor physical address
 *	(split into bits 31:0 and 39:32) into a REO command descriptor
 * @reo_desc: pointer to the REO command descriptor, already advanced
 *	past the TLV header
 * @type: REO command type, selects which command's address fields to set
 * @paddr_lo: bits 31:0 of the queue/flush descriptor physical address
 * @paddr_hi: bits 39:32 of the queue/flush descriptor physical address
 */
inline void hal_reo_cmd_set_descr_addr(uint32_t *reo_desc,
				       enum hal_reo_cmd_type type,
				       uint32_t paddr_lo,
				       uint8_t paddr_hi)
{
	switch (type) {
	case CMD_GET_QUEUE_STATS:
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_1,
			RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2,
				    RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_1,
					FLUSH_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
					FLUSH_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_CACHE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_1,
					FLUSH_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
					FLUSH_ADDR_39_32, paddr_hi);
		break;
	case CMD_UPDATE_RX_REO_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_1,
					RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
					RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Invalid REO command type", __func__);
		break;
	}
}
419 
/**
 * hal_reo_cmd_queue_stats() - Post a REO_GET_QUEUE_STATS command to the
 *	REO command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters; std.addr_lo/std.addr_hi give the REO queue
 *	descriptor address, u.stats_params.clear requests stats clearing
 *
 * Return: the HW-assigned command number read back from the posted
 *	descriptor, or -EBUSY when no command ring entry is available
 */
inline int hal_reo_cmd_queue_stats(hal_ring_handle_t  hal_ring_hdl,
				   hal_soc_handle_t hal_soc_hdl,
				   struct hal_reo_cmd_params *cmd)

{
	uint32_t *reo_desc, val;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E,
			     sizeof(struct reo_get_queue_stats));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Zero the command body, skipping the uniform command header dwords */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_get_queue_stats) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_GET_QUEUE_STATS,
				   cmd->std.addr_lo,
				   cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2, CLEAR_STATS,
			      cmd->u.stats_params.clear);

	/* If a runtime PM reference is obtained (get returns 0), commit the
	 * ring update to HW now and drop the reference; otherwise reap the
	 * entry and mark the ring for a deferred flush.
	 */
	if (hif_pm_runtime_get(hal_soc->hif_handle,
			       RTPM_ID_HAL_REO_CMD) == 0) {
		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		hif_pm_runtime_put(hal_soc->hif_handle,
				   RTPM_ID_HAL_REO_CMD);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}

	/* Read back the command number HW/SW stamped in the header */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
472 qdf_export_symbol(hal_reo_cmd_queue_stats);
473 
/**
 * hal_reo_cmd_flush_queue() - Post a REO_FLUSH_QUEUE command to the REO
 *	command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters; std.addr_lo/std.addr_hi give the descriptor
 *	address to flush, u.fl_queue_params optionally requests blocking
 *	the descriptor address after the flush
 *
 * Return: the HW-assigned command number read back from the posted
 *	descriptor, or -EBUSY when no command ring entry is available
 */
inline int hal_reo_cmd_flush_queue(hal_ring_handle_t hal_ring_hdl,
				   hal_soc_handle_t hal_soc_hdl,
				   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E,
			     sizeof(struct reo_flush_queue));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Zero the command body, skipping the uniform command header dwords */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_queue) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_QUEUE, cmd->std.addr_lo,
		cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
		BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH,
		cmd->u.fl_queue_params.block_use_after_flush);

	/* Blocking resource index is meaningful only when blocking is on */
	if (cmd->u.fl_queue_params.block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
			BLOCK_RESOURCE_INDEX, cmd->u.fl_queue_params.index);
	}

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
520 qdf_export_symbol(hal_reo_cmd_flush_queue);
521 
/**
 * hal_reo_cmd_flush_cache() - Post a REO_FLUSH_CACHE command to the REO
 *	command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters; std.addr_lo/std.addr_hi give the address to
 *	flush, u.fl_cache_params selects flush/block/invalidate options
 *
 * Return: the HW-assigned command number read back from the posted
 *	descriptor, or -EBUSY when no ring entry or no blocking resource
 *	is available
 */
inline int hal_reo_cmd_flush_cache(hal_ring_handle_t hal_ring_hdl,
				   hal_soc_handle_t hal_soc_hdl,
				   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_flush_cache_params *cp;
	uint8_t index = 0;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	cp = &cmd->u.fl_cache_params;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);

	/* We need a cache block resource for this operation, and REO HW has
	 * only 4 such blocking resources. These resources are managed using
	 * reo_res_bitmap, and we return failure if none is available.
	 */
	if (cp->block_use_after_flush) {
		/* NOTE(review): the free index is chosen here but the bitmap
		 * bit itself is not set in this function — presumably done
		 * by the caller/status handler; confirm before relying on it.
		 */
		index = hal_find_zero_bit(hal_soc->reo_res_bitmap);
		if (index > 3) {
			qdf_print("No blocking resource available!");
			hal_srng_access_end(hal_soc, hal_ring_hdl);
			return -EBUSY;
		}
		hal_soc->index = index;
	}

	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		hal_srng_dump(hal_ring_handle_to_hal_srng(hal_ring_hdl));
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E,
			     sizeof(struct reo_flush_cache));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Zero the command body, skipping the uniform command header dwords */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_CACHE, cmd->std.addr_lo,
				   cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		FORWARD_ALL_MPDUS_IN_QUEUE, cp->fwd_mpdus_in_queue);

	/* set it to 0 for now */
	cp->rel_block_index = 0;
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		RELEASE_CACHE_BLOCK_INDEX, cp->rel_block_index);

	if (cp->block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			CACHE_BLOCK_RESOURCE_INDEX, index);
	}

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		FLUSH_WITHOUT_INVALIDATE, cp->flush_no_inval);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		BLOCK_CACHE_USAGE_AFTER_FLUSH, cp->block_use_after_flush);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, FLUSH_ENTIRE_CACHE,
		cp->flush_all);

	/* If a runtime PM reference is obtained (get returns 0), commit the
	 * ring update to HW now and drop the reference; otherwise reap the
	 * entry and mark the ring for a deferred flush.
	 */
	if (hif_pm_runtime_get(hal_soc->hif_handle,
			       RTPM_ID_HAL_REO_CMD) == 0) {
		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		hif_pm_runtime_put(hal_soc->hif_handle,
				   RTPM_ID_HAL_REO_CMD);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
609 qdf_export_symbol(hal_reo_cmd_flush_cache);
610 
/**
 * hal_reo_cmd_unblock_cache() - Post a REO_UNBLOCK_CACHE command to the
 *	REO command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters; u.unblk_cache_params gives the unblock type
 *	and, for UNBLOCK_RES_INDEX, the blocking resource index
 *
 * Return: the HW-assigned command number read back from the posted
 *	descriptor, or -EBUSY when no ring entry is available or no
 *	blocking resource is currently in use
 */
inline int hal_reo_cmd_unblock_cache(hal_ring_handle_t hal_ring_hdl,
				     hal_soc_handle_t hal_soc_hdl,
				     struct hal_reo_cmd_params *cmd)

{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;
	uint8_t index = 0;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		/* NOTE(review): the index found here only validates that some
		 * resource is in use; the value actually programmed below is
		 * cmd->u.unblk_cache_params.index — confirm callers keep the
		 * two consistent.
		 */
		index = hal_find_one_bit(hal_soc->reo_res_bitmap);
		if (index > 3) {
			hal_srng_access_end(hal_soc, hal_ring_hdl);
			qdf_print("No blocking resource to unblock!");
			return -EBUSY;
		}
	}

	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E,
			     sizeof(struct reo_unblock_cache));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Zero the command body, skipping the uniform command header dwords */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_unblock_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
		UNBLOCK_TYPE, cmd->u.unblk_cache_params.type);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
			CACHE_BLOCK_RESOURCE_INDEX,
			cmd->u.unblk_cache_params.index);
	}

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
666 qdf_export_symbol(hal_reo_cmd_unblock_cache);
667 
/**
 * hal_reo_cmd_flush_timeout_list() - Post a REO_FLUSH_TIMEOUT_LIST
 *	command to the REO command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters; u.fl_tim_list_params gives the AC timeout
 *	list plus minimum release-descriptor and forward-buffer counts
 *
 * Return: the HW-assigned command number read back from the posted
 *	descriptor, or -EBUSY when no command ring entry is available
 */
inline int hal_reo_cmd_flush_timeout_list(hal_ring_handle_t hal_ring_hdl,
					  hal_soc_handle_t hal_soc_hdl,
					  struct hal_reo_cmd_params *cmd)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E,
			     sizeof(struct reo_flush_timeout_list));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Zero the command body, skipping the uniform command header dwords */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_timeout_list) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	/* AC_TIMOUT_LIST is the HW header's spelling of "AC timeout list" */
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_1, AC_TIMOUT_LIST,
		cmd->u.fl_tim_list_params.ac_list);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
		MINIMUM_RELEASE_DESC_COUNT,
		cmd->u.fl_tim_list_params.min_rel_desc);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
		MINIMUM_FORWARD_BUF_COUNT,
		cmd->u.fl_tim_list_params.min_fwd_buf);

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
713 qdf_export_symbol(hal_reo_cmd_flush_timeout_list);
714 
715 inline int hal_reo_cmd_update_rx_queue(hal_ring_handle_t hal_ring_hdl,
716 				       hal_soc_handle_t hal_soc_hdl,
717 				       struct hal_reo_cmd_params *cmd)
718 {
719 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
720 	uint32_t *reo_desc, val;
721 	struct hal_reo_cmd_update_queue_params *p;
722 
723 	p = &cmd->u.upd_queue_params;
724 
725 	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
726 	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
727 	if (!reo_desc) {
728 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
729 			"%s: Out of cmd ring entries", __func__);
730 		hal_srng_access_end(hal_soc, hal_ring_hdl);
731 		return -EBUSY;
732 	}
733 
734 	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E,
735 			     sizeof(struct reo_update_rx_reo_queue));
736 
737 	/* Offsets of descriptor fields defined in HW headers start from
738 	 * the field after TLV header */
739 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
740 	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
741 		     sizeof(struct reo_update_rx_reo_queue) -
742 		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));
743 
744 	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
745 		REO_STATUS_REQUIRED, cmd->std.need_status);
746 
747 	hal_reo_cmd_set_descr_addr(reo_desc, CMD_UPDATE_RX_REO_QUEUE,
748 		cmd->std.addr_lo, cmd->std.addr_hi);
749 
750 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
751 		UPDATE_RECEIVE_QUEUE_NUMBER, p->update_rx_queue_num);
752 
753 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, UPDATE_VLD,
754 			      p->update_vld);
755 
756 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
757 		UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
758 		p->update_assoc_link_desc);
759 
760 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
761 		UPDATE_DISABLE_DUPLICATE_DETECTION,
762 		p->update_disable_dup_detect);
763 
764 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
765 		UPDATE_DISABLE_DUPLICATE_DETECTION,
766 		p->update_disable_dup_detect);
767 
768 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
769 		UPDATE_SOFT_REORDER_ENABLE,
770 		p->update_soft_reorder_enab);
771 
772 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
773 		UPDATE_AC, p->update_ac);
774 
775 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
776 		UPDATE_BAR, p->update_bar);
777 
778 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
779 		UPDATE_BAR, p->update_bar);
780 
781 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
782 		UPDATE_RTY, p->update_rty);
783 
784 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
785 		UPDATE_CHK_2K_MODE, p->update_chk_2k_mode);
786 
787 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
788 		UPDATE_OOR_MODE, p->update_oor_mode);
789 
790 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
791 		UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size);
792 
793 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
794 		UPDATE_PN_CHECK_NEEDED, p->update_pn_check_needed);
795 
796 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
797 		UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even);
798 
799 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
800 		UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven);
801 
802 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
803 		UPDATE_PN_HANDLING_ENABLE, p->update_pn_hand_enab);
804 
805 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
806 		UPDATE_PN_SIZE, p->update_pn_size);
807 
808 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
809 		UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu);
810 
811 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
812 		UPDATE_SVLD, p->update_svld);
813 
814 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
815 		UPDATE_SSN, p->update_ssn);
816 
817 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
818 		UPDATE_SEQ_2K_ERROR_DETECTED_FLAG,
819 		p->update_seq_2k_err_detect);
820 
821 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
822 		UPDATE_PN_VALID, p->update_pn_valid);
823 
824 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
825 		UPDATE_PN, p->update_pn);
826 
827 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
828 		RECEIVE_QUEUE_NUMBER, p->rx_queue_num);
829 
830 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
831 		VLD, p->vld);
832 
833 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
834 		ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
835 		p->assoc_link_desc);
836 
837 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
838 		DISABLE_DUPLICATE_DETECTION, p->disable_dup_detect);
839 
840 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
841 		SOFT_REORDER_ENABLE, p->soft_reorder_enab);
842 
843 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, AC, p->ac);
844 
845 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
846 		BAR, p->bar);
847 
848 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
849 		CHK_2K_MODE, p->chk_2k_mode);
850 
851 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
852 		RTY, p->rty);
853 
854 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
855 		OOR_MODE, p->oor_mode);
856 
857 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
858 		PN_CHECK_NEEDED, p->pn_check_needed);
859 
860 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
861 		PN_SHALL_BE_EVEN, p->pn_even);
862 
863 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
864 		PN_SHALL_BE_UNEVEN, p->pn_uneven);
865 
866 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
867 		PN_HANDLING_ENABLE, p->pn_hand_enab);
868 
869 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
870 		IGNORE_AMPDU_FLAG, p->ignore_ampdu);
871 
872 	if (p->ba_window_size < 1)
873 		p->ba_window_size = 1;
874 	/*
875 	 * WAR to get 2k exception in Non BA case.
876 	 * Setting window size to 2 to get 2k jump exception
877 	 * when we receive aggregates in Non BA case
878 	 */
879 	if (p->ba_window_size == 1)
880 		p->ba_window_size++;
881 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
882 		BA_WINDOW_SIZE, p->ba_window_size - 1);
883 
884 	if (p->pn_size == 24)
885 		p->pn_size = PN_SIZE_24;
886 	else if (p->pn_size == 48)
887 		p->pn_size = PN_SIZE_48;
888 	else if (p->pn_size == 128)
889 		p->pn_size = PN_SIZE_128;
890 
891 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
892 		PN_SIZE, p->pn_size);
893 
894 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
895 		SVLD, p->svld);
896 
897 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
898 		SSN, p->ssn);
899 
900 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
901 		SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect);
902 
903 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
904 		PN_ERROR_DETECTED_FLAG, p->pn_err_detect);
905 
906 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_5,
907 		PN_31_0, p->pn_31_0);
908 
909 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_6,
910 		PN_63_32, p->pn_63_32);
911 
912 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_7,
913 		PN_95_64, p->pn_95_64);
914 
915 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_8,
916 		PN_127_96, p->pn_127_96);
917 
918 	if (hif_pm_runtime_get(hal_soc->hif_handle,
919 			       RTPM_ID_HAL_REO_CMD) == 0) {
920 		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
921 		hif_pm_runtime_put(hal_soc->hif_handle,
922 				   RTPM_ID_HAL_REO_CMD);
923 	} else {
924 		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
925 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
926 		hal_srng_inc_flush_cnt(hal_ring_hdl);
927 	}
928 
929 	val = reo_desc[CMD_HEADER_DW_OFFSET];
930 	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
931 				     val);
932 }
933 qdf_export_symbol(hal_reo_cmd_update_rx_queue);
934 
/**
 * hal_reo_queue_stats_status() - Parse a REO_GET_QUEUE_STATS status TLV
 * @reo_desc: pointer to the status descriptor, starting at the TLV header
 * @st: output structure populated with the parsed queue statistics
 * @hal_soc_hdl: opaque handle to the HAL SOC
 *
 * Extracts the common status header plus SSN, current reorder index,
 * PN value, rx bitmap words, enqueue/dequeue timestamps and the various
 * MPDU/MSDU/byte counters from the hardware status descriptor into @st.
 */
inline void
hal_reo_queue_stats_status(uint32_t *reo_desc,
			   struct hal_reo_queue_status *st,
			   hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	hal_reo_status_get_header(reo_desc, HAL_REO_QUEUE_STATS_STATUS_TLV,
					&(st->header), hal_soc);

	/* SSN */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2, SSN)];
	st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2, SSN, val);

	/* current index */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2,
					 CURRENT_INDEX)];
	st->curr_idx =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2,
			      CURRENT_INDEX, val);

	/* PN bits */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_3,
					 PN_31_0)];
	st->pn_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_3,
			      PN_31_0, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_4,
					 PN_63_32)];
	st->pn_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_4,
			      PN_63_32, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_5,
					 PN_95_64)];
	st->pn_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_5,
			      PN_95_64, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_6,
					 PN_127_96)];
	st->pn_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_6,
			      PN_127_96, val);

	/* timestamps */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_7,
					 LAST_RX_ENQUEUE_TIMESTAMP)];
	st->last_rx_enq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_7,
			      LAST_RX_ENQUEUE_TIMESTAMP, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_8,
					 LAST_RX_DEQUEUE_TIMESTAMP)];
	st->last_rx_deq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_8,
			      LAST_RX_DEQUEUE_TIMESTAMP, val);

	/* rx bitmap */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_9,
					 RX_BITMAP_31_0)];
	st->rx_bitmap_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_9,
			      RX_BITMAP_31_0, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_10,
					 RX_BITMAP_63_32)];
	st->rx_bitmap_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_10,
			      RX_BITMAP_63_32, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_11,
					 RX_BITMAP_95_64)];
	st->rx_bitmap_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_11,
			      RX_BITMAP_95_64, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_12,
					 RX_BITMAP_127_96)];
	st->rx_bitmap_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_12,
			      RX_BITMAP_127_96, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_13,
					 RX_BITMAP_159_128)];
	st->rx_bitmap_159_128 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_13,
			      RX_BITMAP_159_128, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_14,
					 RX_BITMAP_191_160)];
	st->rx_bitmap_191_160 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_14,
			      RX_BITMAP_191_160, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_15,
					 RX_BITMAP_223_192)];
	st->rx_bitmap_223_192 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_15,
			      RX_BITMAP_223_192, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_16,
					 RX_BITMAP_255_224)];
	st->rx_bitmap_255_224 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_16,
			      RX_BITMAP_255_224, val);

	/* various counts */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
					 CURRENT_MPDU_COUNT)];
	st->curr_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
			      CURRENT_MPDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
					 CURRENT_MSDU_COUNT)];
	st->curr_msdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
			      CURRENT_MSDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
					 TIMEOUT_COUNT)];
	st->fwd_timeout_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      TIMEOUT_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
					 FORWARD_DUE_TO_BAR_COUNT)];
	st->fwd_bar_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      FORWARD_DUE_TO_BAR_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
					 DUPLICATE_COUNT)];
	st->dup_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      DUPLICATE_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
					 FRAMES_IN_ORDER_COUNT)];
	st->frms_in_order_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
			      FRAMES_IN_ORDER_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
					 BAR_RECEIVED_COUNT)];
	st->bar_rcvd_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
			      BAR_RECEIVED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_20,
					 MPDU_FRAMES_PROCESSED_COUNT)];
	st->mpdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_20,
			      MPDU_FRAMES_PROCESSED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_21,
					 MSDU_FRAMES_PROCESSED_COUNT)];
	st->msdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_21,
			      MSDU_FRAMES_PROCESSED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_22,
					 TOTAL_PROCESSED_BYTE_COUNT)];
	st->total_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_22,
			      TOTAL_PROCESSED_BYTE_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
					 LATE_RECEIVE_MPDU_COUNT)];
	st->late_recv_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      LATE_RECEIVE_MPDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
					 WINDOW_JUMP_2K)];
	st->win_jump_2k =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      WINDOW_JUMP_2K, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
					 HOLE_COUNT)];
	st->hole_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      HOLE_COUNT, val);
}
qdf_export_symbol(hal_reo_queue_stats_status);
1129 
1130 inline void
1131 hal_reo_flush_queue_status(uint32_t *reo_desc,
1132 			   struct hal_reo_flush_queue_status *st,
1133 			   hal_soc_handle_t hal_soc_hdl)
1134 {
1135 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1136 	uint32_t val;
1137 
1138 	/* Offsets of descriptor fields defined in HW headers start
1139 	 * from the field after TLV header */
1140 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1141 
1142 	/* header */
1143 	hal_reo_status_get_header(reo_desc, HAL_REO_FLUSH_QUEUE_STATUS_TLV,
1144 					&(st->header), hal_soc);
1145 
1146 	/* error bit */
1147 	val = reo_desc[HAL_OFFSET(REO_FLUSH_QUEUE_STATUS_2,
1148 					 ERROR_DETECTED)];
1149 	st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
1150 				  val);
1151 }
1152 qdf_export_symbol(hal_reo_flush_queue_status);
1153 
1154 inline void
1155 hal_reo_flush_cache_status(uint32_t *reo_desc,
1156 			   struct hal_reo_flush_cache_status *st,
1157 			   hal_soc_handle_t hal_soc_hdl)
1158 {
1159 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1160 	uint32_t val;
1161 
1162 	/* Offsets of descriptor fields defined in HW headers start
1163 	 * from the field after TLV header */
1164 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1165 
1166 	/* header */
1167 	hal_reo_status_get_header(reo_desc, HAL_REO_FLUSH_CACHE_STATUS_TLV,
1168 					&(st->header), hal_soc);
1169 
1170 	/* error bit */
1171 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1172 					 ERROR_DETECTED)];
1173 	st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
1174 				  val);
1175 
1176 	/* block error */
1177 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1178 					 BLOCK_ERROR_DETAILS)];
1179 	st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1180 					BLOCK_ERROR_DETAILS,
1181 					val);
1182 	if (!st->block_error)
1183 		qdf_set_bit(hal_soc->index,
1184 			    (unsigned long *)&hal_soc->reo_res_bitmap);
1185 
1186 	/* cache flush status */
1187 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1188 					 CACHE_CONTROLLER_FLUSH_STATUS_HIT)];
1189 	st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1190 					CACHE_CONTROLLER_FLUSH_STATUS_HIT,
1191 					val);
1192 
1193 	/* cache flush descriptor type */
1194 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1195 				  CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)];
1196 	st->cache_flush_status_desc_type =
1197 		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1198 			      CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE,
1199 			      val);
1200 
1201 	/* cache flush count */
1202 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1203 				  CACHE_CONTROLLER_FLUSH_COUNT)];
1204 	st->cache_flush_cnt =
1205 		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1206 			      CACHE_CONTROLLER_FLUSH_COUNT,
1207 			      val);
1208 
1209 }
1210 qdf_export_symbol(hal_reo_flush_cache_status);
1211 
1212 inline void hal_reo_unblock_cache_status(uint32_t *reo_desc,
1213 					 hal_soc_handle_t hal_soc_hdl,
1214 					 struct hal_reo_unblk_cache_status *st)
1215 {
1216 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1217 	uint32_t val;
1218 
1219 	/* Offsets of descriptor fields defined in HW headers start
1220 	 * from the field after TLV header */
1221 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1222 
1223 	/* header */
1224 	hal_reo_status_get_header(reo_desc, HAL_REO_UNBLK_CACHE_STATUS_TLV,
1225 					&st->header, hal_soc);
1226 
1227 	/* error bit */
1228 	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
1229 				  ERROR_DETECTED)];
1230 	st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
1231 				  ERROR_DETECTED,
1232 				  val);
1233 
1234 	/* unblock type */
1235 	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
1236 				  UNBLOCK_TYPE)];
1237 	st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
1238 					 UNBLOCK_TYPE,
1239 					 val);
1240 
1241 	if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX))
1242 		qdf_clear_bit(hal_soc->index,
1243 			      (unsigned long *)&hal_soc->reo_res_bitmap);
1244 }
1245 qdf_export_symbol(hal_reo_unblock_cache_status);
1246 
1247 inline void hal_reo_flush_timeout_list_status(
1248 			 uint32_t *reo_desc,
1249 			 struct hal_reo_flush_timeout_list_status *st,
1250 			 hal_soc_handle_t hal_soc_hdl)
1251 
1252 {
1253 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1254 	uint32_t val;
1255 
1256 	/* Offsets of descriptor fields defined in HW headers start
1257 	 * from the field after TLV header */
1258 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1259 
1260 	/* header */
1261 	hal_reo_status_get_header(reo_desc, HAL_REO_TIMOUT_LIST_STATUS_TLV,
1262 					&(st->header), hal_soc);
1263 
1264 	/* error bit */
1265 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1266 					 ERROR_DETECTED)];
1267 	st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1268 				  ERROR_DETECTED,
1269 				  val);
1270 
1271 	/* list empty */
1272 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1273 					 TIMOUT_LIST_EMPTY)];
1274 	st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1275 					TIMOUT_LIST_EMPTY,
1276 					val);
1277 
1278 	/* release descriptor count */
1279 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1280 					 RELEASE_DESC_COUNT)];
1281 	st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1282 				       RELEASE_DESC_COUNT,
1283 				       val);
1284 
1285 	/* forward buf count */
1286 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1287 					 FORWARD_BUF_COUNT)];
1288 	st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1289 				       FORWARD_BUF_COUNT,
1290 				       val);
1291 }
1292 qdf_export_symbol(hal_reo_flush_timeout_list_status);
1293 
1294 inline void hal_reo_desc_thres_reached_status(
1295 			 uint32_t *reo_desc,
1296 			 struct hal_reo_desc_thres_reached_status *st,
1297 			 hal_soc_handle_t hal_soc_hdl)
1298 {
1299 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1300 	uint32_t val;
1301 
1302 	/* Offsets of descriptor fields defined in HW headers start
1303 	 * from the field after TLV header */
1304 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1305 
1306 	/* header */
1307 	hal_reo_status_get_header(reo_desc,
1308 			      HAL_REO_DESC_THRES_STATUS_TLV,
1309 			      &(st->header), hal_soc);
1310 
1311 	/* threshold index */
1312 	val = reo_desc[HAL_OFFSET_DW(
1313 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
1314 				 THRESHOLD_INDEX)];
1315 	st->thres_index = HAL_GET_FIELD(
1316 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
1317 				THRESHOLD_INDEX,
1318 				val);
1319 
1320 	/* link desc counters */
1321 	val = reo_desc[HAL_OFFSET_DW(
1322 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
1323 				 LINK_DESCRIPTOR_COUNTER0)];
1324 	st->link_desc_counter0 = HAL_GET_FIELD(
1325 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
1326 				LINK_DESCRIPTOR_COUNTER0,
1327 				val);
1328 
1329 	val = reo_desc[HAL_OFFSET_DW(
1330 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
1331 				 LINK_DESCRIPTOR_COUNTER1)];
1332 	st->link_desc_counter1 = HAL_GET_FIELD(
1333 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
1334 				LINK_DESCRIPTOR_COUNTER1,
1335 				val);
1336 
1337 	val = reo_desc[HAL_OFFSET_DW(
1338 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
1339 				 LINK_DESCRIPTOR_COUNTER2)];
1340 	st->link_desc_counter2 = HAL_GET_FIELD(
1341 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
1342 				LINK_DESCRIPTOR_COUNTER2,
1343 				val);
1344 
1345 	val = reo_desc[HAL_OFFSET_DW(
1346 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
1347 				 LINK_DESCRIPTOR_COUNTER_SUM)];
1348 	st->link_desc_counter_sum = HAL_GET_FIELD(
1349 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
1350 				LINK_DESCRIPTOR_COUNTER_SUM,
1351 				val);
1352 }
1353 qdf_export_symbol(hal_reo_desc_thres_reached_status);
1354 
1355 inline void
1356 hal_reo_rx_update_queue_status(uint32_t *reo_desc,
1357 			       struct hal_reo_update_rx_queue_status *st,
1358 			       hal_soc_handle_t hal_soc_hdl)
1359 {
1360 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1361 
1362 	/* Offsets of descriptor fields defined in HW headers start
1363 	 * from the field after TLV header */
1364 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1365 
1366 	/* header */
1367 	hal_reo_status_get_header(reo_desc,
1368 			      HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV,
1369 			      &(st->header), hal_soc);
1370 }
1371 qdf_export_symbol(hal_reo_rx_update_queue_status);
1372 
1373 /**
1374  * hal_reo_init_cmd_ring() - Initialize descriptors of REO command SRNG
1375  * with command number
1376  * @hal_soc: Handle to HAL SoC structure
1377  * @hal_ring: Handle to HAL SRNG structure
1378  *
1379  * Return: none
1380  */
1381 inline void hal_reo_init_cmd_ring(hal_soc_handle_t hal_soc_hdl,
1382 				  hal_ring_handle_t hal_ring_hdl)
1383 {
1384 	int cmd_num;
1385 	uint32_t *desc_addr;
1386 	struct hal_srng_params srng_params;
1387 	uint32_t desc_size;
1388 	uint32_t num_desc;
1389 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
1390 
1391 	hal_get_srng_params(hal_soc_hdl, hal_ring_hdl, &srng_params);
1392 
1393 	desc_addr = (uint32_t *)(srng_params.ring_base_vaddr);
1394 	desc_addr += (sizeof(struct tlv_32_hdr) >> 2);
1395 	desc_size = hal_srng_get_entrysize(soc, REO_CMD) >> 2;
1396 	num_desc = srng_params.num_entries;
1397 	cmd_num = 1;
1398 	while (num_desc) {
1399 		/* Offsets of descriptor fields defined in HW headers start
1400 		 * from the field after TLV header */
1401 		HAL_DESC_SET_FIELD(desc_addr, UNIFORM_REO_CMD_HEADER_0,
1402 			REO_CMD_NUMBER, cmd_num);
1403 		desc_addr += desc_size;
1404 		num_desc--; cmd_num++;
1405 	}
1406 
1407 	soc->reo_res_bitmap = 0;
1408 }
1409 qdf_export_symbol(hal_reo_init_cmd_ring);
1410