xref: /wlan-dirver/qca-wifi-host-cmn/hal/wifi3.0/hal_reo.c (revision a86b23ee68a2491aede2e03991f3fb37046f4e41)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "hal_api.h"
20 #include "hal_hw_headers.h"
21 #include "hal_reo.h"
22 #include "hal_tx.h"
23 #include "hal_rx.h"
24 #include "qdf_module.h"
25 
26 /* TODO: See if the following definition is available in HW headers */
27 #define HAL_REO_OWNED 4
28 #define HAL_REO_QUEUE_DESC 8
29 #define HAL_REO_QUEUE_EXT_DESC 9
30 
31 /* TODO: Using associated link desc counter 1 for Rx. Check with FW on
32  * how these counters are assigned
33  */
34 #define HAL_RX_LINK_DESC_CNTR 1
35 /* TODO: Following definition should be from HW headers */
36 #define HAL_DESC_REO_OWNED 4
37 
38 /**
39  * hal_uniform_desc_hdr_setup - setup reo_queue_ext descritpro
40  * @owner - owner info
41  * @buffer_type - buffer type
42  */
43 static inline void hal_uniform_desc_hdr_setup(uint32_t *desc, uint32_t owner,
44 	uint32_t buffer_type)
45 {
46 	HAL_DESC_SET_FIELD(desc, UNIFORM_DESCRIPTOR_HEADER_0, OWNER,
47 		owner);
48 	HAL_DESC_SET_FIELD(desc, UNIFORM_DESCRIPTOR_HEADER_0, BUFFER_TYPE,
49 		buffer_type);
50 }
51 
52 #ifndef TID_TO_WME_AC
53 #define WME_AC_BE 0 /* best effort */
54 #define WME_AC_BK 1 /* background */
55 #define WME_AC_VI 2 /* video */
56 #define WME_AC_VO 3 /* voice */
57 
58 #define TID_TO_WME_AC(_tid) ( \
59 	(((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
60 	(((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \
61 	(((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
62 	WME_AC_VO)
63 #endif
#define HAL_NON_QOS_TID 16

#ifdef HAL_DISABLE_NON_BA_2K_JUMP_ERROR
/**
 * hal_update_non_ba_win_size() - Adjust BA window size for non-BA peers
 * @tid: traffic identifier of the REO queue
 * @ba_window_size: negotiated BlockAck window size
 *
 * 2k-jump error generation for the non-BA case is disabled in this
 * build, so the window size is returned unmodified.
 *
 * Return: BA window size to program into the REO queue descriptor
 */
static inline uint32_t hal_update_non_ba_win_size(int tid,
						  uint32_t ba_window_size)
{
	return ba_window_size;
}
#else
/**
 * hal_update_non_ba_win_size() - Adjust BA window size for non-BA peers
 * @tid: traffic identifier of the REO queue
 * @ba_window_size: negotiated BlockAck window size
 *
 * WAR: a QoS TID with a window of 1 (no BA session) is bumped to 2 so
 * that REO raises a 2k-jump exception when aggregates are received
 * without a BlockAck session.
 *
 * Return: BA window size to program into the REO queue descriptor
 */
static inline uint32_t hal_update_non_ba_win_size(int tid,
						  uint32_t ba_window_size)
{
	return (tid != HAL_NON_QOS_TID && ba_window_size == 1) ?
		(ba_window_size + 1) : ba_window_size;
}
#endif
82 
83 /**
84  * hal_reo_qdesc_setup - Setup HW REO queue descriptor
85  *
86  * @hal_soc: Opaque HAL SOC handle
87  * @ba_window_size: BlockAck window size
88  * @start_seq: Starting sequence number
89  * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
90  * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
91  * @tid: TID
92  *
93  */
94 void hal_reo_qdesc_setup(hal_soc_handle_t hal_soc_hdl, int tid,
95 			 uint32_t ba_window_size,
96 			 uint32_t start_seq, void *hw_qdesc_vaddr,
97 			 qdf_dma_addr_t hw_qdesc_paddr,
98 			 int pn_type)
99 {
100 	uint32_t *reo_queue_desc = (uint32_t *)hw_qdesc_vaddr;
101 	uint32_t *reo_queue_ext_desc;
102 	uint32_t reg_val;
103 	uint32_t pn_enable;
104 	uint32_t pn_size = 0;
105 
106 	qdf_mem_zero(hw_qdesc_vaddr, sizeof(struct rx_reo_queue));
107 
108 	hal_uniform_desc_hdr_setup(reo_queue_desc, HAL_DESC_REO_OWNED,
109 		HAL_REO_QUEUE_DESC);
110 	/* Fixed pattern in reserved bits for debugging */
111 	HAL_DESC_SET_FIELD(reo_queue_desc, UNIFORM_DESCRIPTOR_HEADER_0,
112 		RESERVED_0A, 0xDDBEEF);
113 
114 	/* This a just a SW meta data and will be copied to REO destination
115 	 * descriptors indicated by hardware.
116 	 * TODO: Setting TID in this field. See if we should set something else.
117 	 */
118 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_1,
119 		RECEIVE_QUEUE_NUMBER, tid);
120 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
121 		VLD, 1);
122 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
123 		ASSOCIATED_LINK_DESCRIPTOR_COUNTER, HAL_RX_LINK_DESC_CNTR);
124 
125 	/*
126 	 * Fields DISABLE_DUPLICATE_DETECTION and SOFT_REORDER_ENABLE will be 0
127 	 */
128 
129 	reg_val = TID_TO_WME_AC(tid);
130 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, AC, reg_val);
131 
132 	if (ba_window_size < 1)
133 		ba_window_size = 1;
134 
135 	/* WAR to get 2k exception in Non BA case.
136 	 * Setting window size to 2 to get 2k jump exception
137 	 * when we receive aggregates in Non BA case
138 	 */
139 	ba_window_size = hal_update_non_ba_win_size(tid, ba_window_size);
140 
141 	/* Set RTY bit for non-BA case. Duplicate detection is currently not
142 	 * done by HW in non-BA case if RTY bit is not set.
143 	 * TODO: This is a temporary War and should be removed once HW fix is
144 	 * made to check and discard duplicates even if RTY bit is not set.
145 	 */
146 	if (ba_window_size == 1)
147 		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, RTY, 1);
148 
149 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, BA_WINDOW_SIZE,
150 		ba_window_size - 1);
151 
152 	switch (pn_type) {
153 	case HAL_PN_WPA:
154 		pn_enable = 1;
155 		pn_size = PN_SIZE_48;
156 		break;
157 	case HAL_PN_WAPI_EVEN:
158 	case HAL_PN_WAPI_UNEVEN:
159 		pn_enable = 1;
160 		pn_size = PN_SIZE_128;
161 		break;
162 	default:
163 		pn_enable = 0;
164 		break;
165 	}
166 
167 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_CHECK_NEEDED,
168 		pn_enable);
169 
170 	if (pn_type == HAL_PN_WAPI_EVEN)
171 		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
172 			PN_SHALL_BE_EVEN, 1);
173 	else if (pn_type == HAL_PN_WAPI_UNEVEN)
174 		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
175 			PN_SHALL_BE_UNEVEN, 1);
176 
177 	/*
178 	 *  TODO: Need to check if PN handling in SW needs to be enabled
179 	 *  So far this is not a requirement
180 	 */
181 
182 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_SIZE,
183 		pn_size);
184 
185 	/* TODO: Check if RX_REO_QUEUE_2_IGNORE_AMPDU_FLAG need to be set
186 	 * based on BA window size and/or AMPDU capabilities
187 	 */
188 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
189 		IGNORE_AMPDU_FLAG, 1);
190 
191 	if (start_seq <= 0xfff)
192 		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SSN,
193 			start_seq);
194 
195 	/* TODO: SVLD should be set to 1 if a valid SSN is received in ADDBA,
196 	 * but REO is not delivering packets if we set it to 1. Need to enable
197 	 * this once the issue is resolved
198 	 */
199 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SVLD, 0);
200 
201 	/* TODO: Check if we should set start PN for WAPI */
202 
203 #ifdef notyet
204 	/* Setup first queue extension if BA window size is more than 1 */
205 	if (ba_window_size > 1) {
206 		reo_queue_ext_desc =
207 			(uint32_t *)(((struct rx_reo_queue *)reo_queue_desc) +
208 			1);
209 		qdf_mem_zero(reo_queue_ext_desc,
210 			sizeof(struct rx_reo_queue_ext));
211 		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
212 			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
213 	}
214 	/* Setup second queue extension if BA window size is more than 105 */
215 	if (ba_window_size > 105) {
216 		reo_queue_ext_desc = (uint32_t *)
217 			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
218 		qdf_mem_zero(reo_queue_ext_desc,
219 			sizeof(struct rx_reo_queue_ext));
220 		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
221 			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
222 	}
223 	/* Setup third queue extension if BA window size is more than 210 */
224 	if (ba_window_size > 210) {
225 		reo_queue_ext_desc = (uint32_t *)
226 			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
227 		qdf_mem_zero(reo_queue_ext_desc,
228 			sizeof(struct rx_reo_queue_ext));
229 		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
230 			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
231 	}
232 #else
233 	/* TODO: HW queue descriptors are currently allocated for max BA
234 	 * window size for all QOS TIDs so that same descriptor can be used
235 	 * later when ADDBA request is recevied. This should be changed to
236 	 * allocate HW queue descriptors based on BA window size being
237 	 * negotiated (0 for non BA cases), and reallocate when BA window
238 	 * size changes and also send WMI message to FW to change the REO
239 	 * queue descriptor in Rx peer entry as part of dp_rx_tid_update.
240 	 */
241 	if (tid != HAL_NON_QOS_TID) {
242 		reo_queue_ext_desc = (uint32_t *)
243 			(((struct rx_reo_queue *)reo_queue_desc) + 1);
244 		qdf_mem_zero(reo_queue_ext_desc, 3 *
245 			sizeof(struct rx_reo_queue_ext));
246 		/* Initialize first reo queue extension descriptor */
247 		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
248 			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
249 		/* Fixed pattern in reserved bits for debugging */
250 		HAL_DESC_SET_FIELD(reo_queue_ext_desc,
251 			UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xADBEEF);
252 		/* Initialize second reo queue extension descriptor */
253 		reo_queue_ext_desc = (uint32_t *)
254 			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
255 		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
256 			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
257 		/* Fixed pattern in reserved bits for debugging */
258 		HAL_DESC_SET_FIELD(reo_queue_ext_desc,
259 			UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xBDBEEF);
260 		/* Initialize third reo queue extension descriptor */
261 		reo_queue_ext_desc = (uint32_t *)
262 			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
263 		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
264 			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
265 		/* Fixed pattern in reserved bits for debugging */
266 		HAL_DESC_SET_FIELD(reo_queue_ext_desc,
267 			UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xCDBEEF);
268 	}
269 #endif
270 }
271 qdf_export_symbol(hal_reo_qdesc_setup);
272 
273 /**
274  * hal_get_ba_aging_timeout - Get BA Aging timeout
275  *
276  * @hal_soc: Opaque HAL SOC handle
277  * @ac: Access category
278  * @value: window size to get
279  */
280 void hal_get_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
281 			      uint32_t *value)
282 {
283 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
284 
285 	switch (ac) {
286 	case WME_AC_BE:
287 		*value = HAL_REG_READ(soc,
288 				      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
289 				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
290 		break;
291 	case WME_AC_BK:
292 		*value = HAL_REG_READ(soc,
293 				      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
294 				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
295 		break;
296 	case WME_AC_VI:
297 		*value = HAL_REG_READ(soc,
298 				      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
299 				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
300 		break;
301 	case WME_AC_VO:
302 		*value = HAL_REG_READ(soc,
303 				      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
304 				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
305 		break;
306 	default:
307 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
308 			  "Invalid AC: %d\n", ac);
309 	}
310 }
311 
312 qdf_export_symbol(hal_get_ba_aging_timeout);
313 
314 /**
315  * hal_set_ba_aging_timeout - Set BA Aging timeout
316  *
317  * @hal_soc: Opaque HAL SOC handle
318  * @ac: Access category
319  * ac: 0 - Background, 1 - Best Effort, 2 - Video, 3 - Voice
320  * @value: Input value to set
321  */
322 void hal_set_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
323 			      uint32_t value)
324 {
325 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
326 
327 	switch (ac) {
328 	case WME_AC_BE:
329 		HAL_REG_WRITE(soc,
330 			      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
331 			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
332 			      value * 1000);
333 		break;
334 	case WME_AC_BK:
335 		HAL_REG_WRITE(soc,
336 			      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
337 			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
338 			      value * 1000);
339 		break;
340 	case WME_AC_VI:
341 		HAL_REG_WRITE(soc,
342 			      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
343 			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
344 			      value * 1000);
345 		break;
346 	case WME_AC_VO:
347 		HAL_REG_WRITE(soc,
348 			      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
349 			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
350 			      value * 1000);
351 		break;
352 	default:
353 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
354 			  "Invalid AC: %d\n", ac);
355 	}
356 }
357 
358 qdf_export_symbol(hal_set_ba_aging_timeout);
359 
#define BLOCK_RES_MASK		0xF
/**
 * hal_find_one_bit() - index of the least significant set bit of @x
 * within BLOCK_RES_MASK
 * @x: blocking-resource bitmap
 *
 * Return: bit index (0..3), or 0xFF (uint8_t wrap of -1) when no bit
 * inside the mask is set
 */
static inline uint8_t hal_find_one_bit(uint8_t x)
{
	/* Two's-complement trick isolates the lowest set bit */
	uint8_t lsb = (x & (~x + 1)) & BLOCK_RES_MASK;
	uint8_t pos = 0;

	while (lsb) {
		lsb >>= 1;
		pos++;
	}

	return pos - 1;
}
371 
/**
 * hal_find_zero_bit() - index of the least significant clear bit of @x
 * within the 4-bit blocking-resource mask
 * @x: blocking-resource bitmap
 *
 * Return: bit index (0..3), or 0xFF (uint8_t wrap of -1) when all four
 * mask bits are already set
 */
static inline uint8_t hal_find_zero_bit(uint8_t x)
{
	/* ~x & (x + 1) isolates the lowest clear bit; 0xF == BLOCK_RES_MASK */
	uint8_t bit = (~x & (x + 1)) & 0xF;
	uint8_t pos = 0;

	while (bit) {
		bit >>= 1;
		pos++;
	}

	return pos - 1;
}
382 
/**
 * hal_reo_cmd_set_descr_addr - Program the 40-bit physical address fields
 * of a REO command descriptor
 * @reo_desc: REO command descriptor (pointing past the TLV header)
 * @type: REO command type, selects which HW field names to use
 * @paddr_lo: bits [31:0] of the queue/flush descriptor physical address
 * @paddr_hi: bits [39:32] of the queue/flush descriptor physical address
 */
inline void hal_reo_cmd_set_descr_addr(uint32_t *reo_desc,
				       enum hal_reo_cmd_type type,
				       uint32_t paddr_lo,
				       uint8_t paddr_hi)
{
	switch (type) {
	case CMD_GET_QUEUE_STATS:
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_1,
			RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2,
				    RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_1,
					FLUSH_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
					FLUSH_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_CACHE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_1,
					FLUSH_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
					FLUSH_ADDR_39_32, paddr_hi);
		break;
	case CMD_UPDATE_RX_REO_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_1,
					RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
					RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	default:
		/* CMD_UNBLOCK_CACHE / CMD_FLUSH_TIMEOUT_LIST carry no
		 * descriptor address
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Invalid REO command type", __func__);
		break;
	}
}
419 
/**
 * hal_reo_cmd_queue_stats - Post a REO GET_QUEUE_STATS command
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters (queue descriptor address, clear flag,
 *       need_status)
 *
 * Return: REO command number of the posted command, or -EBUSY when the
 * command ring has no free entry.
 */
inline int hal_reo_cmd_queue_stats(hal_ring_handle_t  hal_ring_hdl,
				   hal_soc_handle_t hal_soc_hdl,
				   struct hal_reo_cmd_params *cmd)

{
	uint32_t *reo_desc, val;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E,
			     sizeof(struct reo_get_queue_stats));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear the command body beyond the uniform REO command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_get_queue_stats) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_GET_QUEUE_STATS,
				   cmd->std.addr_lo,
				   cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2, CLEAR_STATS,
			      cmd->u.stats_params.clear);

	/* NOTE(review): hif_pm_runtime_get() == 0 is treated as "reference
	 * acquired": commit the command to HW now and drop the reference;
	 * otherwise reap the entry and flag the ring for a deferred flush —
	 * confirm against the HIF runtime-PM API.
	 */
	if (hif_pm_runtime_get(hal_soc->hif_handle,
			       RTPM_ID_HAL_REO_CMD) == 0) {
		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		hif_pm_runtime_put(hal_soc->hif_handle,
				   RTPM_ID_HAL_REO_CMD);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}

	/* Command number was written into the header by the SET_FIELD
	 * machinery; read it back for the caller
	 */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
qdf_export_symbol(hal_reo_cmd_queue_stats);
473 
/**
 * hal_reo_cmd_flush_queue - Post a REO FLUSH_QUEUE command
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters (queue descriptor address, optional blocking
 *       resource index, need_status)
 *
 * Return: REO command number of the posted command, or -EBUSY when the
 * command ring has no free entry.
 */
inline int hal_reo_cmd_flush_queue(hal_ring_handle_t hal_ring_hdl,
				   hal_soc_handle_t hal_soc_hdl,
				   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E,
			     sizeof(struct reo_flush_queue));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear the command body beyond the uniform REO command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_queue) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_QUEUE, cmd->std.addr_lo,
		cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
		BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH,
		cmd->u.fl_queue_params.block_use_after_flush);

	/* Blocking resource index only matters when the descriptor address
	 * stays blocked after the flush
	 */
	if (cmd->u.fl_queue_params.block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
			BLOCK_RESOURCE_INDEX, cmd->u.fl_queue_params.index);
	}

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
qdf_export_symbol(hal_reo_cmd_flush_queue);
521 
/**
 * hal_reo_cmd_flush_cache - Post a REO FLUSH_CACHE command
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters (cmd->u.fl_cache_params); rel_block_index is
 *       overwritten with 0 by this function
 *
 * Return: REO command number of the posted command, or -EBUSY when no
 * blocking resource or command ring entry is available.
 */
inline int hal_reo_cmd_flush_cache(hal_ring_handle_t hal_ring_hdl,
				   hal_soc_handle_t hal_soc_hdl,
				   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_flush_cache_params *cp;
	uint8_t index = 0;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	cp = &cmd->u.fl_cache_params;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);

	/* We need a cache block resource for this operation, and REO HW has
	 * only 4 such blocking resources. These resources are managed using
	 * reo_res_bitmap, and we return failure if none is available.
	 */
	if (cp->block_use_after_flush) {
		/* hal_find_zero_bit returns 0xFF when the 4-bit bitmap is
		 * full, which fails the index > 3 check below
		 */
		index = hal_find_zero_bit(hal_soc->reo_res_bitmap);
		if (index > 3) {
			qdf_print("%s, No blocking resource available!",
				  __func__);
			hal_srng_access_end(hal_soc, hal_ring_hdl);
			return -EBUSY;
		}
		/* NOTE(review): only the chosen index is remembered here;
		 * reo_res_bitmap is presumably updated elsewhere (e.g. on
		 * status processing) — confirm.
		 */
		hal_soc->index = index;
	}

	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		hal_srng_dump(hal_ring_handle_to_hal_srng(hal_ring_hdl));
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E,
			     sizeof(struct reo_flush_cache));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear the command body beyond the uniform REO command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_CACHE, cmd->std.addr_lo,
				   cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		FORWARD_ALL_MPDUS_IN_QUEUE, cp->fwd_mpdus_in_queue);

	/* set it to 0 for now */
	cp->rel_block_index = 0;
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		RELEASE_CACHE_BLOCK_INDEX, cp->rel_block_index);

	if (cp->block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			CACHE_BLOCK_RESOURCE_INDEX, index);
	}

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		FLUSH_WITHOUT_INVALIDATE, cp->flush_no_inval);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		BLOCK_CACHE_USAGE_AFTER_FLUSH, cp->block_use_after_flush);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, FLUSH_ENTIRE_CACHE,
		cp->flush_all);

	/* NOTE(review): hif_pm_runtime_get() == 0 is treated as "reference
	 * acquired": commit to HW now; otherwise reap and flag the ring for
	 * a deferred flush — confirm against the HIF runtime-PM API.
	 */
	if (hif_pm_runtime_get(hal_soc->hif_handle,
			       RTPM_ID_HAL_REO_CMD) == 0) {
		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		hif_pm_runtime_put(hal_soc->hif_handle,
				   RTPM_ID_HAL_REO_CMD);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
qdf_export_symbol(hal_reo_cmd_flush_cache);
611 
/**
 * hal_reo_cmd_unblock_cache - Post a REO UNBLOCK_CACHE command
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters (cmd->u.unblk_cache_params: unblock type and
 *       blocking resource index)
 *
 * Return: REO command number of the posted command, or -EBUSY when no
 * blocking resource is held or the command ring is full.
 */
inline int hal_reo_cmd_unblock_cache(hal_ring_handle_t hal_ring_hdl,
				     hal_soc_handle_t hal_soc_hdl,
				     struct hal_reo_cmd_params *cmd)

{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;
	uint8_t index = 0;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		/* hal_find_one_bit returns 0xFF when no resource bit is set
		 * in reo_res_bitmap, which fails the index > 3 check below
		 */
		index = hal_find_one_bit(hal_soc->reo_res_bitmap);
		if (index > 3) {
			hal_srng_access_end(hal_soc, hal_ring_hdl);
			qdf_print("%s: No blocking resource to unblock!",
				  __func__);
			return -EBUSY;
		}
	}

	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E,
			     sizeof(struct reo_unblock_cache));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear the command body beyond the uniform REO command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_unblock_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
		UNBLOCK_TYPE, cmd->u.unblk_cache_params.type);

	/* NOTE(review): the caller-supplied index is programmed here, not
	 * the locally computed one — the local lookup only validates that a
	 * resource is held. Confirm this is intended.
	 */
	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
			CACHE_BLOCK_RESOURCE_INDEX,
			cmd->u.unblk_cache_params.index);
	}

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
qdf_export_symbol(hal_reo_cmd_unblock_cache);
669 
/**
 * hal_reo_cmd_flush_timeout_list - Post a REO FLUSH_TIMEOUT_LIST command
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters (cmd->u.fl_tim_list_params: AC list and
 *       minimum release/forward counts)
 *
 * Return: REO command number of the posted command, or -EBUSY when the
 * command ring has no free entry.
 */
inline int hal_reo_cmd_flush_timeout_list(hal_ring_handle_t hal_ring_hdl,
					  hal_soc_handle_t hal_soc_hdl,
					  struct hal_reo_cmd_params *cmd)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E,
			     sizeof(struct reo_flush_timeout_list));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear the command body beyond the uniform REO command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_timeout_list) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	/* AC_TIMOUT_LIST spelling comes from the HW register headers */
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_1, AC_TIMOUT_LIST,
		cmd->u.fl_tim_list_params.ac_list);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
		MINIMUM_RELEASE_DESC_COUNT,
		cmd->u.fl_tim_list_params.min_rel_desc);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
		MINIMUM_FORWARD_BUF_COUNT,
		cmd->u.fl_tim_list_params.min_fwd_buf);

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
qdf_export_symbol(hal_reo_cmd_flush_timeout_list);
716 
717 inline int hal_reo_cmd_update_rx_queue(hal_ring_handle_t hal_ring_hdl,
718 				       hal_soc_handle_t hal_soc_hdl,
719 				       struct hal_reo_cmd_params *cmd)
720 {
721 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
722 	uint32_t *reo_desc, val;
723 	struct hal_reo_cmd_update_queue_params *p;
724 
725 	p = &cmd->u.upd_queue_params;
726 
727 	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
728 	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
729 	if (!reo_desc) {
730 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
731 			"%s: Out of cmd ring entries", __func__);
732 		hal_srng_access_end(hal_soc, hal_ring_hdl);
733 		return -EBUSY;
734 	}
735 
736 	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E,
737 			     sizeof(struct reo_update_rx_reo_queue));
738 
739 	/* Offsets of descriptor fields defined in HW headers start from
740 	 * the field after TLV header */
741 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
742 	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
743 		     sizeof(struct reo_update_rx_reo_queue) -
744 		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));
745 
746 	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
747 		REO_STATUS_REQUIRED, cmd->std.need_status);
748 
749 	hal_reo_cmd_set_descr_addr(reo_desc, CMD_UPDATE_RX_REO_QUEUE,
750 		cmd->std.addr_lo, cmd->std.addr_hi);
751 
752 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
753 		UPDATE_RECEIVE_QUEUE_NUMBER, p->update_rx_queue_num);
754 
755 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, UPDATE_VLD,
756 			      p->update_vld);
757 
758 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
759 		UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
760 		p->update_assoc_link_desc);
761 
762 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
763 		UPDATE_DISABLE_DUPLICATE_DETECTION,
764 		p->update_disable_dup_detect);
765 
766 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
767 		UPDATE_DISABLE_DUPLICATE_DETECTION,
768 		p->update_disable_dup_detect);
769 
770 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
771 		UPDATE_SOFT_REORDER_ENABLE,
772 		p->update_soft_reorder_enab);
773 
774 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
775 		UPDATE_AC, p->update_ac);
776 
777 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
778 		UPDATE_BAR, p->update_bar);
779 
780 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
781 		UPDATE_BAR, p->update_bar);
782 
783 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
784 		UPDATE_RTY, p->update_rty);
785 
786 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
787 		UPDATE_CHK_2K_MODE, p->update_chk_2k_mode);
788 
789 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
790 		UPDATE_OOR_MODE, p->update_oor_mode);
791 
792 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
793 		UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size);
794 
795 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
796 		UPDATE_PN_CHECK_NEEDED, p->update_pn_check_needed);
797 
798 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
799 		UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even);
800 
801 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
802 		UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven);
803 
804 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
805 		UPDATE_PN_HANDLING_ENABLE, p->update_pn_hand_enab);
806 
807 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
808 		UPDATE_PN_SIZE, p->update_pn_size);
809 
810 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
811 		UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu);
812 
813 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
814 		UPDATE_SVLD, p->update_svld);
815 
816 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
817 		UPDATE_SSN, p->update_ssn);
818 
819 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
820 		UPDATE_SEQ_2K_ERROR_DETECTED_FLAG,
821 		p->update_seq_2k_err_detect);
822 
823 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
824 		UPDATE_PN_VALID, p->update_pn_valid);
825 
826 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
827 		UPDATE_PN, p->update_pn);
828 
829 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
830 		RECEIVE_QUEUE_NUMBER, p->rx_queue_num);
831 
832 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
833 		VLD, p->vld);
834 
835 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
836 		ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
837 		p->assoc_link_desc);
838 
839 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
840 		DISABLE_DUPLICATE_DETECTION, p->disable_dup_detect);
841 
842 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
843 		SOFT_REORDER_ENABLE, p->soft_reorder_enab);
844 
845 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, AC, p->ac);
846 
847 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
848 		BAR, p->bar);
849 
850 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
851 		CHK_2K_MODE, p->chk_2k_mode);
852 
853 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
854 		RTY, p->rty);
855 
856 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
857 		OOR_MODE, p->oor_mode);
858 
859 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
860 		PN_CHECK_NEEDED, p->pn_check_needed);
861 
862 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
863 		PN_SHALL_BE_EVEN, p->pn_even);
864 
865 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
866 		PN_SHALL_BE_UNEVEN, p->pn_uneven);
867 
868 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
869 		PN_HANDLING_ENABLE, p->pn_hand_enab);
870 
871 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
872 		IGNORE_AMPDU_FLAG, p->ignore_ampdu);
873 
874 	if (p->ba_window_size < 1)
875 		p->ba_window_size = 1;
876 	/*
877 	 * WAR to get 2k exception in Non BA case.
878 	 * Setting window size to 2 to get 2k jump exception
879 	 * when we receive aggregates in Non BA case
880 	 */
881 	if (p->ba_window_size == 1)
882 		p->ba_window_size++;
883 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
884 		BA_WINDOW_SIZE, p->ba_window_size - 1);
885 
886 	if (p->pn_size == 24)
887 		p->pn_size = PN_SIZE_24;
888 	else if (p->pn_size == 48)
889 		p->pn_size = PN_SIZE_48;
890 	else if (p->pn_size == 128)
891 		p->pn_size = PN_SIZE_128;
892 
893 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
894 		PN_SIZE, p->pn_size);
895 
896 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
897 		SVLD, p->svld);
898 
899 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
900 		SSN, p->ssn);
901 
902 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
903 		SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect);
904 
905 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
906 		PN_ERROR_DETECTED_FLAG, p->pn_err_detect);
907 
908 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_5,
909 		PN_31_0, p->pn_31_0);
910 
911 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_6,
912 		PN_63_32, p->pn_63_32);
913 
914 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_7,
915 		PN_95_64, p->pn_95_64);
916 
917 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_8,
918 		PN_127_96, p->pn_127_96);
919 
920 	if (hif_pm_runtime_get(hal_soc->hif_handle,
921 			       RTPM_ID_HAL_REO_CMD) == 0) {
922 		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
923 		hif_pm_runtime_put(hal_soc->hif_handle,
924 				   RTPM_ID_HAL_REO_CMD);
925 	} else {
926 		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
927 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
928 		hal_srng_inc_flush_cnt(hal_ring_hdl);
929 	}
930 
931 	val = reo_desc[CMD_HEADER_DW_OFFSET];
932 	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
933 				     val);
934 }
935 qdf_export_symbol(hal_reo_cmd_update_rx_queue);
936 
937 inline void
938 hal_reo_queue_stats_status(uint32_t *reo_desc,
939 			   struct hal_reo_queue_status *st,
940 			   hal_soc_handle_t hal_soc_hdl)
941 {
942 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
943 	uint32_t val;
944 
945 	/* Offsets of descriptor fields defined in HW headers start
946 	 * from the field after TLV header */
947 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
948 
949 	/* header */
950 	hal_reo_status_get_header(reo_desc, HAL_REO_QUEUE_STATS_STATUS_TLV,
951 					&(st->header), hal_soc);
952 
953 	/* SSN */
954 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2, SSN)];
955 	st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2, SSN, val);
956 
957 	/* current index */
958 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2,
959 					 CURRENT_INDEX)];
960 	st->curr_idx =
961 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2,
962 			      CURRENT_INDEX, val);
963 
964 	/* PN bits */
965 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_3,
966 					 PN_31_0)];
967 	st->pn_31_0 =
968 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_3,
969 			      PN_31_0, val);
970 
971 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_4,
972 					 PN_63_32)];
973 	st->pn_63_32 =
974 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_4,
975 			      PN_63_32, val);
976 
977 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_5,
978 					 PN_95_64)];
979 	st->pn_95_64 =
980 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_5,
981 			      PN_95_64, val);
982 
983 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_6,
984 					 PN_127_96)];
985 	st->pn_127_96 =
986 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_6,
987 			      PN_127_96, val);
988 
989 	/* timestamps */
990 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_7,
991 					 LAST_RX_ENQUEUE_TIMESTAMP)];
992 	st->last_rx_enq_tstamp =
993 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_7,
994 			      LAST_RX_ENQUEUE_TIMESTAMP, val);
995 
996 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_8,
997 					 LAST_RX_DEQUEUE_TIMESTAMP)];
998 	st->last_rx_deq_tstamp =
999 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_8,
1000 			      LAST_RX_DEQUEUE_TIMESTAMP, val);
1001 
1002 	/* rx bitmap */
1003 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_9,
1004 					 RX_BITMAP_31_0)];
1005 	st->rx_bitmap_31_0 =
1006 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_9,
1007 			      RX_BITMAP_31_0, val);
1008 
1009 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_10,
1010 					 RX_BITMAP_63_32)];
1011 	st->rx_bitmap_63_32 =
1012 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_10,
1013 			      RX_BITMAP_63_32, val);
1014 
1015 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_11,
1016 					 RX_BITMAP_95_64)];
1017 	st->rx_bitmap_95_64 =
1018 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_11,
1019 			      RX_BITMAP_95_64, val);
1020 
1021 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_12,
1022 					 RX_BITMAP_127_96)];
1023 	st->rx_bitmap_127_96 =
1024 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_12,
1025 			      RX_BITMAP_127_96, val);
1026 
1027 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_13,
1028 					 RX_BITMAP_159_128)];
1029 	st->rx_bitmap_159_128 =
1030 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_13,
1031 			      RX_BITMAP_159_128, val);
1032 
1033 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_14,
1034 					 RX_BITMAP_191_160)];
1035 	st->rx_bitmap_191_160 =
1036 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_14,
1037 			      RX_BITMAP_191_160, val);
1038 
1039 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_15,
1040 					 RX_BITMAP_223_192)];
1041 	st->rx_bitmap_223_192 =
1042 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_15,
1043 			      RX_BITMAP_223_192, val);
1044 
1045 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_16,
1046 					 RX_BITMAP_255_224)];
1047 	st->rx_bitmap_255_224 =
1048 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_16,
1049 			      RX_BITMAP_255_224, val);
1050 
1051 	/* various counts */
1052 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
1053 					 CURRENT_MPDU_COUNT)];
1054 	st->curr_mpdu_cnt =
1055 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
1056 			      CURRENT_MPDU_COUNT, val);
1057 
1058 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
1059 					 CURRENT_MSDU_COUNT)];
1060 	st->curr_msdu_cnt =
1061 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
1062 			      CURRENT_MSDU_COUNT, val);
1063 
1064 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
1065 					 TIMEOUT_COUNT)];
1066 	st->fwd_timeout_cnt =
1067 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
1068 			      TIMEOUT_COUNT, val);
1069 
1070 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
1071 					 FORWARD_DUE_TO_BAR_COUNT)];
1072 	st->fwd_bar_cnt =
1073 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
1074 			      FORWARD_DUE_TO_BAR_COUNT, val);
1075 
1076 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
1077 					 DUPLICATE_COUNT)];
1078 	st->dup_cnt =
1079 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
1080 			      DUPLICATE_COUNT, val);
1081 
1082 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
1083 					 FRAMES_IN_ORDER_COUNT)];
1084 	st->frms_in_order_cnt =
1085 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
1086 			      FRAMES_IN_ORDER_COUNT, val);
1087 
1088 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
1089 					 BAR_RECEIVED_COUNT)];
1090 	st->bar_rcvd_cnt =
1091 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
1092 			      BAR_RECEIVED_COUNT, val);
1093 
1094 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_20,
1095 					 MPDU_FRAMES_PROCESSED_COUNT)];
1096 	st->mpdu_frms_cnt =
1097 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_20,
1098 			      MPDU_FRAMES_PROCESSED_COUNT, val);
1099 
1100 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_21,
1101 					 MSDU_FRAMES_PROCESSED_COUNT)];
1102 	st->msdu_frms_cnt =
1103 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_21,
1104 			      MSDU_FRAMES_PROCESSED_COUNT, val);
1105 
1106 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_22,
1107 					 TOTAL_PROCESSED_BYTE_COUNT)];
1108 	st->total_cnt =
1109 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_22,
1110 			      TOTAL_PROCESSED_BYTE_COUNT, val);
1111 
1112 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
1113 					 LATE_RECEIVE_MPDU_COUNT)];
1114 	st->late_recv_mpdu_cnt =
1115 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
1116 			      LATE_RECEIVE_MPDU_COUNT, val);
1117 
1118 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
1119 					 WINDOW_JUMP_2K)];
1120 	st->win_jump_2k =
1121 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
1122 			      WINDOW_JUMP_2K, val);
1123 
1124 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
1125 					 HOLE_COUNT)];
1126 	st->hole_cnt =
1127 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
1128 			      HOLE_COUNT, val);
1129 }
1130 qdf_export_symbol(hal_reo_queue_stats_status);
1131 
1132 inline void
1133 hal_reo_flush_queue_status(uint32_t *reo_desc,
1134 			   struct hal_reo_flush_queue_status *st,
1135 			   hal_soc_handle_t hal_soc_hdl)
1136 {
1137 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1138 	uint32_t val;
1139 
1140 	/* Offsets of descriptor fields defined in HW headers start
1141 	 * from the field after TLV header */
1142 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1143 
1144 	/* header */
1145 	hal_reo_status_get_header(reo_desc, HAL_REO_FLUSH_QUEUE_STATUS_TLV,
1146 					&(st->header), hal_soc);
1147 
1148 	/* error bit */
1149 	val = reo_desc[HAL_OFFSET(REO_FLUSH_QUEUE_STATUS_2,
1150 					 ERROR_DETECTED)];
1151 	st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
1152 				  val);
1153 }
1154 qdf_export_symbol(hal_reo_flush_queue_status);
1155 
1156 inline void
1157 hal_reo_flush_cache_status(uint32_t *reo_desc,
1158 			   struct hal_reo_flush_cache_status *st,
1159 			   hal_soc_handle_t hal_soc_hdl)
1160 {
1161 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1162 	uint32_t val;
1163 
1164 	/* Offsets of descriptor fields defined in HW headers start
1165 	 * from the field after TLV header */
1166 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1167 
1168 	/* header */
1169 	hal_reo_status_get_header(reo_desc, HAL_REO_FLUSH_CACHE_STATUS_TLV,
1170 					&(st->header), hal_soc);
1171 
1172 	/* error bit */
1173 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1174 					 ERROR_DETECTED)];
1175 	st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
1176 				  val);
1177 
1178 	/* block error */
1179 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1180 					 BLOCK_ERROR_DETAILS)];
1181 	st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1182 					BLOCK_ERROR_DETAILS,
1183 					val);
1184 	if (!st->block_error)
1185 		qdf_set_bit(hal_soc->index,
1186 			    (unsigned long *)&hal_soc->reo_res_bitmap);
1187 
1188 	/* cache flush status */
1189 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1190 					 CACHE_CONTROLLER_FLUSH_STATUS_HIT)];
1191 	st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1192 					CACHE_CONTROLLER_FLUSH_STATUS_HIT,
1193 					val);
1194 
1195 	/* cache flush descriptor type */
1196 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1197 				  CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)];
1198 	st->cache_flush_status_desc_type =
1199 		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1200 			      CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE,
1201 			      val);
1202 
1203 	/* cache flush count */
1204 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1205 				  CACHE_CONTROLLER_FLUSH_COUNT)];
1206 	st->cache_flush_cnt =
1207 		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1208 			      CACHE_CONTROLLER_FLUSH_COUNT,
1209 			      val);
1210 
1211 }
1212 qdf_export_symbol(hal_reo_flush_cache_status);
1213 
1214 inline void hal_reo_unblock_cache_status(uint32_t *reo_desc,
1215 					 hal_soc_handle_t hal_soc_hdl,
1216 					 struct hal_reo_unblk_cache_status *st)
1217 {
1218 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1219 	uint32_t val;
1220 
1221 	/* Offsets of descriptor fields defined in HW headers start
1222 	 * from the field after TLV header */
1223 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1224 
1225 	/* header */
1226 	hal_reo_status_get_header(reo_desc, HAL_REO_UNBLK_CACHE_STATUS_TLV,
1227 					&st->header, hal_soc);
1228 
1229 	/* error bit */
1230 	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
1231 				  ERROR_DETECTED)];
1232 	st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
1233 				  ERROR_DETECTED,
1234 				  val);
1235 
1236 	/* unblock type */
1237 	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
1238 				  UNBLOCK_TYPE)];
1239 	st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
1240 					 UNBLOCK_TYPE,
1241 					 val);
1242 
1243 	if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX))
1244 		qdf_clear_bit(hal_soc->index,
1245 			      (unsigned long *)&hal_soc->reo_res_bitmap);
1246 }
1247 qdf_export_symbol(hal_reo_unblock_cache_status);
1248 
1249 inline void hal_reo_flush_timeout_list_status(
1250 			 uint32_t *reo_desc,
1251 			 struct hal_reo_flush_timeout_list_status *st,
1252 			 hal_soc_handle_t hal_soc_hdl)
1253 
1254 {
1255 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1256 	uint32_t val;
1257 
1258 	/* Offsets of descriptor fields defined in HW headers start
1259 	 * from the field after TLV header */
1260 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1261 
1262 	/* header */
1263 	hal_reo_status_get_header(reo_desc, HAL_REO_TIMOUT_LIST_STATUS_TLV,
1264 					&(st->header), hal_soc);
1265 
1266 	/* error bit */
1267 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1268 					 ERROR_DETECTED)];
1269 	st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1270 				  ERROR_DETECTED,
1271 				  val);
1272 
1273 	/* list empty */
1274 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1275 					 TIMOUT_LIST_EMPTY)];
1276 	st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1277 					TIMOUT_LIST_EMPTY,
1278 					val);
1279 
1280 	/* release descriptor count */
1281 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1282 					 RELEASE_DESC_COUNT)];
1283 	st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1284 				       RELEASE_DESC_COUNT,
1285 				       val);
1286 
1287 	/* forward buf count */
1288 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1289 					 FORWARD_BUF_COUNT)];
1290 	st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1291 				       FORWARD_BUF_COUNT,
1292 				       val);
1293 }
1294 qdf_export_symbol(hal_reo_flush_timeout_list_status);
1295 
1296 inline void hal_reo_desc_thres_reached_status(
1297 			 uint32_t *reo_desc,
1298 			 struct hal_reo_desc_thres_reached_status *st,
1299 			 hal_soc_handle_t hal_soc_hdl)
1300 {
1301 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1302 	uint32_t val;
1303 
1304 	/* Offsets of descriptor fields defined in HW headers start
1305 	 * from the field after TLV header */
1306 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1307 
1308 	/* header */
1309 	hal_reo_status_get_header(reo_desc,
1310 			      HAL_REO_DESC_THRES_STATUS_TLV,
1311 			      &(st->header), hal_soc);
1312 
1313 	/* threshold index */
1314 	val = reo_desc[HAL_OFFSET_DW(
1315 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
1316 				 THRESHOLD_INDEX)];
1317 	st->thres_index = HAL_GET_FIELD(
1318 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
1319 				THRESHOLD_INDEX,
1320 				val);
1321 
1322 	/* link desc counters */
1323 	val = reo_desc[HAL_OFFSET_DW(
1324 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
1325 				 LINK_DESCRIPTOR_COUNTER0)];
1326 	st->link_desc_counter0 = HAL_GET_FIELD(
1327 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
1328 				LINK_DESCRIPTOR_COUNTER0,
1329 				val);
1330 
1331 	val = reo_desc[HAL_OFFSET_DW(
1332 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
1333 				 LINK_DESCRIPTOR_COUNTER1)];
1334 	st->link_desc_counter1 = HAL_GET_FIELD(
1335 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
1336 				LINK_DESCRIPTOR_COUNTER1,
1337 				val);
1338 
1339 	val = reo_desc[HAL_OFFSET_DW(
1340 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
1341 				 LINK_DESCRIPTOR_COUNTER2)];
1342 	st->link_desc_counter2 = HAL_GET_FIELD(
1343 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
1344 				LINK_DESCRIPTOR_COUNTER2,
1345 				val);
1346 
1347 	val = reo_desc[HAL_OFFSET_DW(
1348 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
1349 				 LINK_DESCRIPTOR_COUNTER_SUM)];
1350 	st->link_desc_counter_sum = HAL_GET_FIELD(
1351 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
1352 				LINK_DESCRIPTOR_COUNTER_SUM,
1353 				val);
1354 }
1355 qdf_export_symbol(hal_reo_desc_thres_reached_status);
1356 
1357 inline void
1358 hal_reo_rx_update_queue_status(uint32_t *reo_desc,
1359 			       struct hal_reo_update_rx_queue_status *st,
1360 			       hal_soc_handle_t hal_soc_hdl)
1361 {
1362 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1363 
1364 	/* Offsets of descriptor fields defined in HW headers start
1365 	 * from the field after TLV header */
1366 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1367 
1368 	/* header */
1369 	hal_reo_status_get_header(reo_desc,
1370 			      HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV,
1371 			      &(st->header), hal_soc);
1372 }
1373 qdf_export_symbol(hal_reo_rx_update_queue_status);
1374 
1375 /**
1376  * hal_reo_init_cmd_ring() - Initialize descriptors of REO command SRNG
1377  * with command number
1378  * @hal_soc: Handle to HAL SoC structure
1379  * @hal_ring: Handle to HAL SRNG structure
1380  *
1381  * Return: none
1382  */
1383 inline void hal_reo_init_cmd_ring(hal_soc_handle_t hal_soc_hdl,
1384 				  hal_ring_handle_t hal_ring_hdl)
1385 {
1386 	int cmd_num;
1387 	uint32_t *desc_addr;
1388 	struct hal_srng_params srng_params;
1389 	uint32_t desc_size;
1390 	uint32_t num_desc;
1391 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
1392 
1393 	hal_get_srng_params(hal_soc_hdl, hal_ring_hdl, &srng_params);
1394 
1395 	desc_addr = (uint32_t *)(srng_params.ring_base_vaddr);
1396 	desc_addr += (sizeof(struct tlv_32_hdr) >> 2);
1397 	desc_size = hal_srng_get_entrysize(soc, REO_CMD) >> 2;
1398 	num_desc = srng_params.num_entries;
1399 	cmd_num = 1;
1400 	while (num_desc) {
1401 		/* Offsets of descriptor fields defined in HW headers start
1402 		 * from the field after TLV header */
1403 		HAL_DESC_SET_FIELD(desc_addr, UNIFORM_REO_CMD_HEADER_0,
1404 			REO_CMD_NUMBER, cmd_num);
1405 		desc_addr += desc_size;
1406 		num_desc--; cmd_num++;
1407 	}
1408 
1409 	soc->reo_res_bitmap = 0;
1410 }
1411 qdf_export_symbol(hal_reo_init_cmd_ring);
1412