xref: /wlan-dirver/qca-wifi-host-cmn/hal/wifi3.0/li/hal_li_reo.c (revision f49b3a17535861c81c96f561e6e9be8a33a99f15)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "qdf_module.h"
21 #include "hal_li_hw_headers.h"
22 #include "hal_reo.h"
23 #include "hal_li_reo.h"
24 #include "hal_li_api.h"
25 
/**
 * hal_get_reo_reg_base_offset_li() - Get the REO register block base offset
 *
 * Return: offset of the REO register block within the WCSS UMAC register
 *         address space for Lithium targets
 */
uint32_t hal_get_reo_reg_base_offset_li(void)
{
	return SEQ_WCSS_UMAC_REO_REG_OFFSET;
}
30 
/**
 * hal_reo_qdesc_setup_li() - Setup HW REO queue descriptor
 *
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @tid: TID
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
 * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
 * @pn_type: PN type (one of hal_pn_type)
 * @vdev_stats_id: vdev stats id
 */
void hal_reo_qdesc_setup_li(hal_soc_handle_t hal_soc_hdl, int tid,
			    uint32_t ba_window_size,
			    uint32_t start_seq, void *hw_qdesc_vaddr,
			    qdf_dma_addr_t hw_qdesc_paddr,
			    int pn_type, uint8_t vdev_stats_id)
{
	uint32_t *reo_queue_desc = (uint32_t *)hw_qdesc_vaddr;
	uint32_t *reo_queue_ext_desc;
	uint32_t reg_val;
	uint32_t pn_enable;
	uint32_t pn_size = 0;

	/* NOTE(review): hw_qdesc_paddr and vdev_stats_id are not referenced
	 * in this Lithium implementation; presumably kept for prototype
	 * compatibility with other target generations — confirm.
	 */
	qdf_mem_zero(hw_qdesc_vaddr, sizeof(struct rx_reo_queue));

	hal_uniform_desc_hdr_setup(reo_queue_desc, HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_desc, UNIFORM_DESCRIPTOR_HEADER_0,
			   RESERVED_0A, 0xDDBEEF);

	/* This a just a SW meta data and will be copied to REO destination
	 * descriptors indicated by hardware.
	 * TODO: Setting TID in this field. See if we should set something else.
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_1,
			   RECEIVE_QUEUE_NUMBER, tid);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
			   VLD, 1);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
			   ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
			   HAL_RX_LINK_DESC_CNTR);

	/*
	 * Fields DISABLE_DUPLICATE_DETECTION and SOFT_REORDER_ENABLE will be 0
	 */

	/* Derive the access category from the TID */
	reg_val = TID_TO_WME_AC(tid);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, AC, reg_val);

	if (ba_window_size < 1)
		ba_window_size = 1;

	/* WAR to get 2k exception in Non BA case.
	 * Setting window size to 2 to get 2k jump exception
	 * when we receive aggregates in Non BA case
	 */
	ba_window_size = hal_update_non_ba_win_size(tid, ba_window_size);

	/* Set RTY bit for non-BA case. Duplicate detection is currently not
	 * done by HW in non-BA case if RTY bit is not set.
	 * TODO: This is a temporary War and should be removed once HW fix is
	 * made to check and discard duplicates even if RTY bit is not set.
	 */
	if (ba_window_size == 1)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, RTY, 1);

	/* HW field holds (window size - 1) */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, BA_WINDOW_SIZE,
			   ba_window_size - 1);

	/* Map the security/PN type to the PN check enable and PN size */
	switch (pn_type) {
	case HAL_PN_WPA:
		pn_enable = 1;
		pn_size = PN_SIZE_48;
		break;
	case HAL_PN_WAPI_EVEN:
	case HAL_PN_WAPI_UNEVEN:
		pn_enable = 1;
		pn_size = PN_SIZE_128;
		break;
	default:
		pn_enable = 0;
		break;
	}

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_CHECK_NEEDED,
			   pn_enable);

	if (pn_type == HAL_PN_WAPI_EVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
				   PN_SHALL_BE_EVEN, 1);
	else if (pn_type == HAL_PN_WAPI_UNEVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
				   PN_SHALL_BE_UNEVEN, 1);

	/*
	 *  TODO: Need to check if PN handling in SW needs to be enabled
	 *  So far this is not a requirement
	 */

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_SIZE,
			   pn_size);

	/* TODO: Check if RX_REO_QUEUE_2_IGNORE_AMPDU_FLAG need to be set
	 * based on BA window size and/or AMPDU capabilities
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
			   IGNORE_AMPDU_FLAG, 1);

	/* SSN is a 12-bit field; skip programming out-of-range values */
	if (start_seq <= 0xfff)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SSN,
				   start_seq);

	/* TODO: SVLD should be set to 1 if a valid SSN is received in ADDBA,
	 * but REO is not delivering packets if we set it to 1. Need to enable
	 * this once the issue is resolved
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SVLD, 0);

	/* TODO: Check if we should set start PN for WAPI */

	/* TODO: HW queue descriptors are currently allocated for max BA
	 * window size for all QOS TIDs so that same descriptor can be used
	 * later when ADDBA request is recevied. This should be changed to
	 * allocate HW queue descriptors based on BA window size being
	 * negotiated (0 for non BA cases), and reallocate when BA window
	 * size changes and also send WMI message to FW to change the REO
	 * queue descriptor in Rx peer entry as part of dp_rx_tid_update.
	 */
	if (tid == HAL_NON_QOS_TID)
		return;

	/* QoS TIDs: initialize the three extension descriptors that follow
	 * the base rx_reo_queue descriptor in memory.
	 */
	reo_queue_ext_desc = (uint32_t *)
		(((struct rx_reo_queue *)reo_queue_desc) + 1);
	qdf_mem_zero(reo_queue_ext_desc, 3 *
		sizeof(struct rx_reo_queue_ext));
	/* Initialize first reo queue extension descriptor */
	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
				   HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_EXT_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			   UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A,
			   0xADBEEF);
	/* Initialize second reo queue extension descriptor */
	reo_queue_ext_desc = (uint32_t *)
		(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
				   HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_EXT_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			   UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A,
			   0xBDBEEF);
	/* Initialize third reo queue extension descriptor */
	reo_queue_ext_desc = (uint32_t *)
		(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
				   HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_EXT_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			   UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A,
				   0xCDBEEF);
}
196 
197 qdf_export_symbol(hal_reo_qdesc_setup_li);
198 
/**
 * hal_get_ba_aging_timeout_li() - Get BA Aging timeout
 *
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @ac: Access category
 * @value: Pointer filled with the aging timeout read back from HW
 */
206 void hal_get_ba_aging_timeout_li(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
207 				 uint32_t *value)
208 {
209 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
210 
211 	switch (ac) {
212 	case WME_AC_BE:
213 		*value = HAL_REG_READ(soc,
214 				      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
215 				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
216 		break;
217 	case WME_AC_BK:
218 		*value = HAL_REG_READ(soc,
219 				      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
220 				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
221 		break;
222 	case WME_AC_VI:
223 		*value = HAL_REG_READ(soc,
224 				      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
225 				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
226 		break;
227 	case WME_AC_VO:
228 		*value = HAL_REG_READ(soc,
229 				      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
230 				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
231 		break;
232 	default:
233 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
234 			  "Invalid AC: %d\n", ac);
235 	}
236 }
237 qdf_export_symbol(hal_get_ba_aging_timeout_li);
238 
/**
 * hal_set_ba_aging_timeout_li() - Set BA Aging timeout
 *
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @ac: Access category (0 - Background, 1 - Best Effort, 2 - Video,
 *      3 - Voice)
 * @value: Input value to set
 */
247 void hal_set_ba_aging_timeout_li(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
248 				 uint32_t value)
249 {
250 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
251 
252 	switch (ac) {
253 	case WME_AC_BE:
254 		HAL_REG_WRITE(soc,
255 			      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
256 			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
257 			      value * 1000);
258 		break;
259 	case WME_AC_BK:
260 		HAL_REG_WRITE(soc,
261 			      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
262 			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
263 			      value * 1000);
264 		break;
265 	case WME_AC_VI:
266 		HAL_REG_WRITE(soc,
267 			      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
268 			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
269 			      value * 1000);
270 		break;
271 	case WME_AC_VO:
272 		HAL_REG_WRITE(soc,
273 			      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
274 			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
275 			      value * 1000);
276 		break;
277 	default:
278 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
279 			  "Invalid AC: %d\n", ac);
280 	}
281 }
282 qdf_export_symbol(hal_set_ba_aging_timeout_li);
283 
/**
 * hal_reo_cmd_set_descr_addr_li() - Program the descriptor address fields
 * of a REO command
 * @reo_desc: pointer to the command descriptor body (callers pass a pointer
 *            already advanced past the TLV header)
 * @type: REO command type selecting which field pair to program
 * @paddr_lo: lower 32 bits of the target physical address
 * @paddr_hi: bits 39..32 of the target physical address
 */
static inline void
hal_reo_cmd_set_descr_addr_li(uint32_t *reo_desc, enum hal_reo_cmd_type type,
			      uint32_t paddr_lo, uint8_t paddr_hi)
{
	switch (type) {
	case CMD_GET_QUEUE_STATS:
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_1,
				   RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2,
				   RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_1,
				   FLUSH_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
				   FLUSH_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_CACHE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_1,
				   FLUSH_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
				   FLUSH_ADDR_39_32, paddr_hi);
		break;
	case CMD_UPDATE_RX_REO_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_1,
				   RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
				   RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Invalid REO command type", __func__);
		break;
	}
}
319 
/**
 * hal_reo_cmd_queue_stats_li() - Post a GET_QUEUE_STATS command to the REO
 * command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters; uses std.addr_lo/addr_hi (REO queue descriptor
 *       address), std.need_status and u.stats_params.clear
 *
 * Return: command number read back from the descriptor's command header,
 *         or -EBUSY if no command ring entry is available
 */
static inline int
hal_reo_cmd_queue_stats_li(hal_ring_handle_t  hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E,
			sizeof(struct reo_get_queue_stats));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear the command body only; the uniform command header dwords
	 * are preserved.
	 */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_get_queue_stats) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr_li(reo_desc, CMD_GET_QUEUE_STATS,
				      cmd->std.addr_lo,
				      cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2, CLEAR_STATS,
			   cmd->u.stats_params.clear);

	/* If a runtime-PM reference can be taken, complete the ring access
	 * normally; otherwise reap the entry and flag the ring for a
	 * deferred flush.
	 */
	if (hif_pm_runtime_get(hal_soc->hif_handle,
			       RTPM_ID_HAL_REO_CMD, false) == 0) {
		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		hif_pm_runtime_put(hal_soc->hif_handle,
				   RTPM_ID_HAL_REO_CMD);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}
374 
/**
 * hal_reo_cmd_flush_queue_li() - Post a FLUSH_QUEUE command to the REO
 * command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters; uses std.addr_lo/addr_hi (queue descriptor to
 *       flush), std.need_status and u.fl_queue_params
 *
 * NOTE(review): unlike the stats/flush-cache/update-queue variants, this
 * path ends ring access unconditionally without the runtime-PM handling —
 * confirm whether that is intentional.
 *
 * Return: command number read back from the descriptor's command header,
 *         or -EBUSY if no command ring entry is available
 */
static inline int
hal_reo_cmd_flush_queue_li(hal_ring_handle_t hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E,
			sizeof(struct reo_flush_queue));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear the command body only; the uniform command header dwords
	 * are preserved.
	 */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_queue) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr_li(reo_desc, CMD_FLUSH_QUEUE,
				      cmd->std.addr_lo, cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
			   BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH,
			   cmd->u.fl_queue_params.block_use_after_flush);

	/* Optionally keep the flushed descriptor blocked using one of the
	 * REO blocking resources.
	 */
	if (cmd->u.fl_queue_params.block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
				   BLOCK_RESOURCE_INDEX,
				   cmd->u.fl_queue_params.index);
	}

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}
425 
/**
 * hal_reo_cmd_flush_cache_li() - Post a FLUSH_CACHE command to the REO
 * command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters; uses std.addr_lo/addr_hi (address to flush),
 *       std.need_status and u.fl_cache_params
 *
 * Return: command number read back from the descriptor's command header,
 *         or -EBUSY if no ring entry or blocking resource is available
 */
static inline int
hal_reo_cmd_flush_cache_li(hal_ring_handle_t hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_flush_cache_params *cp;
	uint8_t index = 0;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	cp = &cmd->u.fl_cache_params;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);

	/* We need a cache block resource for this operation, and REO HW has
	 * only 4 such blocking resources. These resources are managed using
	 * reo_res_bitmap, and we return failure if none is available.
	 */
	if (cp->block_use_after_flush) {
		index = hal_find_zero_bit(hal_soc->reo_res_bitmap);
		if (index > 3) {
			qdf_print("No blocking resource available!");
			hal_srng_access_end(hal_soc, hal_ring_hdl);
			return -EBUSY;
		}
		/* Remember which blocking resource this command claimed */
		hal_soc->index = index;
	}

	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		hal_srng_dump(hal_ring_handle_to_hal_srng(hal_ring_hdl));
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E,
			sizeof(struct reo_flush_cache));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear the command body only; the uniform command header dwords
	 * are preserved.
	 */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr_li(reo_desc, CMD_FLUSH_CACHE,
				      cmd->std.addr_lo, cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   FORWARD_ALL_MPDUS_IN_QUEUE, cp->fwd_mpdus_in_queue);

	/* set it to 0 for now */
	cp->rel_block_index = 0;
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   RELEASE_CACHE_BLOCK_INDEX, cp->rel_block_index);

	if (cp->block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
				   CACHE_BLOCK_RESOURCE_INDEX, index);
	}

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   FLUSH_WITHOUT_INVALIDATE, cp->flush_no_inval);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   BLOCK_CACHE_USAGE_AFTER_FLUSH,
			   cp->block_use_after_flush);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, FLUSH_ENTIRE_CACHE,
			   cp->flush_entire_cache);

	/* If a runtime-PM reference can be taken, complete the ring access
	 * normally; otherwise reap the entry and flag the ring for a
	 * deferred flush.
	 */
	if (hif_pm_runtime_get(hal_soc->hif_handle,
			       RTPM_ID_HAL_REO_CMD, false) == 0) {
		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		hif_pm_runtime_put(hal_soc->hif_handle,
				   RTPM_ID_HAL_REO_CMD);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}
517 
/**
 * hal_reo_cmd_unblock_cache_li() - Post an UNBLOCK_CACHE command to the REO
 * command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters; uses std.need_status and u.unblk_cache_params
 *
 * Return: command number read back from the descriptor's command header,
 *         or -EBUSY if no ring entry or blocked resource is available
 */
static inline int
hal_reo_cmd_unblock_cache_li(hal_ring_handle_t hal_ring_hdl,
			     hal_soc_handle_t hal_soc_hdl,
			     struct hal_reo_cmd_params *cmd)

{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;
	uint8_t index = 0;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);

	/* Verify that some blocking resource is actually in use before
	 * issuing an index-based unblock.
	 * NOTE(review): the local 'index' found here is only used for this
	 * validation; the index programmed into the descriptor below comes
	 * from cmd->u.unblk_cache_params.index — confirm this is intended.
	 */
	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		index = hal_find_one_bit(hal_soc->reo_res_bitmap);
		if (index > 3) {
			hal_srng_access_end(hal_soc, hal_ring_hdl);
			qdf_print("No blocking resource to unblock!");
			return -EBUSY;
		}
	}

	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E,
			sizeof(struct reo_unblock_cache));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear the command body only; the uniform command header dwords
	 * are preserved.
	 */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_unblock_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
			   UNBLOCK_TYPE, cmd->u.unblk_cache_params.type);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
				   CACHE_BLOCK_RESOURCE_INDEX,
				   cmd->u.unblk_cache_params.index);
	}

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}
576 
/**
 * hal_reo_cmd_flush_timeout_list_li() - Post a FLUSH_TIMEOUT_LIST command
 * to the REO command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters; uses std.need_status and u.fl_tim_list_params
 *
 * Return: command number read back from the descriptor's command header,
 *         or -EBUSY if no command ring entry is available
 */
static inline int
hal_reo_cmd_flush_timeout_list_li(hal_ring_handle_t hal_ring_hdl,
				  hal_soc_handle_t hal_soc_hdl,
				  struct hal_reo_cmd_params *cmd)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E,
			sizeof(struct reo_flush_timeout_list));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear the command body only; the uniform command header dwords
	 * are preserved.
	 */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_timeout_list) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	/* AC_TIMOUT_LIST (sic) is the field name from the HW headers */
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_1, AC_TIMOUT_LIST,
			   cmd->u.fl_tim_list_params.ac_list);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
			   MINIMUM_RELEASE_DESC_COUNT,
			   cmd->u.fl_tim_list_params.min_rel_desc);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
			   MINIMUM_FORWARD_BUF_COUNT,
			   cmd->u.fl_tim_list_params.min_fwd_buf);

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}
625 
626 static inline int
627 hal_reo_cmd_update_rx_queue_li(hal_ring_handle_t hal_ring_hdl,
628 			       hal_soc_handle_t hal_soc_hdl,
629 			       struct hal_reo_cmd_params *cmd)
630 {
631 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
632 	uint32_t *reo_desc, val;
633 	struct hal_reo_cmd_update_queue_params *p;
634 
635 	p = &cmd->u.upd_queue_params;
636 
637 	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
638 	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
639 	if (!reo_desc) {
640 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
641 			  "%s: Out of cmd ring entries", __func__);
642 		hal_srng_access_end(hal_soc, hal_ring_hdl);
643 		return -EBUSY;
644 	}
645 
646 	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E,
647 			sizeof(struct reo_update_rx_reo_queue));
648 
649 	/*
650 	 * Offsets of descriptor fields defined in HW headers start from
651 	 * the field after TLV header
652 	 */
653 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
654 	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
655 		     sizeof(struct reo_update_rx_reo_queue) -
656 		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));
657 
658 	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
659 			   REO_STATUS_REQUIRED, cmd->std.need_status);
660 
661 	hal_reo_cmd_set_descr_addr_li(reo_desc, CMD_UPDATE_RX_REO_QUEUE,
662 				      cmd->std.addr_lo, cmd->std.addr_hi);
663 
664 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
665 			   UPDATE_RECEIVE_QUEUE_NUMBER, p->update_rx_queue_num);
666 
667 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, UPDATE_VLD,
668 			   p->update_vld);
669 
670 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
671 			   UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
672 			   p->update_assoc_link_desc);
673 
674 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
675 			   UPDATE_DISABLE_DUPLICATE_DETECTION,
676 			   p->update_disable_dup_detect);
677 
678 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
679 			   UPDATE_DISABLE_DUPLICATE_DETECTION,
680 			   p->update_disable_dup_detect);
681 
682 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
683 			   UPDATE_SOFT_REORDER_ENABLE,
684 			   p->update_soft_reorder_enab);
685 
686 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
687 			   UPDATE_AC, p->update_ac);
688 
689 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
690 			   UPDATE_BAR, p->update_bar);
691 
692 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
693 			   UPDATE_BAR, p->update_bar);
694 
695 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
696 			   UPDATE_RTY, p->update_rty);
697 
698 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
699 			   UPDATE_CHK_2K_MODE, p->update_chk_2k_mode);
700 
701 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
702 			   UPDATE_OOR_MODE, p->update_oor_mode);
703 
704 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
705 			   UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size);
706 
707 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
708 			   UPDATE_PN_CHECK_NEEDED, p->update_pn_check_needed);
709 
710 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
711 			   UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even);
712 
713 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
714 			   UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven);
715 
716 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
717 			   UPDATE_PN_HANDLING_ENABLE, p->update_pn_hand_enab);
718 
719 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
720 			   UPDATE_PN_SIZE, p->update_pn_size);
721 
722 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
723 			   UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu);
724 
725 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
726 			   UPDATE_SVLD, p->update_svld);
727 
728 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
729 			   UPDATE_SSN, p->update_ssn);
730 
731 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
732 			   UPDATE_SEQ_2K_ERROR_DETECTED_FLAG,
733 			   p->update_seq_2k_err_detect);
734 
735 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
736 			   UPDATE_PN_VALID, p->update_pn_valid);
737 
738 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
739 			   UPDATE_PN, p->update_pn);
740 
741 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
742 			   RECEIVE_QUEUE_NUMBER, p->rx_queue_num);
743 
744 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
745 			   VLD, p->vld);
746 
747 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
748 			   ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
749 			   p->assoc_link_desc);
750 
751 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
752 			   DISABLE_DUPLICATE_DETECTION, p->disable_dup_detect);
753 
754 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
755 			   SOFT_REORDER_ENABLE, p->soft_reorder_enab);
756 
757 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, AC, p->ac);
758 
759 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
760 			   BAR, p->bar);
761 
762 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
763 			   CHK_2K_MODE, p->chk_2k_mode);
764 
765 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
766 			   RTY, p->rty);
767 
768 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
769 			   OOR_MODE, p->oor_mode);
770 
771 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
772 			   PN_CHECK_NEEDED, p->pn_check_needed);
773 
774 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
775 			   PN_SHALL_BE_EVEN, p->pn_even);
776 
777 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
778 			   PN_SHALL_BE_UNEVEN, p->pn_uneven);
779 
780 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
781 			   PN_HANDLING_ENABLE, p->pn_hand_enab);
782 
783 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
784 			   IGNORE_AMPDU_FLAG, p->ignore_ampdu);
785 
786 	if (p->ba_window_size < 1)
787 		p->ba_window_size = 1;
788 	/*
789 	 * WAR to get 2k exception in Non BA case.
790 	 * Setting window size to 2 to get 2k jump exception
791 	 * when we receive aggregates in Non BA case
792 	 */
793 	if (p->ba_window_size == 1)
794 		p->ba_window_size++;
795 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
796 			   BA_WINDOW_SIZE, p->ba_window_size - 1);
797 
798 	if (p->pn_size == 24)
799 		p->pn_size = PN_SIZE_24;
800 	else if (p->pn_size == 48)
801 		p->pn_size = PN_SIZE_48;
802 	else if (p->pn_size == 128)
803 		p->pn_size = PN_SIZE_128;
804 
805 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
806 			   PN_SIZE, p->pn_size);
807 
808 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
809 			   SVLD, p->svld);
810 
811 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
812 			   SSN, p->ssn);
813 
814 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
815 			   SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect);
816 
817 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
818 			   PN_ERROR_DETECTED_FLAG, p->pn_err_detect);
819 
820 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_5,
821 			   PN_31_0, p->pn_31_0);
822 
823 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_6,
824 			   PN_63_32, p->pn_63_32);
825 
826 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_7,
827 			   PN_95_64, p->pn_95_64);
828 
829 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_8,
830 			   PN_127_96, p->pn_127_96);
831 
832 	if (hif_pm_runtime_get(hal_soc->hif_handle,
833 			       RTPM_ID_HAL_REO_CMD, false) == 0) {
834 		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
835 		hif_pm_runtime_put(hal_soc->hif_handle,
836 				   RTPM_ID_HAL_REO_CMD);
837 	} else {
838 		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
839 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
840 		hal_srng_inc_flush_cnt(hal_ring_hdl);
841 	}
842 
843 	val = reo_desc[CMD_HEADER_DW_OFFSET];
844 	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
845 			     val);
846 }
847 
848 int hal_reo_send_cmd_li(hal_soc_handle_t hal_soc_hdl,
849 			hal_ring_handle_t  hal_ring_hdl,
850 			enum hal_reo_cmd_type cmd,
851 			void *params)
852 {
853 	struct hal_reo_cmd_params *cmd_params =
854 			(struct hal_reo_cmd_params *)params;
855 	int num = 0;
856 
857 	switch (cmd) {
858 	case CMD_GET_QUEUE_STATS:
859 		num = hal_reo_cmd_queue_stats_li(hal_ring_hdl,
860 						 hal_soc_hdl, cmd_params);
861 		break;
862 	case CMD_FLUSH_QUEUE:
863 		num = hal_reo_cmd_flush_queue_li(hal_ring_hdl,
864 						 hal_soc_hdl, cmd_params);
865 		break;
866 	case CMD_FLUSH_CACHE:
867 		num = hal_reo_cmd_flush_cache_li(hal_ring_hdl,
868 						 hal_soc_hdl, cmd_params);
869 		break;
870 	case CMD_UNBLOCK_CACHE:
871 		num = hal_reo_cmd_unblock_cache_li(hal_ring_hdl,
872 						   hal_soc_hdl, cmd_params);
873 		break;
874 	case CMD_FLUSH_TIMEOUT_LIST:
875 		num = hal_reo_cmd_flush_timeout_list_li(hal_ring_hdl,
876 							hal_soc_hdl,
877 							cmd_params);
878 		break;
879 	case CMD_UPDATE_RX_REO_QUEUE:
880 		num = hal_reo_cmd_update_rx_queue_li(hal_ring_hdl,
881 						     hal_soc_hdl, cmd_params);
882 		break;
883 	default:
884 		hal_err("Invalid REO command type: %d", cmd);
885 		return -EINVAL;
886 	};
887 
888 	return num;
889 }
890 
/**
 * hal_reo_queue_stats_status_li() - Parse a GET_QUEUE_STATS status descriptor
 * @ring_desc: REO status ring descriptor (starts at the TLV header)
 * @st_handle: Opaque pointer to a struct hal_reo_queue_status to fill
 * @hal_soc_hdl: Opaque HAL SOC handle
 *
 * Extracts every field of the REO_GET_QUEUE_STATS_STATUS TLV — SSN,
 * current index, PN, enqueue/dequeue timestamps, the 256-bit rx bitmap
 * and all counters — into the caller-supplied status structure.
 *
 * Field offsets (HAL_OFFSET_DW) are dword indices relative to the word
 * that follows the TLV header, hence the initial reo_desc adjustment.
 */
void
hal_reo_queue_stats_status_li(hal_ring_desc_t ring_desc,
			      void *st_handle,
			      hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_queue_status *st =
		(struct hal_reo_queue_status *)st_handle;
	uint32_t *reo_desc = (uint32_t *)ring_desc;
	uint32_t val;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	hal_reo_status_get_header(ring_desc, HAL_REO_QUEUE_STATS_STATUS_TLV,
				  &(st->header), hal_soc);

	/* SSN */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2, SSN)];
	st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2, SSN, val);

	/* current index */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2,
					 CURRENT_INDEX)];
	st->curr_idx =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2,
			      CURRENT_INDEX, val);

	/* PN bits */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_3,
					 PN_31_0)];
	st->pn_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_3,
			      PN_31_0, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_4,
					 PN_63_32)];
	st->pn_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_4,
			      PN_63_32, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_5,
					 PN_95_64)];
	st->pn_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_5,
			      PN_95_64, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_6,
					 PN_127_96)];
	st->pn_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_6,
			      PN_127_96, val);

	/* timestamps */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_7,
					 LAST_RX_ENQUEUE_TIMESTAMP)];
	st->last_rx_enq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_7,
			      LAST_RX_ENQUEUE_TIMESTAMP, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_8,
					 LAST_RX_DEQUEUE_TIMESTAMP)];
	st->last_rx_deq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_8,
			      LAST_RX_DEQUEUE_TIMESTAMP, val);

	/* rx bitmap: 256 bits spread over eight consecutive status words */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_9,
					 RX_BITMAP_31_0)];
	st->rx_bitmap_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_9,
			      RX_BITMAP_31_0, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_10,
					 RX_BITMAP_63_32)];
	st->rx_bitmap_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_10,
			      RX_BITMAP_63_32, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_11,
					 RX_BITMAP_95_64)];
	st->rx_bitmap_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_11,
			      RX_BITMAP_95_64, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_12,
					 RX_BITMAP_127_96)];
	st->rx_bitmap_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_12,
			      RX_BITMAP_127_96, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_13,
					 RX_BITMAP_159_128)];
	st->rx_bitmap_159_128 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_13,
			      RX_BITMAP_159_128, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_14,
					 RX_BITMAP_191_160)];
	st->rx_bitmap_191_160 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_14,
			      RX_BITMAP_191_160, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_15,
					 RX_BITMAP_223_192)];
	st->rx_bitmap_223_192 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_15,
			      RX_BITMAP_223_192, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_16,
					 RX_BITMAP_255_224)];
	st->rx_bitmap_255_224 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_16,
			      RX_BITMAP_255_224, val);

	/* various counts */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
					 CURRENT_MPDU_COUNT)];
	st->curr_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
			      CURRENT_MPDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
					 CURRENT_MSDU_COUNT)];
	st->curr_msdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
			      CURRENT_MSDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
					 TIMEOUT_COUNT)];
	st->fwd_timeout_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      TIMEOUT_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
					 FORWARD_DUE_TO_BAR_COUNT)];
	st->fwd_bar_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      FORWARD_DUE_TO_BAR_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
					 DUPLICATE_COUNT)];
	st->dup_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      DUPLICATE_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
					 FRAMES_IN_ORDER_COUNT)];
	st->frms_in_order_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
			      FRAMES_IN_ORDER_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
					 BAR_RECEIVED_COUNT)];
	st->bar_rcvd_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
			      BAR_RECEIVED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_20,
					 MPDU_FRAMES_PROCESSED_COUNT)];
	st->mpdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_20,
			      MPDU_FRAMES_PROCESSED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_21,
					 MSDU_FRAMES_PROCESSED_COUNT)];
	st->msdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_21,
			      MSDU_FRAMES_PROCESSED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_22,
					 TOTAL_PROCESSED_BYTE_COUNT)];
	st->total_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_22,
			      TOTAL_PROCESSED_BYTE_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
					 LATE_RECEIVE_MPDU_COUNT)];
	st->late_recv_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      LATE_RECEIVE_MPDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
					 WINDOW_JUMP_2K)];
	st->win_jump_2k =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      WINDOW_JUMP_2K, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
					 HOLE_COUNT)];
	st->hole_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      HOLE_COUNT, val);
}
1089 
1090 void
1091 hal_reo_flush_queue_status_li(hal_ring_desc_t ring_desc,
1092 			      void *st_handle,
1093 			      hal_soc_handle_t hal_soc_hdl)
1094 {
1095 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1096 	struct hal_reo_flush_queue_status *st =
1097 			(struct hal_reo_flush_queue_status *)st_handle;
1098 	uint32_t *reo_desc = (uint32_t *)ring_desc;
1099 	uint32_t val;
1100 
1101 	/*
1102 	 * Offsets of descriptor fields defined in HW headers start
1103 	 * from the field after TLV header
1104 	 */
1105 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1106 
1107 	/* header */
1108 	hal_reo_status_get_header(ring_desc, HAL_REO_FLUSH_QUEUE_STATUS_TLV,
1109 				  &(st->header), hal_soc);
1110 
1111 	/* error bit */
1112 	val = reo_desc[HAL_OFFSET(REO_FLUSH_QUEUE_STATUS_2,
1113 					 ERROR_DETECTED)];
1114 	st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
1115 				  val);
1116 }
1117 
1118 void
1119 hal_reo_flush_cache_status_li(hal_ring_desc_t ring_desc,
1120 			      void *st_handle,
1121 			      hal_soc_handle_t hal_soc_hdl)
1122 {
1123 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1124 	struct hal_reo_flush_cache_status *st =
1125 			(struct hal_reo_flush_cache_status *)st_handle;
1126 	uint32_t *reo_desc = (uint32_t *)ring_desc;
1127 	uint32_t val;
1128 
1129 	/*
1130 	 * Offsets of descriptor fields defined in HW headers start
1131 	 * from the field after TLV header
1132 	 */
1133 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1134 
1135 	/* header */
1136 	hal_reo_status_get_header(ring_desc, HAL_REO_FLUSH_CACHE_STATUS_TLV,
1137 				  &(st->header), hal_soc);
1138 
1139 	/* error bit */
1140 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1141 					 ERROR_DETECTED)];
1142 	st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
1143 				  val);
1144 
1145 	/* block error */
1146 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1147 					 BLOCK_ERROR_DETAILS)];
1148 	st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1149 					BLOCK_ERROR_DETAILS,
1150 					val);
1151 	if (!st->block_error)
1152 		qdf_set_bit(hal_soc->index,
1153 			    (unsigned long *)&hal_soc->reo_res_bitmap);
1154 
1155 	/* cache flush status */
1156 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1157 				     CACHE_CONTROLLER_FLUSH_STATUS_HIT)];
1158 	st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1159 					CACHE_CONTROLLER_FLUSH_STATUS_HIT,
1160 					val);
1161 
1162 	/* cache flush descriptor type */
1163 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1164 				  CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)];
1165 	st->cache_flush_status_desc_type =
1166 		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1167 			      CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE,
1168 			      val);
1169 
1170 	/* cache flush count */
1171 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1172 				  CACHE_CONTROLLER_FLUSH_COUNT)];
1173 	st->cache_flush_cnt =
1174 		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1175 			      CACHE_CONTROLLER_FLUSH_COUNT,
1176 			      val);
1177 }
1178 
1179 void
1180 hal_reo_unblock_cache_status_li(hal_ring_desc_t ring_desc,
1181 				hal_soc_handle_t hal_soc_hdl,
1182 				void *st_handle)
1183 {
1184 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1185 	struct hal_reo_unblk_cache_status *st =
1186 			(struct hal_reo_unblk_cache_status *)st_handle;
1187 	uint32_t *reo_desc = (uint32_t *)ring_desc;
1188 	uint32_t val;
1189 
1190 	/*
1191 	 * Offsets of descriptor fields defined in HW headers start
1192 	 * from the field after TLV header
1193 	 */
1194 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1195 
1196 	/* header */
1197 	hal_reo_status_get_header(ring_desc, HAL_REO_UNBLK_CACHE_STATUS_TLV,
1198 				  &st->header, hal_soc);
1199 
1200 	/* error bit */
1201 	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
1202 				  ERROR_DETECTED)];
1203 	st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
1204 				  ERROR_DETECTED,
1205 				  val);
1206 
1207 	/* unblock type */
1208 	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
1209 				  UNBLOCK_TYPE)];
1210 	st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
1211 					 UNBLOCK_TYPE,
1212 					 val);
1213 
1214 	if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX))
1215 		qdf_clear_bit(hal_soc->index,
1216 			      (unsigned long *)&hal_soc->reo_res_bitmap);
1217 }
1218 
1219 void hal_reo_flush_timeout_list_status_li(hal_ring_desc_t ring_desc,
1220 					  void *st_handle,
1221 					  hal_soc_handle_t hal_soc_hdl)
1222 {
1223 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1224 	struct hal_reo_flush_timeout_list_status *st =
1225 			(struct hal_reo_flush_timeout_list_status *)st_handle;
1226 	uint32_t *reo_desc = (uint32_t *)ring_desc;
1227 	uint32_t val;
1228 
1229 	/*
1230 	 * Offsets of descriptor fields defined in HW headers start
1231 	 * from the field after TLV header
1232 	 */
1233 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1234 
1235 	/* header */
1236 	hal_reo_status_get_header(ring_desc, HAL_REO_TIMOUT_LIST_STATUS_TLV,
1237 				  &(st->header), hal_soc);
1238 
1239 	/* error bit */
1240 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1241 					 ERROR_DETECTED)];
1242 	st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1243 				  ERROR_DETECTED,
1244 				  val);
1245 
1246 	/* list empty */
1247 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1248 					 TIMOUT_LIST_EMPTY)];
1249 	st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1250 				       TIMOUT_LIST_EMPTY,
1251 				       val);
1252 
1253 	/* release descriptor count */
1254 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1255 					 RELEASE_DESC_COUNT)];
1256 	st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1257 					 RELEASE_DESC_COUNT,
1258 					 val);
1259 
1260 	/* forward buf count */
1261 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1262 				     FORWARD_BUF_COUNT)];
1263 	st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1264 					FORWARD_BUF_COUNT,
1265 					val);
1266 }
1267 
1268 void hal_reo_desc_thres_reached_status_li(hal_ring_desc_t ring_desc,
1269 					  void *st_handle,
1270 					  hal_soc_handle_t hal_soc_hdl)
1271 {
1272 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1273 	struct hal_reo_desc_thres_reached_status *st =
1274 			(struct hal_reo_desc_thres_reached_status *)st_handle;
1275 	uint32_t *reo_desc = (uint32_t *)ring_desc;
1276 	uint32_t val;
1277 
1278 	/*
1279 	 * Offsets of descriptor fields defined in HW headers start
1280 	 * from the field after TLV header
1281 	 */
1282 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1283 
1284 	/* header */
1285 	hal_reo_status_get_header(ring_desc,
1286 				  HAL_REO_DESC_THRES_STATUS_TLV,
1287 				  &(st->header), hal_soc);
1288 
1289 	/* threshold index */
1290 	val = reo_desc[HAL_OFFSET_DW(
1291 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
1292 				 THRESHOLD_INDEX)];
1293 	st->thres_index = HAL_GET_FIELD(
1294 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
1295 				THRESHOLD_INDEX,
1296 				val);
1297 
1298 	/* link desc counters */
1299 	val = reo_desc[HAL_OFFSET_DW(
1300 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
1301 				 LINK_DESCRIPTOR_COUNTER0)];
1302 	st->link_desc_counter0 = HAL_GET_FIELD(
1303 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
1304 				LINK_DESCRIPTOR_COUNTER0,
1305 				val);
1306 
1307 	val = reo_desc[HAL_OFFSET_DW(
1308 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
1309 				 LINK_DESCRIPTOR_COUNTER1)];
1310 	st->link_desc_counter1 = HAL_GET_FIELD(
1311 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
1312 				LINK_DESCRIPTOR_COUNTER1,
1313 				val);
1314 
1315 	val = reo_desc[HAL_OFFSET_DW(
1316 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
1317 				 LINK_DESCRIPTOR_COUNTER2)];
1318 	st->link_desc_counter2 = HAL_GET_FIELD(
1319 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
1320 				LINK_DESCRIPTOR_COUNTER2,
1321 				val);
1322 
1323 	val = reo_desc[HAL_OFFSET_DW(
1324 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
1325 				 LINK_DESCRIPTOR_COUNTER_SUM)];
1326 	st->link_desc_counter_sum = HAL_GET_FIELD(
1327 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
1328 				LINK_DESCRIPTOR_COUNTER_SUM,
1329 				val);
1330 }
1331 
1332 void
1333 hal_reo_rx_update_queue_status_li(hal_ring_desc_t ring_desc,
1334 				  void *st_handle,
1335 				  hal_soc_handle_t hal_soc_hdl)
1336 {
1337 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1338 	struct hal_reo_update_rx_queue_status *st =
1339 			(struct hal_reo_update_rx_queue_status *)st_handle;
1340 	uint32_t *reo_desc = (uint32_t *)ring_desc;
1341 
1342 	/*
1343 	 * Offsets of descriptor fields defined in HW headers start
1344 	 * from the field after TLV header
1345 	 */
1346 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1347 
1348 	/* header */
1349 	hal_reo_status_get_header(ring_desc,
1350 				  HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV,
1351 				  &(st->header), hal_soc);
1352 }
1353 
1354 uint8_t hal_get_tlv_hdr_size_li(void)
1355 {
1356 	return sizeof(struct tlv_32_hdr);
1357 }
1358 
1359 uint64_t hal_rx_get_qdesc_addr_li(uint8_t *dst_ring_desc, uint8_t *buf)
1360 {
1361 	return *(uint64_t *)dst_ring_desc +
1362 		REO_DESTINATION_RING_6_RX_REO_QUEUE_DESC_ADDR_31_0_OFFSET;
1363 }
1364