xref: /wlan-dirver/qca-wifi-host-cmn/hal/wifi3.0/li/hal_li_reo.c (revision bc5590deaf3d694800c9bdbe7001a862612dd42b)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "qdf_module.h"
20 #include "hal_li_hw_headers.h"
21 #include "hal_reo.h"
22 #include "hal_li_reo.h"
23 #include "hal_li_api.h"
24 
/**
 * hal_get_reo_reg_base_offset_li() - Get REO register base offset
 *
 * Return: base offset of the REO register block for Lithium targets
 */
uint32_t hal_get_reo_reg_base_offset_li(void)
{
	return SEQ_WCSS_UMAC_REO_REG_OFFSET;
}
29 
/**
 * hal_reo_qdesc_setup_li() - Setup HW REO queue descriptor
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @tid: TID
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
 * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
 *                  (not referenced in this setup path)
 * @pn_type: PN type (HAL_PN_NONE/HAL_PN_WPA/HAL_PN_WAPI_EVEN/
 *           HAL_PN_WAPI_UNEVEN)
 */
void hal_reo_qdesc_setup_li(hal_soc_handle_t hal_soc_hdl, int tid,
			    uint32_t ba_window_size,
			    uint32_t start_seq, void *hw_qdesc_vaddr,
			    qdf_dma_addr_t hw_qdesc_paddr,
			    int pn_type)
{
	uint32_t *reo_queue_desc = (uint32_t *)hw_qdesc_vaddr;
	uint32_t *reo_queue_ext_desc;
	uint32_t reg_val;
	uint32_t pn_enable;
	uint32_t pn_size = 0;

	/* Start from a clean descriptor; unset fields must read as zero */
	qdf_mem_zero(hw_qdesc_vaddr, sizeof(struct rx_reo_queue));

	hal_uniform_desc_hdr_setup(reo_queue_desc, HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_desc, UNIFORM_DESCRIPTOR_HEADER_0,
			   RESERVED_0A, 0xDDBEEF);

	/* This a just a SW meta data and will be copied to REO destination
	 * descriptors indicated by hardware.
	 * TODO: Setting TID in this field. See if we should set something else.
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_1,
			   RECEIVE_QUEUE_NUMBER, tid);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
			   VLD, 1);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
			   ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
			   HAL_RX_LINK_DESC_CNTR);

	/*
	 * Fields DISABLE_DUPLICATE_DETECTION and SOFT_REORDER_ENABLE will be 0
	 */

	/* Map TID to its WME access category for the AC field */
	reg_val = TID_TO_WME_AC(tid);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, AC, reg_val);

	if (ba_window_size < 1)
		ba_window_size = 1;

	/* WAR to get 2k exception in Non BA case.
	 * Setting window size to 2 to get 2k jump exception
	 * when we receive aggregates in Non BA case
	 */
	ba_window_size = hal_update_non_ba_win_size(tid, ba_window_size);

	/* Set RTY bit for non-BA case. Duplicate detection is currently not
	 * done by HW in non-BA case if RTY bit is not set.
	 * TODO: This is a temporary War and should be removed once HW fix is
	 * made to check and discard duplicates even if RTY bit is not set.
	 */
	if (ba_window_size == 1)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, RTY, 1);

	/* HW field is "window size - 1" */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, BA_WINDOW_SIZE,
			   ba_window_size - 1);

	/* Select PN check/size based on the security (PN) type */
	switch (pn_type) {
	case HAL_PN_WPA:
		pn_enable = 1;
		pn_size = PN_SIZE_48;
		break;
	case HAL_PN_WAPI_EVEN:
	case HAL_PN_WAPI_UNEVEN:
		pn_enable = 1;
		pn_size = PN_SIZE_128;
		break;
	default:
		pn_enable = 0;
		break;
	}

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_CHECK_NEEDED,
			   pn_enable);

	if (pn_type == HAL_PN_WAPI_EVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
				   PN_SHALL_BE_EVEN, 1);
	else if (pn_type == HAL_PN_WAPI_UNEVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
				   PN_SHALL_BE_UNEVEN, 1);

	/*
	 *  TODO: Need to check if PN handling in SW needs to be enabled
	 *  So far this is not a requirement
	 */

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_SIZE,
			   pn_size);

	/* TODO: Check if RX_REO_QUEUE_2_IGNORE_AMPDU_FLAG need to be set
	 * based on BA window size and/or AMPDU capabilities
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
			   IGNORE_AMPDU_FLAG, 1);

	/* Only program SSN if it fits the 12-bit sequence-number field */
	if (start_seq <= 0xfff)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SSN,
				   start_seq);

	/* TODO: SVLD should be set to 1 if a valid SSN is received in ADDBA,
	 * but REO is not delivering packets if we set it to 1. Need to enable
	 * this once the issue is resolved
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SVLD, 0);

	/* TODO: Check if we should set start PN for WAPI */

	/* TODO: HW queue descriptors are currently allocated for max BA
	 * window size for all QOS TIDs so that same descriptor can be used
	 * later when ADDBA request is recevied. This should be changed to
	 * allocate HW queue descriptors based on BA window size being
	 * negotiated (0 for non BA cases), and reallocate when BA window
	 * size changes and also send WMI message to FW to change the REO
	 * queue descriptor in Rx peer entry as part of dp_rx_tid_update.
	 */
	if (tid == HAL_NON_QOS_TID)
		return;

	/* Extension descriptors follow the base queue descriptor in memory;
	 * QoS TIDs get three of them for the larger reorder window.
	 */
	reo_queue_ext_desc = (uint32_t *)
		(((struct rx_reo_queue *)reo_queue_desc) + 1);
	qdf_mem_zero(reo_queue_ext_desc, 3 *
		sizeof(struct rx_reo_queue_ext));
	/* Initialize first reo queue extension descriptor */
	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
				   HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_EXT_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			   UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A,
			   0xADBEEF);
	/* Initialize second reo queue extension descriptor */
	reo_queue_ext_desc = (uint32_t *)
		(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
				   HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_EXT_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			   UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A,
			   0xBDBEEF);
	/* Initialize third reo queue extension descriptor */
	reo_queue_ext_desc = (uint32_t *)
		(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
				   HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_EXT_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			   UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A,
			   0xCDBEEF);
}
195 
196 qdf_export_symbol(hal_reo_qdesc_setup_li);
197 
198 /**
199  * hal_get_ba_aging_timeout_li - Get BA Aging timeout
200  *
201  * @hal_soc: Opaque HAL SOC handle
202  * @ac: Access category
203  * @value: window size to get
204  */
205 void hal_get_ba_aging_timeout_li(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
206 				 uint32_t *value)
207 {
208 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
209 
210 	switch (ac) {
211 	case WME_AC_BE:
212 		*value = HAL_REG_READ(soc,
213 				      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
214 				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
215 		break;
216 	case WME_AC_BK:
217 		*value = HAL_REG_READ(soc,
218 				      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
219 				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
220 		break;
221 	case WME_AC_VI:
222 		*value = HAL_REG_READ(soc,
223 				      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
224 				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
225 		break;
226 	case WME_AC_VO:
227 		*value = HAL_REG_READ(soc,
228 				      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
229 				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
230 		break;
231 	default:
232 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
233 			  "Invalid AC: %d\n", ac);
234 	}
235 }
236 qdf_export_symbol(hal_get_ba_aging_timeout_li);
237 
238 /**
239  * hal_set_ba_aging_timeout_li - Set BA Aging timeout
240  *
241  * @hal_soc: Opaque HAL SOC handle
242  * @ac: Access category
243  * ac: 0 - Background, 1 - Best Effort, 2 - Video, 3 - Voice
244  * @value: Input value to set
245  */
246 void hal_set_ba_aging_timeout_li(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
247 				 uint32_t value)
248 {
249 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
250 
251 	switch (ac) {
252 	case WME_AC_BE:
253 		HAL_REG_WRITE(soc,
254 			      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
255 			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
256 			      value * 1000);
257 		break;
258 	case WME_AC_BK:
259 		HAL_REG_WRITE(soc,
260 			      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
261 			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
262 			      value * 1000);
263 		break;
264 	case WME_AC_VI:
265 		HAL_REG_WRITE(soc,
266 			      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
267 			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
268 			      value * 1000);
269 		break;
270 	case WME_AC_VO:
271 		HAL_REG_WRITE(soc,
272 			      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
273 			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
274 			      value * 1000);
275 		break;
276 	default:
277 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
278 			  "Invalid AC: %d\n", ac);
279 	}
280 }
281 qdf_export_symbol(hal_set_ba_aging_timeout_li);
282 
/**
 * hal_reo_cmd_set_descr_addr_li() - Program the queue/flush descriptor
 * physical address into a REO command descriptor
 * @reo_desc: REO command descriptor (already advanced past the TLV header)
 * @type: REO command type, selects which field pair to program
 * @paddr_lo: lower 32 bits of the descriptor physical address
 * @paddr_hi: bits 39:32 of the descriptor physical address
 */
static inline void
hal_reo_cmd_set_descr_addr_li(uint32_t *reo_desc, enum hal_reo_cmd_type type,
			      uint32_t paddr_lo, uint8_t paddr_hi)
{
	switch (type) {
	case CMD_GET_QUEUE_STATS:
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_1,
				   RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2,
				   RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_1,
				   FLUSH_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
				   FLUSH_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_CACHE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_1,
				   FLUSH_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
				   FLUSH_ADDR_39_32, paddr_hi);
		break;
	case CMD_UPDATE_RX_REO_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_1,
				   RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
				   RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Invalid REO command type", __func__);
		break;
	}
}
318 
/**
 * hal_reo_cmd_queue_stats_li() - Post a GET_QUEUE_STATS command to the
 * REO command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters (queue descriptor address, clear-stats flag)
 *
 * Return: REO command number assigned by HW on success, -EBUSY if the
 * command ring has no free entry
 */
static inline int
hal_reo_cmd_queue_stats_li(hal_ring_handle_t  hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E,
			sizeof(struct reo_get_queue_stats));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Zero everything after the uniform command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_get_queue_stats) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr_li(reo_desc, CMD_GET_QUEUE_STATS,
				      cmd->std.addr_lo,
				      cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2, CLEAR_STATS,
			   cmd->u.stats_params.clear);

	/* If a runtime-PM reference is obtained (get returns 0), commit the
	 * descriptor to HW immediately; otherwise only reap the ring and
	 * flag it for a deferred flush once the bus is accessible again.
	 */
	if (hif_pm_runtime_get(hal_soc->hif_handle,
			       RTPM_ID_HAL_REO_CMD, false) == 0) {
		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		hif_pm_runtime_put(hal_soc->hif_handle,
				   RTPM_ID_HAL_REO_CMD);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}

	/* Command number is assigned in the header dword of the descriptor */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}
373 
/**
 * hal_reo_cmd_flush_queue_li() - Post a FLUSH_QUEUE command to the REO
 * command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters (queue descriptor address, block-resource info)
 *
 * Return: REO command number assigned by HW on success, -EBUSY if the
 * command ring has no free entry
 */
static inline int
hal_reo_cmd_flush_queue_li(hal_ring_handle_t hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E,
			sizeof(struct reo_flush_queue));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Zero everything after the uniform command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_queue) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr_li(reo_desc, CMD_FLUSH_QUEUE,
				      cmd->std.addr_lo, cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
			   BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH,
			   cmd->u.fl_queue_params.block_use_after_flush);

	/* The block resource index is only meaningful when the descriptor
	 * address is to be blocked after the flush
	 */
	if (cmd->u.fl_queue_params.block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
				   BLOCK_RESOURCE_INDEX,
				   cmd->u.fl_queue_params.index);
	}

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	/* Command number is assigned in the header dword of the descriptor */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}
424 
/**
 * hal_reo_cmd_flush_cache_li() - Post a FLUSH_CACHE command to the REO
 * command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters (flush address, blocking/invalidate options)
 *
 * When blocking after flush is requested, one of the (four) HW cache
 * block resources is reserved via hal_soc->reo_res_bitmap.
 *
 * Return: REO command number assigned by HW on success, -EBUSY if no
 * blocking resource or command ring entry is available
 */
static inline int
hal_reo_cmd_flush_cache_li(hal_ring_handle_t hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_flush_cache_params *cp;
	uint8_t index = 0;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	cp = &cmd->u.fl_cache_params;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);

	/* We need a cache block resource for this operation, and REO HW has
	 * only 4 such blocking resources. These resources are managed using
	 * reo_res_bitmap, and we return failure if none is available.
	 */
	if (cp->block_use_after_flush) {
		index = hal_find_zero_bit(hal_soc->reo_res_bitmap);
		if (index > 3) {
			qdf_print("No blocking resource available!");
			hal_srng_access_end(hal_soc, hal_ring_hdl);
			return -EBUSY;
		}
		hal_soc->index = index;
	}

	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		hal_srng_dump(hal_ring_handle_to_hal_srng(hal_ring_hdl));
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E,
			sizeof(struct reo_flush_cache));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Zero everything after the uniform command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr_li(reo_desc, CMD_FLUSH_CACHE,
				      cmd->std.addr_lo, cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   FORWARD_ALL_MPDUS_IN_QUEUE, cp->fwd_mpdus_in_queue);

	/* set it to 0 for now */
	cp->rel_block_index = 0;
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   RELEASE_CACHE_BLOCK_INDEX, cp->rel_block_index);

	if (cp->block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
				   CACHE_BLOCK_RESOURCE_INDEX, index);
	}

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   FLUSH_WITHOUT_INVALIDATE, cp->flush_no_inval);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   BLOCK_CACHE_USAGE_AFTER_FLUSH,
			   cp->block_use_after_flush);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, FLUSH_ENTIRE_CACHE,
			   cp->flush_entire_cache);

	/* If a runtime-PM reference is obtained (get returns 0), commit the
	 * descriptor to HW immediately; otherwise only reap the ring and
	 * flag it for a deferred flush once the bus is accessible again.
	 */
	if (hif_pm_runtime_get(hal_soc->hif_handle,
			       RTPM_ID_HAL_REO_CMD, false) == 0) {
		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		hif_pm_runtime_put(hal_soc->hif_handle,
				   RTPM_ID_HAL_REO_CMD);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}

	/* Command number is assigned in the header dword of the descriptor */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}
516 
/**
 * hal_reo_cmd_unblock_cache_li() - Post an UNBLOCK_CACHE command to the
 * REO command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters (unblock type, block resource index)
 *
 * Return: REO command number assigned by HW on success, -EBUSY if no
 * blocking resource is held or the command ring has no free entry
 */
static inline int
hal_reo_cmd_unblock_cache_li(hal_ring_handle_t hal_ring_hdl,
			     hal_soc_handle_t hal_soc_hdl,
			     struct hal_reo_cmd_params *cmd)

{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;
	uint8_t index = 0;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);

	/* Unblocking by resource index only makes sense if a blocking
	 * resource is currently in use (a set bit in reo_res_bitmap)
	 */
	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		index = hal_find_one_bit(hal_soc->reo_res_bitmap);
		if (index > 3) {
			hal_srng_access_end(hal_soc, hal_ring_hdl);
			qdf_print("No blocking resource to unblock!");
			return -EBUSY;
		}
	}

	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E,
			sizeof(struct reo_unblock_cache));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Zero everything after the uniform command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_unblock_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
			   UNBLOCK_TYPE, cmd->u.unblk_cache_params.type);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
				   CACHE_BLOCK_RESOURCE_INDEX,
				   cmd->u.unblk_cache_params.index);
	}

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	/* Command number is assigned in the header dword of the descriptor */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}
575 
/**
 * hal_reo_cmd_flush_timeout_list_li() - Post a FLUSH_TIMEOUT_LIST command
 * to the REO command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters (AC timeout list, release/forward thresholds)
 *
 * Return: REO command number assigned by HW on success, -EBUSY if the
 * command ring has no free entry
 */
static inline int
hal_reo_cmd_flush_timeout_list_li(hal_ring_handle_t hal_ring_hdl,
				  hal_soc_handle_t hal_soc_hdl,
				  struct hal_reo_cmd_params *cmd)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E,
			sizeof(struct reo_flush_timeout_list));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Zero everything after the uniform command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_timeout_list) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	/* AC_TIMOUT_LIST is the HW field name (typo is in the HW header) */
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_1, AC_TIMOUT_LIST,
			   cmd->u.fl_tim_list_params.ac_list);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
			   MINIMUM_RELEASE_DESC_COUNT,
			   cmd->u.fl_tim_list_params.min_rel_desc);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
			   MINIMUM_FORWARD_BUF_COUNT,
			   cmd->u.fl_tim_list_params.min_fwd_buf);

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	/* Command number is assigned in the header dword of the descriptor */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}
624 
625 static inline int
626 hal_reo_cmd_update_rx_queue_li(hal_ring_handle_t hal_ring_hdl,
627 			       hal_soc_handle_t hal_soc_hdl,
628 			       struct hal_reo_cmd_params *cmd)
629 {
630 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
631 	uint32_t *reo_desc, val;
632 	struct hal_reo_cmd_update_queue_params *p;
633 
634 	p = &cmd->u.upd_queue_params;
635 
636 	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
637 	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
638 	if (!reo_desc) {
639 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
640 			  "%s: Out of cmd ring entries", __func__);
641 		hal_srng_access_end(hal_soc, hal_ring_hdl);
642 		return -EBUSY;
643 	}
644 
645 	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E,
646 			sizeof(struct reo_update_rx_reo_queue));
647 
648 	/*
649 	 * Offsets of descriptor fields defined in HW headers start from
650 	 * the field after TLV header
651 	 */
652 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
653 	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
654 		     sizeof(struct reo_update_rx_reo_queue) -
655 		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));
656 
657 	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
658 			   REO_STATUS_REQUIRED, cmd->std.need_status);
659 
660 	hal_reo_cmd_set_descr_addr_li(reo_desc, CMD_UPDATE_RX_REO_QUEUE,
661 				      cmd->std.addr_lo, cmd->std.addr_hi);
662 
663 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
664 			   UPDATE_RECEIVE_QUEUE_NUMBER, p->update_rx_queue_num);
665 
666 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, UPDATE_VLD,
667 			   p->update_vld);
668 
669 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
670 			   UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
671 			   p->update_assoc_link_desc);
672 
673 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
674 			   UPDATE_DISABLE_DUPLICATE_DETECTION,
675 			   p->update_disable_dup_detect);
676 
677 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
678 			   UPDATE_DISABLE_DUPLICATE_DETECTION,
679 			   p->update_disable_dup_detect);
680 
681 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
682 			   UPDATE_SOFT_REORDER_ENABLE,
683 			   p->update_soft_reorder_enab);
684 
685 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
686 			   UPDATE_AC, p->update_ac);
687 
688 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
689 			   UPDATE_BAR, p->update_bar);
690 
691 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
692 			   UPDATE_BAR, p->update_bar);
693 
694 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
695 			   UPDATE_RTY, p->update_rty);
696 
697 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
698 			   UPDATE_CHK_2K_MODE, p->update_chk_2k_mode);
699 
700 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
701 			   UPDATE_OOR_MODE, p->update_oor_mode);
702 
703 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
704 			   UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size);
705 
706 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
707 			   UPDATE_PN_CHECK_NEEDED, p->update_pn_check_needed);
708 
709 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
710 			   UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even);
711 
712 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
713 			   UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven);
714 
715 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
716 			   UPDATE_PN_HANDLING_ENABLE, p->update_pn_hand_enab);
717 
718 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
719 			   UPDATE_PN_SIZE, p->update_pn_size);
720 
721 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
722 			   UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu);
723 
724 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
725 			   UPDATE_SVLD, p->update_svld);
726 
727 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
728 			   UPDATE_SSN, p->update_ssn);
729 
730 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
731 			   UPDATE_SEQ_2K_ERROR_DETECTED_FLAG,
732 			   p->update_seq_2k_err_detect);
733 
734 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
735 			   UPDATE_PN_VALID, p->update_pn_valid);
736 
737 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
738 			   UPDATE_PN, p->update_pn);
739 
740 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
741 			   RECEIVE_QUEUE_NUMBER, p->rx_queue_num);
742 
743 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
744 			   VLD, p->vld);
745 
746 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
747 			   ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
748 			   p->assoc_link_desc);
749 
750 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
751 			   DISABLE_DUPLICATE_DETECTION, p->disable_dup_detect);
752 
753 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
754 			   SOFT_REORDER_ENABLE, p->soft_reorder_enab);
755 
756 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, AC, p->ac);
757 
758 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
759 			   BAR, p->bar);
760 
761 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
762 			   CHK_2K_MODE, p->chk_2k_mode);
763 
764 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
765 			   RTY, p->rty);
766 
767 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
768 			   OOR_MODE, p->oor_mode);
769 
770 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
771 			   PN_CHECK_NEEDED, p->pn_check_needed);
772 
773 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
774 			   PN_SHALL_BE_EVEN, p->pn_even);
775 
776 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
777 			   PN_SHALL_BE_UNEVEN, p->pn_uneven);
778 
779 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
780 			   PN_HANDLING_ENABLE, p->pn_hand_enab);
781 
782 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
783 			   IGNORE_AMPDU_FLAG, p->ignore_ampdu);
784 
785 	if (p->ba_window_size < 1)
786 		p->ba_window_size = 1;
787 	/*
788 	 * WAR to get 2k exception in Non BA case.
789 	 * Setting window size to 2 to get 2k jump exception
790 	 * when we receive aggregates in Non BA case
791 	 */
792 	if (p->ba_window_size == 1)
793 		p->ba_window_size++;
794 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
795 			   BA_WINDOW_SIZE, p->ba_window_size - 1);
796 
797 	if (p->pn_size == 24)
798 		p->pn_size = PN_SIZE_24;
799 	else if (p->pn_size == 48)
800 		p->pn_size = PN_SIZE_48;
801 	else if (p->pn_size == 128)
802 		p->pn_size = PN_SIZE_128;
803 
804 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
805 			   PN_SIZE, p->pn_size);
806 
807 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
808 			   SVLD, p->svld);
809 
810 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
811 			   SSN, p->ssn);
812 
813 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
814 			   SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect);
815 
816 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
817 			   PN_ERROR_DETECTED_FLAG, p->pn_err_detect);
818 
819 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_5,
820 			   PN_31_0, p->pn_31_0);
821 
822 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_6,
823 			   PN_63_32, p->pn_63_32);
824 
825 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_7,
826 			   PN_95_64, p->pn_95_64);
827 
828 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_8,
829 			   PN_127_96, p->pn_127_96);
830 
831 	if (hif_pm_runtime_get(hal_soc->hif_handle,
832 			       RTPM_ID_HAL_REO_CMD, false) == 0) {
833 		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
834 		hif_pm_runtime_put(hal_soc->hif_handle,
835 				   RTPM_ID_HAL_REO_CMD);
836 	} else {
837 		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
838 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
839 		hal_srng_inc_flush_cnt(hal_ring_hdl);
840 	}
841 
842 	val = reo_desc[CMD_HEADER_DW_OFFSET];
843 	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
844 			     val);
845 }
846 
847 int hal_reo_send_cmd_li(hal_soc_handle_t hal_soc_hdl,
848 			hal_ring_handle_t  hal_ring_hdl,
849 			enum hal_reo_cmd_type cmd,
850 			void *params)
851 {
852 	struct hal_reo_cmd_params *cmd_params =
853 			(struct hal_reo_cmd_params *)params;
854 	int num = 0;
855 
856 	switch (cmd) {
857 	case CMD_GET_QUEUE_STATS:
858 		num = hal_reo_cmd_queue_stats_li(hal_ring_hdl,
859 						 hal_soc_hdl, cmd_params);
860 		break;
861 	case CMD_FLUSH_QUEUE:
862 		num = hal_reo_cmd_flush_queue_li(hal_ring_hdl,
863 						 hal_soc_hdl, cmd_params);
864 		break;
865 	case CMD_FLUSH_CACHE:
866 		num = hal_reo_cmd_flush_cache_li(hal_ring_hdl,
867 						 hal_soc_hdl, cmd_params);
868 		break;
869 	case CMD_UNBLOCK_CACHE:
870 		num = hal_reo_cmd_unblock_cache_li(hal_ring_hdl,
871 						   hal_soc_hdl, cmd_params);
872 		break;
873 	case CMD_FLUSH_TIMEOUT_LIST:
874 		num = hal_reo_cmd_flush_timeout_list_li(hal_ring_hdl,
875 							hal_soc_hdl,
876 							cmd_params);
877 		break;
878 	case CMD_UPDATE_RX_REO_QUEUE:
879 		num = hal_reo_cmd_update_rx_queue_li(hal_ring_hdl,
880 						     hal_soc_hdl, cmd_params);
881 		break;
882 	default:
883 		hal_err("Invalid REO command type: %d", cmd);
884 		return -EINVAL;
885 	};
886 
887 	return num;
888 }
889 
/**
 * hal_reo_queue_stats_status_li() - Parse a REO GET_QUEUE_STATS status TLV
 * @ring_desc: REO status ring descriptor (TLV header included)
 * @st_handle: Pointer to struct hal_reo_queue_status to be filled
 * @hal_soc_hdl: Opaque HAL SOC handle
 *
 * Mechanically extracts every field of the REO_GET_QUEUE_STATS_STATUS
 * descriptor (SSN, current index, PN, timestamps, 256-bit RX bitmap and
 * the various frame/byte counters) into the caller-provided status
 * structure.  All offsets use HAL_OFFSET_DW, i.e. dword indices into the
 * uint32_t descriptor array.
 */
void
hal_reo_queue_stats_status_li(hal_ring_desc_t ring_desc,
			      void *st_handle,
			      hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_queue_status *st =
		(struct hal_reo_queue_status *)st_handle;
	uint32_t *reo_desc = (uint32_t *)ring_desc;
	uint32_t val;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	hal_reo_status_get_header(ring_desc, HAL_REO_QUEUE_STATS_STATUS_TLV,
				  &(st->header), hal_soc);

	/* SSN */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2, SSN)];
	st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2, SSN, val);

	/* current index */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2,
					 CURRENT_INDEX)];
	st->curr_idx =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2,
			      CURRENT_INDEX, val);

	/* PN bits */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_3,
					 PN_31_0)];
	st->pn_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_3,
			      PN_31_0, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_4,
					 PN_63_32)];
	st->pn_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_4,
			      PN_63_32, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_5,
					 PN_95_64)];
	st->pn_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_5,
			      PN_95_64, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_6,
					 PN_127_96)];
	st->pn_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_6,
			      PN_127_96, val);

	/* timestamps */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_7,
					 LAST_RX_ENQUEUE_TIMESTAMP)];
	st->last_rx_enq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_7,
			      LAST_RX_ENQUEUE_TIMESTAMP, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_8,
					 LAST_RX_DEQUEUE_TIMESTAMP)];
	st->last_rx_deq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_8,
			      LAST_RX_DEQUEUE_TIMESTAMP, val);

	/* rx bitmap */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_9,
					 RX_BITMAP_31_0)];
	st->rx_bitmap_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_9,
			      RX_BITMAP_31_0, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_10,
					 RX_BITMAP_63_32)];
	st->rx_bitmap_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_10,
			      RX_BITMAP_63_32, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_11,
					 RX_BITMAP_95_64)];
	st->rx_bitmap_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_11,
			      RX_BITMAP_95_64, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_12,
					 RX_BITMAP_127_96)];
	st->rx_bitmap_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_12,
			      RX_BITMAP_127_96, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_13,
					 RX_BITMAP_159_128)];
	st->rx_bitmap_159_128 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_13,
			      RX_BITMAP_159_128, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_14,
					 RX_BITMAP_191_160)];
	st->rx_bitmap_191_160 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_14,
			      RX_BITMAP_191_160, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_15,
					 RX_BITMAP_223_192)];
	st->rx_bitmap_223_192 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_15,
			      RX_BITMAP_223_192, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_16,
					 RX_BITMAP_255_224)];
	st->rx_bitmap_255_224 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_16,
			      RX_BITMAP_255_224, val);

	/* various counts */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
					 CURRENT_MPDU_COUNT)];
	st->curr_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
			      CURRENT_MPDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
					 CURRENT_MSDU_COUNT)];
	st->curr_msdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
			      CURRENT_MSDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
					 TIMEOUT_COUNT)];
	st->fwd_timeout_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      TIMEOUT_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
					 FORWARD_DUE_TO_BAR_COUNT)];
	st->fwd_bar_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      FORWARD_DUE_TO_BAR_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
					 DUPLICATE_COUNT)];
	st->dup_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      DUPLICATE_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
					 FRAMES_IN_ORDER_COUNT)];
	st->frms_in_order_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
			      FRAMES_IN_ORDER_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
					 BAR_RECEIVED_COUNT)];
	st->bar_rcvd_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
			      BAR_RECEIVED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_20,
					 MPDU_FRAMES_PROCESSED_COUNT)];
	st->mpdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_20,
			      MPDU_FRAMES_PROCESSED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_21,
					 MSDU_FRAMES_PROCESSED_COUNT)];
	st->msdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_21,
			      MSDU_FRAMES_PROCESSED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_22,
					 TOTAL_PROCESSED_BYTE_COUNT)];
	st->total_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_22,
			      TOTAL_PROCESSED_BYTE_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
					 LATE_RECEIVE_MPDU_COUNT)];
	st->late_recv_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      LATE_RECEIVE_MPDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
					 WINDOW_JUMP_2K)];
	st->win_jump_2k =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      WINDOW_JUMP_2K, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
					 HOLE_COUNT)];
	st->hole_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      HOLE_COUNT, val);
}
1088 
1089 void
1090 hal_reo_flush_queue_status_li(hal_ring_desc_t ring_desc,
1091 			      void *st_handle,
1092 			      hal_soc_handle_t hal_soc_hdl)
1093 {
1094 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1095 	struct hal_reo_flush_queue_status *st =
1096 			(struct hal_reo_flush_queue_status *)st_handle;
1097 	uint32_t *reo_desc = (uint32_t *)ring_desc;
1098 	uint32_t val;
1099 
1100 	/*
1101 	 * Offsets of descriptor fields defined in HW headers start
1102 	 * from the field after TLV header
1103 	 */
1104 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1105 
1106 	/* header */
1107 	hal_reo_status_get_header(ring_desc, HAL_REO_FLUSH_QUEUE_STATUS_TLV,
1108 				  &(st->header), hal_soc);
1109 
1110 	/* error bit */
1111 	val = reo_desc[HAL_OFFSET(REO_FLUSH_QUEUE_STATUS_2,
1112 					 ERROR_DETECTED)];
1113 	st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
1114 				  val);
1115 }
1116 
1117 void
1118 hal_reo_flush_cache_status_li(hal_ring_desc_t ring_desc,
1119 			      void *st_handle,
1120 			      hal_soc_handle_t hal_soc_hdl)
1121 {
1122 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1123 	struct hal_reo_flush_cache_status *st =
1124 			(struct hal_reo_flush_cache_status *)st_handle;
1125 	uint32_t *reo_desc = (uint32_t *)ring_desc;
1126 	uint32_t val;
1127 
1128 	/*
1129 	 * Offsets of descriptor fields defined in HW headers start
1130 	 * from the field after TLV header
1131 	 */
1132 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1133 
1134 	/* header */
1135 	hal_reo_status_get_header(ring_desc, HAL_REO_FLUSH_CACHE_STATUS_TLV,
1136 				  &(st->header), hal_soc);
1137 
1138 	/* error bit */
1139 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1140 					 ERROR_DETECTED)];
1141 	st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
1142 				  val);
1143 
1144 	/* block error */
1145 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1146 					 BLOCK_ERROR_DETAILS)];
1147 	st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1148 					BLOCK_ERROR_DETAILS,
1149 					val);
1150 	if (!st->block_error)
1151 		qdf_set_bit(hal_soc->index,
1152 			    (unsigned long *)&hal_soc->reo_res_bitmap);
1153 
1154 	/* cache flush status */
1155 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1156 				     CACHE_CONTROLLER_FLUSH_STATUS_HIT)];
1157 	st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1158 					CACHE_CONTROLLER_FLUSH_STATUS_HIT,
1159 					val);
1160 
1161 	/* cache flush descriptor type */
1162 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1163 				  CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)];
1164 	st->cache_flush_status_desc_type =
1165 		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1166 			      CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE,
1167 			      val);
1168 
1169 	/* cache flush count */
1170 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1171 				  CACHE_CONTROLLER_FLUSH_COUNT)];
1172 	st->cache_flush_cnt =
1173 		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1174 			      CACHE_CONTROLLER_FLUSH_COUNT,
1175 			      val);
1176 }
1177 
1178 void
1179 hal_reo_unblock_cache_status_li(hal_ring_desc_t ring_desc,
1180 				hal_soc_handle_t hal_soc_hdl,
1181 				void *st_handle)
1182 {
1183 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1184 	struct hal_reo_unblk_cache_status *st =
1185 			(struct hal_reo_unblk_cache_status *)st_handle;
1186 	uint32_t *reo_desc = (uint32_t *)ring_desc;
1187 	uint32_t val;
1188 
1189 	/*
1190 	 * Offsets of descriptor fields defined in HW headers start
1191 	 * from the field after TLV header
1192 	 */
1193 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1194 
1195 	/* header */
1196 	hal_reo_status_get_header(ring_desc, HAL_REO_UNBLK_CACHE_STATUS_TLV,
1197 				  &st->header, hal_soc);
1198 
1199 	/* error bit */
1200 	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
1201 				  ERROR_DETECTED)];
1202 	st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
1203 				  ERROR_DETECTED,
1204 				  val);
1205 
1206 	/* unblock type */
1207 	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
1208 				  UNBLOCK_TYPE)];
1209 	st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
1210 					 UNBLOCK_TYPE,
1211 					 val);
1212 
1213 	if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX))
1214 		qdf_clear_bit(hal_soc->index,
1215 			      (unsigned long *)&hal_soc->reo_res_bitmap);
1216 }
1217 
1218 void hal_reo_flush_timeout_list_status_li(hal_ring_desc_t ring_desc,
1219 					  void *st_handle,
1220 					  hal_soc_handle_t hal_soc_hdl)
1221 {
1222 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1223 	struct hal_reo_flush_timeout_list_status *st =
1224 			(struct hal_reo_flush_timeout_list_status *)st_handle;
1225 	uint32_t *reo_desc = (uint32_t *)ring_desc;
1226 	uint32_t val;
1227 
1228 	/*
1229 	 * Offsets of descriptor fields defined in HW headers start
1230 	 * from the field after TLV header
1231 	 */
1232 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1233 
1234 	/* header */
1235 	hal_reo_status_get_header(ring_desc, HAL_REO_TIMOUT_LIST_STATUS_TLV,
1236 				  &(st->header), hal_soc);
1237 
1238 	/* error bit */
1239 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1240 					 ERROR_DETECTED)];
1241 	st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1242 				  ERROR_DETECTED,
1243 				  val);
1244 
1245 	/* list empty */
1246 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1247 					 TIMOUT_LIST_EMPTY)];
1248 	st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1249 				       TIMOUT_LIST_EMPTY,
1250 				       val);
1251 
1252 	/* release descriptor count */
1253 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1254 					 RELEASE_DESC_COUNT)];
1255 	st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1256 					 RELEASE_DESC_COUNT,
1257 					 val);
1258 
1259 	/* forward buf count */
1260 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1261 				     FORWARD_BUF_COUNT)];
1262 	st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1263 					FORWARD_BUF_COUNT,
1264 					val);
1265 }
1266 
1267 void hal_reo_desc_thres_reached_status_li(hal_ring_desc_t ring_desc,
1268 					  void *st_handle,
1269 					  hal_soc_handle_t hal_soc_hdl)
1270 {
1271 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1272 	struct hal_reo_desc_thres_reached_status *st =
1273 			(struct hal_reo_desc_thres_reached_status *)st_handle;
1274 	uint32_t *reo_desc = (uint32_t *)ring_desc;
1275 	uint32_t val;
1276 
1277 	/*
1278 	 * Offsets of descriptor fields defined in HW headers start
1279 	 * from the field after TLV header
1280 	 */
1281 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1282 
1283 	/* header */
1284 	hal_reo_status_get_header(ring_desc,
1285 				  HAL_REO_DESC_THRES_STATUS_TLV,
1286 				  &(st->header), hal_soc);
1287 
1288 	/* threshold index */
1289 	val = reo_desc[HAL_OFFSET_DW(
1290 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
1291 				 THRESHOLD_INDEX)];
1292 	st->thres_index = HAL_GET_FIELD(
1293 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
1294 				THRESHOLD_INDEX,
1295 				val);
1296 
1297 	/* link desc counters */
1298 	val = reo_desc[HAL_OFFSET_DW(
1299 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
1300 				 LINK_DESCRIPTOR_COUNTER0)];
1301 	st->link_desc_counter0 = HAL_GET_FIELD(
1302 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
1303 				LINK_DESCRIPTOR_COUNTER0,
1304 				val);
1305 
1306 	val = reo_desc[HAL_OFFSET_DW(
1307 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
1308 				 LINK_DESCRIPTOR_COUNTER1)];
1309 	st->link_desc_counter1 = HAL_GET_FIELD(
1310 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
1311 				LINK_DESCRIPTOR_COUNTER1,
1312 				val);
1313 
1314 	val = reo_desc[HAL_OFFSET_DW(
1315 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
1316 				 LINK_DESCRIPTOR_COUNTER2)];
1317 	st->link_desc_counter2 = HAL_GET_FIELD(
1318 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
1319 				LINK_DESCRIPTOR_COUNTER2,
1320 				val);
1321 
1322 	val = reo_desc[HAL_OFFSET_DW(
1323 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
1324 				 LINK_DESCRIPTOR_COUNTER_SUM)];
1325 	st->link_desc_counter_sum = HAL_GET_FIELD(
1326 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
1327 				LINK_DESCRIPTOR_COUNTER_SUM,
1328 				val);
1329 }
1330 
1331 void
1332 hal_reo_rx_update_queue_status_li(hal_ring_desc_t ring_desc,
1333 				  void *st_handle,
1334 				  hal_soc_handle_t hal_soc_hdl)
1335 {
1336 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1337 	struct hal_reo_update_rx_queue_status *st =
1338 			(struct hal_reo_update_rx_queue_status *)st_handle;
1339 	uint32_t *reo_desc = (uint32_t *)ring_desc;
1340 
1341 	/*
1342 	 * Offsets of descriptor fields defined in HW headers start
1343 	 * from the field after TLV header
1344 	 */
1345 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1346 
1347 	/* header */
1348 	hal_reo_status_get_header(ring_desc,
1349 				  HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV,
1350 				  &(st->header), hal_soc);
1351 }
1352 
/**
 * hal_get_tlv_hdr_size_li() - Size of the 32-bit TLV header
 *
 * Return: sizeof(struct tlv_32_hdr) in bytes
 */
uint8_t hal_get_tlv_hdr_size_li(void)
{
	return sizeof(struct tlv_32_hdr);
}
1357