xref: /wlan-dirver/qca-wifi-host-cmn/hal/wifi3.0/li/hal_li_reo.c (revision d0c05845839e5f2ba5a8dcebe0cd3e4cd4e8dfcf)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "qdf_module.h"
21 #include "hal_li_hw_headers.h"
22 #include "hal_reo.h"
23 #include "hal_li_reo.h"
24 #include "hal_li_api.h"
25 
26 uint32_t hal_get_reo_reg_base_offset_li(void)
27 {
28 	return SEQ_WCSS_UMAC_REO_REG_OFFSET;
29 }
30 
/**
 * hal_reo_qdesc_setup_li() - Setup HW REO queue descriptor
 * @hal_soc_hdl: Opaque HAL SOC handle (not dereferenced by this function)
 * @tid: TID; also stored as SW metadata (RECEIVE_QUEUE_NUMBER) in the
 *	 descriptor. For HAL_NON_QOS_TID no extension descriptors are set up.
 * @ba_window_size: BlockAck window size (clamped to a minimum of 1)
 * @start_seq: Starting sequence number; programmed as SSN only when <= 0xfff
 * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
 * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
 *		    (unused by this function)
 * @pn_type: PN check type; HAL_PN_WPA and HAL_PN_WAPI_EVEN/UNEVEN enable
 *	     PN checking, any other value disables it
 * @vdev_stats_id: vdev stats id (unused in the Li implementation)
 */
void hal_reo_qdesc_setup_li(hal_soc_handle_t hal_soc_hdl, int tid,
			    uint32_t ba_window_size,
			    uint32_t start_seq, void *hw_qdesc_vaddr,
			    qdf_dma_addr_t hw_qdesc_paddr,
			    int pn_type, uint8_t vdev_stats_id)
{
	uint32_t *reo_queue_desc = (uint32_t *)hw_qdesc_vaddr;
	uint32_t *reo_queue_ext_desc;
	uint32_t reg_val;
	uint32_t pn_enable;
	uint32_t pn_size = 0;

	qdf_mem_zero(hw_qdesc_vaddr, sizeof(struct rx_reo_queue));

	hal_uniform_desc_hdr_setup(reo_queue_desc, HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_desc, UNIFORM_DESCRIPTOR_HEADER_0,
			   RESERVED_0A, 0xDDBEEF);

	/* This a just a SW meta data and will be copied to REO destination
	 * descriptors indicated by hardware.
	 * TODO: Setting TID in this field. See if we should set something else.
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_1,
			   RECEIVE_QUEUE_NUMBER, tid);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
			   VLD, 1);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
			   ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
			   HAL_RX_LINK_DESC_CNTR);

	/*
	 * Fields DISABLE_DUPLICATE_DETECTION and SOFT_REORDER_ENABLE will be 0
	 */

	/* Map TID to its WME access category for the AC field */
	reg_val = TID_TO_WME_AC(tid);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, AC, reg_val);

	if (ba_window_size < 1)
		ba_window_size = 1;

	/* WAR to get 2k exception in Non BA case.
	 * Setting window size to 2 to get 2k jump exception
	 * when we receive aggregates in Non BA case
	 */
	ba_window_size = hal_update_non_ba_win_size(tid, ba_window_size);

	/* Set RTY bit for non-BA case. Duplicate detection is currently not
	 * done by HW in non-BA case if RTY bit is not set.
	 * TODO: This is a temporary War and should be removed once HW fix is
	 * made to check and discard duplicates even if RTY bit is not set.
	 */
	if (ba_window_size == 1)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, RTY, 1);

	/* HW field holds (window size - 1) */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, BA_WINDOW_SIZE,
			   ba_window_size - 1);

	switch (pn_type) {
	case HAL_PN_WPA:
		pn_enable = 1;
		pn_size = PN_SIZE_48;
		break;
	case HAL_PN_WAPI_EVEN:
	case HAL_PN_WAPI_UNEVEN:
		pn_enable = 1;
		pn_size = PN_SIZE_128;
		break;
	default:
		pn_enable = 0;
		break;
	}

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_CHECK_NEEDED,
			   pn_enable);

	if (pn_type == HAL_PN_WAPI_EVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
				   PN_SHALL_BE_EVEN, 1);
	else if (pn_type == HAL_PN_WAPI_UNEVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
				   PN_SHALL_BE_UNEVEN, 1);

	/*
	 *  TODO: Need to check if PN handling in SW needs to be enabled
	 *  So far this is not a requirement
	 */

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_SIZE,
			   pn_size);

	/* TODO: Check if RX_REO_QUEUE_2_IGNORE_AMPDU_FLAG need to be set
	 * based on BA window size and/or AMPDU capabilities
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
			   IGNORE_AMPDU_FLAG, 1);

	/* SSN is a 12-bit field; skip values that would not fit */
	if (start_seq <= 0xfff)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SSN,
				   start_seq);

	/* TODO: SVLD should be set to 1 if a valid SSN is received in ADDBA,
	 * but REO is not delivering packets if we set it to 1. Need to enable
	 * this once the issue is resolved
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SVLD, 0);

	/* TODO: Check if we should set start PN for WAPI */

	/* TODO: HW queue descriptors are currently allocated for max BA
	 * window size for all QOS TIDs so that same descriptor can be used
	 * later when ADDBA request is recevied. This should be changed to
	 * allocate HW queue descriptors based on BA window size being
	 * negotiated (0 for non BA cases), and reallocate when BA window
	 * size changes and also send WMI message to FW to change the REO
	 * queue descriptor in Rx peer entry as part of dp_rx_tid_update.
	 */
	if (tid == HAL_NON_QOS_TID)
		return;

	/* Three extension descriptors follow the base queue descriptor */
	reo_queue_ext_desc = (uint32_t *)
		(((struct rx_reo_queue *)reo_queue_desc) + 1);
	qdf_mem_zero(reo_queue_ext_desc, 3 *
		sizeof(struct rx_reo_queue_ext));
	/* Initialize first reo queue extension descriptor */
	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
				   HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_EXT_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			   UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A,
			   0xADBEEF);
	/* Initialize second reo queue extension descriptor */
	reo_queue_ext_desc = (uint32_t *)
		(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
				   HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_EXT_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			   UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A,
			   0xBDBEEF);
	/* Initialize third reo queue extension descriptor */
	reo_queue_ext_desc = (uint32_t *)
		(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
				   HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_EXT_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			   UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A,
				   0xCDBEEF);
}

qdf_export_symbol(hal_reo_qdesc_setup_li);
198 
/**
 * hal_get_ba_aging_timeout_li() - Get BA Aging timeout
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @ac: WME Access category (WME_AC_BE/BK/VI/VO)
 * @value: [out] aging timeout for @ac, in milliseconds (register value
 *	   divided by 1000; presumably the HW threshold is held in
 *	   microseconds -- mirrors the multiply in the set routine)
 *
 * On an invalid @ac, *value is left unmodified and an error is logged.
 */
void hal_get_ba_aging_timeout_li(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
				 uint32_t *value)
{
	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;

	/* One aging threshold register per access category (IX_0..IX_3) */
	switch (ac) {
	case WME_AC_BE:
		*value = HAL_REG_READ(soc,
				      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
		break;
	case WME_AC_BK:
		*value = HAL_REG_READ(soc,
				      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
		break;
	case WME_AC_VI:
		*value = HAL_REG_READ(soc,
				      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
		break;
	case WME_AC_VO:
		*value = HAL_REG_READ(soc,
				      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid AC: %d\n", ac);
	}
}
qdf_export_symbol(hal_get_ba_aging_timeout_li);
238 
/**
 * hal_set_ba_aging_timeout_li() - Set BA Aging timeout
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @ac: WME Access category (WME_AC_BE/BK/VI/VO)
 * @value: aging timeout to program, in milliseconds (multiplied by 1000
 *	   before the register write; presumably the HW threshold is in
 *	   microseconds -- mirrors the divide in the get routine)
 *
 * On an invalid @ac, no register is written and an error is logged.
 */
void hal_set_ba_aging_timeout_li(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
				 uint32_t value)
{
	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;

	/* One aging threshold register per access category (IX_0..IX_3) */
	switch (ac) {
	case WME_AC_BE:
		HAL_REG_WRITE(soc,
			      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
			      value * 1000);
		break;
	case WME_AC_BK:
		HAL_REG_WRITE(soc,
			      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
			      value * 1000);
		break;
	case WME_AC_VI:
		HAL_REG_WRITE(soc,
			      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
			      value * 1000);
		break;
	case WME_AC_VO:
		HAL_REG_WRITE(soc,
			      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
			      value * 1000);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid AC: %d\n", ac);
	}
}
qdf_export_symbol(hal_set_ba_aging_timeout_li);
283 
/**
 * hal_reo_cmd_set_descr_addr_li() - Program the 40-bit descriptor address
 *	fields of a REO command
 * @reo_desc: REO command descriptor (already advanced past the TLV header)
 * @type: REO command type; selects which command's address fields to set
 * @paddr_lo: lower 32 bits of the queue/flush descriptor physical address
 * @paddr_hi: upper 8 bits (bits 39..32) of the physical address
 *
 * Unsupported command types only log an error; nothing is programmed.
 */
static inline void
hal_reo_cmd_set_descr_addr_li(uint32_t *reo_desc, enum hal_reo_cmd_type type,
			      uint32_t paddr_lo, uint8_t paddr_hi)
{
	switch (type) {
	case CMD_GET_QUEUE_STATS:
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_1,
				   RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2,
				   RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_1,
				   FLUSH_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
				   FLUSH_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_CACHE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_1,
				   FLUSH_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
				   FLUSH_ADDR_39_32, paddr_hi);
		break;
	case CMD_UPDATE_RX_REO_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_1,
				   RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
				   RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Invalid REO command type", __func__);
		break;
	}
}
319 
/**
 * hal_reo_cmd_queue_stats_li() - Post a GET_QUEUE_STATS command on the REO
 *	command ring
 * @hal_ring_hdl: REO command SRNG handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters; std.addr_lo/addr_hi give the REO queue
 *	 descriptor address, u.stats_params.clear requests clear-on-read
 *
 * Return: REO command number assigned to the queued command, or -EBUSY
 * if no command ring entry is available.
 */
static inline int
hal_reo_cmd_queue_stats_li(hal_ring_handle_t  hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E,
			sizeof(struct reo_get_queue_stats));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear everything after the (already valid) command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_get_queue_stats) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr_li(reo_desc, CMD_GET_QUEUE_STATS,
				      cmd->std.addr_lo,
				      cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2, CLEAR_STATS,
			   cmd->u.stats_params.clear);

	/* Commit to HW now if a runtime-PM get succeeds; otherwise reap the
	 * entry and flag the ring for a deferred flush
	 */
	if (hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_HAL_REO_CMD) == 0) {
		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_HAL_REO_CMD);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}
372 
/**
 * hal_reo_cmd_flush_queue_li() - Post a FLUSH_QUEUE command on the REO
 *	command ring
 * @hal_ring_hdl: REO command SRNG handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters; std.addr_lo/addr_hi give the queue descriptor
 *	 address, u.fl_queue_params selects optional post-flush blocking
 *
 * Return: REO command number assigned to the queued command, or -EBUSY
 * if no command ring entry is available.
 */
static inline int
hal_reo_cmd_flush_queue_li(hal_ring_handle_t hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E,
			sizeof(struct reo_flush_queue));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear everything after the (already valid) command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_queue) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr_li(reo_desc, CMD_FLUSH_QUEUE,
				      cmd->std.addr_lo, cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
			   BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH,
			   cmd->u.fl_queue_params.block_use_after_flush);

	/* Blocking resource index is meaningful only when blocking is on */
	if (cmd->u.fl_queue_params.block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
				   BLOCK_RESOURCE_INDEX,
				   cmd->u.fl_queue_params.index);
	}

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}
423 
/**
 * hal_reo_cmd_flush_cache_li() - Post a FLUSH_CACHE command on the REO
 *	command ring
 * @hal_ring_hdl: REO command SRNG handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters (u.fl_cache_params)
 *
 * When block_use_after_flush is requested, a free blocking resource is
 * picked from hal_soc->reo_res_bitmap and its index is recorded in
 * hal_soc->index as a side effect.
 *
 * Return: REO command number assigned to the queued command, or -EBUSY
 * if no blocking resource or command ring entry is available.
 */
static inline int
hal_reo_cmd_flush_cache_li(hal_ring_handle_t hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_flush_cache_params *cp;
	uint8_t index = 0;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	cp = &cmd->u.fl_cache_params;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);

	/* We need a cache block resource for this operation, and REO HW has
	 * only 4 such blocking resources. These resources are managed using
	 * reo_res_bitmap, and we return failure if none is available.
	 */
	if (cp->block_use_after_flush) {
		index = hal_find_zero_bit(hal_soc->reo_res_bitmap);
		if (index > 3) {
			qdf_print("No blocking resource available!");
			hal_srng_access_end(hal_soc, hal_ring_hdl);
			return -EBUSY;
		}
		hal_soc->index = index;
	}

	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		hal_srng_dump(hal_ring_handle_to_hal_srng(hal_ring_hdl));
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E,
			sizeof(struct reo_flush_cache));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear everything after the (already valid) command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr_li(reo_desc, CMD_FLUSH_CACHE,
				      cmd->std.addr_lo, cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   FORWARD_ALL_MPDUS_IN_QUEUE, cp->fwd_mpdus_in_queue);

	/* set it to 0 for now */
	cp->rel_block_index = 0;
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   RELEASE_CACHE_BLOCK_INDEX, cp->rel_block_index);

	if (cp->block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
				   CACHE_BLOCK_RESOURCE_INDEX, index);
	}

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   FLUSH_WITHOUT_INVALIDATE, cp->flush_no_inval);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   BLOCK_CACHE_USAGE_AFTER_FLUSH,
			   cp->block_use_after_flush);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, FLUSH_ENTIRE_CACHE,
			   cp->flush_entire_cache);

	/* Commit to HW now if a runtime-PM get succeeds; otherwise reap the
	 * entry and flag the ring for a deferred flush
	 */
	if (hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_HAL_REO_CMD) == 0) {
		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_HAL_REO_CMD);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}
513 
/**
 * hal_reo_cmd_unblock_cache_li() - Post an UNBLOCK_CACHE command on the REO
 *	command ring
 * @hal_ring_hdl: REO command SRNG handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters (u.unblk_cache_params)
 *
 * For UNBLOCK_RES_INDEX type, reo_res_bitmap is consulted only to verify
 * that some blocking resource is currently in use; the resource index
 * programmed into the descriptor is the caller-supplied one.
 *
 * Return: REO command number assigned to the queued command, or -EBUSY
 * if no resource is blocked or no command ring entry is available.
 */
static inline int
hal_reo_cmd_unblock_cache_li(hal_ring_handle_t hal_ring_hdl,
			     hal_soc_handle_t hal_soc_hdl,
			     struct hal_reo_cmd_params *cmd)

{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;
	uint8_t index = 0;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		index = hal_find_one_bit(hal_soc->reo_res_bitmap);
		if (index > 3) {
			hal_srng_access_end(hal_soc, hal_ring_hdl);
			qdf_print("No blocking resource to unblock!");
			return -EBUSY;
		}
	}

	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E,
			sizeof(struct reo_unblock_cache));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear everything after the (already valid) command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_unblock_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
			   UNBLOCK_TYPE, cmd->u.unblk_cache_params.type);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
				   CACHE_BLOCK_RESOURCE_INDEX,
				   cmd->u.unblk_cache_params.index);
	}

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}
572 
/**
 * hal_reo_cmd_flush_timeout_list_li() - Post a FLUSH_TIMEOUT_LIST command on
 *	the REO command ring
 * @hal_ring_hdl: REO command SRNG handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters (u.fl_tim_list_params): AC timeout list to
 *	 flush, minimum descriptors to release and buffers to forward
 *
 * Return: REO command number assigned to the queued command, or -EBUSY
 * if no command ring entry is available.
 */
static inline int
hal_reo_cmd_flush_timeout_list_li(hal_ring_handle_t hal_ring_hdl,
				  hal_soc_handle_t hal_soc_hdl,
				  struct hal_reo_cmd_params *cmd)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E,
			sizeof(struct reo_flush_timeout_list));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear everything after the (already valid) command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_timeout_list) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_1, AC_TIMOUT_LIST,
			   cmd->u.fl_tim_list_params.ac_list);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
			   MINIMUM_RELEASE_DESC_COUNT,
			   cmd->u.fl_tim_list_params.min_rel_desc);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
			   MINIMUM_FORWARD_BUF_COUNT,
			   cmd->u.fl_tim_list_params.min_fwd_buf);

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}
621 
622 static inline int
623 hal_reo_cmd_update_rx_queue_li(hal_ring_handle_t hal_ring_hdl,
624 			       hal_soc_handle_t hal_soc_hdl,
625 			       struct hal_reo_cmd_params *cmd)
626 {
627 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
628 	uint32_t *reo_desc, val;
629 	struct hal_reo_cmd_update_queue_params *p;
630 
631 	p = &cmd->u.upd_queue_params;
632 
633 	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
634 	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
635 	if (!reo_desc) {
636 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
637 			  "%s: Out of cmd ring entries", __func__);
638 		hal_srng_access_end(hal_soc, hal_ring_hdl);
639 		return -EBUSY;
640 	}
641 
642 	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E,
643 			sizeof(struct reo_update_rx_reo_queue));
644 
645 	/*
646 	 * Offsets of descriptor fields defined in HW headers start from
647 	 * the field after TLV header
648 	 */
649 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
650 	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
651 		     sizeof(struct reo_update_rx_reo_queue) -
652 		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));
653 
654 	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
655 			   REO_STATUS_REQUIRED, cmd->std.need_status);
656 
657 	hal_reo_cmd_set_descr_addr_li(reo_desc, CMD_UPDATE_RX_REO_QUEUE,
658 				      cmd->std.addr_lo, cmd->std.addr_hi);
659 
660 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
661 			   UPDATE_RECEIVE_QUEUE_NUMBER, p->update_rx_queue_num);
662 
663 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, UPDATE_VLD,
664 			   p->update_vld);
665 
666 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
667 			   UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
668 			   p->update_assoc_link_desc);
669 
670 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
671 			   UPDATE_DISABLE_DUPLICATE_DETECTION,
672 			   p->update_disable_dup_detect);
673 
674 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
675 			   UPDATE_DISABLE_DUPLICATE_DETECTION,
676 			   p->update_disable_dup_detect);
677 
678 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
679 			   UPDATE_SOFT_REORDER_ENABLE,
680 			   p->update_soft_reorder_enab);
681 
682 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
683 			   UPDATE_AC, p->update_ac);
684 
685 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
686 			   UPDATE_BAR, p->update_bar);
687 
688 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
689 			   UPDATE_BAR, p->update_bar);
690 
691 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
692 			   UPDATE_RTY, p->update_rty);
693 
694 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
695 			   UPDATE_CHK_2K_MODE, p->update_chk_2k_mode);
696 
697 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
698 			   UPDATE_OOR_MODE, p->update_oor_mode);
699 
700 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
701 			   UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size);
702 
703 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
704 			   UPDATE_PN_CHECK_NEEDED, p->update_pn_check_needed);
705 
706 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
707 			   UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even);
708 
709 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
710 			   UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven);
711 
712 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
713 			   UPDATE_PN_HANDLING_ENABLE, p->update_pn_hand_enab);
714 
715 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
716 			   UPDATE_PN_SIZE, p->update_pn_size);
717 
718 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
719 			   UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu);
720 
721 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
722 			   UPDATE_SVLD, p->update_svld);
723 
724 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
725 			   UPDATE_SSN, p->update_ssn);
726 
727 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
728 			   UPDATE_SEQ_2K_ERROR_DETECTED_FLAG,
729 			   p->update_seq_2k_err_detect);
730 
731 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
732 			   UPDATE_PN_VALID, p->update_pn_valid);
733 
734 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
735 			   UPDATE_PN, p->update_pn);
736 
737 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
738 			   RECEIVE_QUEUE_NUMBER, p->rx_queue_num);
739 
740 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
741 			   VLD, p->vld);
742 
743 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
744 			   ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
745 			   p->assoc_link_desc);
746 
747 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
748 			   DISABLE_DUPLICATE_DETECTION, p->disable_dup_detect);
749 
750 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
751 			   SOFT_REORDER_ENABLE, p->soft_reorder_enab);
752 
753 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, AC, p->ac);
754 
755 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
756 			   BAR, p->bar);
757 
758 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
759 			   CHK_2K_MODE, p->chk_2k_mode);
760 
761 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
762 			   RTY, p->rty);
763 
764 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
765 			   OOR_MODE, p->oor_mode);
766 
767 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
768 			   PN_CHECK_NEEDED, p->pn_check_needed);
769 
770 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
771 			   PN_SHALL_BE_EVEN, p->pn_even);
772 
773 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
774 			   PN_SHALL_BE_UNEVEN, p->pn_uneven);
775 
776 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
777 			   PN_HANDLING_ENABLE, p->pn_hand_enab);
778 
779 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
780 			   IGNORE_AMPDU_FLAG, p->ignore_ampdu);
781 
782 	if (p->ba_window_size < 1)
783 		p->ba_window_size = 1;
784 	/*
785 	 * WAR to get 2k exception in Non BA case.
786 	 * Setting window size to 2 to get 2k jump exception
787 	 * when we receive aggregates in Non BA case
788 	 */
789 	if (p->ba_window_size == 1)
790 		p->ba_window_size++;
791 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
792 			   BA_WINDOW_SIZE, p->ba_window_size - 1);
793 
794 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
795 			   PN_SIZE, p->pn_size);
796 
797 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
798 			   SVLD, p->svld);
799 
800 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
801 			   SSN, p->ssn);
802 
803 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
804 			   SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect);
805 
806 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
807 			   PN_ERROR_DETECTED_FLAG, p->pn_err_detect);
808 
809 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_5,
810 			   PN_31_0, p->pn_31_0);
811 
812 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_6,
813 			   PN_63_32, p->pn_63_32);
814 
815 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_7,
816 			   PN_95_64, p->pn_95_64);
817 
818 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_8,
819 			   PN_127_96, p->pn_127_96);
820 
821 	if (hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_HAL_REO_CMD) == 0) {
822 		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
823 		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_HAL_REO_CMD);
824 	} else {
825 		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
826 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
827 		hal_srng_inc_flush_cnt(hal_ring_hdl);
828 	}
829 
830 	val = reo_desc[CMD_HEADER_DW_OFFSET];
831 	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
832 			     val);
833 }
834 
835 int hal_reo_send_cmd_li(hal_soc_handle_t hal_soc_hdl,
836 			hal_ring_handle_t  hal_ring_hdl,
837 			enum hal_reo_cmd_type cmd,
838 			void *params)
839 {
840 	struct hal_reo_cmd_params *cmd_params =
841 			(struct hal_reo_cmd_params *)params;
842 	int num = 0;
843 
844 	switch (cmd) {
845 	case CMD_GET_QUEUE_STATS:
846 		num = hal_reo_cmd_queue_stats_li(hal_ring_hdl,
847 						 hal_soc_hdl, cmd_params);
848 		break;
849 	case CMD_FLUSH_QUEUE:
850 		num = hal_reo_cmd_flush_queue_li(hal_ring_hdl,
851 						 hal_soc_hdl, cmd_params);
852 		break;
853 	case CMD_FLUSH_CACHE:
854 		num = hal_reo_cmd_flush_cache_li(hal_ring_hdl,
855 						 hal_soc_hdl, cmd_params);
856 		break;
857 	case CMD_UNBLOCK_CACHE:
858 		num = hal_reo_cmd_unblock_cache_li(hal_ring_hdl,
859 						   hal_soc_hdl, cmd_params);
860 		break;
861 	case CMD_FLUSH_TIMEOUT_LIST:
862 		num = hal_reo_cmd_flush_timeout_list_li(hal_ring_hdl,
863 							hal_soc_hdl,
864 							cmd_params);
865 		break;
866 	case CMD_UPDATE_RX_REO_QUEUE:
867 		num = hal_reo_cmd_update_rx_queue_li(hal_ring_hdl,
868 						     hal_soc_hdl, cmd_params);
869 		break;
870 	default:
871 		hal_err("Invalid REO command type: %d", cmd);
872 		return -EINVAL;
873 	};
874 
875 	return num;
876 }
877 
/**
 * hal_reo_queue_stats_status_li() - Parse a REO_GET_QUEUE_STATS status TLV
 * @ring_desc: REO status ring descriptor carrying the status TLV
 * @st_handle: caller-provided struct hal_reo_queue_status to populate
 * @hal_soc_hdl: opaque HAL SOC handle
 *
 * Extracts the common status header, SSN and current index, the 128-bit
 * PN, enqueue/dequeue timestamps, the 256-bit RX bitmap and the various
 * MPDU/MSDU/error counters from the status descriptor into @st_handle.
 */
void
hal_reo_queue_stats_status_li(hal_ring_desc_t ring_desc,
			      void *st_handle,
			      hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_queue_status *st =
		(struct hal_reo_queue_status *)st_handle;
	uint32_t *reo_desc = (uint32_t *)ring_desc;
	uint32_t val;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	hal_reo_status_get_header(ring_desc, HAL_REO_QUEUE_STATS_STATUS_TLV,
				  &(st->header), hal_soc);

	/* SSN */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2, SSN)];
	st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2, SSN, val);

	/* current index */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2,
					 CURRENT_INDEX)];
	st->curr_idx =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2,
			      CURRENT_INDEX, val);

	/* PN bits (four 32-bit words, lowest first) */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_3,
					 PN_31_0)];
	st->pn_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_3,
			      PN_31_0, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_4,
					 PN_63_32)];
	st->pn_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_4,
			      PN_63_32, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_5,
					 PN_95_64)];
	st->pn_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_5,
			      PN_95_64, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_6,
					 PN_127_96)];
	st->pn_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_6,
			      PN_127_96, val);

	/* timestamps */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_7,
					 LAST_RX_ENQUEUE_TIMESTAMP)];
	st->last_rx_enq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_7,
			      LAST_RX_ENQUEUE_TIMESTAMP, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_8,
					 LAST_RX_DEQUEUE_TIMESTAMP)];
	st->last_rx_deq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_8,
			      LAST_RX_DEQUEUE_TIMESTAMP, val);

	/* rx bitmap (eight 32-bit words, lowest first) */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_9,
					 RX_BITMAP_31_0)];
	st->rx_bitmap_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_9,
			      RX_BITMAP_31_0, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_10,
					 RX_BITMAP_63_32)];
	st->rx_bitmap_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_10,
			      RX_BITMAP_63_32, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_11,
					 RX_BITMAP_95_64)];
	st->rx_bitmap_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_11,
			      RX_BITMAP_95_64, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_12,
					 RX_BITMAP_127_96)];
	st->rx_bitmap_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_12,
			      RX_BITMAP_127_96, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_13,
					 RX_BITMAP_159_128)];
	st->rx_bitmap_159_128 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_13,
			      RX_BITMAP_159_128, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_14,
					 RX_BITMAP_191_160)];
	st->rx_bitmap_191_160 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_14,
			      RX_BITMAP_191_160, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_15,
					 RX_BITMAP_223_192)];
	st->rx_bitmap_223_192 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_15,
			      RX_BITMAP_223_192, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_16,
					 RX_BITMAP_255_224)];
	st->rx_bitmap_255_224 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_16,
			      RX_BITMAP_255_224, val);

	/* various counts */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
					 CURRENT_MPDU_COUNT)];
	st->curr_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
			      CURRENT_MPDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
					 CURRENT_MSDU_COUNT)];
	st->curr_msdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
			      CURRENT_MSDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
					 TIMEOUT_COUNT)];
	st->fwd_timeout_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      TIMEOUT_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
					 FORWARD_DUE_TO_BAR_COUNT)];
	st->fwd_bar_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      FORWARD_DUE_TO_BAR_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
					 DUPLICATE_COUNT)];
	st->dup_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      DUPLICATE_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
					 FRAMES_IN_ORDER_COUNT)];
	st->frms_in_order_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
			      FRAMES_IN_ORDER_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
					 BAR_RECEIVED_COUNT)];
	st->bar_rcvd_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
			      BAR_RECEIVED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_20,
					 MPDU_FRAMES_PROCESSED_COUNT)];
	st->mpdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_20,
			      MPDU_FRAMES_PROCESSED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_21,
					 MSDU_FRAMES_PROCESSED_COUNT)];
	st->msdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_21,
			      MSDU_FRAMES_PROCESSED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_22,
					 TOTAL_PROCESSED_BYTE_COUNT)];
	st->total_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_22,
			      TOTAL_PROCESSED_BYTE_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
					 LATE_RECEIVE_MPDU_COUNT)];
	st->late_recv_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      LATE_RECEIVE_MPDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
					 WINDOW_JUMP_2K)];
	st->win_jump_2k =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      WINDOW_JUMP_2K, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
					 HOLE_COUNT)];
	st->hole_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      HOLE_COUNT, val);
}
1076 
1077 void
1078 hal_reo_flush_queue_status_li(hal_ring_desc_t ring_desc,
1079 			      void *st_handle,
1080 			      hal_soc_handle_t hal_soc_hdl)
1081 {
1082 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1083 	struct hal_reo_flush_queue_status *st =
1084 			(struct hal_reo_flush_queue_status *)st_handle;
1085 	uint32_t *reo_desc = (uint32_t *)ring_desc;
1086 	uint32_t val;
1087 
1088 	/*
1089 	 * Offsets of descriptor fields defined in HW headers start
1090 	 * from the field after TLV header
1091 	 */
1092 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1093 
1094 	/* header */
1095 	hal_reo_status_get_header(ring_desc, HAL_REO_FLUSH_QUEUE_STATUS_TLV,
1096 				  &(st->header), hal_soc);
1097 
1098 	/* error bit */
1099 	val = reo_desc[HAL_OFFSET(REO_FLUSH_QUEUE_STATUS_2,
1100 					 ERROR_DETECTED)];
1101 	st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
1102 				  val);
1103 }
1104 
1105 void
1106 hal_reo_flush_cache_status_li(hal_ring_desc_t ring_desc,
1107 			      void *st_handle,
1108 			      hal_soc_handle_t hal_soc_hdl)
1109 {
1110 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1111 	struct hal_reo_flush_cache_status *st =
1112 			(struct hal_reo_flush_cache_status *)st_handle;
1113 	uint32_t *reo_desc = (uint32_t *)ring_desc;
1114 	uint32_t val;
1115 
1116 	/*
1117 	 * Offsets of descriptor fields defined in HW headers start
1118 	 * from the field after TLV header
1119 	 */
1120 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1121 
1122 	/* header */
1123 	hal_reo_status_get_header(ring_desc, HAL_REO_FLUSH_CACHE_STATUS_TLV,
1124 				  &(st->header), hal_soc);
1125 
1126 	/* error bit */
1127 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1128 					 ERROR_DETECTED)];
1129 	st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
1130 				  val);
1131 
1132 	/* block error */
1133 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1134 					 BLOCK_ERROR_DETAILS)];
1135 	st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1136 					BLOCK_ERROR_DETAILS,
1137 					val);
1138 	if (!st->block_error)
1139 		qdf_set_bit(hal_soc->index,
1140 			    (unsigned long *)&hal_soc->reo_res_bitmap);
1141 
1142 	/* cache flush status */
1143 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1144 				     CACHE_CONTROLLER_FLUSH_STATUS_HIT)];
1145 	st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1146 					CACHE_CONTROLLER_FLUSH_STATUS_HIT,
1147 					val);
1148 
1149 	/* cache flush descriptor type */
1150 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1151 				  CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)];
1152 	st->cache_flush_status_desc_type =
1153 		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1154 			      CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE,
1155 			      val);
1156 
1157 	/* cache flush count */
1158 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1159 				  CACHE_CONTROLLER_FLUSH_COUNT)];
1160 	st->cache_flush_cnt =
1161 		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1162 			      CACHE_CONTROLLER_FLUSH_COUNT,
1163 			      val);
1164 }
1165 
1166 void
1167 hal_reo_unblock_cache_status_li(hal_ring_desc_t ring_desc,
1168 				hal_soc_handle_t hal_soc_hdl,
1169 				void *st_handle)
1170 {
1171 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1172 	struct hal_reo_unblk_cache_status *st =
1173 			(struct hal_reo_unblk_cache_status *)st_handle;
1174 	uint32_t *reo_desc = (uint32_t *)ring_desc;
1175 	uint32_t val;
1176 
1177 	/*
1178 	 * Offsets of descriptor fields defined in HW headers start
1179 	 * from the field after TLV header
1180 	 */
1181 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1182 
1183 	/* header */
1184 	hal_reo_status_get_header(ring_desc, HAL_REO_UNBLK_CACHE_STATUS_TLV,
1185 				  &st->header, hal_soc);
1186 
1187 	/* error bit */
1188 	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
1189 				  ERROR_DETECTED)];
1190 	st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
1191 				  ERROR_DETECTED,
1192 				  val);
1193 
1194 	/* unblock type */
1195 	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
1196 				  UNBLOCK_TYPE)];
1197 	st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
1198 					 UNBLOCK_TYPE,
1199 					 val);
1200 
1201 	if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX))
1202 		qdf_clear_bit(hal_soc->index,
1203 			      (unsigned long *)&hal_soc->reo_res_bitmap);
1204 }
1205 
1206 void hal_reo_flush_timeout_list_status_li(hal_ring_desc_t ring_desc,
1207 					  void *st_handle,
1208 					  hal_soc_handle_t hal_soc_hdl)
1209 {
1210 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1211 	struct hal_reo_flush_timeout_list_status *st =
1212 			(struct hal_reo_flush_timeout_list_status *)st_handle;
1213 	uint32_t *reo_desc = (uint32_t *)ring_desc;
1214 	uint32_t val;
1215 
1216 	/*
1217 	 * Offsets of descriptor fields defined in HW headers start
1218 	 * from the field after TLV header
1219 	 */
1220 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1221 
1222 	/* header */
1223 	hal_reo_status_get_header(ring_desc, HAL_REO_TIMOUT_LIST_STATUS_TLV,
1224 				  &(st->header), hal_soc);
1225 
1226 	/* error bit */
1227 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1228 					 ERROR_DETECTED)];
1229 	st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1230 				  ERROR_DETECTED,
1231 				  val);
1232 
1233 	/* list empty */
1234 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1235 					 TIMOUT_LIST_EMPTY)];
1236 	st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1237 				       TIMOUT_LIST_EMPTY,
1238 				       val);
1239 
1240 	/* release descriptor count */
1241 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1242 					 RELEASE_DESC_COUNT)];
1243 	st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1244 					 RELEASE_DESC_COUNT,
1245 					 val);
1246 
1247 	/* forward buf count */
1248 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1249 				     FORWARD_BUF_COUNT)];
1250 	st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1251 					FORWARD_BUF_COUNT,
1252 					val);
1253 }
1254 
1255 void hal_reo_desc_thres_reached_status_li(hal_ring_desc_t ring_desc,
1256 					  void *st_handle,
1257 					  hal_soc_handle_t hal_soc_hdl)
1258 {
1259 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1260 	struct hal_reo_desc_thres_reached_status *st =
1261 			(struct hal_reo_desc_thres_reached_status *)st_handle;
1262 	uint32_t *reo_desc = (uint32_t *)ring_desc;
1263 	uint32_t val;
1264 
1265 	/*
1266 	 * Offsets of descriptor fields defined in HW headers start
1267 	 * from the field after TLV header
1268 	 */
1269 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1270 
1271 	/* header */
1272 	hal_reo_status_get_header(ring_desc,
1273 				  HAL_REO_DESC_THRES_STATUS_TLV,
1274 				  &(st->header), hal_soc);
1275 
1276 	/* threshold index */
1277 	val = reo_desc[HAL_OFFSET_DW(
1278 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
1279 				 THRESHOLD_INDEX)];
1280 	st->thres_index = HAL_GET_FIELD(
1281 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
1282 				THRESHOLD_INDEX,
1283 				val);
1284 
1285 	/* link desc counters */
1286 	val = reo_desc[HAL_OFFSET_DW(
1287 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
1288 				 LINK_DESCRIPTOR_COUNTER0)];
1289 	st->link_desc_counter0 = HAL_GET_FIELD(
1290 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
1291 				LINK_DESCRIPTOR_COUNTER0,
1292 				val);
1293 
1294 	val = reo_desc[HAL_OFFSET_DW(
1295 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
1296 				 LINK_DESCRIPTOR_COUNTER1)];
1297 	st->link_desc_counter1 = HAL_GET_FIELD(
1298 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
1299 				LINK_DESCRIPTOR_COUNTER1,
1300 				val);
1301 
1302 	val = reo_desc[HAL_OFFSET_DW(
1303 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
1304 				 LINK_DESCRIPTOR_COUNTER2)];
1305 	st->link_desc_counter2 = HAL_GET_FIELD(
1306 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
1307 				LINK_DESCRIPTOR_COUNTER2,
1308 				val);
1309 
1310 	val = reo_desc[HAL_OFFSET_DW(
1311 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
1312 				 LINK_DESCRIPTOR_COUNTER_SUM)];
1313 	st->link_desc_counter_sum = HAL_GET_FIELD(
1314 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
1315 				LINK_DESCRIPTOR_COUNTER_SUM,
1316 				val);
1317 }
1318 
1319 void
1320 hal_reo_rx_update_queue_status_li(hal_ring_desc_t ring_desc,
1321 				  void *st_handle,
1322 				  hal_soc_handle_t hal_soc_hdl)
1323 {
1324 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1325 	struct hal_reo_update_rx_queue_status *st =
1326 			(struct hal_reo_update_rx_queue_status *)st_handle;
1327 	uint32_t *reo_desc = (uint32_t *)ring_desc;
1328 
1329 	/*
1330 	 * Offsets of descriptor fields defined in HW headers start
1331 	 * from the field after TLV header
1332 	 */
1333 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1334 
1335 	/* header */
1336 	hal_reo_status_get_header(ring_desc,
1337 				  HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV,
1338 				  &(st->header), hal_soc);
1339 }
1340 
1341 uint8_t hal_get_tlv_hdr_size_li(void)
1342 {
1343 	return sizeof(struct tlv_32_hdr);
1344 }
1345 
1346 uint64_t hal_rx_get_qdesc_addr_li(uint8_t *dst_ring_desc, uint8_t *buf)
1347 {
1348 	return *(uint64_t *)dst_ring_desc +
1349 		REO_DESTINATION_RING_6_RX_REO_QUEUE_DESC_ADDR_31_0_OFFSET;
1350 }
1351