xref: /wlan-dirver/qca-wifi-host-cmn/hal/wifi3.0/be/hal_be_reo.c (revision 2f4b444fb7e689b83a4ab0e7b3b38f0bf4def8e0)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "qdf_module.h"
20 #include "hal_hw_headers.h"
21 #include "hal_be_hw_headers.h"
22 #include "hal_reo.h"
23 #include "hal_be_reo.h"
24 #include "hal_be_api.h"
25 
/**
 * hal_get_reo_reg_base_offset_be() - Get REO register block base offset
 *
 * Return: base offset of the REO register block for BE targets
 */
uint32_t hal_get_reo_reg_base_offset_be(void)
{
	return REO_REG_REG_BASE;
}
30 
31 /**
32  * hal_reo_qdesc_setup - Setup HW REO queue descriptor
33  *
34  * @hal_soc: Opaque HAL SOC handle
35  * @ba_window_size: BlockAck window size
36  * @start_seq: Starting sequence number
37  * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
38  * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
39  * @tid: TID
40  *
41  */
42 void hal_reo_qdesc_setup_be(hal_soc_handle_t hal_soc_hdl, int tid,
43 			    uint32_t ba_window_size,
44 			    uint32_t start_seq, void *hw_qdesc_vaddr,
45 			    qdf_dma_addr_t hw_qdesc_paddr,
46 			    int pn_type)
47 {
48 	uint32_t *reo_queue_desc = (uint32_t *)hw_qdesc_vaddr;
49 	uint32_t *reo_queue_ext_desc;
50 	uint32_t reg_val;
51 	uint32_t pn_enable;
52 	uint32_t pn_size = 0;
53 
54 	qdf_mem_zero(hw_qdesc_vaddr, sizeof(struct rx_reo_queue));
55 
56 	hal_uniform_desc_hdr_setup(reo_queue_desc, HAL_DESC_REO_OWNED,
57 				   HAL_REO_QUEUE_DESC);
58 	/* Fixed pattern in reserved bits for debugging */
59 	HAL_DESC_SET_FIELD(reo_queue_desc, UNIFORM_DESCRIPTOR_HEADER,
60 			   RESERVED_0A, 0xDDBEEF);
61 
62 	/* This a just a SW meta data and will be copied to REO destination
63 	 * descriptors indicated by hardware.
64 	 * TODO: Setting TID in this field. See if we should set something else.
65 	 */
66 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
67 			   RECEIVE_QUEUE_NUMBER, tid);
68 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
69 			   VLD, 1);
70 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
71 			   ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
72 			   HAL_RX_LINK_DESC_CNTR);
73 
74 	/*
75 	 * Fields DISABLE_DUPLICATE_DETECTION and SOFT_REORDER_ENABLE will be 0
76 	 */
77 
78 	reg_val = TID_TO_WME_AC(tid);
79 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, AC, reg_val);
80 
81 	if (ba_window_size < 1)
82 		ba_window_size = 1;
83 
84 	/* WAR to get 2k exception in Non BA case.
85 	 * Setting window size to 2 to get 2k jump exception
86 	 * when we receive aggregates in Non BA case
87 	 */
88 	ba_window_size = hal_update_non_ba_win_size(tid, ba_window_size);
89 
90 	/* Set RTY bit for non-BA case. Duplicate detection is currently not
91 	 * done by HW in non-BA case if RTY bit is not set.
92 	 * TODO: This is a temporary War and should be removed once HW fix is
93 	 * made to check and discard duplicates even if RTY bit is not set.
94 	 */
95 	if (ba_window_size == 1)
96 		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, RTY, 1);
97 
98 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, BA_WINDOW_SIZE,
99 			   ba_window_size - 1);
100 
101 	switch (pn_type) {
102 	case HAL_PN_WPA:
103 		pn_enable = 1;
104 		pn_size = PN_SIZE_48;
105 		break;
106 	case HAL_PN_WAPI_EVEN:
107 	case HAL_PN_WAPI_UNEVEN:
108 		pn_enable = 1;
109 		pn_size = PN_SIZE_128;
110 		break;
111 	default:
112 		pn_enable = 0;
113 		break;
114 	}
115 
116 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, PN_CHECK_NEEDED,
117 			   pn_enable);
118 
119 	if (pn_type == HAL_PN_WAPI_EVEN)
120 		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
121 				   PN_SHALL_BE_EVEN, 1);
122 	else if (pn_type == HAL_PN_WAPI_UNEVEN)
123 		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
124 				   PN_SHALL_BE_UNEVEN, 1);
125 
126 	/*
127 	 *  TODO: Need to check if PN handling in SW needs to be enabled
128 	 *  So far this is not a requirement
129 	 */
130 
131 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, PN_SIZE,
132 			   pn_size);
133 
134 	/* TODO: Check if RX_REO_QUEUE_IGNORE_AMPDU_FLAG need to be set
135 	 * based on BA window size and/or AMPDU capabilities
136 	 */
137 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
138 			   IGNORE_AMPDU_FLAG, 1);
139 
140 	if (start_seq <= 0xfff)
141 		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, SSN,
142 				   start_seq);
143 
144 	/* TODO: SVLD should be set to 1 if a valid SSN is received in ADDBA,
145 	 * but REO is not delivering packets if we set it to 1. Need to enable
146 	 * this once the issue is resolved
147 	 */
148 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, SVLD, 0);
149 
150 	/* TODO: Check if we should set start PN for WAPI */
151 
152 	/* TODO: HW queue descriptors are currently allocated for max BA
153 	 * window size for all QOS TIDs so that same descriptor can be used
154 	 * later when ADDBA request is recevied. This should be changed to
155 	 * allocate HW queue descriptors based on BA window size being
156 	 * negotiated (0 for non BA cases), and reallocate when BA window
157 	 * size changes and also send WMI message to FW to change the REO
158 	 * queue descriptor in Rx peer entry as part of dp_rx_tid_update.
159 	 */
160 	if (tid == HAL_NON_QOS_TID)
161 		return;
162 
163 	reo_queue_ext_desc = (uint32_t *)
164 		(((struct rx_reo_queue *)reo_queue_desc) + 1);
165 	qdf_mem_zero(reo_queue_ext_desc, 3 *
166 		     sizeof(struct rx_reo_queue_ext));
167 	/* Initialize first reo queue extension descriptor */
168 	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
169 				   HAL_DESC_REO_OWNED,
170 				   HAL_REO_QUEUE_EXT_DESC);
171 	/* Fixed pattern in reserved bits for debugging */
172 	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
173 			   UNIFORM_DESCRIPTOR_HEADER, RESERVED_0A,
174 			   0xADBEEF);
175 	/* Initialize second reo queue extension descriptor */
176 	reo_queue_ext_desc = (uint32_t *)
177 		(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
178 	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
179 				   HAL_DESC_REO_OWNED,
180 				   HAL_REO_QUEUE_EXT_DESC);
181 	/* Fixed pattern in reserved bits for debugging */
182 	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
183 			   UNIFORM_DESCRIPTOR_HEADER, RESERVED_0A,
184 			   0xBDBEEF);
185 	/* Initialize third reo queue extension descriptor */
186 	reo_queue_ext_desc = (uint32_t *)
187 		(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
188 	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
189 				   HAL_DESC_REO_OWNED,
190 				   HAL_REO_QUEUE_EXT_DESC);
191 	/* Fixed pattern in reserved bits for debugging */
192 	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
193 			   UNIFORM_DESCRIPTOR_HEADER, RESERVED_0A,
194 			   0xCDBEEF);
195 }
196 
197 qdf_export_symbol(hal_reo_qdesc_setup_be);
198 
199 /**
200  * hal_get_ba_aging_timeout_be - Get BA Aging timeout
201  *
202  * @hal_soc: Opaque HAL SOC handle
203  * @ac: Access category
204  * @value: window size to get
205  */
206 void hal_get_ba_aging_timeout_be(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
207 				 uint32_t *value)
208 {
209 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
210 
211 	switch (ac) {
212 	case WME_AC_BE:
213 		*value = HAL_REG_READ(soc,
214 				      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
215 				      REO_REG_REG_BASE)) / 1000;
216 		break;
217 	case WME_AC_BK:
218 		*value = HAL_REG_READ(soc,
219 				      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
220 				      REO_REG_REG_BASE)) / 1000;
221 		break;
222 	case WME_AC_VI:
223 		*value = HAL_REG_READ(soc,
224 				      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
225 				      REO_REG_REG_BASE)) / 1000;
226 		break;
227 	case WME_AC_VO:
228 		*value = HAL_REG_READ(soc,
229 				      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
230 				      REO_REG_REG_BASE)) / 1000;
231 		break;
232 	default:
233 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
234 			  "Invalid AC: %d\n", ac);
235 	}
236 }
237 qdf_export_symbol(hal_get_ba_aging_timeout_be);
238 
239 /**
240  * hal_set_ba_aging_timeout_be - Set BA Aging timeout
241  *
242  * @hal_soc: Opaque HAL SOC handle
243  * @ac: Access category
244  * ac: 0 - Background, 1 - Best Effort, 2 - Video, 3 - Voice
245  * @value: Input value to set
246  */
247 void hal_set_ba_aging_timeout_be(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
248 				 uint32_t value)
249 {
250 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
251 
252 	switch (ac) {
253 	case WME_AC_BE:
254 		HAL_REG_WRITE(soc,
255 			      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
256 			      REO_REG_REG_BASE),
257 			      value * 1000);
258 		break;
259 	case WME_AC_BK:
260 		HAL_REG_WRITE(soc,
261 			      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
262 			      REO_REG_REG_BASE),
263 			      value * 1000);
264 		break;
265 	case WME_AC_VI:
266 		HAL_REG_WRITE(soc,
267 			      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
268 			      REO_REG_REG_BASE),
269 			      value * 1000);
270 		break;
271 	case WME_AC_VO:
272 		HAL_REG_WRITE(soc,
273 			      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
274 			      REO_REG_REG_BASE),
275 			      value * 1000);
276 		break;
277 	default:
278 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
279 			  "Invalid AC: %d\n", ac);
280 	}
281 }
282 qdf_export_symbol(hal_set_ba_aging_timeout_be);
283 
/**
 * hal_reo_cmd_set_descr_addr_be() - Program the 40-bit queue/flush descriptor
 *	physical address into a REO command descriptor
 * @reo_desc: Pointer to the command descriptor body (past the TLV header)
 * @type: REO command type; selects which command's address fields to set
 * @paddr_lo: Bits 31:0 of the descriptor physical address
 * @paddr_hi: Bits 39:32 of the descriptor physical address
 *
 * The field names differ per command TLV, hence the per-type switch.
 */
static inline void
hal_reo_cmd_set_descr_addr_be(uint32_t *reo_desc,
			      enum hal_reo_cmd_type type,
			      uint32_t paddr_lo,
			      uint8_t paddr_hi)
{
	switch (type) {
	case CMD_GET_QUEUE_STATS:
		HAL_DESC_64_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS,
				      RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_64_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS,
				      RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_QUEUE:
		HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_QUEUE,
				      FLUSH_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_QUEUE,
				      FLUSH_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_CACHE:
		HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
				      FLUSH_ADDR_31_0, paddr_lo);
		HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
				      FLUSH_ADDR_39_32, paddr_hi);
		break;
	case CMD_UPDATE_RX_REO_QUEUE:
		HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
				      RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
				      RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Invalid REO command type", __func__);
		break;
	}
}
321 
/**
 * hal_reo_cmd_queue_stats_be() - Queue a GET_QUEUE_STATS command on the REO
 *	command ring
 * @hal_ring_hdl: REO command SRNG handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: Command parameters; uses cmd->std (status/addr) and
 *	cmd->u.stats_params.clear
 *
 * Return: REO command number assigned by HW on success, -EBUSY if the
 * command ring is full.
 */
static inline int
hal_reo_cmd_queue_stats_be(hal_ring_handle_t  hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E,
			sizeof(struct reo_get_queue_stats));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Zero everything after the (already valid) uniform command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_get_queue_stats) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER,
			      REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr_be(reo_desc, CMD_GET_QUEUE_STATS,
				      cmd->std.addr_lo,
				      cmd->std.addr_hi);

	HAL_DESC_64_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS, CLEAR_STATS,
			      cmd->u.stats_params.clear);

	/* Commit the ring update only if the bus is awake (runtime-PM get
	 * succeeded); otherwise reap the entry and flag a deferred flush.
	 */
	if (hif_pm_runtime_get(hal_soc->hif_handle,
			       RTPM_ID_HAL_REO_CMD, true) == 0) {
		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		hif_pm_runtime_put(hal_soc->hif_handle,
				   RTPM_ID_HAL_REO_CMD);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}

	/* HW wrote the assigned command number into the command header */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
				     val);
}
376 
377 static inline int
378 hal_reo_cmd_flush_queue_be(hal_ring_handle_t hal_ring_hdl,
379 			   hal_soc_handle_t hal_soc_hdl,
380 			   struct hal_reo_cmd_params *cmd)
381 {
382 	uint32_t *reo_desc, val;
383 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
384 
385 	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
386 	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
387 	if (!reo_desc) {
388 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
389 			  "%s: Out of cmd ring entries", __func__);
390 		hal_srng_access_end(hal_soc, hal_ring_hdl);
391 		return -EBUSY;
392 	}
393 
394 	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E,
395 			sizeof(struct reo_flush_queue));
396 
397 	/*
398 	 * Offsets of descriptor fields defined in HW headers start from
399 	 * the field after TLV header
400 	 */
401 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
402 	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
403 		     sizeof(struct reo_flush_queue) -
404 		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));
405 
406 	HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER,
407 			      REO_STATUS_REQUIRED, cmd->std.need_status);
408 
409 	hal_reo_cmd_set_descr_addr_be(reo_desc, CMD_FLUSH_QUEUE,
410 				      cmd->std.addr_lo, cmd->std.addr_hi);
411 
412 	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_QUEUE,
413 			      BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH,
414 			      cmd->u.fl_queue_params.block_use_after_flush);
415 
416 	if (cmd->u.fl_queue_params.block_use_after_flush) {
417 		HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_QUEUE,
418 				      BLOCK_RESOURCE_INDEX,
419 				      cmd->u.fl_queue_params.index);
420 	}
421 
422 	hal_srng_access_end(hal_soc, hal_ring_hdl);
423 	val = reo_desc[CMD_HEADER_DW_OFFSET];
424 	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
425 				     val);
426 }
427 
/**
 * hal_reo_cmd_flush_cache_be() - Queue a FLUSH_CACHE command on the REO
 *	command ring
 * @hal_ring_hdl: REO command SRNG handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: Command parameters; uses cmd->std and cmd->u.fl_cache_params
 *
 * If blocking-after-flush is requested, one of the four HW cache blocking
 * resources is reserved via hal_soc->reo_res_bitmap before the command is
 * queued.
 *
 * Return: REO command number assigned by HW on success, -EBUSY if the ring
 * is full or no blocking resource is available.
 */
static inline int
hal_reo_cmd_flush_cache_be(hal_ring_handle_t hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_flush_cache_params *cp;
	uint8_t index = 0;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	cp = &cmd->u.fl_cache_params;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);

	/* We need a cache block resource for this operation, and REO HW has
	 * only 4 such blocking resources. These resources are managed using
	 * reo_res_bitmap, and we return failure if none is available.
	 */
	if (cp->block_use_after_flush) {
		index = hal_find_zero_bit(hal_soc->reo_res_bitmap);
		if (index > 3) {
			qdf_print("No blocking resource available!");
			hal_srng_access_end(hal_soc, hal_ring_hdl);
			return -EBUSY;
		}
		hal_soc->index = index;
	}

	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		hal_srng_dump(hal_ring_handle_to_hal_srng(hal_ring_hdl));
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E,
			sizeof(struct reo_flush_cache));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Zero everything after the (already valid) uniform command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER,
			      REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr_be(reo_desc, CMD_FLUSH_CACHE,
				      cmd->std.addr_lo, cmd->std.addr_hi);

	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
			      FORWARD_ALL_MPDUS_IN_QUEUE,
			      cp->fwd_mpdus_in_queue);

	/* set it to 0 for now */
	cp->rel_block_index = 0;
	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
			      RELEASE_CACHE_BLOCK_INDEX, cp->rel_block_index);

	if (cp->block_use_after_flush) {
		HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
				      CACHE_BLOCK_RESOURCE_INDEX, index);
	}

	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
			      FLUSH_WITHOUT_INVALIDATE, cp->flush_no_inval);

	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
			      BLOCK_CACHE_USAGE_AFTER_FLUSH,
			      cp->block_use_after_flush);

	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE, FLUSH_ENTIRE_CACHE,
			      cp->flush_entire_cache);

	/* Commit the ring update only if the bus is awake (runtime-PM get
	 * succeeded); otherwise reap the entry and flag a deferred flush.
	 */
	if (hif_pm_runtime_get(hal_soc->hif_handle,
			       RTPM_ID_HAL_REO_CMD, true) == 0) {
		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		hif_pm_runtime_put(hal_soc->hif_handle,
				   RTPM_ID_HAL_REO_CMD);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}

	/* HW wrote the assigned command number into the command header */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
				     val);
}
520 
521 static inline int
522 hal_reo_cmd_unblock_cache_be(hal_ring_handle_t hal_ring_hdl,
523 			     hal_soc_handle_t hal_soc_hdl,
524 			     struct hal_reo_cmd_params *cmd)
525 
526 {
527 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
528 	uint32_t *reo_desc, val;
529 	uint8_t index = 0;
530 
531 	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
532 
533 	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
534 		index = hal_find_one_bit(hal_soc->reo_res_bitmap);
535 		if (index > 3) {
536 			hal_srng_access_end(hal_soc, hal_ring_hdl);
537 			qdf_print("No blocking resource to unblock!");
538 			return -EBUSY;
539 		}
540 	}
541 
542 	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
543 	if (!reo_desc) {
544 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
545 			  "%s: Out of cmd ring entries", __func__);
546 		hal_srng_access_end(hal_soc, hal_ring_hdl);
547 		return -EBUSY;
548 	}
549 
550 	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E,
551 			sizeof(struct reo_unblock_cache));
552 
553 	/*
554 	 * Offsets of descriptor fields defined in HW headers start from
555 	 * the field after TLV header
556 	 */
557 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
558 	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
559 		     sizeof(struct reo_unblock_cache) -
560 		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));
561 
562 	HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER,
563 			      REO_STATUS_REQUIRED, cmd->std.need_status);
564 
565 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE,
566 			      UNBLOCK_TYPE, cmd->u.unblk_cache_params.type);
567 
568 	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
569 		HAL_DESC_64_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE,
570 				      CACHE_BLOCK_RESOURCE_INDEX,
571 				      cmd->u.unblk_cache_params.index);
572 	}
573 
574 	hal_srng_access_end(hal_soc, hal_ring_hdl);
575 	val = reo_desc[CMD_HEADER_DW_OFFSET];
576 	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
577 				     val);
578 }
579 
/**
 * hal_reo_cmd_flush_timeout_list_be() - Queue a FLUSH_TIMEOUT_LIST command
 *	on the REO command ring
 * @hal_ring_hdl: REO command SRNG handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: Command parameters; uses cmd->u.fl_tim_list_params
 *
 * Return: REO command number assigned by HW on success, -EBUSY if the
 * command ring is full.
 */
static inline int
hal_reo_cmd_flush_timeout_list_be(hal_ring_handle_t hal_ring_hdl,
				  hal_soc_handle_t hal_soc_hdl,
				  struct hal_reo_cmd_params *cmd)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E,
			sizeof(struct reo_flush_timeout_list));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Zero everything after the (already valid) uniform command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_timeout_list) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER,
			      REO_STATUS_REQUIRED, cmd->std.need_status);

	/* AC_TIMOUT_LIST: field name (and its spelling) comes from the HW
	 * header definition
	 */
	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST, AC_TIMOUT_LIST,
			      cmd->u.fl_tim_list_params.ac_list);

	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST,
			      MINIMUM_RELEASE_DESC_COUNT,
			      cmd->u.fl_tim_list_params.min_rel_desc);

	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST,
			      MINIMUM_FORWARD_BUF_COUNT,
			      cmd->u.fl_tim_list_params.min_fwd_buf);

	hal_srng_access_end(hal_soc, hal_ring_hdl);

	/* HW wrote the assigned command number into the command header */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
				     val);
}
628 
629 static inline int
630 hal_reo_cmd_update_rx_queue_be(hal_ring_handle_t hal_ring_hdl,
631 			       hal_soc_handle_t hal_soc_hdl,
632 			       struct hal_reo_cmd_params *cmd)
633 {
634 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
635 	uint32_t *reo_desc, val;
636 	struct hal_reo_cmd_update_queue_params *p;
637 
638 	p = &cmd->u.upd_queue_params;
639 
640 	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
641 	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
642 	if (!reo_desc) {
643 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
644 			  "%s: Out of cmd ring entries", __func__);
645 		hal_srng_access_end(hal_soc, hal_ring_hdl);
646 		return -EBUSY;
647 	}
648 
649 	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E,
650 			sizeof(struct reo_update_rx_reo_queue));
651 
652 	/*
653 	 * Offsets of descriptor fields defined in HW headers start from
654 	 * the field after TLV header
655 	 */
656 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
657 	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
658 		     sizeof(struct reo_update_rx_reo_queue) -
659 		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));
660 
661 	HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER,
662 			      REO_STATUS_REQUIRED, cmd->std.need_status);
663 
664 	hal_reo_cmd_set_descr_addr_be(reo_desc, CMD_UPDATE_RX_REO_QUEUE,
665 				      cmd->std.addr_lo, cmd->std.addr_hi);
666 
667 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
668 			      UPDATE_RECEIVE_QUEUE_NUMBER,
669 			      p->update_rx_queue_num);
670 
671 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, UPDATE_VLD,
672 			      p->update_vld);
673 
674 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
675 			      UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
676 			      p->update_assoc_link_desc);
677 
678 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
679 			      UPDATE_DISABLE_DUPLICATE_DETECTION,
680 			      p->update_disable_dup_detect);
681 
682 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
683 			      UPDATE_DISABLE_DUPLICATE_DETECTION,
684 			      p->update_disable_dup_detect);
685 
686 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
687 			      UPDATE_SOFT_REORDER_ENABLE,
688 			      p->update_soft_reorder_enab);
689 
690 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
691 			      UPDATE_AC, p->update_ac);
692 
693 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
694 			      UPDATE_BAR, p->update_bar);
695 
696 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
697 			      UPDATE_BAR, p->update_bar);
698 
699 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
700 			      UPDATE_RTY, p->update_rty);
701 
702 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
703 			      UPDATE_CHK_2K_MODE, p->update_chk_2k_mode);
704 
705 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
706 			      UPDATE_OOR_MODE, p->update_oor_mode);
707 
708 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
709 			      UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size);
710 
711 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
712 			      UPDATE_PN_CHECK_NEEDED,
713 			      p->update_pn_check_needed);
714 
715 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
716 			      UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even);
717 
718 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
719 			      UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven);
720 
721 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
722 			      UPDATE_PN_HANDLING_ENABLE,
723 			      p->update_pn_hand_enab);
724 
725 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
726 			      UPDATE_PN_SIZE, p->update_pn_size);
727 
728 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
729 			      UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu);
730 
731 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
732 			      UPDATE_SVLD, p->update_svld);
733 
734 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
735 			      UPDATE_SSN, p->update_ssn);
736 
737 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
738 			      UPDATE_SEQ_2K_ERROR_DETECTED_FLAG,
739 			      p->update_seq_2k_err_detect);
740 
741 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
742 			      UPDATE_PN_VALID, p->update_pn_valid);
743 
744 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
745 			      UPDATE_PN, p->update_pn);
746 
747 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
748 			      RECEIVE_QUEUE_NUMBER, p->rx_queue_num);
749 
750 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
751 			      VLD, p->vld);
752 
753 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
754 			      ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
755 			      p->assoc_link_desc);
756 
757 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
758 			      DISABLE_DUPLICATE_DETECTION,
759 			      p->disable_dup_detect);
760 
761 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
762 			      SOFT_REORDER_ENABLE, p->soft_reorder_enab);
763 
764 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, AC, p->ac);
765 
766 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
767 			      BAR, p->bar);
768 
769 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
770 			      CHK_2K_MODE, p->chk_2k_mode);
771 
772 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
773 			      RTY, p->rty);
774 
775 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
776 			      OOR_MODE, p->oor_mode);
777 
778 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
779 			      PN_CHECK_NEEDED, p->pn_check_needed);
780 
781 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
782 			      PN_SHALL_BE_EVEN, p->pn_even);
783 
784 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
785 			      PN_SHALL_BE_UNEVEN, p->pn_uneven);
786 
787 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
788 			      PN_HANDLING_ENABLE, p->pn_hand_enab);
789 
790 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
791 			      IGNORE_AMPDU_FLAG, p->ignore_ampdu);
792 
793 	if (p->ba_window_size < 1)
794 		p->ba_window_size = 1;
795 	/*
796 	 * WAR to get 2k exception in Non BA case.
797 	 * Setting window size to 2 to get 2k jump exception
798 	 * when we receive aggregates in Non BA case
799 	 */
800 	if (p->ba_window_size == 1)
801 		p->ba_window_size++;
802 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
803 			      BA_WINDOW_SIZE, p->ba_window_size - 1);
804 
805 	if (p->pn_size == 24)
806 		p->pn_size = PN_SIZE_24;
807 	else if (p->pn_size == 48)
808 		p->pn_size = PN_SIZE_48;
809 	else if (p->pn_size == 128)
810 		p->pn_size = PN_SIZE_128;
811 
812 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
813 			      PN_SIZE, p->pn_size);
814 
815 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
816 			      SVLD, p->svld);
817 
818 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
819 			      SSN, p->ssn);
820 
821 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
822 			      SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect);
823 
824 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
825 			      PN_ERROR_DETECTED_FLAG, p->pn_err_detect);
826 
827 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
828 			      PN_31_0, p->pn_31_0);
829 
830 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
831 			      PN_63_32, p->pn_63_32);
832 
833 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
834 			      PN_95_64, p->pn_95_64);
835 
836 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
837 			      PN_127_96, p->pn_127_96);
838 
839 	if (hif_pm_runtime_get(hal_soc->hif_handle,
840 			       RTPM_ID_HAL_REO_CMD, false) == 0) {
841 		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
842 		hif_pm_runtime_put(hal_soc->hif_handle,
843 				   RTPM_ID_HAL_REO_CMD);
844 	} else {
845 		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
846 		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
847 		hal_srng_inc_flush_cnt(hal_ring_hdl);
848 	}
849 
850 	val = reo_desc[CMD_HEADER_DW_OFFSET];
851 	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
852 				     val);
853 }
854 
855 int hal_reo_send_cmd_be(hal_soc_handle_t hal_soc_hdl,
856 			hal_ring_handle_t  hal_ring_hdl,
857 			enum hal_reo_cmd_type cmd,
858 			void *params)
859 {
860 	struct hal_reo_cmd_params *cmd_params =
861 			(struct hal_reo_cmd_params *)params;
862 	int num = 0;
863 
864 	switch (cmd) {
865 	case CMD_GET_QUEUE_STATS:
866 		num = hal_reo_cmd_queue_stats_be(hal_ring_hdl,
867 						 hal_soc_hdl, cmd_params);
868 		break;
869 	case CMD_FLUSH_QUEUE:
870 		num = hal_reo_cmd_flush_queue_be(hal_ring_hdl,
871 						 hal_soc_hdl, cmd_params);
872 		break;
873 	case CMD_FLUSH_CACHE:
874 		num = hal_reo_cmd_flush_cache_be(hal_ring_hdl,
875 						 hal_soc_hdl, cmd_params);
876 		break;
877 	case CMD_UNBLOCK_CACHE:
878 		num = hal_reo_cmd_unblock_cache_be(hal_ring_hdl,
879 						   hal_soc_hdl, cmd_params);
880 		break;
881 	case CMD_FLUSH_TIMEOUT_LIST:
882 		num = hal_reo_cmd_flush_timeout_list_be(hal_ring_hdl,
883 							hal_soc_hdl,
884 							cmd_params);
885 		break;
886 	case CMD_UPDATE_RX_REO_QUEUE:
887 		num = hal_reo_cmd_update_rx_queue_be(hal_ring_hdl,
888 						     hal_soc_hdl, cmd_params);
889 		break;
890 	default:
891 		hal_err("Invalid REO command type: %d", cmd);
892 		return -EINVAL;
893 	};
894 
895 	return num;
896 }
897 
/**
 * hal_reo_queue_stats_status_be() - Parse a REO_GET_QUEUE_STATS status
 *                                   descriptor into the HAL status struct
 * @ring_desc: REO status ring descriptor (begins with a 32-bit TLV header)
 * @st_handle: output buffer, cast to struct hal_reo_queue_status
 * @hal_soc_hdl: HAL SOC handle
 *
 * Extracts every field of the GET_QUEUE_STATS status TLV - SSN, current
 * index, PN words, timestamps, the 256-bit RX reorder bitmap and the
 * various MPDU/MSDU/error counters - from the hardware descriptor into
 * the caller-supplied status structure.
 */
void
hal_reo_queue_stats_status_be(hal_ring_desc_t ring_desc,
			      void *st_handle,
			      hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_queue_status *st =
		(struct hal_reo_queue_status *)st_handle;
	uint64_t *reo_desc = (uint64_t *)ring_desc;
	uint64_t val;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));

	/* header */
	hal_reo_status_get_header(ring_desc, HAL_REO_QUEUE_STATS_STATUS_TLV,
				  &(st->header), hal_soc);

	/* SSN */
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, SSN)];
	st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, SSN, val);

	/* current index */
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 CURRENT_INDEX)];
	st->curr_idx =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      CURRENT_INDEX, val);

	/* PN bits */
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 PN_31_0)];
	st->pn_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      PN_31_0, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 PN_63_32)];
	st->pn_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      PN_63_32, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 PN_95_64)];
	st->pn_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      PN_95_64, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 PN_127_96)];
	st->pn_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      PN_127_96, val);

	/* timestamps */
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 LAST_RX_ENQUEUE_TIMESTAMP)];
	st->last_rx_enq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      LAST_RX_ENQUEUE_TIMESTAMP, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 LAST_RX_DEQUEUE_TIMESTAMP)];
	st->last_rx_deq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      LAST_RX_DEQUEUE_TIMESTAMP, val);

	/* rx bitmap */
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 RX_BITMAP_31_0)];
	st->rx_bitmap_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_31_0, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 RX_BITMAP_63_32)];
	st->rx_bitmap_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_63_32, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 RX_BITMAP_95_64)];
	st->rx_bitmap_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_95_64, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 RX_BITMAP_127_96)];
	st->rx_bitmap_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_127_96, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 RX_BITMAP_159_128)];
	st->rx_bitmap_159_128 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_159_128, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 RX_BITMAP_191_160)];
	st->rx_bitmap_191_160 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_191_160, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 RX_BITMAP_223_192)];
	st->rx_bitmap_223_192 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_223_192, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 RX_BITMAP_255_224)];
	st->rx_bitmap_255_224 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_255_224, val);

	/* various counts */
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 CURRENT_MPDU_COUNT)];
	st->curr_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      CURRENT_MPDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 CURRENT_MSDU_COUNT)];
	st->curr_msdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      CURRENT_MSDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 TIMEOUT_COUNT)];
	st->fwd_timeout_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      TIMEOUT_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 FORWARD_DUE_TO_BAR_COUNT)];
	st->fwd_bar_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      FORWARD_DUE_TO_BAR_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 DUPLICATE_COUNT)];
	st->dup_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      DUPLICATE_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 FRAMES_IN_ORDER_COUNT)];
	st->frms_in_order_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      FRAMES_IN_ORDER_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 BAR_RECEIVED_COUNT)];
	st->bar_rcvd_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      BAR_RECEIVED_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 MPDU_FRAMES_PROCESSED_COUNT)];
	st->mpdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      MPDU_FRAMES_PROCESSED_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 MSDU_FRAMES_PROCESSED_COUNT)];
	st->msdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      MSDU_FRAMES_PROCESSED_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 TOTAL_PROCESSED_BYTE_COUNT)];
	st->total_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      TOTAL_PROCESSED_BYTE_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 LATE_RECEIVE_MPDU_COUNT)];
	st->late_recv_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      LATE_RECEIVE_MPDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 WINDOW_JUMP_2K)];
	st->win_jump_2k =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      WINDOW_JUMP_2K, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 HOLE_COUNT)];
	st->hole_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      HOLE_COUNT, val);
}
1096 
1097 void
1098 hal_reo_flush_queue_status_be(hal_ring_desc_t ring_desc,
1099 			      void *st_handle,
1100 			      hal_soc_handle_t hal_soc_hdl)
1101 {
1102 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1103 	struct hal_reo_flush_queue_status *st =
1104 			(struct hal_reo_flush_queue_status *)st_handle;
1105 	uint64_t *reo_desc = (uint64_t *)ring_desc;
1106 	uint64_t val;
1107 
1108 	/*
1109 	 * Offsets of descriptor fields defined in HW headers start
1110 	 * from the field after TLV header
1111 	 */
1112 	reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));
1113 
1114 	/* header */
1115 	hal_reo_status_get_header(ring_desc, HAL_REO_FLUSH_QUEUE_STATUS_TLV,
1116 				  &(st->header), hal_soc);
1117 
1118 	/* error bit */
1119 	val = reo_desc[HAL_OFFSET(REO_FLUSH_QUEUE_STATUS,
1120 					 ERROR_DETECTED)];
1121 	st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS, ERROR_DETECTED,
1122 				  val);
1123 }
1124 
1125 void
1126 hal_reo_flush_cache_status_be(hal_ring_desc_t ring_desc,
1127 			      void *st_handle,
1128 			      hal_soc_handle_t hal_soc_hdl)
1129 {
1130 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1131 	struct hal_reo_flush_cache_status *st =
1132 			(struct hal_reo_flush_cache_status *)st_handle;
1133 	uint64_t *reo_desc = (uint64_t *)ring_desc;
1134 	uint64_t val;
1135 
1136 	/*
1137 	 * Offsets of descriptor fields defined in HW headers start
1138 	 * from the field after TLV header
1139 	 */
1140 	reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));
1141 
1142 	/* header */
1143 	hal_reo_status_get_header(ring_desc, HAL_REO_FLUSH_CACHE_STATUS_TLV,
1144 				  &(st->header), hal_soc);
1145 
1146 	/* error bit */
1147 	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS,
1148 					 ERROR_DETECTED)];
1149 	st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS, ERROR_DETECTED,
1150 				  val);
1151 
1152 	/* block error */
1153 	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS,
1154 					 BLOCK_ERROR_DETAILS)];
1155 	st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS,
1156 					BLOCK_ERROR_DETAILS,
1157 					val);
1158 	if (!st->block_error)
1159 		qdf_set_bit(hal_soc->index,
1160 			    (unsigned long *)&hal_soc->reo_res_bitmap);
1161 
1162 	/* cache flush status */
1163 	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS,
1164 					 CACHE_CONTROLLER_FLUSH_STATUS_HIT)];
1165 	st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS,
1166 					CACHE_CONTROLLER_FLUSH_STATUS_HIT,
1167 					val);
1168 
1169 	/* cache flush descriptor type */
1170 	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS,
1171 				  CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)];
1172 	st->cache_flush_status_desc_type =
1173 		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS,
1174 			      CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE,
1175 			      val);
1176 
1177 	/* cache flush count */
1178 	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS,
1179 				  CACHE_CONTROLLER_FLUSH_COUNT)];
1180 	st->cache_flush_cnt =
1181 		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS,
1182 			      CACHE_CONTROLLER_FLUSH_COUNT,
1183 			      val);
1184 }
1185 
1186 void
1187 hal_reo_unblock_cache_status_be(hal_ring_desc_t ring_desc,
1188 				hal_soc_handle_t hal_soc_hdl,
1189 				void *st_handle)
1190 {
1191 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1192 	struct hal_reo_unblk_cache_status *st =
1193 			(struct hal_reo_unblk_cache_status *)st_handle;
1194 	uint64_t *reo_desc = (uint64_t *)ring_desc;
1195 	uint64_t val;
1196 
1197 	/*
1198 	 * Offsets of descriptor fields defined in HW headers start
1199 	 * from the field after TLV header
1200 	 */
1201 	reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));
1202 
1203 	/* header */
1204 	hal_reo_status_get_header(ring_desc, HAL_REO_UNBLK_CACHE_STATUS_TLV,
1205 				  &st->header, hal_soc);
1206 
1207 	/* error bit */
1208 	val = reo_desc[HAL_OFFSET_QW(REO_UNBLOCK_CACHE_STATUS,
1209 				  ERROR_DETECTED)];
1210 	st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS,
1211 				  ERROR_DETECTED,
1212 				  val);
1213 
1214 	/* unblock type */
1215 	val = reo_desc[HAL_OFFSET_QW(REO_UNBLOCK_CACHE_STATUS,
1216 				  UNBLOCK_TYPE)];
1217 	st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS,
1218 					 UNBLOCK_TYPE,
1219 					 val);
1220 
1221 	if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX))
1222 		qdf_clear_bit(hal_soc->index,
1223 			      (unsigned long *)&hal_soc->reo_res_bitmap);
1224 }
1225 
1226 void hal_reo_flush_timeout_list_status_be(hal_ring_desc_t ring_desc,
1227 					  void *st_handle,
1228 					  hal_soc_handle_t hal_soc_hdl)
1229 {
1230 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1231 	struct hal_reo_flush_timeout_list_status *st =
1232 			(struct hal_reo_flush_timeout_list_status *)st_handle;
1233 	uint64_t *reo_desc = (uint64_t *)ring_desc;
1234 	uint64_t val;
1235 
1236 	/*
1237 	 * Offsets of descriptor fields defined in HW headers start
1238 	 * from the field after TLV header
1239 	 */
1240 	reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));
1241 
1242 	/* header */
1243 	hal_reo_status_get_header(ring_desc, HAL_REO_TIMOUT_LIST_STATUS_TLV,
1244 				  &(st->header), hal_soc);
1245 
1246 	/* error bit */
1247 	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_TIMEOUT_LIST_STATUS,
1248 					 ERROR_DETECTED)];
1249 	st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS,
1250 				  ERROR_DETECTED,
1251 				  val);
1252 
1253 	/* list empty */
1254 	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_TIMEOUT_LIST_STATUS,
1255 					 TIMOUT_LIST_EMPTY)];
1256 	st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS,
1257 				       TIMOUT_LIST_EMPTY,
1258 				       val);
1259 
1260 	/* release descriptor count */
1261 	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_TIMEOUT_LIST_STATUS,
1262 					 RELEASE_DESC_COUNT)];
1263 	st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS,
1264 					 RELEASE_DESC_COUNT,
1265 					 val);
1266 
1267 	/* forward buf count */
1268 	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_TIMEOUT_LIST_STATUS,
1269 					 FORWARD_BUF_COUNT)];
1270 	st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS,
1271 					FORWARD_BUF_COUNT,
1272 					val);
1273 }
1274 
1275 void hal_reo_desc_thres_reached_status_be(hal_ring_desc_t ring_desc,
1276 					  void *st_handle,
1277 					  hal_soc_handle_t hal_soc_hdl)
1278 {
1279 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1280 	struct hal_reo_desc_thres_reached_status *st =
1281 			(struct hal_reo_desc_thres_reached_status *)st_handle;
1282 	uint64_t *reo_desc = (uint64_t *)ring_desc;
1283 	uint64_t val;
1284 
1285 	/*
1286 	 * Offsets of descriptor fields defined in HW headers start
1287 	 * from the field after TLV header
1288 	 */
1289 	reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));
1290 
1291 	/* header */
1292 	hal_reo_status_get_header(ring_desc,
1293 				  HAL_REO_DESC_THRES_STATUS_TLV,
1294 				  &(st->header), hal_soc);
1295 
1296 	/* threshold index */
1297 	val = reo_desc[HAL_OFFSET_QW(
1298 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
1299 				 THRESHOLD_INDEX)];
1300 	st->thres_index = HAL_GET_FIELD(
1301 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
1302 				THRESHOLD_INDEX,
1303 				val);
1304 
1305 	/* link desc counters */
1306 	val = reo_desc[HAL_OFFSET_QW(
1307 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
1308 				 LINK_DESCRIPTOR_COUNTER0)];
1309 	st->link_desc_counter0 = HAL_GET_FIELD(
1310 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
1311 				LINK_DESCRIPTOR_COUNTER0,
1312 				val);
1313 
1314 	val = reo_desc[HAL_OFFSET_QW(
1315 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
1316 				 LINK_DESCRIPTOR_COUNTER1)];
1317 	st->link_desc_counter1 = HAL_GET_FIELD(
1318 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
1319 				LINK_DESCRIPTOR_COUNTER1,
1320 				val);
1321 
1322 	val = reo_desc[HAL_OFFSET_QW(
1323 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
1324 				 LINK_DESCRIPTOR_COUNTER2)];
1325 	st->link_desc_counter2 = HAL_GET_FIELD(
1326 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
1327 				LINK_DESCRIPTOR_COUNTER2,
1328 				val);
1329 
1330 	val = reo_desc[HAL_OFFSET_QW(
1331 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
1332 				 LINK_DESCRIPTOR_COUNTER_SUM)];
1333 	st->link_desc_counter_sum = HAL_GET_FIELD(
1334 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
1335 				LINK_DESCRIPTOR_COUNTER_SUM,
1336 				val);
1337 }
1338 
1339 void
1340 hal_reo_rx_update_queue_status_be(hal_ring_desc_t ring_desc,
1341 				  void *st_handle,
1342 				  hal_soc_handle_t hal_soc_hdl)
1343 {
1344 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1345 	struct hal_reo_update_rx_queue_status *st =
1346 			(struct hal_reo_update_rx_queue_status *)st_handle;
1347 	uint64_t *reo_desc = (uint64_t *)ring_desc;
1348 
1349 	/*
1350 	 * Offsets of descriptor fields defined in HW headers start
1351 	 * from the field after TLV header
1352 	 */
1353 	reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));
1354 
1355 	/* header */
1356 	hal_reo_status_get_header(ring_desc,
1357 				  HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV,
1358 				  &(st->header), hal_soc);
1359 }
1360 
1361 uint8_t hal_get_tlv_hdr_size_be(void)
1362 {
1363 	return sizeof(struct tlv_32_hdr);
1364 }
1365