xref: /wlan-dirver/qca-wifi-host-cmn/hal/wifi3.0/be/hal_be_reo.c (revision 2888b71da71bce103343119fa1b31f4a0cee07c8)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "qdf_module.h"
21 #include "hal_hw_headers.h"
22 #include "hal_be_hw_headers.h"
23 #include "hal_reo.h"
24 #include "hal_be_reo.h"
25 #include "hal_be_api.h"
26 
27 uint32_t hal_get_reo_reg_base_offset_be(void)
28 {
29 	return REO_REG_REG_BASE;
30 }
31 
/**
 * hal_reo_qdesc_setup_be - Setup HW REO queue descriptor
 *
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @tid: TID
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
 * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
 * @pn_type: PN type (e.g. HAL_PN_WPA, HAL_PN_WAPI_EVEN, HAL_PN_WAPI_UNEVEN)
 * @vdev_stats_id: vdev stats counter index
 *
 */
void hal_reo_qdesc_setup_be(hal_soc_handle_t hal_soc_hdl, int tid,
			    uint32_t ba_window_size,
			    uint32_t start_seq, void *hw_qdesc_vaddr,
			    qdf_dma_addr_t hw_qdesc_paddr,
			    int pn_type, uint8_t vdev_stats_id)
{
	uint32_t *reo_queue_desc = (uint32_t *)hw_qdesc_vaddr;
	uint32_t *reo_queue_ext_desc;
	uint32_t reg_val;
	uint32_t pn_enable;
	uint32_t pn_size = 0;

	/* NOTE(review): hw_qdesc_paddr is not referenced in this function */

	qdf_mem_zero(hw_qdesc_vaddr, sizeof(struct rx_reo_queue));

	hal_uniform_desc_hdr_setup(reo_queue_desc, HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_desc, UNIFORM_DESCRIPTOR_HEADER,
			   RESERVED_0A, 0xDDBEEF);

	/* This a just a SW meta data and will be copied to REO destination
	 * descriptors indicated by hardware.
	 * TODO: Setting TID in this field. See if we should set something else.
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
			   RECEIVE_QUEUE_NUMBER, tid);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
			   VLD, 1);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
			   ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
			   HAL_RX_LINK_DESC_CNTR);

	/*
	 * Fields DISABLE_DUPLICATE_DETECTION and SOFT_REORDER_ENABLE will be 0
	 */

	/* Map the TID to its WME access category for the AC field */
	reg_val = TID_TO_WME_AC(tid);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, AC, reg_val);

	if (ba_window_size < 1)
		ba_window_size = 1;

	/* WAR to get 2k exception in Non BA case.
	 * Setting window size to 2 to get 2k jump exception
	 * when we receive aggregates in Non BA case
	 */
	ba_window_size = hal_update_non_ba_win_size(tid, ba_window_size);

	/* Set RTY bit for non-BA case. Duplicate detection is currently not
	 * done by HW in non-BA case if RTY bit is not set.
	 * TODO: This is a temporary War and should be removed once HW fix is
	 * made to check and discard duplicates even if RTY bit is not set.
	 */
	if (ba_window_size == 1)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, RTY, 1);

	/* HW field holds (window size - 1) */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, BA_WINDOW_SIZE,
			   ba_window_size - 1);

	/* Derive PN-check enable and PN size from the security (PN) type */
	switch (pn_type) {
	case HAL_PN_WPA:
		pn_enable = 1;
		pn_size = PN_SIZE_48;
		break;
	case HAL_PN_WAPI_EVEN:
	case HAL_PN_WAPI_UNEVEN:
		pn_enable = 1;
		pn_size = PN_SIZE_128;
		break;
	default:
		pn_enable = 0;
		break;
	}

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, PN_CHECK_NEEDED,
			   pn_enable);

	if (pn_type == HAL_PN_WAPI_EVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
				   PN_SHALL_BE_EVEN, 1);
	else if (pn_type == HAL_PN_WAPI_UNEVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
				   PN_SHALL_BE_UNEVEN, 1);

	/*
	 *  TODO: Need to check if PN handling in SW needs to be enabled
	 *  So far this is not a requirement
	 */

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, PN_SIZE,
			   pn_size);

	/* TODO: Check if RX_REO_QUEUE_IGNORE_AMPDU_FLAG need to be set
	 * based on BA window size and/or AMPDU capabilities
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
			   IGNORE_AMPDU_FLAG, 1);

	/* SSN is a 12-bit field; program it only when in range */
	if (start_seq <= 0xfff)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, SSN,
				   start_seq);

	/* TODO: SVLD should be set to 1 if a valid SSN is received in ADDBA,
	 * but REO is not delivering packets if we set it to 1. Need to enable
	 * this once the issue is resolved
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, SVLD, 0);

	hal_update_stats_counter_index(reo_queue_desc, vdev_stats_id);

	/* TODO: Check if we should set start PN for WAPI */

	/* TODO: HW queue descriptors are currently allocated for max BA
	 * window size for all QOS TIDs so that same descriptor can be used
	 * later when ADDBA request is received. This should be changed to
	 * allocate HW queue descriptors based on BA window size being
	 * negotiated (0 for non BA cases), and reallocate when BA window
	 * size changes and also send WMI message to FW to change the REO
	 * queue descriptor in Rx peer entry as part of dp_rx_tid_update.
	 */
	if (tid == HAL_NON_QOS_TID)
		return;

	/* The three extension descriptors immediately follow the base
	 * rx_reo_queue structure in the allocated descriptor memory.
	 */
	reo_queue_ext_desc = (uint32_t *)
		(((struct rx_reo_queue *)reo_queue_desc) + 1);
	qdf_mem_zero(reo_queue_ext_desc, 3 *
		     sizeof(struct rx_reo_queue_ext));
	/* Initialize first reo queue extension descriptor */
	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
				   HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_EXT_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			   UNIFORM_DESCRIPTOR_HEADER, RESERVED_0A,
			   0xADBEEF);
	/* Initialize second reo queue extension descriptor */
	reo_queue_ext_desc = (uint32_t *)
		(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
				   HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_EXT_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			   UNIFORM_DESCRIPTOR_HEADER, RESERVED_0A,
			   0xBDBEEF);
	/* Initialize third reo queue extension descriptor */
	reo_queue_ext_desc = (uint32_t *)
		(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
				   HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_EXT_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			   UNIFORM_DESCRIPTOR_HEADER, RESERVED_0A,
			   0xCDBEEF);
}

qdf_export_symbol(hal_reo_qdesc_setup_be);
201 
202 static void
203 hal_reo_cmd_set_descr_addr_be(uint32_t *reo_desc,
204 			      enum hal_reo_cmd_type type,
205 			      uint32_t paddr_lo,
206 			      uint8_t paddr_hi)
207 {
208 	switch (type) {
209 	case CMD_GET_QUEUE_STATS:
210 		HAL_DESC_64_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS,
211 				      RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
212 		HAL_DESC_64_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS,
213 				      RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
214 		break;
215 	case CMD_FLUSH_QUEUE:
216 		HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_QUEUE,
217 				      FLUSH_DESC_ADDR_31_0, paddr_lo);
218 		HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_QUEUE,
219 				      FLUSH_DESC_ADDR_39_32, paddr_hi);
220 		break;
221 	case CMD_FLUSH_CACHE:
222 		HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
223 				      FLUSH_ADDR_31_0, paddr_lo);
224 		HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
225 				      FLUSH_ADDR_39_32, paddr_hi);
226 		break;
227 	case CMD_UPDATE_RX_REO_QUEUE:
228 		HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
229 				      RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
230 		HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
231 				      RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
232 		break;
233 	default:
234 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
235 			  "%s: Invalid REO command type", __func__);
236 		break;
237 	}
238 }
239 
240 static int
241 hal_reo_cmd_queue_stats_be(hal_ring_handle_t  hal_ring_hdl,
242 			   hal_soc_handle_t hal_soc_hdl,
243 			   struct hal_reo_cmd_params *cmd)
244 {
245 	uint32_t *reo_desc, val;
246 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
247 
248 	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
249 	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
250 	if (!reo_desc) {
251 		hal_srng_access_end_reap(hal_soc, hal_ring_hdl);
252 		hal_warn_rl("Out of cmd ring entries");
253 		return -EBUSY;
254 	}
255 
256 	HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E,
257 			sizeof(struct reo_get_queue_stats));
258 
259 	/*
260 	 * Offsets of descriptor fields defined in HW headers start from
261 	 * the field after TLV header
262 	 */
263 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
264 	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
265 		     sizeof(struct reo_get_queue_stats) -
266 		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));
267 
268 	HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER,
269 			      REO_STATUS_REQUIRED, cmd->std.need_status);
270 
271 	hal_reo_cmd_set_descr_addr_be(reo_desc, CMD_GET_QUEUE_STATS,
272 				      cmd->std.addr_lo,
273 				      cmd->std.addr_hi);
274 
275 	HAL_DESC_64_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS, CLEAR_STATS,
276 			      cmd->u.stats_params.clear);
277 
278 	hal_srng_access_end_v1(hal_soc_hdl, hal_ring_hdl,
279 			       HIF_RTPM_ID_HAL_REO_CMD);
280 
281 	val = reo_desc[CMD_HEADER_DW_OFFSET];
282 	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
283 				     val);
284 }
285 
/**
 * hal_reo_cmd_flush_queue_be() - Post a REO FLUSH_QUEUE command to flush
 * the REO queue descriptor addressed by cmd->std.
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: REO command parameters (descriptor address, blocking options)
 *
 * Return: REO command number on success, -EBUSY if the command ring is full
 */
static int
hal_reo_cmd_flush_queue_be(hal_ring_handle_t hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		/* No free command ring entry; reap without committing HP */
		hal_srng_access_end_reap(hal_soc, hal_ring_hdl);
		hal_warn_rl("Out of cmd ring entries");
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E,
			sizeof(struct reo_flush_queue));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_queue) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER,
			      REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr_be(reo_desc, CMD_FLUSH_QUEUE,
				      cmd->std.addr_lo, cmd->std.addr_hi);

	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_QUEUE,
			      BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH,
			      cmd->u.fl_queue_params.block_use_after_flush);

	/* Blocking resource index is only meaningful when blocking the
	 * descriptor address after the flush.
	 */
	if (cmd->u.fl_queue_params.block_use_after_flush) {
		HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_QUEUE,
				      BLOCK_RESOURCE_INDEX,
				      cmd->u.fl_queue_params.index);
	}

	hal_srng_access_end_v1(hal_soc_hdl, hal_ring_hdl,
			       HIF_RTPM_ID_HAL_REO_CMD);

	/* Return the command number HW/driver stamped in the header dword */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
				     val);
}
337 
/**
 * hal_reo_cmd_flush_cache_be() - Post a REO FLUSH_CACHE command, optionally
 * acquiring one of the four HW cache blocking resources.
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: REO command parameters; cmd->u.fl_cache_params holds the flush
 *	 options (block/forward/invalidate/entire-cache)
 *
 * Return: REO command number on success, -EBUSY if no blocking resource is
 * available or the command ring is full
 */
static int
hal_reo_cmd_flush_cache_be(hal_ring_handle_t hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_flush_cache_params *cp;
	uint8_t index = 0;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	cp = &cmd->u.fl_cache_params;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);

	/* We need a cache block resource for this operation, and REO HW has
	 * only 4 such blocking resources. These resources are managed using
	 * reo_res_bitmap, and we return failure if none is available.
	 */
	if (cp->block_use_after_flush) {
		index = hal_find_zero_bit(hal_soc->reo_res_bitmap);
		if (index > 3) {
			hal_srng_access_end_reap(hal_soc, hal_ring_hdl);
			hal_warn_rl("No blocking resource available!");
			return -EBUSY;
		}
		/* Remember which blocking resource this command holds */
		hal_soc->index = index;
	}

	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		/* Ring full: reap and dump ring state for debugging */
		hal_srng_access_end_reap(hal_soc, hal_ring_hdl);
		hal_srng_dump(hal_ring_handle_to_hal_srng(hal_ring_hdl));
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E,
			sizeof(struct reo_flush_cache));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER,
			      REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr_be(reo_desc, CMD_FLUSH_CACHE,
				      cmd->std.addr_lo, cmd->std.addr_hi);

	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
			      FORWARD_ALL_MPDUS_IN_QUEUE,
			      cp->fwd_mpdus_in_queue);

	/* set it to 0 for now */
	cp->rel_block_index = 0;
	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
			      RELEASE_CACHE_BLOCK_INDEX, cp->rel_block_index);

	if (cp->block_use_after_flush) {
		HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
				      CACHE_BLOCK_RESOURCE_INDEX, index);
	}

	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
			      FLUSH_WITHOUT_INVALIDATE, cp->flush_no_inval);

	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
			      BLOCK_CACHE_USAGE_AFTER_FLUSH,
			      cp->block_use_after_flush);

	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE, FLUSH_ENTIRE_CACHE,
			      cp->flush_entire_cache);

	hal_srng_access_end_v1(hal_soc_hdl, hal_ring_hdl,
			       HIF_RTPM_ID_HAL_REO_CMD);

	/* Return the command number from the header dword */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
				     val);
}
422 
423 static int
424 hal_reo_cmd_unblock_cache_be(hal_ring_handle_t hal_ring_hdl,
425 			     hal_soc_handle_t hal_soc_hdl,
426 			     struct hal_reo_cmd_params *cmd)
427 
428 {
429 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
430 	uint32_t *reo_desc, val;
431 	uint8_t index = 0;
432 
433 	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
434 
435 	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
436 		index = hal_find_one_bit(hal_soc->reo_res_bitmap);
437 		if (index > 3) {
438 			hal_srng_access_end(hal_soc, hal_ring_hdl);
439 			qdf_print("No blocking resource to unblock!");
440 			return -EBUSY;
441 		}
442 	}
443 
444 	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
445 	if (!reo_desc) {
446 		hal_srng_access_end_reap(hal_soc, hal_ring_hdl);
447 		hal_warn_rl("Out of cmd ring entries");
448 		return -EBUSY;
449 	}
450 
451 	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E,
452 			sizeof(struct reo_unblock_cache));
453 
454 	/*
455 	 * Offsets of descriptor fields defined in HW headers start from
456 	 * the field after TLV header
457 	 */
458 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
459 	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
460 		     sizeof(struct reo_unblock_cache) -
461 		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));
462 
463 	HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER,
464 			      REO_STATUS_REQUIRED, cmd->std.need_status);
465 
466 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE,
467 			      UNBLOCK_TYPE, cmd->u.unblk_cache_params.type);
468 
469 	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
470 		HAL_DESC_64_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE,
471 				      CACHE_BLOCK_RESOURCE_INDEX,
472 				      cmd->u.unblk_cache_params.index);
473 	}
474 
475 	hal_srng_access_end(hal_soc, hal_ring_hdl);
476 	val = reo_desc[CMD_HEADER_DW_OFFSET];
477 	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
478 				     val);
479 }
480 
/**
 * hal_reo_cmd_flush_timeout_list_be() - Post a REO FLUSH_TIMEOUT_LIST
 * command for the given AC timeout list.
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: REO command parameters; cmd->u.fl_tim_list_params holds the AC
 *	 list and minimum release/forward thresholds
 *
 * Return: REO command number on success, -EBUSY if the command ring is full
 */
static int
hal_reo_cmd_flush_timeout_list_be(hal_ring_handle_t hal_ring_hdl,
				  hal_soc_handle_t hal_soc_hdl,
				  struct hal_reo_cmd_params *cmd)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		hal_srng_access_end_reap(hal_soc, hal_ring_hdl);
		hal_warn_rl("Out of cmd ring entries");
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E,
			sizeof(struct reo_flush_timeout_list));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_timeout_list) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER,
			      REO_STATUS_REQUIRED, cmd->std.need_status);

	/* AC_TIMOUT_LIST spelling matches the HW header macro name */
	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST, AC_TIMOUT_LIST,
			      cmd->u.fl_tim_list_params.ac_list);

	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST,
			      MINIMUM_RELEASE_DESC_COUNT,
			      cmd->u.fl_tim_list_params.min_rel_desc);

	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST,
			      MINIMUM_FORWARD_BUF_COUNT,
			      cmd->u.fl_tim_list_params.min_fwd_buf);

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	/* Return the command number from the header dword */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
				     val);
}
528 
529 static int
530 hal_reo_cmd_update_rx_queue_be(hal_ring_handle_t hal_ring_hdl,
531 			       hal_soc_handle_t hal_soc_hdl,
532 			       struct hal_reo_cmd_params *cmd)
533 {
534 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
535 	uint32_t *reo_desc, val;
536 	struct hal_reo_cmd_update_queue_params *p;
537 
538 	p = &cmd->u.upd_queue_params;
539 
540 	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
541 	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
542 	if (!reo_desc) {
543 		hal_srng_access_end_reap(hal_soc, hal_ring_hdl);
544 		hal_warn_rl("Out of cmd ring entries");
545 		return -EBUSY;
546 	}
547 
548 	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E,
549 			sizeof(struct reo_update_rx_reo_queue));
550 
551 	/*
552 	 * Offsets of descriptor fields defined in HW headers start from
553 	 * the field after TLV header
554 	 */
555 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
556 	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
557 		     sizeof(struct reo_update_rx_reo_queue) -
558 		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));
559 
560 	HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER,
561 			      REO_STATUS_REQUIRED, cmd->std.need_status);
562 
563 	hal_reo_cmd_set_descr_addr_be(reo_desc, CMD_UPDATE_RX_REO_QUEUE,
564 				      cmd->std.addr_lo, cmd->std.addr_hi);
565 
566 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
567 			      UPDATE_RECEIVE_QUEUE_NUMBER,
568 			      p->update_rx_queue_num);
569 
570 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, UPDATE_VLD,
571 			      p->update_vld);
572 
573 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
574 			      UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
575 			      p->update_assoc_link_desc);
576 
577 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
578 			      UPDATE_DISABLE_DUPLICATE_DETECTION,
579 			      p->update_disable_dup_detect);
580 
581 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
582 			      UPDATE_DISABLE_DUPLICATE_DETECTION,
583 			      p->update_disable_dup_detect);
584 
585 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
586 			      UPDATE_SOFT_REORDER_ENABLE,
587 			      p->update_soft_reorder_enab);
588 
589 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
590 			      UPDATE_AC, p->update_ac);
591 
592 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
593 			      UPDATE_BAR, p->update_bar);
594 
595 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
596 			      UPDATE_BAR, p->update_bar);
597 
598 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
599 			      UPDATE_RTY, p->update_rty);
600 
601 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
602 			      UPDATE_CHK_2K_MODE, p->update_chk_2k_mode);
603 
604 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
605 			      UPDATE_OOR_MODE, p->update_oor_mode);
606 
607 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
608 			      UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size);
609 
610 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
611 			      UPDATE_PN_CHECK_NEEDED,
612 			      p->update_pn_check_needed);
613 
614 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
615 			      UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even);
616 
617 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
618 			      UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven);
619 
620 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
621 			      UPDATE_PN_HANDLING_ENABLE,
622 			      p->update_pn_hand_enab);
623 
624 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
625 			      UPDATE_PN_SIZE, p->update_pn_size);
626 
627 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
628 			      UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu);
629 
630 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
631 			      UPDATE_SVLD, p->update_svld);
632 
633 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
634 			      UPDATE_SSN, p->update_ssn);
635 
636 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
637 			      UPDATE_SEQ_2K_ERROR_DETECTED_FLAG,
638 			      p->update_seq_2k_err_detect);
639 
640 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
641 			      UPDATE_PN_VALID, p->update_pn_valid);
642 
643 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
644 			      UPDATE_PN, p->update_pn);
645 
646 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
647 			      RECEIVE_QUEUE_NUMBER, p->rx_queue_num);
648 
649 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
650 			      VLD, p->vld);
651 
652 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
653 			      ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
654 			      p->assoc_link_desc);
655 
656 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
657 			      DISABLE_DUPLICATE_DETECTION,
658 			      p->disable_dup_detect);
659 
660 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
661 			      SOFT_REORDER_ENABLE, p->soft_reorder_enab);
662 
663 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, AC, p->ac);
664 
665 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
666 			      BAR, p->bar);
667 
668 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
669 			      CHK_2K_MODE, p->chk_2k_mode);
670 
671 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
672 			      RTY, p->rty);
673 
674 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
675 			      OOR_MODE, p->oor_mode);
676 
677 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
678 			      PN_CHECK_NEEDED, p->pn_check_needed);
679 
680 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
681 			      PN_SHALL_BE_EVEN, p->pn_even);
682 
683 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
684 			      PN_SHALL_BE_UNEVEN, p->pn_uneven);
685 
686 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
687 			      PN_HANDLING_ENABLE, p->pn_hand_enab);
688 
689 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
690 			      IGNORE_AMPDU_FLAG, p->ignore_ampdu);
691 
692 	if (p->ba_window_size < 1)
693 		p->ba_window_size = 1;
694 	/*
695 	 * WAR to get 2k exception in Non BA case.
696 	 * Setting window size to 2 to get 2k jump exception
697 	 * when we receive aggregates in Non BA case
698 	 */
699 	if (p->ba_window_size == 1)
700 		p->ba_window_size++;
701 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
702 			      BA_WINDOW_SIZE, p->ba_window_size - 1);
703 
704 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
705 			      PN_SIZE, p->pn_size);
706 
707 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
708 			      SVLD, p->svld);
709 
710 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
711 			      SSN, p->ssn);
712 
713 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
714 			      SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect);
715 
716 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
717 			      PN_ERROR_DETECTED_FLAG, p->pn_err_detect);
718 
719 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
720 			      PN_31_0, p->pn_31_0);
721 
722 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
723 			      PN_63_32, p->pn_63_32);
724 
725 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
726 			      PN_95_64, p->pn_95_64);
727 
728 	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
729 			      PN_127_96, p->pn_127_96);
730 
731 	hal_srng_access_end_v1(hal_soc_hdl, hal_ring_hdl,
732 			       HIF_RTPM_ID_HAL_REO_CMD);
733 
734 	val = reo_desc[CMD_HEADER_DW_OFFSET];
735 	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
736 				     val);
737 }
738 
739 int hal_reo_send_cmd_be(hal_soc_handle_t hal_soc_hdl,
740 			hal_ring_handle_t  hal_ring_hdl,
741 			enum hal_reo_cmd_type cmd,
742 			void *params)
743 {
744 	struct hal_reo_cmd_params *cmd_params =
745 			(struct hal_reo_cmd_params *)params;
746 	int num = 0;
747 
748 	switch (cmd) {
749 	case CMD_GET_QUEUE_STATS:
750 		num = hal_reo_cmd_queue_stats_be(hal_ring_hdl,
751 						 hal_soc_hdl, cmd_params);
752 		break;
753 	case CMD_FLUSH_QUEUE:
754 		num = hal_reo_cmd_flush_queue_be(hal_ring_hdl,
755 						 hal_soc_hdl, cmd_params);
756 		break;
757 	case CMD_FLUSH_CACHE:
758 		num = hal_reo_cmd_flush_cache_be(hal_ring_hdl,
759 						 hal_soc_hdl, cmd_params);
760 		break;
761 	case CMD_UNBLOCK_CACHE:
762 		num = hal_reo_cmd_unblock_cache_be(hal_ring_hdl,
763 						   hal_soc_hdl, cmd_params);
764 		break;
765 	case CMD_FLUSH_TIMEOUT_LIST:
766 		num = hal_reo_cmd_flush_timeout_list_be(hal_ring_hdl,
767 							hal_soc_hdl,
768 							cmd_params);
769 		break;
770 	case CMD_UPDATE_RX_REO_QUEUE:
771 		num = hal_reo_cmd_update_rx_queue_be(hal_ring_hdl,
772 						     hal_soc_hdl, cmd_params);
773 		break;
774 	default:
775 		hal_err("Invalid REO command type: %d", cmd);
776 		return -EINVAL;
777 	};
778 
779 	return num;
780 }
781 
782 void
783 hal_reo_queue_stats_status_be(hal_ring_desc_t ring_desc,
784 			      void *st_handle,
785 			      hal_soc_handle_t hal_soc_hdl)
786 {
787 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
788 	struct hal_reo_queue_status *st =
789 		(struct hal_reo_queue_status *)st_handle;
790 	uint64_t *reo_desc = (uint64_t *)ring_desc;
791 	uint64_t val;
792 
793 	/*
794 	 * Offsets of descriptor fields defined in HW headers start
795 	 * from the field after TLV header
796 	 */
797 	reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));
798 
799 	/* header */
800 	hal_reo_status_get_header(ring_desc, HAL_REO_QUEUE_STATS_STATUS_TLV,
801 				  &(st->header), hal_soc);
802 
803 	/* SSN */
804 	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, SSN)];
805 	st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, SSN, val);
806 
807 	/* current index */
808 	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
809 					 CURRENT_INDEX)];
810 	st->curr_idx =
811 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
812 			      CURRENT_INDEX, val);
813 
814 	/* PN bits */
815 	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
816 					 PN_31_0)];
817 	st->pn_31_0 =
818 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
819 			      PN_31_0, val);
820 
821 	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
822 					 PN_63_32)];
823 	st->pn_63_32 =
824 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
825 			      PN_63_32, val);
826 
827 	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
828 					 PN_95_64)];
829 	st->pn_95_64 =
830 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
831 			      PN_95_64, val);
832 
833 	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
834 					 PN_127_96)];
835 	st->pn_127_96 =
836 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
837 			      PN_127_96, val);
838 
839 	/* timestamps */
840 	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
841 					 LAST_RX_ENQUEUE_TIMESTAMP)];
842 	st->last_rx_enq_tstamp =
843 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
844 			      LAST_RX_ENQUEUE_TIMESTAMP, val);
845 
846 	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
847 					 LAST_RX_DEQUEUE_TIMESTAMP)];
848 	st->last_rx_deq_tstamp =
849 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
850 			      LAST_RX_DEQUEUE_TIMESTAMP, val);
851 
852 	/* rx bitmap */
853 	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
854 					 RX_BITMAP_31_0)];
855 	st->rx_bitmap_31_0 =
856 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
857 			      RX_BITMAP_31_0, val);
858 
859 	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
860 					 RX_BITMAP_63_32)];
861 	st->rx_bitmap_63_32 =
862 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
863 			      RX_BITMAP_63_32, val);
864 
865 	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
866 					 RX_BITMAP_95_64)];
867 	st->rx_bitmap_95_64 =
868 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
869 			      RX_BITMAP_95_64, val);
870 
871 	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
872 					 RX_BITMAP_127_96)];
873 	st->rx_bitmap_127_96 =
874 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
875 			      RX_BITMAP_127_96, val);
876 
877 	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
878 					 RX_BITMAP_159_128)];
879 	st->rx_bitmap_159_128 =
880 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
881 			      RX_BITMAP_159_128, val);
882 
883 	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
884 					 RX_BITMAP_191_160)];
885 	st->rx_bitmap_191_160 =
886 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
887 			      RX_BITMAP_191_160, val);
888 
889 	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
890 					 RX_BITMAP_223_192)];
891 	st->rx_bitmap_223_192 =
892 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
893 			      RX_BITMAP_223_192, val);
894 
895 	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
896 					 RX_BITMAP_255_224)];
897 	st->rx_bitmap_255_224 =
898 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
899 			      RX_BITMAP_255_224, val);
900 
901 	/* various counts */
902 	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
903 					 CURRENT_MPDU_COUNT)];
904 	st->curr_mpdu_cnt =
905 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
906 			      CURRENT_MPDU_COUNT, val);
907 
908 	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
909 					 CURRENT_MSDU_COUNT)];
910 	st->curr_msdu_cnt =
911 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
912 			      CURRENT_MSDU_COUNT, val);
913 
914 	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
915 					 TIMEOUT_COUNT)];
916 	st->fwd_timeout_cnt =
917 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
918 			      TIMEOUT_COUNT, val);
919 
920 	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
921 					 FORWARD_DUE_TO_BAR_COUNT)];
922 	st->fwd_bar_cnt =
923 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
924 			      FORWARD_DUE_TO_BAR_COUNT, val);
925 
926 	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
927 					 DUPLICATE_COUNT)];
928 	st->dup_cnt =
929 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
930 			      DUPLICATE_COUNT, val);
931 
932 	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
933 					 FRAMES_IN_ORDER_COUNT)];
934 	st->frms_in_order_cnt =
935 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
936 			      FRAMES_IN_ORDER_COUNT, val);
937 
938 	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
939 					 BAR_RECEIVED_COUNT)];
940 	st->bar_rcvd_cnt =
941 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
942 			      BAR_RECEIVED_COUNT, val);
943 
944 	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
945 					 MPDU_FRAMES_PROCESSED_COUNT)];
946 	st->mpdu_frms_cnt =
947 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
948 			      MPDU_FRAMES_PROCESSED_COUNT, val);
949 
950 	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
951 					 MSDU_FRAMES_PROCESSED_COUNT)];
952 	st->msdu_frms_cnt =
953 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
954 			      MSDU_FRAMES_PROCESSED_COUNT, val);
955 
956 	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
957 					 TOTAL_PROCESSED_BYTE_COUNT)];
958 	st->total_cnt =
959 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
960 			      TOTAL_PROCESSED_BYTE_COUNT, val);
961 
962 	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
963 					 LATE_RECEIVE_MPDU_COUNT)];
964 	st->late_recv_mpdu_cnt =
965 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
966 			      LATE_RECEIVE_MPDU_COUNT, val);
967 
968 	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
969 					 WINDOW_JUMP_2K)];
970 	st->win_jump_2k =
971 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
972 			      WINDOW_JUMP_2K, val);
973 
974 	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
975 					 HOLE_COUNT)];
976 	st->hole_cnt =
977 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
978 			      HOLE_COUNT, val);
979 }
980 
981 void
982 hal_reo_flush_queue_status_be(hal_ring_desc_t ring_desc,
983 			      void *st_handle,
984 			      hal_soc_handle_t hal_soc_hdl)
985 {
986 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
987 	struct hal_reo_flush_queue_status *st =
988 			(struct hal_reo_flush_queue_status *)st_handle;
989 	uint64_t *reo_desc = (uint64_t *)ring_desc;
990 	uint64_t val;
991 
992 	/*
993 	 * Offsets of descriptor fields defined in HW headers start
994 	 * from the field after TLV header
995 	 */
996 	reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));
997 
998 	/* header */
999 	hal_reo_status_get_header(ring_desc, HAL_REO_FLUSH_QUEUE_STATUS_TLV,
1000 				  &(st->header), hal_soc);
1001 
1002 	/* error bit */
1003 	val = reo_desc[HAL_OFFSET(REO_FLUSH_QUEUE_STATUS,
1004 					 ERROR_DETECTED)];
1005 	st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS, ERROR_DETECTED,
1006 				  val);
1007 }
1008 
1009 void
1010 hal_reo_flush_cache_status_be(hal_ring_desc_t ring_desc,
1011 			      void *st_handle,
1012 			      hal_soc_handle_t hal_soc_hdl)
1013 {
1014 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1015 	struct hal_reo_flush_cache_status *st =
1016 			(struct hal_reo_flush_cache_status *)st_handle;
1017 	uint64_t *reo_desc = (uint64_t *)ring_desc;
1018 	uint64_t val;
1019 
1020 	/*
1021 	 * Offsets of descriptor fields defined in HW headers start
1022 	 * from the field after TLV header
1023 	 */
1024 	reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));
1025 
1026 	/* header */
1027 	hal_reo_status_get_header(ring_desc, HAL_REO_FLUSH_CACHE_STATUS_TLV,
1028 				  &(st->header), hal_soc);
1029 
1030 	/* error bit */
1031 	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS,
1032 					 ERROR_DETECTED)];
1033 	st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS, ERROR_DETECTED,
1034 				  val);
1035 
1036 	/* block error */
1037 	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS,
1038 					 BLOCK_ERROR_DETAILS)];
1039 	st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS,
1040 					BLOCK_ERROR_DETAILS,
1041 					val);
1042 	if (!st->block_error)
1043 		qdf_set_bit(hal_soc->index,
1044 			    (unsigned long *)&hal_soc->reo_res_bitmap);
1045 
1046 	/* cache flush status */
1047 	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS,
1048 					 CACHE_CONTROLLER_FLUSH_STATUS_HIT)];
1049 	st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS,
1050 					CACHE_CONTROLLER_FLUSH_STATUS_HIT,
1051 					val);
1052 
1053 	/* cache flush descriptor type */
1054 	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS,
1055 				  CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)];
1056 	st->cache_flush_status_desc_type =
1057 		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS,
1058 			      CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE,
1059 			      val);
1060 
1061 	/* cache flush count */
1062 	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS,
1063 				  CACHE_CONTROLLER_FLUSH_COUNT)];
1064 	st->cache_flush_cnt =
1065 		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS,
1066 			      CACHE_CONTROLLER_FLUSH_COUNT,
1067 			      val);
1068 }
1069 
1070 void
1071 hal_reo_unblock_cache_status_be(hal_ring_desc_t ring_desc,
1072 				hal_soc_handle_t hal_soc_hdl,
1073 				void *st_handle)
1074 {
1075 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1076 	struct hal_reo_unblk_cache_status *st =
1077 			(struct hal_reo_unblk_cache_status *)st_handle;
1078 	uint64_t *reo_desc = (uint64_t *)ring_desc;
1079 	uint64_t val;
1080 
1081 	/*
1082 	 * Offsets of descriptor fields defined in HW headers start
1083 	 * from the field after TLV header
1084 	 */
1085 	reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));
1086 
1087 	/* header */
1088 	hal_reo_status_get_header(ring_desc, HAL_REO_UNBLK_CACHE_STATUS_TLV,
1089 				  &st->header, hal_soc);
1090 
1091 	/* error bit */
1092 	val = reo_desc[HAL_OFFSET_QW(REO_UNBLOCK_CACHE_STATUS,
1093 				  ERROR_DETECTED)];
1094 	st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS,
1095 				  ERROR_DETECTED,
1096 				  val);
1097 
1098 	/* unblock type */
1099 	val = reo_desc[HAL_OFFSET_QW(REO_UNBLOCK_CACHE_STATUS,
1100 				  UNBLOCK_TYPE)];
1101 	st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS,
1102 					 UNBLOCK_TYPE,
1103 					 val);
1104 
1105 	if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX))
1106 		qdf_clear_bit(hal_soc->index,
1107 			      (unsigned long *)&hal_soc->reo_res_bitmap);
1108 }
1109 
1110 void hal_reo_flush_timeout_list_status_be(hal_ring_desc_t ring_desc,
1111 					  void *st_handle,
1112 					  hal_soc_handle_t hal_soc_hdl)
1113 {
1114 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1115 	struct hal_reo_flush_timeout_list_status *st =
1116 			(struct hal_reo_flush_timeout_list_status *)st_handle;
1117 	uint64_t *reo_desc = (uint64_t *)ring_desc;
1118 	uint64_t val;
1119 
1120 	/*
1121 	 * Offsets of descriptor fields defined in HW headers start
1122 	 * from the field after TLV header
1123 	 */
1124 	reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));
1125 
1126 	/* header */
1127 	hal_reo_status_get_header(ring_desc, HAL_REO_TIMOUT_LIST_STATUS_TLV,
1128 				  &(st->header), hal_soc);
1129 
1130 	/* error bit */
1131 	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_TIMEOUT_LIST_STATUS,
1132 					 ERROR_DETECTED)];
1133 	st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS,
1134 				  ERROR_DETECTED,
1135 				  val);
1136 
1137 	/* list empty */
1138 	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_TIMEOUT_LIST_STATUS,
1139 					 TIMOUT_LIST_EMPTY)];
1140 	st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS,
1141 				       TIMOUT_LIST_EMPTY,
1142 				       val);
1143 
1144 	/* release descriptor count */
1145 	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_TIMEOUT_LIST_STATUS,
1146 					 RELEASE_DESC_COUNT)];
1147 	st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS,
1148 					 RELEASE_DESC_COUNT,
1149 					 val);
1150 
1151 	/* forward buf count */
1152 	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_TIMEOUT_LIST_STATUS,
1153 					 FORWARD_BUF_COUNT)];
1154 	st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS,
1155 					FORWARD_BUF_COUNT,
1156 					val);
1157 }
1158 
1159 void hal_reo_desc_thres_reached_status_be(hal_ring_desc_t ring_desc,
1160 					  void *st_handle,
1161 					  hal_soc_handle_t hal_soc_hdl)
1162 {
1163 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1164 	struct hal_reo_desc_thres_reached_status *st =
1165 			(struct hal_reo_desc_thres_reached_status *)st_handle;
1166 	uint64_t *reo_desc = (uint64_t *)ring_desc;
1167 	uint64_t val;
1168 
1169 	/*
1170 	 * Offsets of descriptor fields defined in HW headers start
1171 	 * from the field after TLV header
1172 	 */
1173 	reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));
1174 
1175 	/* header */
1176 	hal_reo_status_get_header(ring_desc,
1177 				  HAL_REO_DESC_THRES_STATUS_TLV,
1178 				  &(st->header), hal_soc);
1179 
1180 	/* threshold index */
1181 	val = reo_desc[HAL_OFFSET_QW(
1182 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
1183 				 THRESHOLD_INDEX)];
1184 	st->thres_index = HAL_GET_FIELD(
1185 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
1186 				THRESHOLD_INDEX,
1187 				val);
1188 
1189 	/* link desc counters */
1190 	val = reo_desc[HAL_OFFSET_QW(
1191 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
1192 				 LINK_DESCRIPTOR_COUNTER0)];
1193 	st->link_desc_counter0 = HAL_GET_FIELD(
1194 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
1195 				LINK_DESCRIPTOR_COUNTER0,
1196 				val);
1197 
1198 	val = reo_desc[HAL_OFFSET_QW(
1199 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
1200 				 LINK_DESCRIPTOR_COUNTER1)];
1201 	st->link_desc_counter1 = HAL_GET_FIELD(
1202 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
1203 				LINK_DESCRIPTOR_COUNTER1,
1204 				val);
1205 
1206 	val = reo_desc[HAL_OFFSET_QW(
1207 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
1208 				 LINK_DESCRIPTOR_COUNTER2)];
1209 	st->link_desc_counter2 = HAL_GET_FIELD(
1210 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
1211 				LINK_DESCRIPTOR_COUNTER2,
1212 				val);
1213 
1214 	val = reo_desc[HAL_OFFSET_QW(
1215 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
1216 				 LINK_DESCRIPTOR_COUNTER_SUM)];
1217 	st->link_desc_counter_sum = HAL_GET_FIELD(
1218 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
1219 				LINK_DESCRIPTOR_COUNTER_SUM,
1220 				val);
1221 }
1222 
1223 void
1224 hal_reo_rx_update_queue_status_be(hal_ring_desc_t ring_desc,
1225 				  void *st_handle,
1226 				  hal_soc_handle_t hal_soc_hdl)
1227 {
1228 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1229 	struct hal_reo_update_rx_queue_status *st =
1230 			(struct hal_reo_update_rx_queue_status *)st_handle;
1231 	uint64_t *reo_desc = (uint64_t *)ring_desc;
1232 
1233 	/*
1234 	 * Offsets of descriptor fields defined in HW headers start
1235 	 * from the field after TLV header
1236 	 */
1237 	reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));
1238 
1239 	/* header */
1240 	hal_reo_status_get_header(ring_desc,
1241 				  HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV,
1242 				  &(st->header), hal_soc);
1243 }
1244 
1245 uint8_t hal_get_tlv_hdr_size_be(void)
1246 {
1247 	return sizeof(struct tlv_32_hdr);
1248 }
1249