xref: /wlan-dirver/qca-wifi-host-cmn/hal/wifi3.0/hal_reo.c (revision 302a1d9701784af5f4797b1a9fe07ae820b51907)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "hal_api.h"
20 #include "hal_hw_headers.h"
21 #include "hal_reo.h"
22 #include "hal_tx.h"
23 #include "hal_rx.h"
24 #include "qdf_module.h"
25 
/* TODO: See if the following definitions are available in HW headers */
/* Values programmed into the uniform descriptor header: HAL_DESC_REO_OWNED
 * goes into the OWNER field, HAL_REO_QUEUE_DESC/HAL_REO_QUEUE_EXT_DESC into
 * the BUFFER_TYPE field (see hal_uniform_desc_hdr_setup() callers below).
 * NOTE(review): HAL_REO_OWNED is not referenced in this file - confirm it is
 * still needed.
 */
#define HAL_REO_OWNED 4
#define HAL_REO_QUEUE_DESC 8
#define HAL_REO_QUEUE_EXT_DESC 9

/* TODO: Using associated link desc counter 1 for Rx. Check with FW on
 * how these counters are assigned
 */
#define HAL_RX_LINK_DESC_CNTR 1
/* TODO: Following definition should be from HW headers */
#define HAL_DESC_REO_OWNED 4
37 
38 /**
39  * hal_uniform_desc_hdr_setup - setup reo_queue_ext descritpro
40  * @owner - owner info
41  * @buffer_type - buffer type
42  */
43 static inline void hal_uniform_desc_hdr_setup(uint32_t *desc, uint32_t owner,
44 	uint32_t buffer_type)
45 {
46 	HAL_DESC_SET_FIELD(desc, UNIFORM_DESCRIPTOR_HEADER_0, OWNER,
47 		owner);
48 	HAL_DESC_SET_FIELD(desc, UNIFORM_DESCRIPTOR_HEADER_0, BUFFER_TYPE,
49 		buffer_type);
50 }
51 
#ifndef TID_TO_WME_AC
#define WME_AC_BE 0 /* best effort */
#define WME_AC_BK 1 /* background */
#define WME_AC_VI 2 /* video */
#define WME_AC_VO 3 /* voice */

/*
 * Map an 802.11 TID (0-7) to its WME access category:
 * TIDs 0,3 -> Best Effort; 1,2 -> Background; 4,5 -> Video; 6,7 -> Voice.
 */
#define TID_TO_WME_AC(_tid) ( \
	(((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
	(((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \
	(((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
	WME_AC_VO)
#endif
/* Pseudo-TID used for non-QoS traffic (valid QoS TIDs are 0-15) */
#define HAL_NON_QOS_TID 16
65 
66 /**
67  * hal_reo_qdesc_setup - Setup HW REO queue descriptor
68  *
69  * @hal_soc: Opaque HAL SOC handle
70  * @ba_window_size: BlockAck window size
71  * @start_seq: Starting sequence number
72  * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
73  * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
74  * @tid: TID
75  *
76  */
77 void hal_reo_qdesc_setup(void *hal_soc, int tid, uint32_t ba_window_size,
78 	uint32_t start_seq, void *hw_qdesc_vaddr, qdf_dma_addr_t hw_qdesc_paddr,
79 	int pn_type)
80 {
81 	uint32_t *reo_queue_desc = (uint32_t *)hw_qdesc_vaddr;
82 	uint32_t *reo_queue_ext_desc;
83 	uint32_t reg_val;
84 	uint32_t pn_enable;
85 	uint32_t pn_size = 0;
86 
87 	qdf_mem_zero(hw_qdesc_vaddr, sizeof(struct rx_reo_queue));
88 
89 	hal_uniform_desc_hdr_setup(reo_queue_desc, HAL_DESC_REO_OWNED,
90 		HAL_REO_QUEUE_DESC);
91 	/* Fixed pattern in reserved bits for debugging */
92 	HAL_DESC_SET_FIELD(reo_queue_desc, UNIFORM_DESCRIPTOR_HEADER_0,
93 		RESERVED_0A, 0xDDBEEF);
94 
95 	/* This a just a SW meta data and will be copied to REO destination
96 	 * descriptors indicated by hardware.
97 	 * TODO: Setting TID in this field. See if we should set something else.
98 	 */
99 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_1,
100 		RECEIVE_QUEUE_NUMBER, tid);
101 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
102 		VLD, 1);
103 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
104 		ASSOCIATED_LINK_DESCRIPTOR_COUNTER, HAL_RX_LINK_DESC_CNTR);
105 
106 	/*
107 	 * Fields DISABLE_DUPLICATE_DETECTION and SOFT_REORDER_ENABLE will be 0
108 	 */
109 
110 	reg_val = TID_TO_WME_AC(tid);
111 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, AC, reg_val);
112 
113 	if (ba_window_size < 1)
114 		ba_window_size = 1;
115 
116 	/* Set RTY bit for non-BA case. Duplicate detection is currently not
117 	 * done by HW in non-BA case if RTY bit is not set.
118 	 * TODO: This is a temporary War and should be removed once HW fix is
119 	 * made to check and discard duplicates even if RTY bit is not set.
120 	 */
121 	if (ba_window_size == 1)
122 		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, RTY, 1);
123 
124 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, BA_WINDOW_SIZE,
125 		ba_window_size - 1);
126 
127 	switch (pn_type) {
128 	case HAL_PN_WPA:
129 		pn_enable = 1;
130 		pn_size = PN_SIZE_48;
131 		break;
132 	case HAL_PN_WAPI_EVEN:
133 	case HAL_PN_WAPI_UNEVEN:
134 		pn_enable = 1;
135 		pn_size = PN_SIZE_128;
136 		break;
137 	default:
138 		pn_enable = 0;
139 		break;
140 	}
141 
142 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_CHECK_NEEDED,
143 		pn_enable);
144 
145 	if (pn_type == HAL_PN_WAPI_EVEN)
146 		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
147 			PN_SHALL_BE_EVEN, 1);
148 	else if (pn_type == HAL_PN_WAPI_UNEVEN)
149 		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
150 			PN_SHALL_BE_UNEVEN, 1);
151 
152 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_HANDLING_ENABLE,
153 		pn_enable);
154 
155 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_SIZE,
156 		pn_size);
157 
158 	/* TODO: Check if RX_REO_QUEUE_2_IGNORE_AMPDU_FLAG need to be set
159 	 * based on BA window size and/or AMPDU capabilities
160 	 */
161 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
162 		IGNORE_AMPDU_FLAG, 1);
163 
164 	if (start_seq <= 0xfff)
165 		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SSN,
166 			start_seq);
167 
168 	/* TODO: SVLD should be set to 1 if a valid SSN is received in ADDBA,
169 	 * but REO is not delivering packets if we set it to 1. Need to enable
170 	 * this once the issue is resolved
171 	 */
172 	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SVLD, 0);
173 
174 	/* TODO: Check if we should set start PN for WAPI */
175 
176 #ifdef notyet
177 	/* Setup first queue extension if BA window size is more than 1 */
178 	if (ba_window_size > 1) {
179 		reo_queue_ext_desc =
180 			(uint32_t *)(((struct rx_reo_queue *)reo_queue_desc) +
181 			1);
182 		qdf_mem_zero(reo_queue_ext_desc,
183 			sizeof(struct rx_reo_queue_ext));
184 		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
185 			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
186 	}
187 	/* Setup second queue extension if BA window size is more than 105 */
188 	if (ba_window_size > 105) {
189 		reo_queue_ext_desc = (uint32_t *)
190 			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
191 		qdf_mem_zero(reo_queue_ext_desc,
192 			sizeof(struct rx_reo_queue_ext));
193 		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
194 			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
195 	}
196 	/* Setup third queue extension if BA window size is more than 210 */
197 	if (ba_window_size > 210) {
198 		reo_queue_ext_desc = (uint32_t *)
199 			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
200 		qdf_mem_zero(reo_queue_ext_desc,
201 			sizeof(struct rx_reo_queue_ext));
202 		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
203 			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
204 	}
205 #else
206 	/* TODO: HW queue descriptors are currently allocated for max BA
207 	 * window size for all QOS TIDs so that same descriptor can be used
208 	 * later when ADDBA request is recevied. This should be changed to
209 	 * allocate HW queue descriptors based on BA window size being
210 	 * negotiated (0 for non BA cases), and reallocate when BA window
211 	 * size changes and also send WMI message to FW to change the REO
212 	 * queue descriptor in Rx peer entry as part of dp_rx_tid_update.
213 	 */
214 	if (tid != HAL_NON_QOS_TID) {
215 		reo_queue_ext_desc = (uint32_t *)
216 			(((struct rx_reo_queue *)reo_queue_desc) + 1);
217 		qdf_mem_zero(reo_queue_ext_desc, 3 *
218 			sizeof(struct rx_reo_queue_ext));
219 		/* Initialize first reo queue extension descriptor */
220 		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
221 			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
222 		/* Fixed pattern in reserved bits for debugging */
223 		HAL_DESC_SET_FIELD(reo_queue_ext_desc,
224 			UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xADBEEF);
225 		/* Initialize second reo queue extension descriptor */
226 		reo_queue_ext_desc = (uint32_t *)
227 			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
228 		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
229 			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
230 		/* Fixed pattern in reserved bits for debugging */
231 		HAL_DESC_SET_FIELD(reo_queue_ext_desc,
232 			UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xBDBEEF);
233 		/* Initialize third reo queue extension descriptor */
234 		reo_queue_ext_desc = (uint32_t *)
235 			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
236 		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
237 			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
238 		/* Fixed pattern in reserved bits for debugging */
239 		HAL_DESC_SET_FIELD(reo_queue_ext_desc,
240 			UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xCDBEEF);
241 	}
242 #endif
243 }
244 qdf_export_symbol(hal_reo_qdesc_setup);
245 
246 /**
247  * hal_get_ba_aging_timeout - Get BA Aging timeout
248  *
249  * @hal_soc: Opaque HAL SOC handle
250  * @ac: Access category
251  * @value: window size to get
252  */
253 void hal_get_ba_aging_timeout(void *hal_soc, uint8_t ac,
254 			      uint32_t *value)
255 {
256 	struct hal_soc *soc = (struct hal_soc *)hal_soc;
257 
258 	switch (ac) {
259 	case WME_AC_BE:
260 		*value = HAL_REG_READ(soc,
261 				      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
262 				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
263 		break;
264 	case WME_AC_BK:
265 		*value = HAL_REG_READ(soc,
266 				      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
267 				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
268 		break;
269 	case WME_AC_VI:
270 		*value = HAL_REG_READ(soc,
271 				      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
272 				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
273 		break;
274 	case WME_AC_VO:
275 		*value = HAL_REG_READ(soc,
276 				      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
277 				      SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
278 		break;
279 	default:
280 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
281 			  "Invalid AC: %d\n", ac);
282 	}
283 }
284 
285 qdf_export_symbol(hal_get_ba_aging_timeout);
286 
287 /**
288  * hal_set_ba_aging_timeout - Set BA Aging timeout
289  *
290  * @hal_soc: Opaque HAL SOC handle
291  * @ac: Access category
292  * ac: 0 - Background, 1 - Best Effort, 2 - Video, 3 - Voice
293  * @value: Input value to set
294  */
295 void hal_set_ba_aging_timeout(void *hal_soc, uint8_t ac,
296 			      uint32_t value)
297 {
298 	struct hal_soc *soc = (struct hal_soc *)hal_soc;
299 
300 	switch (ac) {
301 	case WME_AC_BE:
302 		HAL_REG_WRITE(soc,
303 			      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
304 			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
305 			      value * 1000);
306 		break;
307 	case WME_AC_BK:
308 		HAL_REG_WRITE(soc,
309 			      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
310 			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
311 			      value * 1000);
312 		break;
313 	case WME_AC_VI:
314 		HAL_REG_WRITE(soc,
315 			      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
316 			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
317 			      value * 1000);
318 		break;
319 	case WME_AC_VO:
320 		HAL_REG_WRITE(soc,
321 			      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
322 			      SEQ_WCSS_UMAC_REO_REG_OFFSET),
323 			      value * 1000);
324 		break;
325 	default:
326 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
327 			  "Invalid AC: %d\n", ac);
328 	}
329 }
330 
331 qdf_export_symbol(hal_set_ba_aging_timeout);
332 
/* REO HW provides 4 cache blocking resources, tracked as a 4-bit bitmap */
#define BLOCK_RES_MASK		0xF
/*
 * hal_find_one_bit() - position (0-3) of the least significant set bit
 * of @x within BLOCK_RES_MASK. Returns 0xFF (i.e. (uint8_t)-1) when no
 * bit inside the mask is set.
 */
static inline uint8_t hal_find_one_bit(uint8_t x)
{
	uint8_t lsb = (x & (~x + 1)) & BLOCK_RES_MASK;
	uint8_t n = 0;

	while (lsb) {
		n++;
		lsb >>= 1;
	}

	return n - 1;
}
344 
345 static inline uint8_t hal_find_zero_bit(uint8_t x)
346 {
347 	uint8_t y = (~x & (x+1)) & BLOCK_RES_MASK;
348 	uint8_t pos;
349 
350 	for (pos = 0; y; y >>= 1)
351 		pos++;
352 
353 	return pos-1;
354 }
355 
/**
 * hal_reo_cmd_set_descr_addr() - Program the 40-bit descriptor address
 * into a REO command descriptor
 * @reo_desc: pointer to the command descriptor body (past the TLV header)
 * @type: REO command type; selects which command's address fields to set
 * @paddr_lo: lower 32 bits of the descriptor physical address
 * @paddr_hi: upper 8 bits (bits 39:32) of the descriptor physical address
 *
 * Invalid command types are logged and ignored.
 */
inline void hal_reo_cmd_set_descr_addr(uint32_t *reo_desc,
				       enum hal_reo_cmd_type type,
				       uint32_t paddr_lo,
				       uint8_t paddr_hi)
{
	switch (type) {
	case CMD_GET_QUEUE_STATS:
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_1,
			RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2,
				    RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_1,
					FLUSH_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
					FLUSH_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_CACHE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_1,
					FLUSH_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
					FLUSH_ADDR_39_32, paddr_hi);
		break;
	case CMD_UPDATE_RX_REO_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_1,
					RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
					RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Invalid REO command type", __func__);
		break;
	}
}
392 
/**
 * hal_reo_cmd_queue_stats() - Submit a GET_QUEUE_STATS command to REO
 * @reo_ring: REO command SRNG (opaque ring handle)
 * @soc: HAL SOC handle
 * @cmd: command parameters; queue descriptor address in cmd->std and the
 *       clear-after-read flag in cmd->u.stats_params.clear
 *
 * Return: the REO command number assigned to this command, or -EBUSY if
 * no command ring entry is available.
 */
inline int hal_reo_cmd_queue_stats(void *reo_ring, struct hal_soc *soc,
				    struct hal_reo_cmd_params *cmd)

{
	uint32_t *reo_desc, val;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E,
			     sizeof(struct reo_get_queue_stats));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Zero everything after the uniform command header */
	qdf_mem_zero((void *)(reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_get_queue_stats) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_GET_QUEUE_STATS,
				   cmd->std.addr_lo,
				   cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2, CLEAR_STATS,
			      cmd->u.stats_params.clear);

	hal_srng_access_end(soc, reo_ring);

	/* Return the command number HW/SW uses to match the status later */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
434 qdf_export_symbol(hal_reo_cmd_queue_stats);
435 
/**
 * hal_reo_cmd_flush_queue() - Submit a FLUSH_QUEUE command to REO
 * @reo_ring: REO command SRNG (opaque ring handle)
 * @soc: HAL SOC handle
 * @cmd: command parameters; queue descriptor address in cmd->std,
 *       blocking options in cmd->u.fl_queue_params
 *
 * Return: the REO command number assigned to this command, or -EBUSY if
 * no command ring entry is available.
 */
inline int hal_reo_cmd_flush_queue(void *reo_ring, struct hal_soc *soc,
				    struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E,
			     sizeof(struct reo_flush_queue));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Zero everything after the uniform command header */
	qdf_mem_zero((void *)(reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_queue) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_QUEUE, cmd->std.addr_lo,
		cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
		BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH,
		cmd->u.fl_queue_params.block_use_after_flush);

	/* Block resource index is only meaningful when blocking is on */
	if (cmd->u.fl_queue_params.block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
			BLOCK_RESOURCE_INDEX, cmd->u.fl_queue_params.index);
	}

	hal_srng_access_end(soc, reo_ring);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
480 qdf_export_symbol(hal_reo_cmd_flush_queue);
481 
/**
 * hal_reo_cmd_flush_cache() - Submit a FLUSH_CACHE command to REO
 * @reo_ring: REO command SRNG (opaque ring handle)
 * @soc: HAL SOC handle
 * @cmd: command parameters in cmd->u.fl_cache_params (flush-all,
 *       flush-without-invalidate, blocking options)
 *
 * Return: the REO command number assigned to this command, or -EBUSY if
 * either no blocking resource is available (when blocking was requested)
 * or the command ring is full.
 */
inline int hal_reo_cmd_flush_cache(void *reo_ring, struct hal_soc *soc,
				    struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_flush_cache_params *cp;
	uint8_t index = 0;

	cp = &cmd->u.fl_cache_params;

	hal_srng_access_start(soc, reo_ring);

	/* We need a cache block resource for this operation, and REO HW has
	 * only 4 such blocking resources. These resources are managed using
	 * reo_res_bitmap, and we return failure if none is available.
	 */
	if (cp->block_use_after_flush) {
		index = hal_find_zero_bit(soc->reo_res_bitmap);
		if (index > 3) {
			qdf_print("%s, No blocking resource available!",
				  __func__);
			hal_srng_access_end(soc, reo_ring);
			return -EBUSY;
		}
		/* NOTE(review): the chosen index is recorded but the bit in
		 * reo_res_bitmap is not set here - presumably done when the
		 * command status is processed; confirm with status handler.
		 */
		soc->index = index;
	}

	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		hal_srng_dump(reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E,
			     sizeof(struct reo_flush_cache));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Zero everything after the uniform command header */
	qdf_mem_zero((void *)(reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_CACHE, cmd->std.addr_lo,
				   cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		FORWARD_ALL_MPDUS_IN_QUEUE, cp->fwd_mpdus_in_queue);

	/* set it to 0 for now */
	cp->rel_block_index = 0;
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		RELEASE_CACHE_BLOCK_INDEX, cp->rel_block_index);

	if (cp->block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			CACHE_BLOCK_RESOURCE_INDEX, index);
	}

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		FLUSH_WITHOUT_INVALIDATE, cp->flush_no_inval);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		BLOCK_CACHE_USAGE_AFTER_FLUSH, cp->block_use_after_flush);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, FLUSH_ENTIRE_CACHE,
		cp->flush_all);

	hal_srng_access_end(soc, reo_ring);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
560 qdf_export_symbol(hal_reo_cmd_flush_cache);
561 
/**
 * hal_reo_cmd_unblock_cache() - Submit an UNBLOCK_CACHE command to REO
 * @reo_ring: REO command SRNG (opaque ring handle)
 * @soc: HAL SOC handle
 * @cmd: command parameters in cmd->u.unblk_cache_params (unblock type
 *       and, for UNBLOCK_RES_INDEX, the resource index to unblock)
 *
 * Return: the REO command number assigned to this command, or -EBUSY if
 * no blocking resource is in use (nothing to unblock) or the command
 * ring is full.
 */
inline int hal_reo_cmd_unblock_cache(void *reo_ring, struct hal_soc *soc,
				      struct hal_reo_cmd_params *cmd)

{
	uint32_t *reo_desc, val;
	uint8_t index = 0;

	hal_srng_access_start(soc, reo_ring);

	/* `index` is only used to verify that some blocking resource is
	 * currently held; the index actually programmed below comes from
	 * cmd->u.unblk_cache_params.index.
	 */
	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		index = hal_find_one_bit(soc->reo_res_bitmap);
		if (index > 3) {
			hal_srng_access_end(soc, reo_ring);
			qdf_print("%s: No blocking resource to unblock!",
				  __func__);
			return -EBUSY;
		}
	}

	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E,
			     sizeof(struct reo_unblock_cache));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Zero everything after the uniform command header */
	qdf_mem_zero((void *)(reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_unblock_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
		UNBLOCK_TYPE, cmd->u.unblk_cache_params.type);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
			CACHE_BLOCK_RESOURCE_INDEX,
			cmd->u.unblk_cache_params.index);
	}

	hal_srng_access_end(soc, reo_ring);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
616 qdf_export_symbol(hal_reo_cmd_unblock_cache);
617 
/**
 * hal_reo_cmd_flush_timeout_list() - Submit a FLUSH_TIMEOUT_LIST command
 * @reo_ring: REO command SRNG (opaque ring handle)
 * @soc: HAL SOC handle
 * @cmd: command parameters in cmd->u.fl_tim_list_params (AC list to
 *       flush, minimum descriptor release / buffer forward counts)
 *
 * Return: the REO command number assigned to this command, or -EBUSY if
 * no command ring entry is available.
 */
inline int hal_reo_cmd_flush_timeout_list(void *reo_ring, struct hal_soc *soc,
					   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E,
			     sizeof(struct reo_flush_timeout_list));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Zero everything after the uniform command header */
	qdf_mem_zero((void *)(reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_timeout_list) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	/* AC_TIMOUT_LIST (sic) is the HW header's spelling of the field */
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_1, AC_TIMOUT_LIST,
		cmd->u.fl_tim_list_params.ac_list);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
		MINIMUM_RELEASE_DESC_COUNT,
		cmd->u.fl_tim_list_params.min_rel_desc);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
		MINIMUM_FORWARD_BUF_COUNT,
		cmd->u.fl_tim_list_params.min_fwd_buf);

	hal_srng_access_end(soc, reo_ring);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
661 qdf_export_symbol(hal_reo_cmd_flush_timeout_list);
662 
663 inline int hal_reo_cmd_update_rx_queue(void *reo_ring, struct hal_soc *soc,
664 					struct hal_reo_cmd_params *cmd)
665 {
666 	uint32_t *reo_desc, val;
667 	struct hal_reo_cmd_update_queue_params *p;
668 
669 	p = &cmd->u.upd_queue_params;
670 
671 	hal_srng_access_start(soc, reo_ring);
672 	reo_desc = hal_srng_src_get_next(soc, reo_ring);
673 	if (!reo_desc) {
674 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
675 			"%s: Out of cmd ring entries", __func__);
676 		hal_srng_access_end(soc, reo_ring);
677 		return -EBUSY;
678 	}
679 
680 	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E,
681 			     sizeof(struct reo_update_rx_reo_queue));
682 
683 	/* Offsets of descriptor fields defined in HW headers start from
684 	 * the field after TLV header */
685 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
686 	qdf_mem_zero((void *)(reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
687 		     sizeof(struct reo_update_rx_reo_queue) -
688 		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));
689 
690 	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
691 		REO_STATUS_REQUIRED, cmd->std.need_status);
692 
693 	hal_reo_cmd_set_descr_addr(reo_desc, CMD_UPDATE_RX_REO_QUEUE,
694 		cmd->std.addr_lo, cmd->std.addr_hi);
695 
696 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
697 		UPDATE_RECEIVE_QUEUE_NUMBER, p->update_rx_queue_num);
698 
699 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, UPDATE_VLD,
700 			      p->update_vld);
701 
702 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
703 		UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
704 		p->update_assoc_link_desc);
705 
706 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
707 		UPDATE_DISABLE_DUPLICATE_DETECTION,
708 		p->update_disable_dup_detect);
709 
710 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
711 		UPDATE_DISABLE_DUPLICATE_DETECTION,
712 		p->update_disable_dup_detect);
713 
714 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
715 		UPDATE_SOFT_REORDER_ENABLE,
716 		p->update_soft_reorder_enab);
717 
718 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
719 		UPDATE_AC, p->update_ac);
720 
721 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
722 		UPDATE_BAR, p->update_bar);
723 
724 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
725 		UPDATE_BAR, p->update_bar);
726 
727 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
728 		UPDATE_RTY, p->update_rty);
729 
730 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
731 		UPDATE_CHK_2K_MODE, p->update_chk_2k_mode);
732 
733 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
734 		UPDATE_OOR_MODE, p->update_oor_mode);
735 
736 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
737 		UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size);
738 
739 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
740 		UPDATE_PN_CHECK_NEEDED, p->update_pn_check_needed);
741 
742 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
743 		UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even);
744 
745 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
746 		UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven);
747 
748 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
749 		UPDATE_PN_HANDLING_ENABLE, p->update_pn_hand_enab);
750 
751 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
752 		UPDATE_PN_SIZE, p->update_pn_size);
753 
754 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
755 		UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu);
756 
757 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
758 		UPDATE_SVLD, p->update_svld);
759 
760 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
761 		UPDATE_SSN, p->update_ssn);
762 
763 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
764 		UPDATE_SEQ_2K_ERROR_DETECTED_FLAG,
765 		p->update_seq_2k_err_detect);
766 
767 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
768 		UPDATE_PN_VALID, p->update_pn_valid);
769 
770 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
771 		UPDATE_PN, p->update_pn);
772 
773 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
774 		RECEIVE_QUEUE_NUMBER, p->rx_queue_num);
775 
776 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
777 		VLD, p->vld);
778 
779 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
780 		ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
781 		p->assoc_link_desc);
782 
783 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
784 		DISABLE_DUPLICATE_DETECTION, p->disable_dup_detect);
785 
786 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
787 		SOFT_REORDER_ENABLE, p->soft_reorder_enab);
788 
789 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, AC, p->ac);
790 
791 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
792 		BAR, p->bar);
793 
794 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
795 		CHK_2K_MODE, p->chk_2k_mode);
796 
797 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
798 		RTY, p->rty);
799 
800 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
801 		OOR_MODE, p->oor_mode);
802 
803 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
804 		PN_CHECK_NEEDED, p->pn_check_needed);
805 
806 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
807 		PN_SHALL_BE_EVEN, p->pn_even);
808 
809 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
810 		PN_SHALL_BE_UNEVEN, p->pn_uneven);
811 
812 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
813 		PN_HANDLING_ENABLE, p->pn_hand_enab);
814 
815 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
816 		IGNORE_AMPDU_FLAG, p->ignore_ampdu);
817 
818 	if (p->ba_window_size < 1)
819 		p->ba_window_size = 1;
820 	/*
821 	 * WAR to get 2k exception in Non BA case.
822 	 * Setting window size to 2 to get 2k jump exception
823 	 * when we receive aggregates in Non BA case
824 	 */
825 	if (p->ba_window_size == 1)
826 		p->ba_window_size++;
827 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
828 		BA_WINDOW_SIZE, p->ba_window_size - 1);
829 
830 	if (p->pn_size == 24)
831 		p->pn_size = PN_SIZE_24;
832 	else if (p->pn_size == 48)
833 		p->pn_size = PN_SIZE_48;
834 	else if (p->pn_size == 128)
835 		p->pn_size = PN_SIZE_128;
836 
837 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
838 		PN_SIZE, p->pn_size);
839 
840 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
841 		SVLD, p->svld);
842 
843 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
844 		SSN, p->ssn);
845 
846 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
847 		SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect);
848 
849 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
850 		PN_ERROR_DETECTED_FLAG, p->pn_err_detect);
851 
852 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_5,
853 		PN_31_0, p->pn_31_0);
854 
855 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_6,
856 		PN_63_32, p->pn_63_32);
857 
858 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_7,
859 		PN_95_64, p->pn_95_64);
860 
861 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_8,
862 		PN_127_96, p->pn_127_96);
863 
864 	hal_srng_access_end(soc, reo_ring);
865 	val = reo_desc[CMD_HEADER_DW_OFFSET];
866 	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
867 				     val);
868 }
869 qdf_export_symbol(hal_reo_cmd_update_rx_queue);
870 
871 inline void hal_reo_queue_stats_status(uint32_t *reo_desc,
872 			     struct hal_reo_queue_status *st,
873 			     struct hal_soc *hal_soc)
874 {
875 	uint32_t val;
876 
877 	/* Offsets of descriptor fields defined in HW headers start
878 	 * from the field after TLV header */
879 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
880 
881 	/* header */
882 	hal_reo_status_get_header(reo_desc, HAL_REO_QUEUE_STATS_STATUS_TLV,
883 					&(st->header), hal_soc);
884 
885 	/* SSN */
886 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2, SSN)];
887 	st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2, SSN, val);
888 
889 	/* current index */
890 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2,
891 					 CURRENT_INDEX)];
892 	st->curr_idx =
893 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2,
894 			      CURRENT_INDEX, val);
895 
896 	/* PN bits */
897 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_3,
898 					 PN_31_0)];
899 	st->pn_31_0 =
900 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_3,
901 			      PN_31_0, val);
902 
903 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_4,
904 					 PN_63_32)];
905 	st->pn_63_32 =
906 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_4,
907 			      PN_63_32, val);
908 
909 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_5,
910 					 PN_95_64)];
911 	st->pn_95_64 =
912 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_5,
913 			      PN_95_64, val);
914 
915 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_6,
916 					 PN_127_96)];
917 	st->pn_127_96 =
918 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_6,
919 			      PN_127_96, val);
920 
921 	/* timestamps */
922 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_7,
923 					 LAST_RX_ENQUEUE_TIMESTAMP)];
924 	st->last_rx_enq_tstamp =
925 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_7,
926 			      LAST_RX_ENQUEUE_TIMESTAMP, val);
927 
928 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_8,
929 					 LAST_RX_DEQUEUE_TIMESTAMP)];
930 	st->last_rx_deq_tstamp =
931 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_8,
932 			      LAST_RX_DEQUEUE_TIMESTAMP, val);
933 
934 	/* rx bitmap */
935 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_9,
936 					 RX_BITMAP_31_0)];
937 	st->rx_bitmap_31_0 =
938 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_9,
939 			      RX_BITMAP_31_0, val);
940 
941 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_10,
942 					 RX_BITMAP_63_32)];
943 	st->rx_bitmap_63_32 =
944 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_10,
945 			      RX_BITMAP_63_32, val);
946 
947 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_11,
948 					 RX_BITMAP_95_64)];
949 	st->rx_bitmap_95_64 =
950 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_11,
951 			      RX_BITMAP_95_64, val);
952 
953 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_12,
954 					 RX_BITMAP_127_96)];
955 	st->rx_bitmap_127_96 =
956 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_12,
957 			      RX_BITMAP_127_96, val);
958 
959 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_13,
960 					 RX_BITMAP_159_128)];
961 	st->rx_bitmap_159_128 =
962 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_13,
963 			      RX_BITMAP_159_128, val);
964 
965 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_14,
966 					 RX_BITMAP_191_160)];
967 	st->rx_bitmap_191_160 =
968 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_14,
969 			      RX_BITMAP_191_160, val);
970 
971 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_15,
972 					 RX_BITMAP_223_192)];
973 	st->rx_bitmap_223_192 =
974 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_15,
975 			      RX_BITMAP_223_192, val);
976 
977 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_16,
978 					 RX_BITMAP_255_224)];
979 	st->rx_bitmap_255_224 =
980 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_16,
981 			      RX_BITMAP_255_224, val);
982 
983 	/* various counts */
984 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
985 					 CURRENT_MPDU_COUNT)];
986 	st->curr_mpdu_cnt =
987 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
988 			      CURRENT_MPDU_COUNT, val);
989 
990 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
991 					 CURRENT_MSDU_COUNT)];
992 	st->curr_msdu_cnt =
993 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
994 			      CURRENT_MSDU_COUNT, val);
995 
996 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
997 					 TIMEOUT_COUNT)];
998 	st->fwd_timeout_cnt =
999 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
1000 			      TIMEOUT_COUNT, val);
1001 
1002 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
1003 					 FORWARD_DUE_TO_BAR_COUNT)];
1004 	st->fwd_bar_cnt =
1005 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
1006 			      FORWARD_DUE_TO_BAR_COUNT, val);
1007 
1008 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
1009 					 DUPLICATE_COUNT)];
1010 	st->dup_cnt =
1011 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
1012 			      DUPLICATE_COUNT, val);
1013 
1014 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
1015 					 FRAMES_IN_ORDER_COUNT)];
1016 	st->frms_in_order_cnt =
1017 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
1018 			      FRAMES_IN_ORDER_COUNT, val);
1019 
1020 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
1021 					 BAR_RECEIVED_COUNT)];
1022 	st->bar_rcvd_cnt =
1023 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
1024 			      BAR_RECEIVED_COUNT, val);
1025 
1026 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_20,
1027 					 MPDU_FRAMES_PROCESSED_COUNT)];
1028 	st->mpdu_frms_cnt =
1029 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_20,
1030 			      MPDU_FRAMES_PROCESSED_COUNT, val);
1031 
1032 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_21,
1033 					 MSDU_FRAMES_PROCESSED_COUNT)];
1034 	st->msdu_frms_cnt =
1035 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_21,
1036 			      MSDU_FRAMES_PROCESSED_COUNT, val);
1037 
1038 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_22,
1039 					 TOTAL_PROCESSED_BYTE_COUNT)];
1040 	st->total_cnt =
1041 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_22,
1042 			      TOTAL_PROCESSED_BYTE_COUNT, val);
1043 
1044 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
1045 					 LATE_RECEIVE_MPDU_COUNT)];
1046 	st->late_recv_mpdu_cnt =
1047 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
1048 			      LATE_RECEIVE_MPDU_COUNT, val);
1049 
1050 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
1051 					 WINDOW_JUMP_2K)];
1052 	st->win_jump_2k =
1053 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
1054 			      WINDOW_JUMP_2K, val);
1055 
1056 	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
1057 					 HOLE_COUNT)];
1058 	st->hole_cnt =
1059 		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
1060 			      HOLE_COUNT, val);
1061 }
1062 qdf_export_symbol(hal_reo_queue_stats_status);
1063 
1064 inline void hal_reo_flush_queue_status(uint32_t *reo_desc,
1065 				    struct hal_reo_flush_queue_status *st,
1066 				    struct hal_soc *hal_soc)
1067 {
1068 	uint32_t val;
1069 
1070 	/* Offsets of descriptor fields defined in HW headers start
1071 	 * from the field after TLV header */
1072 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1073 
1074 	/* header */
1075 	hal_reo_status_get_header(reo_desc, HAL_REO_FLUSH_QUEUE_STATUS_TLV,
1076 					&(st->header), hal_soc);
1077 
1078 	/* error bit */
1079 	val = reo_desc[HAL_OFFSET(REO_FLUSH_QUEUE_STATUS_2,
1080 					 ERROR_DETECTED)];
1081 	st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
1082 				  val);
1083 }
1084 qdf_export_symbol(hal_reo_flush_queue_status);
1085 
1086 inline void hal_reo_flush_cache_status(uint32_t *reo_desc, struct hal_soc *soc,
1087 				    struct hal_reo_flush_cache_status *st,
1088 				    struct hal_soc *hal_soc)
1089 {
1090 	uint32_t val;
1091 
1092 	/* Offsets of descriptor fields defined in HW headers start
1093 	 * from the field after TLV header */
1094 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1095 
1096 	/* header */
1097 	hal_reo_status_get_header(reo_desc, HAL_REO_FLUSH_CACHE_STATUS_TLV,
1098 					&(st->header), hal_soc);
1099 
1100 	/* error bit */
1101 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1102 					 ERROR_DETECTED)];
1103 	st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
1104 				  val);
1105 
1106 	/* block error */
1107 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1108 					 BLOCK_ERROR_DETAILS)];
1109 	st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1110 					BLOCK_ERROR_DETAILS,
1111 					val);
1112 	if (!st->block_error)
1113 		qdf_set_bit(soc->index, (unsigned long *)&soc->reo_res_bitmap);
1114 
1115 	/* cache flush status */
1116 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1117 					 CACHE_CONTROLLER_FLUSH_STATUS_HIT)];
1118 	st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1119 					CACHE_CONTROLLER_FLUSH_STATUS_HIT,
1120 					val);
1121 
1122 	/* cache flush descriptor type */
1123 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1124 				  CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)];
1125 	st->cache_flush_status_desc_type =
1126 		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1127 			      CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE,
1128 			      val);
1129 
1130 	/* cache flush count */
1131 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
1132 				  CACHE_CONTROLLER_FLUSH_COUNT)];
1133 	st->cache_flush_cnt =
1134 		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
1135 			      CACHE_CONTROLLER_FLUSH_COUNT,
1136 			      val);
1137 
1138 }
1139 qdf_export_symbol(hal_reo_flush_cache_status);
1140 
1141 inline void hal_reo_unblock_cache_status(uint32_t *reo_desc,
1142 					 struct hal_soc *soc,
1143 					 struct hal_reo_unblk_cache_status *st)
1144 {
1145 	uint32_t val;
1146 
1147 	/* Offsets of descriptor fields defined in HW headers start
1148 	 * from the field after TLV header */
1149 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1150 
1151 	/* header */
1152 	hal_reo_status_get_header(reo_desc, HAL_REO_UNBLK_CACHE_STATUS_TLV,
1153 					&(st->header), soc);
1154 
1155 	/* error bit */
1156 	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
1157 				  ERROR_DETECTED)];
1158 	st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
1159 				  ERROR_DETECTED,
1160 				  val);
1161 
1162 	/* unblock type */
1163 	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
1164 				  UNBLOCK_TYPE)];
1165 	st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
1166 					 UNBLOCK_TYPE,
1167 					 val);
1168 
1169 	if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX))
1170 		qdf_clear_bit(soc->index,
1171 			     (unsigned long *)&soc->reo_res_bitmap);
1172 }
1173 qdf_export_symbol(hal_reo_unblock_cache_status);
1174 
1175 inline void hal_reo_flush_timeout_list_status(
1176 			 uint32_t *reo_desc,
1177 			 struct hal_reo_flush_timeout_list_status *st,
1178 			 struct hal_soc *hal_soc)
1179 
1180 {
1181 	uint32_t val;
1182 
1183 	/* Offsets of descriptor fields defined in HW headers start
1184 	 * from the field after TLV header */
1185 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1186 
1187 	/* header */
1188 	hal_reo_status_get_header(reo_desc, HAL_REO_TIMOUT_LIST_STATUS_TLV,
1189 					&(st->header), hal_soc);
1190 
1191 	/* error bit */
1192 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1193 					 ERROR_DETECTED)];
1194 	st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1195 				  ERROR_DETECTED,
1196 				  val);
1197 
1198 	/* list empty */
1199 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1200 					 TIMOUT_LIST_EMPTY)];
1201 	st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
1202 					TIMOUT_LIST_EMPTY,
1203 					val);
1204 
1205 	/* release descriptor count */
1206 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1207 					 RELEASE_DESC_COUNT)];
1208 	st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1209 				       RELEASE_DESC_COUNT,
1210 				       val);
1211 
1212 	/* forward buf count */
1213 	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1214 					 FORWARD_BUF_COUNT)];
1215 	st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
1216 				       FORWARD_BUF_COUNT,
1217 				       val);
1218 }
1219 qdf_export_symbol(hal_reo_flush_timeout_list_status);
1220 
1221 inline void hal_reo_desc_thres_reached_status(
1222 			 uint32_t *reo_desc,
1223 			 struct hal_reo_desc_thres_reached_status *st,
1224 			 struct hal_soc *hal_soc)
1225 {
1226 	uint32_t val;
1227 
1228 	/* Offsets of descriptor fields defined in HW headers start
1229 	 * from the field after TLV header */
1230 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1231 
1232 	/* header */
1233 	hal_reo_status_get_header(reo_desc,
1234 			      HAL_REO_DESC_THRES_STATUS_TLV,
1235 			      &(st->header), hal_soc);
1236 
1237 	/* threshold index */
1238 	val = reo_desc[HAL_OFFSET_DW(
1239 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
1240 				 THRESHOLD_INDEX)];
1241 	st->thres_index = HAL_GET_FIELD(
1242 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
1243 				THRESHOLD_INDEX,
1244 				val);
1245 
1246 	/* link desc counters */
1247 	val = reo_desc[HAL_OFFSET_DW(
1248 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
1249 				 LINK_DESCRIPTOR_COUNTER0)];
1250 	st->link_desc_counter0 = HAL_GET_FIELD(
1251 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
1252 				LINK_DESCRIPTOR_COUNTER0,
1253 				val);
1254 
1255 	val = reo_desc[HAL_OFFSET_DW(
1256 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
1257 				 LINK_DESCRIPTOR_COUNTER1)];
1258 	st->link_desc_counter1 = HAL_GET_FIELD(
1259 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
1260 				LINK_DESCRIPTOR_COUNTER1,
1261 				val);
1262 
1263 	val = reo_desc[HAL_OFFSET_DW(
1264 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
1265 				 LINK_DESCRIPTOR_COUNTER2)];
1266 	st->link_desc_counter2 = HAL_GET_FIELD(
1267 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
1268 				LINK_DESCRIPTOR_COUNTER2,
1269 				val);
1270 
1271 	val = reo_desc[HAL_OFFSET_DW(
1272 				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
1273 				 LINK_DESCRIPTOR_COUNTER_SUM)];
1274 	st->link_desc_counter_sum = HAL_GET_FIELD(
1275 				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
1276 				LINK_DESCRIPTOR_COUNTER_SUM,
1277 				val);
1278 }
1279 qdf_export_symbol(hal_reo_desc_thres_reached_status);
1280 
1281 inline void hal_reo_rx_update_queue_status(uint32_t *reo_desc,
1282 				      struct hal_reo_update_rx_queue_status *st,
1283 				      struct hal_soc *hal_soc)
1284 {
1285 	/* Offsets of descriptor fields defined in HW headers start
1286 	 * from the field after TLV header */
1287 	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
1288 
1289 	/* header */
1290 	hal_reo_status_get_header(reo_desc,
1291 			      HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV,
1292 			      &(st->header), hal_soc);
1293 }
1294 qdf_export_symbol(hal_reo_rx_update_queue_status);
1295 
1296 /**
1297  * hal_reo_init_cmd_ring() - Initialize descriptors of REO command SRNG
1298  * with command number
1299  * @hal_soc: Handle to HAL SoC structure
1300  * @hal_ring: Handle to HAL SRNG structure
1301  *
1302  * Return: none
1303  */
1304 inline void hal_reo_init_cmd_ring(struct hal_soc *soc, void *hal_srng)
1305 {
1306 	int cmd_num;
1307 	uint32_t *desc_addr;
1308 	struct hal_srng_params srng_params;
1309 	uint32_t desc_size;
1310 	uint32_t num_desc;
1311 
1312 	hal_get_srng_params(soc, hal_srng, &srng_params);
1313 
1314 	desc_addr = (uint32_t *)(srng_params.ring_base_vaddr);
1315 	desc_addr += (sizeof(struct tlv_32_hdr) >> 2);
1316 	desc_size = hal_srng_get_entrysize(soc, REO_CMD) >> 2;
1317 	num_desc = srng_params.num_entries;
1318 	cmd_num = 1;
1319 	while (num_desc) {
1320 		/* Offsets of descriptor fields defined in HW headers start
1321 		 * from the field after TLV header */
1322 		HAL_DESC_SET_FIELD(desc_addr, UNIFORM_REO_CMD_HEADER_0,
1323 			REO_CMD_NUMBER, cmd_num);
1324 		desc_addr += desc_size;
1325 		num_desc--; cmd_num++;
1326 	}
1327 
1328 	soc->reo_res_bitmap = 0;
1329 }
1330 qdf_export_symbol(hal_reo_init_cmd_ring);
1331