/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "hal_reo.h"
#include "hal_tx.h"
#include "qdf_module.h"

#define BLOCK_RES_MASK		0xF
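
/**
 * hal_find_one_bit() - Find position of the least significant set bit
 * @x: bitmap to search (only the low four bits, per BLOCK_RES_MASK, matter)
 *
 * (x & (~x + 1)) isolates the lowest set bit; the loop then counts its
 * position. For example, x = 0b0110 yields 0b0010, so the function
 * returns 1. If no masked bit is set, the loop never runs and the
 * function returns 0xFF, which callers treat as "index out of range".
 *
 * Return: zero-based position of the lowest set bit, or 0xFF if none
 */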
static inline uint8_t hal_find_one_bit(uint8_t x)
{
	uint8_t y = (x & (~x + 1)) & BLOCK_RES_MASK;
	uint8_t pos;

	for (pos = 0; y; y >>= 1)
		pos++;

	return pos - 1;
}

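/**
 * hal_find_zero_bit() - Find position of the least significant clear bit
 * @x: bitmap to search (only the low four bits, per BLOCK_RES_MASK, matter)
 *
 * (~x & (x + 1)) isolates the lowest clear bit; for example, x = 0b0110
 * yields 0b0001, so the function returns 0. If all four masked bits are
 * set, the isolated bit falls outside BLOCK_RES_MASK and the function
 * returns 0xFF, which callers treat as "no free resource".
 *
 * Return: zero-based position of the lowest clear bit, or 0xFF if none
 */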
static inline uint8_t hal_find_zero_bit(uint8_t x)
{
	uint8_t y = (~x & (x + 1)) & BLOCK_RES_MASK;
	uint8_t pos;

	for (pos = 0; y; y >>= 1)
		pos++;

	return pos - 1;
}

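/**
 * hal_reo_cmd_set_descr_addr() - Set the queue/flush descriptor address
 * fields of a REO command descriptor
 * @reo_desc: REO command descriptor (past the TLV header)
 * @type: REO command type
 * @paddr_lo: lower 32 bits of the descriptor physical address
 * @paddr_hi: upper 8 bits of the descriptor physical address
 *
 * Return: none
 */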
inline void hal_reo_cmd_set_descr_addr(uint32_t *reo_desc,
				       enum hal_reo_cmd_type type,
				       uint32_t paddr_lo,
				       uint8_t paddr_hi)
{
	switch (type) {
	case CMD_GET_QUEUE_STATS:
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_1,
			RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2,
				    RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_1,
					FLUSH_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
					FLUSH_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_CACHE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_1,
					FLUSH_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
					FLUSH_ADDR_39_32, paddr_hi);
		break;
	case CMD_UPDATE_RX_REO_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_1,
					RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
					RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Invalid REO command type", __func__);
		break;
	}
}

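/**
 * hal_reo_cmd_queue_stats() - Post a GET_QUEUE_STATS command to the REO
 * command ring
 * @reo_ring: REO command ring (HAL SRNG)
 * @soc: Handle to HAL SoC structure
 * @cmd: command parameters; @cmd->std carries the REO queue descriptor
 *	address and @cmd->u.stats_params.clear maps to the CLEAR_STATS field
 *
 * Return: command number assigned to the posted descriptor, or -EBUSY if
 * no command ring entry is available
 */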
inline int hal_reo_cmd_queue_stats(void *reo_ring, struct hal_soc *soc,
				    struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E,
			     sizeof(struct reo_get_queue_stats));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_get_queue_stats));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_GET_QUEUE_STATS,
				   cmd->std.addr_lo,
				   cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2, CLEAR_STATS,
			      cmd->u.stats_params.clear);

	hal_srng_access_end(soc, reo_ring);

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
qdf_export_symbol(hal_reo_cmd_queue_stats);

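/**
 * hal_reo_cmd_flush_queue() - Post a FLUSH_QUEUE command to the REO
 * command ring
 * @reo_ring: REO command ring (HAL SRNG)
 * @soc: Handle to HAL SoC structure
 * @cmd: command parameters; @cmd->u.fl_queue_params optionally requests
 *	that descriptor usage be blocked after the flush, using the caller
 *	supplied block resource index
 *
 * Return: command number assigned to the posted descriptor, or -EBUSY if
 * no command ring entry is available
 */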
inline int hal_reo_cmd_flush_queue(void *reo_ring, struct hal_soc *soc,
				    struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E,
			     sizeof(struct reo_flush_queue));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_flush_queue));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_QUEUE, cmd->std.addr_lo,
		cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
		BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH,
		cmd->u.fl_queue_params.block_use_after_flush);

	if (cmd->u.fl_queue_params.block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
			BLOCK_RESOURCE_INDEX, cmd->u.fl_queue_params.index);
	}

	hal_srng_access_end(soc, reo_ring);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
qdf_export_symbol(hal_reo_cmd_flush_queue);

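/**
 * hal_reo_cmd_flush_cache() - Post a FLUSH_CACHE command to the REO
 * command ring
 * @reo_ring: REO command ring (HAL SRNG)
 * @soc: Handle to HAL SoC structure
 * @cmd: command parameters (hal_reo_cmd_flush_cache_params)
 *
 * When @cmd->u.fl_cache_params.block_use_after_flush is set, one of the
 * four cache block resources tracked in soc->reo_res_bitmap is picked and
 * programmed into the descriptor; -EBUSY is returned if none is free.
 *
 * Minimal caller-side sketch (illustrative only; qdesc_paddr, reo_cmd_ring,
 * hal_soc and cmd_num are hypothetical caller variables, not names from
 * this file):
 *
 *	struct hal_reo_cmd_params cmd = {0};
 *
 *	cmd.std.need_status = 1;
 *	cmd.std.addr_lo = qdesc_paddr & 0xffffffff;
 *	cmd.std.addr_hi = (qdesc_paddr >> 32) & 0xff;
 *	cmd.u.fl_cache_params.block_use_after_flush = 1;
 *	cmd_num = hal_reo_cmd_flush_cache(reo_cmd_ring, hal_soc, &cmd);
 *
 * Return: command number assigned to the posted descriptor, or -EBUSY if
 * no blocking resource or command ring entry is available
 */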
inline int hal_reo_cmd_flush_cache(void *reo_ring, struct hal_soc *soc,
				    struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_flush_cache_params *cp;
	uint8_t index = 0;

	cp = &cmd->u.fl_cache_params;

	hal_srng_access_start(soc, reo_ring);

	/* We need a cache block resource for this operation, and REO HW has
	 * only 4 such blocking resources. These resources are managed using
	 * reo_res_bitmap, and we return failure if none is available.
	 */
	if (cp->block_use_after_flush) {
		index = hal_find_zero_bit(soc->reo_res_bitmap);
		if (index > 3) {
			qdf_print("%s: No blocking resource available!",
				  __func__);
			hal_srng_access_end(soc, reo_ring);
			return -EBUSY;
		}
		soc->index = index;
	}

	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		hal_srng_dump(reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E,
			     sizeof(struct reo_flush_cache));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_flush_cache));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_CACHE, cmd->std.addr_lo,
				   cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		FORWARD_ALL_MPDUS_IN_QUEUE, cp->fwd_mpdus_in_queue);

	/* set it to 0 for now */
	cp->rel_block_index = 0;
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		RELEASE_CACHE_BLOCK_INDEX, cp->rel_block_index);

	if (cp->block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			CACHE_BLOCK_RESOURCE_INDEX, index);
	}

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		FLUSH_WITHOUT_INVALIDATE, cp->flush_no_inval);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		BLOCK_CACHE_USAGE_AFTER_FLUSH, cp->block_use_after_flush);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, FLUSH_ENTIRE_CACHE,
		cp->flush_all);

	hal_srng_access_end(soc, reo_ring);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
qdf_export_symbol(hal_reo_cmd_flush_cache);

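/**
 * hal_reo_cmd_unblock_cache() - Post an UNBLOCK_CACHE command to the REO
 * command ring
 * @reo_ring: REO command ring (HAL SRNG)
 * @soc: Handle to HAL SoC structure
 * @cmd: command parameters; for UNBLOCK_RES_INDEX the block resource to
 *	release is taken from @cmd->u.unblk_cache_params.index
 *
 * Return: command number assigned to the posted descriptor, or -EBUSY if
 * no command ring entry (or, for UNBLOCK_RES_INDEX, no blocked resource)
 * is available
 */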
inline int hal_reo_cmd_unblock_cache(void *reo_ring, struct hal_soc *soc,
				      struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	uint8_t index = 0;

	hal_srng_access_start(soc, reo_ring);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		index = hal_find_one_bit(soc->reo_res_bitmap);
		if (index > 3) {
			hal_srng_access_end(soc, reo_ring);
			qdf_print("%s: No blocking resource to unblock!",
				  __func__);
			return -EBUSY;
		}
	}

	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E,
			     sizeof(struct reo_unblock_cache));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_unblock_cache));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
		UNBLOCK_TYPE, cmd->u.unblk_cache_params.type);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
			CACHE_BLOCK_RESOURCE_INDEX,
			cmd->u.unblk_cache_params.index);
	}

	hal_srng_access_end(soc, reo_ring);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
qdf_export_symbol(hal_reo_cmd_unblock_cache);

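/**
 * hal_reo_cmd_flush_timeout_list() - Post a FLUSH_TIMEOUT_LIST command to
 * the REO command ring
 * @reo_ring: REO command ring (HAL SRNG)
 * @soc: Handle to HAL SoC structure
 * @cmd: command parameters; @cmd->u.fl_tim_list_params selects the AC
 *	timeout list and the minimum release descriptor/forward buffer counts
 *
 * Return: command number assigned to the posted descriptor, or -EBUSY if
 * no command ring entry is available
 */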
inline int hal_reo_cmd_flush_timeout_list(void *reo_ring, struct hal_soc *soc,
					   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E,
			     sizeof(struct reo_flush_timeout_list));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_flush_timeout_list));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_1, AC_TIMOUT_LIST,
		cmd->u.fl_tim_list_params.ac_list);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
		MINIMUM_RELEASE_DESC_COUNT,
		cmd->u.fl_tim_list_params.min_rel_desc);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
		MINIMUM_FORWARD_BUF_COUNT,
		cmd->u.fl_tim_list_params.min_fwd_buf);

	hal_srng_access_end(soc, reo_ring);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
qdf_export_symbol(hal_reo_cmd_flush_timeout_list);

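/**
 * hal_reo_cmd_update_rx_queue() - Post an UPDATE_RX_REO_QUEUE command to
 * the REO command ring
 * @reo_ring: REO command ring (HAL SRNG)
 * @soc: Handle to HAL SoC structure
 * @cmd: command parameters; the update_* flags in
 *	@cmd->u.upd_queue_params select which queue descriptor fields are
 *	rewritten with the accompanying values
 *
 * Return: command number assigned to the posted descriptor, or -EBUSY if
 * no command ring entry is available
 */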
inline int hal_reo_cmd_update_rx_queue(void *reo_ring, struct hal_soc *soc,
					struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_update_queue_params *p;

	p = &cmd->u.upd_queue_params;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E,
			     sizeof(struct reo_update_rx_reo_queue));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_update_rx_reo_queue));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_UPDATE_RX_REO_QUEUE,
		cmd->std.addr_lo, cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_RECEIVE_QUEUE_NUMBER, p->update_rx_queue_num);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, UPDATE_VLD,
			      p->update_vld);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
		p->update_assoc_link_desc);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_DISABLE_DUPLICATE_DETECTION,
		p->update_disable_dup_detect);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_SOFT_REORDER_ENABLE,
		p->update_soft_reorder_enab);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_AC, p->update_ac);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_BAR, p->update_bar);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_RTY, p->update_rty);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_CHK_2K_MODE, p->update_chk_2k_mode);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_OOR_MODE, p->update_oor_mode);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_PN_CHECK_NEEDED, p->update_pn_check_needed);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_PN_HANDLING_ENABLE, p->update_pn_hand_enab);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_PN_SIZE, p->update_pn_size);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_SVLD, p->update_svld);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_SSN, p->update_ssn);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_SEQ_2K_ERROR_DETECTED_FLAG,
		p->update_seq_2k_err_detect);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_PN_VALID, p->update_pn_valid);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_PN, p->update_pn);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		RECEIVE_QUEUE_NUMBER, p->rx_queue_num);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		VLD, p->vld);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
		p->assoc_link_desc);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		DISABLE_DUPLICATE_DETECTION, p->disable_dup_detect);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		SOFT_REORDER_ENABLE, p->soft_reorder_enab);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, AC, p->ac);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		BAR, p->bar);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		CHK_2K_MODE, p->chk_2k_mode);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		RTY, p->rty);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		OOR_MODE, p->oor_mode);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		PN_CHECK_NEEDED, p->pn_check_needed);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		PN_SHALL_BE_EVEN, p->pn_even);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		PN_SHALL_BE_UNEVEN, p->pn_uneven);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		PN_HANDLING_ENABLE, p->pn_hand_enab);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		IGNORE_AMPDU_FLAG, p->ignore_ampdu);

	if (p->ba_window_size < 1)
		p->ba_window_size = 1;
	/*
	 * WAR to get 2k exception in Non BA case.
	 * Setting window size to 2 to get 2k jump exception
	 * when we receive aggregates in Non BA case
	 */
	if (p->ba_window_size == 1)
		p->ba_window_size++;
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
		BA_WINDOW_SIZE, p->ba_window_size - 1);

	if (p->pn_size == 24)
		p->pn_size = PN_SIZE_24;
	else if (p->pn_size == 48)
		p->pn_size = PN_SIZE_48;
	else if (p->pn_size == 128)
		p->pn_size = PN_SIZE_128;

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
		PN_SIZE, p->pn_size);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
		SVLD, p->svld);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
		SSN, p->ssn);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
		SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
		PN_ERROR_DETECTED_FLAG, p->pn_err_detect);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_5,
		PN_31_0, p->pn_31_0);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_6,
		PN_63_32, p->pn_63_32);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_7,
		PN_95_64, p->pn_95_64);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_8,
		PN_127_96, p->pn_127_96);

	hal_srng_access_end(soc, reo_ring);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
qdf_export_symbol(hal_reo_cmd_update_rx_queue);

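/**
 * hal_reo_queue_stats_status() - Parse a GET_QUEUE_STATS status descriptor
 * @reo_desc: status descriptor from the REO status ring (at the TLV header)
 * @st: output structure filled with SSN, PN, rx bitmap, timestamp and
 *	frame/byte counters reported by REO
 *
 * Return: none
 */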
inline void hal_reo_queue_stats_status(uint32_t *reo_desc,
			     struct hal_reo_queue_status *st)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc, REO_GET_QUEUE_STATS, st->header);

	/* SSN */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2, SSN)];
	st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2, SSN, val);

	/* current index */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2,
					 CURRENT_INDEX)];
	st->curr_idx =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2,
			      CURRENT_INDEX, val);

	/* PN bits */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_3,
					 PN_31_0)];
	st->pn_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_3,
			      PN_31_0, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_4,
					 PN_63_32)];
	st->pn_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_4,
			      PN_63_32, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_5,
					 PN_95_64)];
	st->pn_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_5,
			      PN_95_64, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_6,
					 PN_127_96)];
	st->pn_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_6,
			      PN_127_96, val);

	/* timestamps */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_7,
					 LAST_RX_ENQUEUE_TIMESTAMP)];
	st->last_rx_enq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_7,
			      LAST_RX_ENQUEUE_TIMESTAMP, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_8,
					 LAST_RX_DEQUEUE_TIMESTAMP)];
	st->last_rx_deq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_8,
			      LAST_RX_DEQUEUE_TIMESTAMP, val);

	/* rx bitmap */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_9,
					 RX_BITMAP_31_0)];
	st->rx_bitmap_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_9,
			      RX_BITMAP_31_0, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_10,
					 RX_BITMAP_63_32)];
	st->rx_bitmap_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_10,
			      RX_BITMAP_63_32, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_11,
					 RX_BITMAP_95_64)];
	st->rx_bitmap_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_11,
			      RX_BITMAP_95_64, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_12,
					 RX_BITMAP_127_96)];
	st->rx_bitmap_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_12,
			      RX_BITMAP_127_96, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_13,
					 RX_BITMAP_159_128)];
	st->rx_bitmap_159_128 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_13,
			      RX_BITMAP_159_128, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_14,
					 RX_BITMAP_191_160)];
	st->rx_bitmap_191_160 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_14,
			      RX_BITMAP_191_160, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_15,
					 RX_BITMAP_223_192)];
	st->rx_bitmap_223_192 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_15,
			      RX_BITMAP_223_192, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_16,
					 RX_BITMAP_255_224)];
	st->rx_bitmap_255_224 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_16,
			      RX_BITMAP_255_224, val);

	/* various counts */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
					 CURRENT_MPDU_COUNT)];
	st->curr_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
			      CURRENT_MPDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
					 CURRENT_MSDU_COUNT)];
	st->curr_msdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
			      CURRENT_MSDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
					 TIMEOUT_COUNT)];
	st->fwd_timeout_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      TIMEOUT_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
					 FORWARD_DUE_TO_BAR_COUNT)];
	st->fwd_bar_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      FORWARD_DUE_TO_BAR_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
					 DUPLICATE_COUNT)];
	st->dup_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      DUPLICATE_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
					 FRAMES_IN_ORDER_COUNT)];
	st->frms_in_order_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
			      FRAMES_IN_ORDER_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
					 BAR_RECEIVED_COUNT)];
	st->bar_rcvd_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
			      BAR_RECEIVED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_20,
					 MPDU_FRAMES_PROCESSED_COUNT)];
	st->mpdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_20,
			      MPDU_FRAMES_PROCESSED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_21,
					 MSDU_FRAMES_PROCESSED_COUNT)];
	st->msdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_21,
			      MSDU_FRAMES_PROCESSED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_22,
					 TOTAL_PROCESSED_BYTE_COUNT)];
	st->total_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_22,
			      TOTAL_PROCESSED_BYTE_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
					 LATE_RECEIVE_MPDU_COUNT)];
	st->late_recv_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      LATE_RECEIVE_MPDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
					 WINDOW_JUMP_2K)];
	st->win_jump_2k =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      WINDOW_JUMP_2K, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
					 HOLE_COUNT)];
	st->hole_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      HOLE_COUNT, val);
}
qdf_export_symbol(hal_reo_queue_stats_status);

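/**
 * hal_reo_flush_queue_status() - Parse a FLUSH_QUEUE status descriptor
 * @reo_desc: status descriptor from the REO status ring (at the TLV header)
 * @st: output structure filled with the status header and error bit
 *
 * Return: none
 */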
inline void hal_reo_flush_queue_status(uint32_t *reo_desc,
				    struct hal_reo_flush_queue_status *st)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc, REO_FLUSH_QUEUE, st->header);

	/* error bit */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_QUEUE_STATUS_2,
					 ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
				  val);
}
qdf_export_symbol(hal_reo_flush_queue_status);

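/**
 * hal_reo_flush_cache_status() - Parse a FLUSH_CACHE status descriptor
 * @reo_desc: status descriptor from the REO status ring (at the TLV header)
 * @soc: Handle to HAL SoC structure
 * @st: output structure filled with the error/block-error bits and cache
 *	flush status
 *
 * When no block error is reported, the block resource reserved by
 * hal_reo_cmd_flush_cache() is marked as in use in soc->reo_res_bitmap.
 *
 * Return: none
 */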
inline void hal_reo_flush_cache_status(uint32_t *reo_desc, struct hal_soc *soc,
				    struct hal_reo_flush_cache_status *st)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc, REO_FLUSH_CACHE, st->header);

	/* error bit */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
					 ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2, ERROR_DETECTED,
				  val);

	/* block error */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
					 BLOCK_ERROR_DETAILS)];
	st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
					BLOCK_ERROR_DETAILS,
					val);
	if (!st->block_error)
		qdf_set_bit(soc->index, (unsigned long *)&soc->reo_res_bitmap);

	/* cache flush status */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
					 CACHE_CONTROLLER_FLUSH_STATUS_HIT)];
	st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
					CACHE_CONTROLLER_FLUSH_STATUS_HIT,
					val);

	/* cache flush descriptor type */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
				  CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)];
	st->cache_flush_status_desc_type =
		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
			      CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE,
			      val);

	/* cache flush count */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
				  CACHE_CONTROLLER_FLUSH_COUNT)];
	st->cache_flush_cnt =
		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
			      CACHE_CONTROLLER_FLUSH_COUNT,
			      val);
}
qdf_export_symbol(hal_reo_flush_cache_status);

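/**
 * hal_reo_unblock_cache_status() - Parse an UNBLOCK_CACHE status descriptor
 * @reo_desc: status descriptor from the REO status ring (at the TLV header)
 * @soc: Handle to HAL SoC structure
 * @st: output structure filled with the error bit and unblock type
 *
 * On a successful UNBLOCK_RES_INDEX completion the corresponding bit in
 * soc->reo_res_bitmap is cleared, returning the block resource to the pool.
 *
 * Return: none
 */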
inline void hal_reo_unblock_cache_status(uint32_t *reo_desc,
					 struct hal_soc *soc,
					 struct hal_reo_unblk_cache_status *st)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc, REO_UNBLOCK_CACHE, st->header);

	/* error bit */
	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
				  ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
				  ERROR_DETECTED,
				  val);

	/* unblock type */
	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
				  UNBLOCK_TYPE)];
	st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
					 UNBLOCK_TYPE,
					 val);

	if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX))
		qdf_clear_bit(soc->index,
			     (unsigned long *)&soc->reo_res_bitmap);
}
qdf_export_symbol(hal_reo_unblock_cache_status);

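/**
 * hal_reo_flush_timeout_list_status() - Parse a FLUSH_TIMEOUT_LIST status
 * descriptor
 * @reo_desc: status descriptor from the REO status ring (at the TLV header)
 * @st: output structure filled with the error bit, list-empty flag and
 *	release descriptor/forward buffer counts
 *
 * Return: none
 */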
inline void hal_reo_flush_timeout_list_status(
			 uint32_t *reo_desc,
			 struct hal_reo_flush_timeout_list_status *st)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc, REO_FLUSH_TIMEOUT_LIST, st->header);

	/* error bit */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
					 ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
				  ERROR_DETECTED,
				  val);

	/* list empty */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
					 TIMOUT_LIST_EMPTY)];
	st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
					TIMOUT_LIST_EMPTY,
					val);

	/* release descriptor count */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
					 RELEASE_DESC_COUNT)];
	st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
				       RELEASE_DESC_COUNT,
				       val);

	/* forward buf count */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
					 FORWARD_BUF_COUNT)];
	st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
				       FORWARD_BUF_COUNT,
				       val);
}
qdf_export_symbol(hal_reo_flush_timeout_list_status);

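/**
 * hal_reo_desc_thres_reached_status() - Parse a DESCRIPTOR_THRESHOLD_REACHED
 * status descriptor
 * @reo_desc: status descriptor from the REO status ring (at the TLV header)
 * @st: output structure filled with the threshold index and link descriptor
 *	counters
 *
 * Return: none
 */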
inline void hal_reo_desc_thres_reached_status(
			 uint32_t *reo_desc,
			 struct hal_reo_desc_thres_reached_status *st)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc,
			      REO_DESCRIPTOR_THRESHOLD_REACHED, st->header);

	/* threshold index */
	val = reo_desc[HAL_OFFSET_DW(
				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
				 THRESHOLD_INDEX)];
	st->thres_index = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
				THRESHOLD_INDEX,
				val);

	/* link desc counters */
	val = reo_desc[HAL_OFFSET_DW(
				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
				 LINK_DESCRIPTOR_COUNTER0)];
	st->link_desc_counter0 = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
				LINK_DESCRIPTOR_COUNTER0,
				val);

	val = reo_desc[HAL_OFFSET_DW(
				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
				 LINK_DESCRIPTOR_COUNTER1)];
	st->link_desc_counter1 = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
				LINK_DESCRIPTOR_COUNTER1,
				val);

	val = reo_desc[HAL_OFFSET_DW(
				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
				 LINK_DESCRIPTOR_COUNTER2)];
	st->link_desc_counter2 = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
				LINK_DESCRIPTOR_COUNTER2,
				val);

	val = reo_desc[HAL_OFFSET_DW(
				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
				 LINK_DESCRIPTOR_COUNTER_SUM)];
	st->link_desc_counter_sum = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
				LINK_DESCRIPTOR_COUNTER_SUM,
				val);
}
qdf_export_symbol(hal_reo_desc_thres_reached_status);

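/**
 * hal_reo_rx_update_queue_status() - Parse an UPDATE_RX_REO_QUEUE status
 * descriptor
 * @reo_desc: status descriptor from the REO status ring (at the TLV header)
 * @st: output structure filled with the status header
 *
 * Return: none
 */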
inline void hal_reo_rx_update_queue_status(uint32_t *reo_desc,
				      struct hal_reo_update_rx_queue_status *st)
{
	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc,
			      REO_UPDATE_RX_REO_QUEUE, st->header);
}
qdf_export_symbol(hal_reo_rx_update_queue_status);

/**
 * hal_reo_init_cmd_ring() - Initialize descriptors of REO command SRNG
 * with command number
 * @soc: Handle to HAL SoC structure
 * @hal_srng: Handle to HAL SRNG structure
 *
 * Return: none
 */
inline void hal_reo_init_cmd_ring(struct hal_soc *soc, void *hal_srng)
{
	int cmd_num;
	uint32_t *desc_addr;
	struct hal_srng_params srng_params;
	uint32_t desc_size;
	uint32_t num_desc;

	hal_get_srng_params(soc, hal_srng, &srng_params);

	desc_addr = (uint32_t *)(srng_params.ring_base_vaddr);
	desc_addr += (sizeof(struct tlv_32_hdr) >> 2);
	desc_size = hal_srng_get_entrysize(soc, REO_CMD) >> 2;
	num_desc = srng_params.num_entries;
	cmd_num = 1;
	while (num_desc) {
		/* Offsets of descriptor fields defined in HW headers start
		 * from the field after TLV header */
		HAL_DESC_SET_FIELD(desc_addr, UNIFORM_REO_CMD_HEADER_0,
			REO_CMD_NUMBER, cmd_num);
		desc_addr += desc_size;
		num_desc--;
		cmd_num++;
	}

	soc->reo_res_bitmap = 0;
}
qdf_export_symbol(hal_reo_init_cmd_ring);
997