/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_reo.h"
#include "hal_tx.h"
#include "qdf_module.h"

#define BLOCK_RES_MASK		0xF
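/**
 * hal_find_one_bit() - Find the least significant set bit in the REO
 * block resource bitmap
 * @x: bitmap of currently reserved REO block resources
 *
 * Return: zero-based position of the least significant set bit in @x,
 * or a value greater than 3 if no bit within BLOCK_RES_MASK is set
 */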
static inline uint8_t hal_find_one_bit(uint8_t x)
{
	uint8_t y = (x & (~x + 1)) & BLOCK_RES_MASK;
	uint8_t pos;

	for (pos = 0; y; y >>= 1)
		pos++;

	return pos - 1;
}

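/**
 * hal_find_zero_bit() - Find the least significant clear bit in the REO
 * block resource bitmap
 * @x: bitmap of currently reserved REO block resources
 *
 * Return: zero-based position of the least significant clear bit in @x,
 * or a value greater than 3 if all bits within BLOCK_RES_MASK are set
 */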
static inline uint8_t hal_find_zero_bit(uint8_t x)
{
	uint8_t y = (~x & (x + 1)) & BLOCK_RES_MASK;
	uint8_t pos;

	for (pos = 0; y; y >>= 1)
		pos++;

	return pos - 1;
}

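/**
 * hal_reo_cmd_set_descr_addr() - Set the REO queue/flush descriptor
 * address fields of a REO command descriptor
 * @reo_desc: REO command descriptor (past the TLV header)
 * @type: REO command type
 * @paddr_lo: lower 32 bits of the descriptor physical address
 * @paddr_hi: upper 8 bits (39:32) of the descriptor physical address
 *
 * Return: none
 */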
inline void hal_reo_cmd_set_descr_addr(uint32_t *reo_desc,
				       enum hal_reo_cmd_type type,
				       uint32_t paddr_lo,
				       uint8_t paddr_hi)
{
	switch (type) {
	case CMD_GET_QUEUE_STATS:
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_1,
			RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2,
				    RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_1,
					FLUSH_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
					FLUSH_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_CACHE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_1,
					FLUSH_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
					FLUSH_ADDR_39_32, paddr_hi);
		break;
	case CMD_UPDATE_RX_REO_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_1,
					RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
					RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Invalid REO command type\n", __func__);
		break;
	}
}

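/**
 * hal_reo_cmd_queue_stats() - Post a GET_QUEUE_STATS command to the REO
 * command ring
 * @reo_ring: REO command ring (SRNG) to post the command on
 * @soc: Handle to HAL SoC structure
 * @cmd: REO command parameters
 *
 * Return: command number of the posted command, or -EBUSY if no command
 * ring entry is available
 */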
inline int hal_reo_cmd_queue_stats(void *reo_ring, struct hal_soc *soc,
				    struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries\n", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E,
			     sizeof(struct reo_get_queue_stats));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_get_queue_stats));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_GET_QUEUE_STATS,
				   cmd->std.addr_lo,
				   cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2, CLEAR_STATS,
			      cmd->u.stats_params.clear);

	hal_srng_access_end(soc, reo_ring);

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
qdf_export_symbol(hal_reo_cmd_queue_stats);

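/**
 * hal_reo_cmd_flush_queue() - Post a FLUSH_QUEUE command to the REO
 * command ring
 * @reo_ring: REO command ring (SRNG) to post the command on
 * @soc: Handle to HAL SoC structure
 * @cmd: REO command parameters
 *
 * Return: command number of the posted command, or -EBUSY if no command
 * ring entry is available
 */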
inline int hal_reo_cmd_flush_queue(void *reo_ring, struct hal_soc *soc,
				    struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries\n", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E,
			     sizeof(struct reo_flush_queue));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_flush_queue));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_QUEUE, cmd->std.addr_lo,
		cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
		BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH,
		cmd->u.fl_queue_params.block_use_after_flush);

	if (cmd->u.fl_queue_params.block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
			BLOCK_RESOURCE_INDEX, cmd->u.fl_queue_params.index);
	}

	hal_srng_access_end(soc, reo_ring);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
qdf_export_symbol(hal_reo_cmd_flush_queue);

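/**
 * hal_reo_cmd_flush_cache() - Post a FLUSH_CACHE command to the REO
 * command ring
 * @reo_ring: REO command ring (SRNG) to post the command on
 * @soc: Handle to HAL SoC structure
 * @cmd: REO command parameters
 *
 * If block_use_after_flush is requested, a free blocking resource is
 * picked from soc->reo_res_bitmap and its index is programmed into the
 * command.
 *
 * Return: command number of the posted command, or -EBUSY if no command
 * ring entry or blocking resource is available
 */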
inline int hal_reo_cmd_flush_cache(void *reo_ring, struct hal_soc *soc,
				    struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_flush_cache_params *cp;
	uint8_t index = 0;

	cp = &cmd->u.fl_cache_params;

	hal_srng_access_start(soc, reo_ring);

	/* We need a cache block resource for this operation, and REO HW has
	 * only 4 such blocking resources. These resources are managed using
	 * reo_res_bitmap, and we return failure if none is available.
	 */
	if (cp->block_use_after_flush) {
		index = hal_find_zero_bit(soc->reo_res_bitmap);
		if (index > 3) {
			qdf_print("%s: No blocking resource available!\n",
				  __func__);
			hal_srng_access_end(soc, reo_ring);
			return -EBUSY;
		}
		soc->index = index;
	}

	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries\n", __func__);
		hal_srng_access_end(soc, reo_ring);
		hal_srng_dump(reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E,
			     sizeof(struct reo_flush_cache));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_flush_cache));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_CACHE, cmd->std.addr_lo,
				   cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		FORWARD_ALL_MPDUS_IN_QUEUE, cp->fwd_mpdus_in_queue);

	/* set it to 0 for now */
	cp->rel_block_index = 0;
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		RELEASE_CACHE_BLOCK_INDEX, cp->rel_block_index);

	if (cp->block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			CACHE_BLOCK_RESOURCE_INDEX, index);
	}

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		FLUSH_WITHOUT_INVALIDATE, cp->flush_no_inval);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		BLOCK_CACHE_USAGE_AFTER_FLUSH, cp->block_use_after_flush);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, FLUSH_ENTIRE_CACHE,
		cp->flush_all);

	hal_srng_access_end(soc, reo_ring);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
qdf_export_symbol(hal_reo_cmd_flush_cache);

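/**
 * hal_reo_cmd_unblock_cache() - Post an UNBLOCK_CACHE command to the REO
 * command ring
 * @reo_ring: REO command ring (SRNG) to post the command on
 * @soc: Handle to HAL SoC structure
 * @cmd: REO command parameters
 *
 * Return: command number of the posted command, or -EBUSY if no command
 * ring entry is available or no blocking resource is currently in use
 */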
inline int hal_reo_cmd_unblock_cache(void *reo_ring, struct hal_soc *soc,
				      struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	uint8_t index = 0;

	hal_srng_access_start(soc, reo_ring);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		index = hal_find_one_bit(soc->reo_res_bitmap);
		if (index > 3) {
			hal_srng_access_end(soc, reo_ring);
			qdf_print("%s: No blocking resource to unblock!\n",
				  __func__);
			return -EBUSY;
		}
	}

	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries\n", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E,
			     sizeof(struct reo_unblock_cache));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_unblock_cache));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
		UNBLOCK_TYPE, cmd->u.unblk_cache_params.type);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
			CACHE_BLOCK_RESOURCE_INDEX,
			cmd->u.unblk_cache_params.index);
	}

	hal_srng_access_end(soc, reo_ring);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
qdf_export_symbol(hal_reo_cmd_unblock_cache);

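/**
 * hal_reo_cmd_flush_timeout_list() - Post a FLUSH_TIMEOUT_LIST command to
 * the REO command ring
 * @reo_ring: REO command ring (SRNG) to post the command on
 * @soc: Handle to HAL SoC structure
 * @cmd: REO command parameters
 *
 * Return: command number of the posted command, or -EBUSY if no command
 * ring entry is available
 */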
inline int hal_reo_cmd_flush_timeout_list(void *reo_ring, struct hal_soc *soc,
					   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries\n", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E,
			     sizeof(struct reo_flush_timeout_list));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_flush_timeout_list));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_1, AC_TIMOUT_LIST,
		cmd->u.fl_tim_list_params.ac_list);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
		MINIMUM_RELEASE_DESC_COUNT,
		cmd->u.fl_tim_list_params.min_rel_desc);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
		MINIMUM_FORWARD_BUF_COUNT,
		cmd->u.fl_tim_list_params.min_fwd_buf);

	hal_srng_access_end(soc, reo_ring);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
qdf_export_symbol(hal_reo_cmd_flush_timeout_list);

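/**
 * hal_reo_cmd_update_rx_queue() - Post an UPDATE_RX_REO_QUEUE command to
 * the REO command ring
 * @reo_ring: REO command ring (SRNG) to post the command on
 * @soc: Handle to HAL SoC structure
 * @cmd: REO command parameters
 *
 * Return: command number of the posted command, or -EBUSY if no command
 * ring entry is available
 */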
inline int hal_reo_cmd_update_rx_queue(void *reo_ring, struct hal_soc *soc,
					struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_update_queue_params *p;

	p = &cmd->u.upd_queue_params;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries\n", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E,
			     sizeof(struct reo_update_rx_reo_queue));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_update_rx_reo_queue));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_UPDATE_RX_REO_QUEUE,
		cmd->std.addr_lo, cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_RECEIVE_QUEUE_NUMBER, p->update_rx_queue_num);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, UPDATE_VLD,
			      p->update_vld);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
		p->update_assoc_link_desc);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_DISABLE_DUPLICATE_DETECTION,
		p->update_disable_dup_detect);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_SOFT_REORDER_ENABLE,
		p->update_soft_reorder_enab);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_AC, p->update_ac);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_BAR, p->update_bar);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_RTY, p->update_rty);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_CHK_2K_MODE, p->update_chk_2k_mode);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_OOR_MODE, p->update_oor_mode);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_PN_CHECK_NEEDED, p->update_pn_check_needed);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_PN_HANDLING_ENABLE, p->update_pn_hand_enab);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_PN_SIZE, p->update_pn_size);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_SVLD, p->update_svld);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_SSN, p->update_ssn);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_SEQ_2K_ERROR_DETECTED_FLAG,
		p->update_seq_2k_err_detect);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_PN_VALID, p->update_pn_valid);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_PN, p->update_pn);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		RECEIVE_QUEUE_NUMBER, p->rx_queue_num);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		VLD, p->vld);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
		p->assoc_link_desc);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		DISABLE_DUPLICATE_DETECTION, p->disable_dup_detect);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		SOFT_REORDER_ENABLE, p->soft_reorder_enab);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, AC, p->ac);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		BAR, p->bar);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		CHK_2K_MODE, p->chk_2k_mode);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		RTY, p->rty);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		OOR_MODE, p->oor_mode);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		PN_CHECK_NEEDED, p->pn_check_needed);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		PN_SHALL_BE_EVEN, p->pn_even);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		PN_SHALL_BE_UNEVEN, p->pn_uneven);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		PN_HANDLING_ENABLE, p->pn_hand_enab);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		IGNORE_AMPDU_FLAG, p->ignore_ampdu);

	if (p->ba_window_size < 1)
		p->ba_window_size = 1;

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
		BA_WINDOW_SIZE, p->ba_window_size - 1);

	if (p->pn_size == 24)
		p->pn_size = PN_SIZE_24;
	else if (p->pn_size == 48)
		p->pn_size = PN_SIZE_48;
	else if (p->pn_size == 128)
		p->pn_size = PN_SIZE_128;

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
		PN_SIZE, p->pn_size);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
		SVLD, p->svld);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
		SSN, p->ssn);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
		SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
		PN_ERROR_DETECTED_FLAG, p->pn_err_detect);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_5,
		PN_31_0, p->pn_31_0);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_6,
		PN_63_32, p->pn_63_32);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_7,
		PN_95_64, p->pn_95_64);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_8,
		PN_127_96, p->pn_127_96);

	hal_srng_access_end(soc, reo_ring);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
				     val);
}
qdf_export_symbol(hal_reo_cmd_update_rx_queue);

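/**
 * hal_reo_queue_stats_status() - Parse a GET_QUEUE_STATS status descriptor
 * @reo_desc: status descriptor from the REO status ring
 * @st: buffer for the parsed queue statistics
 *
 * Return: none
 */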
inline void hal_reo_queue_stats_status(uint32_t *reo_desc,
			     struct hal_reo_queue_status *st)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc, REO_GET_QUEUE_STATS, st->header);

	/* SSN */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2, SSN)];
	st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2, SSN, val);

	/* current index */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2,
					 CURRENT_INDEX)];
	st->curr_idx =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2,
			      CURRENT_INDEX, val);

	/* PN bits */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_3,
					 PN_31_0)];
	st->pn_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_3,
			      PN_31_0, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_4,
					 PN_63_32)];
	st->pn_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_4,
			      PN_63_32, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_5,
					 PN_95_64)];
	st->pn_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_5,
			      PN_95_64, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_6,
					 PN_127_96)];
	st->pn_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_6,
			      PN_127_96, val);

	/* timestamps */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_7,
					 LAST_RX_ENQUEUE_TIMESTAMP)];
	st->last_rx_enq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_7,
			      LAST_RX_ENQUEUE_TIMESTAMP, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_8,
					 LAST_RX_DEQUEUE_TIMESTAMP)];
	st->last_rx_deq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_8,
			      LAST_RX_DEQUEUE_TIMESTAMP, val);

	/* rx bitmap */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_9,
					 RX_BITMAP_31_0)];
	st->rx_bitmap_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_9,
			      RX_BITMAP_31_0, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_10,
					 RX_BITMAP_63_32)];
	st->rx_bitmap_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_10,
			      RX_BITMAP_63_32, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_11,
					 RX_BITMAP_95_64)];
	st->rx_bitmap_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_11,
			      RX_BITMAP_95_64, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_12,
					 RX_BITMAP_127_96)];
	st->rx_bitmap_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_12,
			      RX_BITMAP_127_96, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_13,
					 RX_BITMAP_159_128)];
	st->rx_bitmap_159_128 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_13,
			      RX_BITMAP_159_128, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_14,
					 RX_BITMAP_191_160)];
	st->rx_bitmap_191_160 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_14,
			      RX_BITMAP_191_160, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_15,
					 RX_BITMAP_223_192)];
	st->rx_bitmap_223_192 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_15,
			      RX_BITMAP_223_192, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_16,
					 RX_BITMAP_255_224)];
	st->rx_bitmap_255_224 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_16,
			      RX_BITMAP_255_224, val);

	/* various counts */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
					 CURRENT_MPDU_COUNT)];
	st->curr_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
			      CURRENT_MPDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
					 CURRENT_MSDU_COUNT)];
	st->curr_msdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
			      CURRENT_MSDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
					 TIMEOUT_COUNT)];
	st->fwd_timeout_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      TIMEOUT_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
					 FORWARD_DUE_TO_BAR_COUNT)];
	st->fwd_bar_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      FORWARD_DUE_TO_BAR_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
					 DUPLICATE_COUNT)];
	st->dup_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      DUPLICATE_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
					 FRAMES_IN_ORDER_COUNT)];
	st->frms_in_order_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
			      FRAMES_IN_ORDER_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
					 BAR_RECEIVED_COUNT)];
	st->bar_rcvd_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
			      BAR_RECEIVED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_20,
					 MPDU_FRAMES_PROCESSED_COUNT)];
	st->mpdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_20,
			      MPDU_FRAMES_PROCESSED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_21,
					 MSDU_FRAMES_PROCESSED_COUNT)];
	st->msdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_21,
			      MSDU_FRAMES_PROCESSED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_22,
					 TOTAL_PROCESSED_BYTE_COUNT)];
	st->total_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_22,
			      TOTAL_PROCESSED_BYTE_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
					 LATE_RECEIVE_MPDU_COUNT)];
	st->late_recv_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      LATE_RECEIVE_MPDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
					 WINDOW_JUMP_2K)];
	st->win_jump_2k =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      WINDOW_JUMP_2K, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
					 HOLE_COUNT)];
	st->hole_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      HOLE_COUNT, val);
}
qdf_export_symbol(hal_reo_queue_stats_status);

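/**
 * hal_reo_flush_queue_status() - Parse a FLUSH_QUEUE status descriptor
 * @reo_desc: status descriptor from the REO status ring
 * @st: buffer for the parsed flush queue status
 *
 * Return: none
 */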
inline void hal_reo_flush_queue_status(uint32_t *reo_desc,
				    struct hal_reo_flush_queue_status *st)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc, REO_FLUSH_QUEUE, st->header);

	/* error bit */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_QUEUE_STATUS_2,
					 ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
				  val);
}
qdf_export_symbol(hal_reo_flush_queue_status);

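/**
 * hal_reo_flush_cache_status() - Parse a FLUSH_CACHE status descriptor
 * @reo_desc: status descriptor from the REO status ring
 * @soc: Handle to HAL SoC structure
 * @st: buffer for the parsed flush cache status
 *
 * When no block error is reported, the blocking resource reserved for the
 * flush is marked as used in soc->reo_res_bitmap.
 *
 * Return: none
 */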
inline void hal_reo_flush_cache_status(uint32_t *reo_desc, struct hal_soc *soc,
				    struct hal_reo_flush_cache_status *st)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc, REO_FLUSH_CACHE, st->header);

	/* error bit */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
					 ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2, ERROR_DETECTED,
				  val);

	/* block error */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
					 BLOCK_ERROR_DETAILS)];
	st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
					BLOCK_ERROR_DETAILS,
					val);
	if (!st->block_error)
		qdf_set_bit(soc->index, (unsigned long *)&soc->reo_res_bitmap);

	/* cache flush status */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
					 CACHE_CONTROLLER_FLUSH_STATUS_HIT)];
	st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
					CACHE_CONTROLLER_FLUSH_STATUS_HIT,
					val);

	/* cache flush descriptor type */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
				  CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)];
	st->cache_flush_status_desc_type =
		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
			      CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE,
			      val);

	/* cache flush count */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
				  CACHE_CONTROLLER_FLUSH_COUNT)];
	st->cache_flush_cnt =
		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
			      CACHE_CONTROLLER_FLUSH_COUNT,
			      val);
}
qdf_export_symbol(hal_reo_flush_cache_status);

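/**
 * hal_reo_unblock_cache_status() - Parse an UNBLOCK_CACHE status descriptor
 * @reo_desc: status descriptor from the REO status ring
 * @soc: Handle to HAL SoC structure
 * @st: buffer for the parsed unblock cache status
 *
 * On a successful resource-index unblock, the corresponding bit is cleared
 * in soc->reo_res_bitmap.
 *
 * Return: none
 */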
inline void hal_reo_unblock_cache_status(uint32_t *reo_desc,
					 struct hal_soc *soc,
					 struct hal_reo_unblk_cache_status *st)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc, REO_UNBLOCK_CACHE, st->header);

	/* error bit */
	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
				  ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
				  ERROR_DETECTED,
				  val);

	/* unblock type */
	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
				  UNBLOCK_TYPE)];
	st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
					 UNBLOCK_TYPE,
					 val);

	if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX))
		qdf_clear_bit(soc->index,
			     (unsigned long *)&soc->reo_res_bitmap);
}
qdf_export_symbol(hal_reo_unblock_cache_status);

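/**
 * hal_reo_flush_timeout_list_status() - Parse a FLUSH_TIMEOUT_LIST status
 * descriptor
 * @reo_desc: status descriptor from the REO status ring
 * @st: buffer for the parsed flush timeout list status
 *
 * Return: none
 */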
inline void hal_reo_flush_timeout_list_status(
			 uint32_t *reo_desc,
			 struct hal_reo_flush_timeout_list_status *st)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc, REO_FLUSH_TIMEOUT_LIST, st->header);

	/* error bit */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
					 ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
				  ERROR_DETECTED,
				  val);

	/* list empty */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
					 TIMOUT_LIST_EMPTY)];
	st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
					TIMOUT_LIST_EMPTY,
					val);

	/* release descriptor count */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
					 RELEASE_DESC_COUNT)];
	st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
				       RELEASE_DESC_COUNT,
				       val);

	/* forward buf count */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
					 FORWARD_BUF_COUNT)];
	st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
				       FORWARD_BUF_COUNT,
				       val);
}
qdf_export_symbol(hal_reo_flush_timeout_list_status);

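/**
 * hal_reo_desc_thres_reached_status() - Parse a DESCRIPTOR_THRESHOLD_REACHED
 * status descriptor
 * @reo_desc: status descriptor from the REO status ring
 * @st: buffer for the parsed threshold reached status
 *
 * Return: none
 */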
inline void hal_reo_desc_thres_reached_status(
			 uint32_t *reo_desc,
			 struct hal_reo_desc_thres_reached_status *st)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc,
			      REO_DESCRIPTOR_THRESHOLD_REACHED, st->header);

	/* threshold index */
	val = reo_desc[HAL_OFFSET_DW(
				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
				 THRESHOLD_INDEX)];
	st->thres_index = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
				THRESHOLD_INDEX,
				val);

	/* link desc counters */
	val = reo_desc[HAL_OFFSET_DW(
				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
				 LINK_DESCRIPTOR_COUNTER0)];
	st->link_desc_counter0 = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
				LINK_DESCRIPTOR_COUNTER0,
				val);

	val = reo_desc[HAL_OFFSET_DW(
				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
				 LINK_DESCRIPTOR_COUNTER1)];
	st->link_desc_counter1 = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
				LINK_DESCRIPTOR_COUNTER1,
				val);

	val = reo_desc[HAL_OFFSET_DW(
				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
				 LINK_DESCRIPTOR_COUNTER2)];
	st->link_desc_counter2 = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
				LINK_DESCRIPTOR_COUNTER2,
				val);

	val = reo_desc[HAL_OFFSET_DW(
				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
				 LINK_DESCRIPTOR_COUNTER_SUM)];
	st->link_desc_counter_sum = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
				LINK_DESCRIPTOR_COUNTER_SUM,
				val);
}
qdf_export_symbol(hal_reo_desc_thres_reached_status);

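/**
 * hal_reo_rx_update_queue_status() - Parse an UPDATE_RX_REO_QUEUE status
 * descriptor
 * @reo_desc: status descriptor from the REO status ring
 * @st: buffer for the parsed update queue status
 *
 * Return: none
 */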
inline void hal_reo_rx_update_queue_status(uint32_t *reo_desc,
				      struct hal_reo_update_rx_queue_status *st)
{
	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc,
			      REO_UPDATE_RX_REO_QUEUE, st->header);
}
qdf_export_symbol(hal_reo_rx_update_queue_status);

/**
 * hal_reo_init_cmd_ring() - Initialize descriptors of REO command SRNG
 * with command number
 * @soc: Handle to HAL SoC structure
 * @hal_srng: Handle to HAL SRNG structure
 *
 * Return: none
 */
inline void hal_reo_init_cmd_ring(struct hal_soc *soc, void *hal_srng)
{
	int cmd_num;
	uint32_t *desc_addr;
	struct hal_srng_params srng_params;
	uint32_t desc_size;
	uint32_t num_desc;

	hal_get_srng_params(soc, hal_srng, &srng_params);

	desc_addr = (uint32_t *)(srng_params.ring_base_vaddr);
	desc_addr += (sizeof(struct tlv_32_hdr) >> 2);
	desc_size = hal_srng_get_entrysize(soc, REO_CMD) >> 2;
	num_desc = srng_params.num_entries;
	cmd_num = 1;
	while (num_desc) {
		/* Offsets of descriptor fields defined in HW headers start
		 * from the field after TLV header */
		HAL_DESC_SET_FIELD(desc_addr, UNIFORM_REO_CMD_HEADER_0,
			REO_CMD_NUMBER, cmd_num);
		desc_addr += desc_size;
		num_desc--;
		cmd_num++;
	}

	soc->reo_res_bitmap = 0;
}
qdf_export_symbol(hal_reo_init_cmd_ring);