/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "qdf_module.h"
#include "hal_hw_headers.h"
#include "hal_be_hw_headers.h"
#include "hal_reo.h"
#include "hal_be_reo.h"
#include "hal_be_api.h"

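/**
 * hal_get_reo_reg_base_offset_be() - Get the REO register base offset
 *
 * Return: REO register base offset for BE targets
 */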
uint32_t hal_get_reo_reg_base_offset_be(void)
{
	return REO_REG_REG_BASE;
}

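/**
 * hal_reo_qdesc_setup_be() - Set up a REO RX queue descriptor
 * @hal_soc_hdl: HAL SoC handle
 * @tid: TID of the queue; extension descriptors are skipped for
 *	 HAL_NON_QOS_TID
 * @ba_window_size: Block-ack window size
 * @start_seq: Starting sequence number (SSN)
 * @hw_qdesc_vaddr: Virtual address of the queue descriptor memory
 * @hw_qdesc_paddr: Physical address of the queue descriptor memory
 * @pn_type: PN type (e.g. HAL_PN_WPA, HAL_PN_WAPI_EVEN, HAL_PN_WAPI_UNEVEN)
 * @vdev_stats_id: Statistics counter index for this vdev
 */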
void hal_reo_qdesc_setup_be(hal_soc_handle_t hal_soc_hdl, int tid,
			    uint32_t ba_window_size,
			    uint32_t start_seq, void *hw_qdesc_vaddr,
			    qdf_dma_addr_t hw_qdesc_paddr,
			    int pn_type, uint8_t vdev_stats_id)
{
	uint32_t *reo_queue_desc = (uint32_t *)hw_qdesc_vaddr;
	uint32_t *reo_queue_ext_desc;
	uint32_t reg_val;
	uint32_t pn_enable;
	uint32_t pn_size = 0;

	qdf_mem_zero(hw_qdesc_vaddr, sizeof(struct rx_reo_queue));

	hal_uniform_desc_hdr_setup(reo_queue_desc, HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_desc, UNIFORM_DESCRIPTOR_HEADER,
			   RESERVED_0A, 0xDDBEEF);

	/* This is just SW metadata and will be copied to the REO destination
	 * descriptors indicated by hardware.
	 * TODO: Setting TID in this field. See if we should set something else.
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
			   RECEIVE_QUEUE_NUMBER, tid);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
			   VLD, 1);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
			   ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
			   HAL_RX_LINK_DESC_CNTR);

	/*
	 * Fields DISABLE_DUPLICATE_DETECTION and SOFT_REORDER_ENABLE will be 0
	 */

	reg_val = TID_TO_WME_AC(tid);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, AC, reg_val);

	if (ba_window_size < 1)
		ba_window_size = 1;

	/* WAR to get a 2k-jump exception in the non-BA case: the window size
	 * is bumped to 2 so that the 2k-jump exception is raised when
	 * aggregates are received in the non-BA case.
	 */
	ba_window_size = hal_update_non_ba_win_size(tid, ba_window_size);

	/* Set the RTY bit for the non-BA case. Duplicate detection is
	 * currently not done by HW in the non-BA case if the RTY bit is not
	 * set.
	 * TODO: This is a temporary WAR and should be removed once HW is
	 * fixed to check and discard duplicates even if the RTY bit is not
	 * set.
	 */
	if (ba_window_size == 1)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, RTY, 1);

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, BA_WINDOW_SIZE,
			   ba_window_size - 1);

	switch (pn_type) {
	case HAL_PN_WPA:
		pn_enable = 1;
		pn_size = PN_SIZE_48;
		break;
	case HAL_PN_WAPI_EVEN:
	case HAL_PN_WAPI_UNEVEN:
		pn_enable = 1;
		pn_size = PN_SIZE_128;
		break;
	default:
		pn_enable = 0;
		break;
	}

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, PN_CHECK_NEEDED,
			   pn_enable);

	if (pn_type == HAL_PN_WAPI_EVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
				   PN_SHALL_BE_EVEN, 1);
	else if (pn_type == HAL_PN_WAPI_UNEVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
				   PN_SHALL_BE_UNEVEN, 1);

	/*
	 * TODO: Need to check if PN handling in SW needs to be enabled.
	 * So far this is not a requirement.
	 */

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, PN_SIZE,
			   pn_size);

	/* TODO: Check if RX_REO_QUEUE_IGNORE_AMPDU_FLAG needs to be set
	 * based on BA window size and/or AMPDU capabilities
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
			   IGNORE_AMPDU_FLAG, 1);

	if (start_seq <= 0xfff)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, SSN,
				   start_seq);

	/* TODO: SVLD should be set to 1 if a valid SSN is received in ADDBA,
	 * but REO is not delivering packets if we set it to 1. Need to enable
	 * this once the issue is resolved.
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, SVLD, 0);

	hal_update_stats_counter_index(reo_queue_desc, vdev_stats_id);

	/* TODO: Check if we should set the start PN for WAPI */

	/* TODO: HW queue descriptors are currently allocated for the max BA
	 * window size for all QoS TIDs so that the same descriptor can be
	 * reused when an ADDBA request is received. This should be changed to
	 * allocate HW queue descriptors based on the BA window size being
	 * negotiated (0 for non-BA cases), reallocate when the BA window size
	 * changes, and also send a WMI message to FW to change the REO queue
	 * descriptor in the Rx peer entry as part of dp_rx_tid_update.
	 */
	if (tid == HAL_NON_QOS_TID)
		return;

	reo_queue_ext_desc = (uint32_t *)
		(((struct rx_reo_queue *)reo_queue_desc) + 1);
	qdf_mem_zero(reo_queue_ext_desc, 3 *
		     sizeof(struct rx_reo_queue_ext));
	/* Initialize first reo queue extension descriptor */
	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
				   HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_EXT_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			   UNIFORM_DESCRIPTOR_HEADER, RESERVED_0A,
			   0xADBEEF);
	/* Initialize second reo queue extension descriptor */
	reo_queue_ext_desc = (uint32_t *)
		(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
				   HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_EXT_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			   UNIFORM_DESCRIPTOR_HEADER, RESERVED_0A,
			   0xBDBEEF);
	/* Initialize third reo queue extension descriptor */
	reo_queue_ext_desc = (uint32_t *)
		(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
				   HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_EXT_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			   UNIFORM_DESCRIPTOR_HEADER, RESERVED_0A,
			   0xCDBEEF);
}

qdf_export_symbol(hal_reo_qdesc_setup_be);

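/**
 * hal_reo_cmd_set_descr_addr_be() - Fill the descriptor address fields of
 *				     a REO command
 * @reo_desc: REO command descriptor, pointing past the TLV header
 * @type: REO command type
 * @paddr_lo: Lower 32 bits of the queue/flush descriptor physical address
 * @paddr_hi: Bits 39:32 of the queue/flush descriptor physical address
 */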
static void
hal_reo_cmd_set_descr_addr_be(uint32_t *reo_desc,
			      enum hal_reo_cmd_type type,
			      uint32_t paddr_lo,
			      uint8_t paddr_hi)
{
	struct reo_get_queue_stats *reo_get_queue_stats;
	struct reo_flush_queue *reo_flush_queue;
	struct reo_flush_cache *reo_flush_cache;
	struct reo_update_rx_reo_queue *reo_update_rx_reo_queue;

	switch (type) {
	case CMD_GET_QUEUE_STATS:
		reo_get_queue_stats = (struct reo_get_queue_stats *)reo_desc;
		reo_get_queue_stats->rx_reo_queue_desc_addr_31_0 = paddr_lo;
		reo_get_queue_stats->rx_reo_queue_desc_addr_39_32 = paddr_hi;
		break;
	case CMD_FLUSH_QUEUE:
		reo_flush_queue = (struct reo_flush_queue *)reo_desc;
		reo_flush_queue->flush_desc_addr_31_0 = paddr_lo;
		reo_flush_queue->flush_desc_addr_39_32 = paddr_hi;
		break;
	case CMD_FLUSH_CACHE:
		reo_flush_cache = (struct reo_flush_cache *)reo_desc;
		reo_flush_cache->flush_addr_31_0 = paddr_lo;
		reo_flush_cache->flush_addr_39_32 = paddr_hi;
		break;
	case CMD_UPDATE_RX_REO_QUEUE:
		reo_update_rx_reo_queue =
				(struct reo_update_rx_reo_queue *)reo_desc;
		reo_update_rx_reo_queue->rx_reo_queue_desc_addr_31_0 = paddr_lo;
		reo_update_rx_reo_queue->rx_reo_queue_desc_addr_39_32 =
								paddr_hi;
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Invalid REO command type", __func__);
		break;
	}
}

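/**
 * hal_reo_cmd_queue_stats_be() - Post a GET_QUEUE_STATS command to the
 *				  REO command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: HAL SoC handle
 * @cmd: REO command parameters
 *
 * Return: REO command number on success, -EBUSY if the ring is full
 */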
static int
hal_reo_cmd_queue_stats_be(hal_ring_handle_t  hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct reo_get_queue_stats *reo_get_queue_stats;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		hal_srng_access_end_reap(hal_soc, hal_ring_hdl);
		hal_warn_rl("Out of cmd ring entries");
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E,
			sizeof(struct reo_get_queue_stats));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_get_queue_stats) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	reo_get_queue_stats = (struct reo_get_queue_stats *)reo_desc;
	reo_get_queue_stats->cmd_header.reo_status_required =
							cmd->std.need_status;

	hal_reo_cmd_set_descr_addr_be(reo_desc, CMD_GET_QUEUE_STATS,
				      cmd->std.addr_lo,
				      cmd->std.addr_hi);

	reo_get_queue_stats->clear_stats = cmd->u.stats_params.clear;

	hal_srng_access_end_v1(hal_soc_hdl, hal_ring_hdl,
			       HIF_RTPM_ID_HAL_REO_CMD);

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
			     val);
}

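/**
 * hal_reo_cmd_flush_queue_be() - Post a FLUSH_QUEUE command to the REO
 *				  command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: HAL SoC handle
 * @cmd: REO command parameters
 *
 * Return: REO command number on success, -EBUSY if the ring is full
 */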
static int
hal_reo_cmd_flush_queue_be(hal_ring_handle_t hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct reo_flush_queue *reo_flush_queue;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		hal_srng_access_end_reap(hal_soc, hal_ring_hdl);
		hal_warn_rl("Out of cmd ring entries");
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E,
			sizeof(struct reo_flush_queue));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_queue) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	reo_flush_queue = (struct reo_flush_queue *)reo_desc;
	reo_flush_queue->cmd_header.reo_status_required = cmd->std.need_status;

	hal_reo_cmd_set_descr_addr_be(reo_desc, CMD_FLUSH_QUEUE,
				      cmd->std.addr_lo, cmd->std.addr_hi);

	reo_flush_queue->block_desc_addr_usage_after_flush =
				cmd->u.fl_queue_params.block_use_after_flush;

	if (cmd->u.fl_queue_params.block_use_after_flush)
		reo_flush_queue->block_resource_index =
						cmd->u.fl_queue_params.index;

	hal_srng_access_end_v1(hal_soc_hdl, hal_ring_hdl,
			       HIF_RTPM_ID_HAL_REO_CMD);

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
			     val);
}

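/**
 * hal_reo_cmd_flush_cache_be() - Post a FLUSH_CACHE command to the REO
 *				  command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: HAL SoC handle
 * @cmd: REO command parameters
 *
 * Return: REO command number on success, -EBUSY if the ring is full or
 * no cache blocking resource is available
 */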
static int
hal_reo_cmd_flush_cache_be(hal_ring_handle_t hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_flush_cache_params *cp;
	uint8_t index = 0;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct reo_flush_cache *reo_flush_cache;

	cp = &cmd->u.fl_cache_params;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);

	/* We need a cache block resource for this operation, and REO HW has
	 * only 4 such blocking resources. These resources are managed using
	 * reo_res_bitmap, and we return failure if none is available.
	 */
	if (cp->block_use_after_flush) {
		index = hal_find_zero_bit(hal_soc->reo_res_bitmap);
		if (index > 3) {
			hal_srng_access_end_reap(hal_soc, hal_ring_hdl);
			hal_warn_rl("No blocking resource available!");
			return -EBUSY;
		}
		hal_soc->index = index;
	}

	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		hal_srng_access_end_reap(hal_soc, hal_ring_hdl);
		hal_srng_dump(hal_ring_handle_to_hal_srng(hal_ring_hdl));
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E,
			sizeof(struct reo_flush_cache));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	reo_flush_cache = (struct reo_flush_cache *)reo_desc;
	reo_flush_cache->cmd_header.reo_status_required = cmd->std.need_status;

	hal_reo_cmd_set_descr_addr_be(reo_desc, CMD_FLUSH_CACHE,
				      cmd->std.addr_lo, cmd->std.addr_hi);

	reo_flush_cache->forward_all_mpdus_in_queue = cp->fwd_mpdus_in_queue;

	/* set it to 0 for now */
	cp->rel_block_index = 0;
	reo_flush_cache->release_cache_block_index = cp->rel_block_index;

	if (cp->block_use_after_flush)
		reo_flush_cache->cache_block_resource_index = index;

	reo_flush_cache->flush_without_invalidate = cp->flush_no_inval;
	reo_flush_cache->flush_queue_1k_desc = cp->flush_q_1k_desc;
	reo_flush_cache->block_cache_usage_after_flush =
						cp->block_use_after_flush;
	reo_flush_cache->flush_entire_cache = cp->flush_entire_cache;

	hal_srng_access_end_v1(hal_soc_hdl, hal_ring_hdl,
			       HIF_RTPM_ID_HAL_REO_CMD);

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
			     val);
}

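/**
 * hal_reo_cmd_unblock_cache_be() - Post an UNBLOCK_CACHE command to the
 *				    REO command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: HAL SoC handle
 * @cmd: REO command parameters
 *
 * Return: REO command number on success, -EBUSY if the ring is full or
 * there is no blocked resource to unblock
 */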
static int
hal_reo_cmd_unblock_cache_be(hal_ring_handle_t hal_ring_hdl,
			     hal_soc_handle_t hal_soc_hdl,
			     struct hal_reo_cmd_params *cmd)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;
	uint8_t index = 0;
	struct reo_unblock_cache *reo_unblock_cache;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		index = hal_find_one_bit(hal_soc->reo_res_bitmap);
		if (index > 3) {
			hal_srng_access_end(hal_soc, hal_ring_hdl);
			qdf_print("No blocking resource to unblock!");
			return -EBUSY;
		}
	}

	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		hal_srng_access_end_reap(hal_soc, hal_ring_hdl);
		hal_warn_rl("Out of cmd ring entries");
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E,
			sizeof(struct reo_unblock_cache));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_unblock_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	reo_unblock_cache = (struct reo_unblock_cache *)reo_desc;
	reo_unblock_cache->cmd_header.reo_status_required =
							cmd->std.need_status;
	reo_unblock_cache->unblock_type = cmd->u.unblk_cache_params.type;

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX)
		reo_unblock_cache->cache_block_resource_index =
						cmd->u.unblk_cache_params.index;

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
			     val);
}

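/**
 * hal_reo_cmd_flush_timeout_list_be() - Post a FLUSH_TIMEOUT_LIST command
 *					 to the REO command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: HAL SoC handle
 * @cmd: REO command parameters
 *
 * Return: REO command number on success, -EBUSY if the ring is full
 */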
static int
hal_reo_cmd_flush_timeout_list_be(hal_ring_handle_t hal_ring_hdl,
				  hal_soc_handle_t hal_soc_hdl,
				  struct hal_reo_cmd_params *cmd)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;
	struct reo_flush_timeout_list *reo_flush_timeout_list;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		hal_srng_access_end_reap(hal_soc, hal_ring_hdl);
		hal_warn_rl("Out of cmd ring entries");
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E,
			sizeof(struct reo_flush_timeout_list));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_timeout_list) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	reo_flush_timeout_list = (struct reo_flush_timeout_list *)reo_desc;
	reo_flush_timeout_list->cmd_header.reo_status_required =
							cmd->std.need_status;
	reo_flush_timeout_list->ac_timout_list =
					cmd->u.fl_tim_list_params.ac_list;
	reo_flush_timeout_list->minimum_release_desc_count =
					cmd->u.fl_tim_list_params.min_rel_desc;
	reo_flush_timeout_list->minimum_forward_buf_count =
					cmd->u.fl_tim_list_params.min_fwd_buf;

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
			     val);
}

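/**
 * hal_reo_cmd_update_rx_queue_be() - Post an UPDATE_RX_REO_QUEUE command
 *				      to the REO command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: HAL SoC handle
 * @cmd: REO command parameters
 *
 * Return: REO command number on success, -EBUSY if the ring is full
 */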
static int
hal_reo_cmd_update_rx_queue_be(hal_ring_handle_t hal_ring_hdl,
			       hal_soc_handle_t hal_soc_hdl,
			       struct hal_reo_cmd_params *cmd)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_update_queue_params *p;
	struct reo_update_rx_reo_queue *reo_update_rx_reo_queue;

	p = &cmd->u.upd_queue_params;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		hal_srng_access_end_reap(hal_soc, hal_ring_hdl);
		hal_warn_rl("Out of cmd ring entries");
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E,
			sizeof(struct reo_update_rx_reo_queue));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_update_rx_reo_queue) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	reo_update_rx_reo_queue = (struct reo_update_rx_reo_queue *)reo_desc;
	reo_update_rx_reo_queue->cmd_header.reo_status_required =
							cmd->std.need_status;

	hal_reo_cmd_set_descr_addr_be(reo_desc, CMD_UPDATE_RX_REO_QUEUE,
				      cmd->std.addr_lo, cmd->std.addr_hi);

	reo_update_rx_reo_queue->update_receive_queue_number =
							p->update_rx_queue_num;
	reo_update_rx_reo_queue->update_vld = p->update_vld;
	reo_update_rx_reo_queue->update_associated_link_descriptor_counter =
						p->update_assoc_link_desc;
	reo_update_rx_reo_queue->update_disable_duplicate_detection =
						p->update_disable_dup_detect;
	reo_update_rx_reo_queue->update_soft_reorder_enable =
						p->update_soft_reorder_enab;
	reo_update_rx_reo_queue->update_ac = p->update_ac;
	reo_update_rx_reo_queue->update_bar = p->update_bar;
	reo_update_rx_reo_queue->update_rty = p->update_rty;
	reo_update_rx_reo_queue->update_chk_2k_mode = p->update_chk_2k_mode;
	reo_update_rx_reo_queue->update_oor_mode = p->update_oor_mode;
	reo_update_rx_reo_queue->update_ba_window_size =
						p->update_ba_window_size;
	reo_update_rx_reo_queue->update_pn_check_needed =
						p->update_pn_check_needed;
	reo_update_rx_reo_queue->update_pn_shall_be_even = p->update_pn_even;
	reo_update_rx_reo_queue->update_pn_shall_be_uneven =
							p->update_pn_uneven;
	reo_update_rx_reo_queue->update_pn_handling_enable =
							p->update_pn_hand_enab;
	reo_update_rx_reo_queue->update_pn_size = p->update_pn_size;
	reo_update_rx_reo_queue->update_ignore_ampdu_flag =
							p->update_ignore_ampdu;
	reo_update_rx_reo_queue->update_svld = p->update_svld;
	reo_update_rx_reo_queue->update_ssn = p->update_ssn;
	reo_update_rx_reo_queue->update_seq_2k_error_detected_flag =
						p->update_seq_2k_err_detect;
	reo_update_rx_reo_queue->update_pn_valid = p->update_pn_valid;
	reo_update_rx_reo_queue->update_pn = p->update_pn;
	reo_update_rx_reo_queue->receive_queue_number = p->rx_queue_num;
	reo_update_rx_reo_queue->vld = p->vld;
	reo_update_rx_reo_queue->associated_link_descriptor_counter =
							p->assoc_link_desc;
	reo_update_rx_reo_queue->disable_duplicate_detection =
							p->disable_dup_detect;
	reo_update_rx_reo_queue->soft_reorder_enable = p->soft_reorder_enab;
	reo_update_rx_reo_queue->ac = p->ac;
	reo_update_rx_reo_queue->bar = p->bar;
	reo_update_rx_reo_queue->chk_2k_mode = p->chk_2k_mode;
	reo_update_rx_reo_queue->rty = p->rty;
	reo_update_rx_reo_queue->oor_mode = p->oor_mode;
	reo_update_rx_reo_queue->pn_check_needed = p->pn_check_needed;
	reo_update_rx_reo_queue->pn_shall_be_even = p->pn_even;
	reo_update_rx_reo_queue->pn_shall_be_uneven = p->pn_uneven;
	reo_update_rx_reo_queue->pn_handling_enable = p->pn_hand_enab;
	reo_update_rx_reo_queue->ignore_ampdu_flag = p->ignore_ampdu;

	if (p->ba_window_size < 1)
		p->ba_window_size = 1;
	/*
	 * WAR to get a 2k-jump exception in the non-BA case:
	 * the window size is bumped to 2 so that the 2k-jump exception
	 * is raised when aggregates are received in the non-BA case.
	 */
	if (p->ba_window_size == 1)
		p->ba_window_size++;

	reo_update_rx_reo_queue->ba_window_size = p->ba_window_size - 1;
	reo_update_rx_reo_queue->pn_size = p->pn_size;
	reo_update_rx_reo_queue->svld = p->svld;
	reo_update_rx_reo_queue->ssn = p->ssn;
	reo_update_rx_reo_queue->seq_2k_error_detected_flag =
							p->seq_2k_err_detect;
	reo_update_rx_reo_queue->pn_error_detected_flag = p->pn_err_detect;
	reo_update_rx_reo_queue->pn_31_0 = p->pn_31_0;
	reo_update_rx_reo_queue->pn_63_32 = p->pn_63_32;
	reo_update_rx_reo_queue->pn_95_64 = p->pn_95_64;
	reo_update_rx_reo_queue->pn_127_96 = p->pn_127_96;

	hal_srng_access_end_v1(hal_soc_hdl, hal_ring_hdl,
			       HIF_RTPM_ID_HAL_REO_CMD);

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
			     val);
}

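/**
 * hal_reo_send_cmd_be() - Send a REO command to the REO command ring
 * @hal_soc_hdl: HAL SoC handle
 * @hal_ring_hdl: REO command ring handle
 * @cmd: REO command type
 * @params: REO command parameters (struct hal_reo_cmd_params)
 *
 * Return: REO command number on success, -EBUSY if the command ring is
 * full, -EINVAL for an unknown command type
 */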
int hal_reo_send_cmd_be(hal_soc_handle_t hal_soc_hdl,
			hal_ring_handle_t  hal_ring_hdl,
			enum hal_reo_cmd_type cmd,
			void *params)
{
	struct hal_reo_cmd_params *cmd_params =
			(struct hal_reo_cmd_params *)params;
	int num = 0;

	switch (cmd) {
	case CMD_GET_QUEUE_STATS:
		num = hal_reo_cmd_queue_stats_be(hal_ring_hdl,
						 hal_soc_hdl, cmd_params);
		break;
	case CMD_FLUSH_QUEUE:
		num = hal_reo_cmd_flush_queue_be(hal_ring_hdl,
						 hal_soc_hdl, cmd_params);
		break;
	case CMD_FLUSH_CACHE:
		num = hal_reo_cmd_flush_cache_be(hal_ring_hdl,
						 hal_soc_hdl, cmd_params);
		break;
	case CMD_UNBLOCK_CACHE:
		num = hal_reo_cmd_unblock_cache_be(hal_ring_hdl,
						   hal_soc_hdl, cmd_params);
		break;
	case CMD_FLUSH_TIMEOUT_LIST:
		num = hal_reo_cmd_flush_timeout_list_be(hal_ring_hdl,
							hal_soc_hdl,
							cmd_params);
		break;
	case CMD_UPDATE_RX_REO_QUEUE:
		num = hal_reo_cmd_update_rx_queue_be(hal_ring_hdl,
						     hal_soc_hdl, cmd_params);
		break;
	default:
		hal_err("Invalid REO command type: %d", cmd);
		return -EINVAL;
	}

	return num;
}

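/*
 * Illustrative caller sketch (hypothetical, not part of this file): how a
 * dp-layer client might post a GET_QUEUE_STATS command through
 * hal_reo_send_cmd_be(). The names soc_hdl, reo_cmd_ring_hdl and
 * qdesc_paddr are assumptions for the example; only the structures and
 * the calls themselves come from this file.
 *
 *	struct hal_reo_cmd_params params = {0};
 *	int cmd_num;
 *
 *	params.std.need_status = 1;
 *	params.std.addr_lo = qdesc_paddr & 0xffffffff;
 *	params.std.addr_hi = (uint64_t)qdesc_paddr >> 32;
 *	params.u.stats_params.clear = 0;
 *
 *	cmd_num = hal_reo_send_cmd_be(soc_hdl, reo_cmd_ring_hdl,
 *				      CMD_GET_QUEUE_STATS, &params);
 *	if (cmd_num < 0)
 *		return cmd_num;	// ring full, retry later
 *
 * The returned command number can later be matched against the REO status
 * ring entry parsed by hal_reo_queue_stats_status_be() below.
 */

/**
 * hal_reo_queue_stats_status_be() - Parse a GET_QUEUE_STATS status TLV
 * @ring_desc: REO status ring descriptor
 * @st_handle: Output buffer (struct hal_reo_queue_status)
 * @hal_soc_hdl: HAL SoC handle
 */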
void
hal_reo_queue_stats_status_be(hal_ring_desc_t ring_desc,
			      void *st_handle,
			      hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_queue_status *st =
		(struct hal_reo_queue_status *)st_handle;
	uint64_t *reo_desc = (uint64_t *)ring_desc;
	uint64_t val;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));

	/* header */
	hal_reo_status_get_header(ring_desc, HAL_REO_QUEUE_STATS_STATUS_TLV,
				  &(st->header), hal_soc);

	/* SSN */
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, SSN)];
	st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, SSN, val);

	/* current index */
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 CURRENT_INDEX)];
	st->curr_idx =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      CURRENT_INDEX, val);

	/* PN bits */
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 PN_31_0)];
	st->pn_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      PN_31_0, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 PN_63_32)];
	st->pn_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      PN_63_32, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 PN_95_64)];
	st->pn_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      PN_95_64, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 PN_127_96)];
	st->pn_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      PN_127_96, val);

	/* timestamps */
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 LAST_RX_ENQUEUE_TIMESTAMP)];
	st->last_rx_enq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      LAST_RX_ENQUEUE_TIMESTAMP, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 LAST_RX_DEQUEUE_TIMESTAMP)];
	st->last_rx_deq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      LAST_RX_DEQUEUE_TIMESTAMP, val);

	/* rx bitmap */
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 RX_BITMAP_31_0)];
	st->rx_bitmap_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_31_0, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 RX_BITMAP_63_32)];
	st->rx_bitmap_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_63_32, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 RX_BITMAP_95_64)];
	st->rx_bitmap_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_95_64, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 RX_BITMAP_127_96)];
	st->rx_bitmap_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_127_96, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 RX_BITMAP_159_128)];
	st->rx_bitmap_159_128 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_159_128, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 RX_BITMAP_191_160)];
	st->rx_bitmap_191_160 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_191_160, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 RX_BITMAP_223_192)];
	st->rx_bitmap_223_192 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_223_192, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 RX_BITMAP_255_224)];
	st->rx_bitmap_255_224 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_255_224, val);

	/* various counts */
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 CURRENT_MPDU_COUNT)];
	st->curr_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      CURRENT_MPDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 CURRENT_MSDU_COUNT)];
	st->curr_msdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      CURRENT_MSDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 TIMEOUT_COUNT)];
	st->fwd_timeout_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      TIMEOUT_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 FORWARD_DUE_TO_BAR_COUNT)];
	st->fwd_bar_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      FORWARD_DUE_TO_BAR_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 DUPLICATE_COUNT)];
	st->dup_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      DUPLICATE_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 FRAMES_IN_ORDER_COUNT)];
	st->frms_in_order_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      FRAMES_IN_ORDER_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 BAR_RECEIVED_COUNT)];
	st->bar_rcvd_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      BAR_RECEIVED_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 MPDU_FRAMES_PROCESSED_COUNT)];
	st->mpdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      MPDU_FRAMES_PROCESSED_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 MSDU_FRAMES_PROCESSED_COUNT)];
	st->msdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      MSDU_FRAMES_PROCESSED_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 TOTAL_PROCESSED_BYTE_COUNT)];
	st->total_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      TOTAL_PROCESSED_BYTE_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 LATE_RECEIVE_MPDU_COUNT)];
	st->late_recv_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      LATE_RECEIVE_MPDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 WINDOW_JUMP_2K)];
	st->win_jump_2k =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      WINDOW_JUMP_2K, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
					 HOLE_COUNT)];
	st->hole_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      HOLE_COUNT, val);
}

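/**
 * hal_reo_flush_queue_status_be() - Parse a FLUSH_QUEUE status TLV
 * @ring_desc: REO status ring descriptor
 * @st_handle: Output buffer (struct hal_reo_flush_queue_status)
 * @hal_soc_hdl: HAL SoC handle
 */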
void
hal_reo_flush_queue_status_be(hal_ring_desc_t ring_desc,
			      void *st_handle,
			      hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_flush_queue_status *st =
			(struct hal_reo_flush_queue_status *)st_handle;
	uint64_t *reo_desc = (uint64_t *)ring_desc;
	uint64_t val;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));

	/* header */
	hal_reo_status_get_header(ring_desc, HAL_REO_FLUSH_QUEUE_STATUS_TLV,
				  &(st->header), hal_soc);

	/* error bit */
	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_QUEUE_STATUS,
					 ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS, ERROR_DETECTED,
				  val);
}

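/**
 * hal_reo_flush_cache_status_be() - Parse a FLUSH_CACHE status TLV
 * @ring_desc: REO status ring descriptor
 * @st_handle: Output buffer (struct hal_reo_flush_cache_status)
 * @hal_soc_hdl: HAL SoC handle
 *
 * On a flush without a block error, the blocking resource bit is set
 * again in reo_res_bitmap so the resource can be reused.
 */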
void
hal_reo_flush_cache_status_be(hal_ring_desc_t ring_desc,
			      void *st_handle,
			      hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_flush_cache_status *st =
			(struct hal_reo_flush_cache_status *)st_handle;
	uint64_t *reo_desc = (uint64_t *)ring_desc;
	uint64_t val;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));

	/* header */
	hal_reo_status_get_header(ring_desc, HAL_REO_FLUSH_CACHE_STATUS_TLV,
				  &(st->header), hal_soc);

	/* error bit */
	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS,
					 ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS, ERROR_DETECTED,
				  val);

	/* block error */
	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS,
					 BLOCK_ERROR_DETAILS)];
	st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS,
					BLOCK_ERROR_DETAILS,
					val);
	if (!st->block_error)
		qdf_set_bit(hal_soc->index,
			    (unsigned long *)&hal_soc->reo_res_bitmap);

	/* cache flush status */
	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS,
					 CACHE_CONTROLLER_FLUSH_STATUS_HIT)];
	st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS,
					CACHE_CONTROLLER_FLUSH_STATUS_HIT,
					val);

	/* cache flush descriptor type */
	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS,
				  CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)];
	st->cache_flush_status_desc_type =
		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS,
			      CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE,
			      val);

	/* cache flush count */
	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS,
				  CACHE_CONTROLLER_FLUSH_COUNT)];
	st->cache_flush_cnt =
		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS,
			      CACHE_CONTROLLER_FLUSH_COUNT,
			      val);
}

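/**
 * hal_reo_unblock_cache_status_be() - Parse an UNBLOCK_CACHE status TLV
 * @ring_desc: REO status ring descriptor
 * @hal_soc_hdl: HAL SoC handle
 * @st_handle: Output buffer (struct hal_reo_unblk_cache_status)
 */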
void
hal_reo_unblock_cache_status_be(hal_ring_desc_t ring_desc,
				hal_soc_handle_t hal_soc_hdl,
				void *st_handle)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_unblk_cache_status *st =
			(struct hal_reo_unblk_cache_status *)st_handle;
	uint64_t *reo_desc = (uint64_t *)ring_desc;
	uint64_t val;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));

	/* header */
	hal_reo_status_get_header(ring_desc, HAL_REO_UNBLK_CACHE_STATUS_TLV,
				  &st->header, hal_soc);

	/* error bit */
	val = reo_desc[HAL_OFFSET_QW(REO_UNBLOCK_CACHE_STATUS,
				  ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS,
				  ERROR_DETECTED,
				  val);

	/* unblock type */
	val = reo_desc[HAL_OFFSET_QW(REO_UNBLOCK_CACHE_STATUS,
				  UNBLOCK_TYPE)];
	st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS,
					 UNBLOCK_TYPE,
					 val);

	if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX))
		qdf_clear_bit(hal_soc->index,
			      (unsigned long *)&hal_soc->reo_res_bitmap);
}

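/**
 * hal_reo_flush_timeout_list_status_be() - Parse a FLUSH_TIMEOUT_LIST
 *					    status TLV
 * @ring_desc: REO status ring descriptor
 * @st_handle: Output buffer (struct hal_reo_flush_timeout_list_status)
 * @hal_soc_hdl: HAL SoC handle
 */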
void hal_reo_flush_timeout_list_status_be(hal_ring_desc_t ring_desc,
					  void *st_handle,
					  hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_flush_timeout_list_status *st =
			(struct hal_reo_flush_timeout_list_status *)st_handle;
	uint64_t *reo_desc = (uint64_t *)ring_desc;
	uint64_t val;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));

	/* header */
	hal_reo_status_get_header(ring_desc, HAL_REO_TIMOUT_LIST_STATUS_TLV,
				  &(st->header), hal_soc);

	/* error bit */
	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_TIMEOUT_LIST_STATUS,
					 ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS,
				  ERROR_DETECTED,
				  val);

	/* list empty */
	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_TIMEOUT_LIST_STATUS,
					 TIMOUT_LIST_EMPTY)];
	st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS,
				       TIMOUT_LIST_EMPTY,
				       val);

	/* release descriptor count */
	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_TIMEOUT_LIST_STATUS,
					 RELEASE_DESC_COUNT)];
	st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS,
					 RELEASE_DESC_COUNT,
					 val);

	/* forward buf count */
	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_TIMEOUT_LIST_STATUS,
					 FORWARD_BUF_COUNT)];
	st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS,
					FORWARD_BUF_COUNT,
					val);
}

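/**
 * hal_reo_desc_thres_reached_status_be() - Parse a DESCRIPTOR_THRESHOLD_
 *					    REACHED status TLV
 * @ring_desc: REO status ring descriptor
 * @st_handle: Output buffer (struct hal_reo_desc_thres_reached_status)
 * @hal_soc_hdl: HAL SoC handle
 */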
void hal_reo_desc_thres_reached_status_be(hal_ring_desc_t ring_desc,
					  void *st_handle,
					  hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_desc_thres_reached_status *st =
			(struct hal_reo_desc_thres_reached_status *)st_handle;
	uint64_t *reo_desc = (uint64_t *)ring_desc;
	uint64_t val;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));

	/* header */
	hal_reo_status_get_header(ring_desc,
				  HAL_REO_DESC_THRES_STATUS_TLV,
				  &(st->header), hal_soc);

	/* threshold index */
	val = reo_desc[HAL_OFFSET_QW(
				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
				 THRESHOLD_INDEX)];
	st->thres_index = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
				THRESHOLD_INDEX,
				val);

	/* link desc counters */
	val = reo_desc[HAL_OFFSET_QW(
				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
				 LINK_DESCRIPTOR_COUNTER0)];
	st->link_desc_counter0 = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
				LINK_DESCRIPTOR_COUNTER0,
				val);

	val = reo_desc[HAL_OFFSET_QW(
				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
				 LINK_DESCRIPTOR_COUNTER1)];
	st->link_desc_counter1 = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
				LINK_DESCRIPTOR_COUNTER1,
				val);

	val = reo_desc[HAL_OFFSET_QW(
				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
				 LINK_DESCRIPTOR_COUNTER2)];
	st->link_desc_counter2 = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
				LINK_DESCRIPTOR_COUNTER2,
				val);

	val = reo_desc[HAL_OFFSET_QW(
				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
				 LINK_DESCRIPTOR_COUNTER_SUM)];
	st->link_desc_counter_sum = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
				LINK_DESCRIPTOR_COUNTER_SUM,
				val);
}

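/**
 * hal_reo_rx_update_queue_status_be() - Parse an UPDATE_RX_REO_QUEUE
 *					 status TLV
 * @ring_desc: REO status ring descriptor
 * @st_handle: Output buffer (struct hal_reo_update_rx_queue_status)
 * @hal_soc_hdl: HAL SoC handle
 */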
void
hal_reo_rx_update_queue_status_be(hal_ring_desc_t ring_desc,
				  void *st_handle,
				  hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_update_rx_queue_status *st =
			(struct hal_reo_update_rx_queue_status *)st_handle;
	uint64_t *reo_desc = (uint64_t *)ring_desc;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));

	/* header */
	hal_reo_status_get_header(ring_desc,
				  HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV,
				  &(st->header), hal_soc);
}

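/**
 * hal_get_tlv_hdr_size_be() - Get the size of the TLV header
 *
 * Return: size of struct tlv_32_hdr in bytes
 */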
uint8_t hal_get_tlv_hdr_size_be(void)
{
	return sizeof(struct tlv_32_hdr);
}