/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "hif_io32.h"
#include "reg_struct.h"
#include "ce_api.h"
#include "ce_main.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "qdf_lock.h"
#include "regtable.h"
#include "hif_main.h"
#include "hif_debug.h"
#include "hal_api.h"
#include "pld_common.h"
#include "qdf_module.h"
#include "hif.h"

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers.  When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it may
 * seem, than should be needed. These are provided mainly for maximum
 * flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 * Target TX hash result: toeplitz_hash_result
 */

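/*
 * CE_ADDR_COPY() - copy a DMA address into a CE descriptor.
 * Splits a (up to 40 bit) DMA address into the buffer_addr_lo and
 * buffer_addr_hi fields of the given descriptor; only bits [39:32]
 * are retained in buffer_addr_hi.
 */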
#define CE_ADDR_COPY(desc, dma_addr) do {\
		(desc)->buffer_addr_lo = (uint32_t)((dma_addr) &\
							  0xFFFFFFFF);\
		(desc)->buffer_addr_hi =\
			(uint32_t)(((dma_addr) >> 32) & 0xFF);\
	} while (0)

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
void hif_record_ce_srng_desc_event(struct hif_softc *scn, int ce_id,
				   enum hif_ce_event_type type,
				   union ce_srng_desc *descriptor,
				   void *memory, int index,
				   int len, void *hal_ring)
{
	int record_index;
	struct hif_ce_desc_event *event;
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	struct hif_ce_desc_event *hist_ev = NULL;

	if (ce_id >= CE_COUNT_MAX)
		return;

	hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id];

	if (!ce_hist->enable[ce_id])
		return;

	if (!hist_ev)
		return;

	record_index = get_next_record_index(
			&ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX);

	event = &hist_ev[record_index];

	hif_clear_ce_desc_debug_data(event);

	event->type = type;
	event->time = qdf_get_log_timestamp();

	if (descriptor)
		qdf_mem_copy(&event->descriptor, descriptor,
			     hal_get_entrysize_from_srng(hal_ring));

	if (hal_ring)
		hal_get_sw_hptp(scn->hal_soc, hal_ring, &event->current_tp,
				&event->current_hp);

	event->memory = memory;
	event->index = index;

	if (event->type == HIF_CE_SRC_RING_BUFFER_POST)
		hif_ce_desc_record_rx_paddr(scn, event, memory);

	if (ce_hist->data_enable[ce_id])
		hif_ce_desc_data_record(event, len);
}
#endif /* HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */

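/**
 * ce_send_nolock_srng() - post a single send descriptor to a copy engine
 * @copyeng: copy engine handle
 * @per_transfer_context: context echoed back on send completion
 * @buffer: physical address of the buffer to send
 * @nbytes: number of bytes to send
 * @transfer_id: caller-supplied id, carried in the descriptor meta data
 * @flags: CE_SEND_FLAG_* flags for this transfer
 * @user_flags: unused by the srng service
 *
 * The caller takes responsibility for any necessary locking.
 *
 * Return: QDF_STATUS_SUCCESS if the descriptor was posted
 */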
static int
ce_send_nolock_srng(struct CE_handle *copyeng,
			   void *per_transfer_context,
			   qdf_dma_addr_t buffer,
			   uint32_t nbytes,
			   uint32_t transfer_id,
			   uint32_t flags,
			   uint32_t user_flags)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int write_index = src_ring->write_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return QDF_STATUS_E_FAILURE;
	if (unlikely(hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx,
					false) <= 0)) {
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		Q_TARGET_ACCESS_END(scn);
		return QDF_STATUS_E_FAILURE;
	}
	{
		enum hif_ce_event_type event_type = HIF_CE_SRC_RING_BUFFER_POST;
		struct ce_srng_src_desc *src_desc;

		if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
			Q_TARGET_ACCESS_END(scn);
			return QDF_STATUS_E_FAILURE;
		}

		src_desc = hal_srng_src_get_next_reaped(scn->hal_soc,
				src_ring->srng_ctx);
		if (!src_desc) {
			Q_TARGET_ACCESS_END(scn);
			return QDF_STATUS_E_INVAL;
		}

		/* Update the low 32 bits and high 8 bits of the source
		 * descriptor buffer address
		 */
		src_desc->buffer_addr_lo =
			(uint32_t)(dma_addr & 0xFFFFFFFF);
		src_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0xFF);

		src_desc->meta_data = transfer_id;

		/*
		 * Set the swap bit if:
		 * typical sends on this CE are swapped (host is big-endian)
		 * and this send doesn't disable the swapping
		 * (data is not bytestream)
		 */
		src_desc->byte_swap =
			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
			  != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
		src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
		src_desc->nbytes = nbytes;

		src_ring->per_transfer_context[write_index] =
			per_transfer_context;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		hal_srng_access_end(scn->hal_soc, src_ring->srng_ctx);

		/* src_ring->write_index hasn't been updated yet, even though
		 * the register has already been written to.
		 */
		hif_record_ce_srng_desc_event(scn, CE_state->id, event_type,
					      (union ce_srng_desc *)src_desc,
					      per_transfer_context,
					      src_ring->write_index, nbytes,
					      src_ring->srng_ctx);

		src_ring->write_index = write_index;
		status = QDF_STATUS_SUCCESS;
	}
	Q_TARGET_ACCESS_END(scn);
	return status;
}

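/**
 * ce_sendlist_send_srng() - send a list of buffers as one gather transfer
 * @copyeng: copy engine handle
 * @per_transfer_context: context echoed back when the last item completes
 * @sendlist: list of simple buffers to send
 * @transfer_id: caller-supplied id, carried in each descriptor's meta data
 *
 * All items except the last are posted with CE_SEND_FLAG_GATHER so the
 * target treats them as a single transfer. If the source ring does not
 * have room for the whole list, nothing is posted and -ENOMEM is returned
 * to the caller.
 *
 * Return: QDF_STATUS_SUCCESS if every item was posted
 */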
static int
ce_sendlist_send_srng(struct CE_handle *copyeng,
		 void *per_transfer_context,
		 struct ce_sendlist *sendlist, unsigned int transfer_id)
{
	int status = -ENOMEM;
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int num_items = sl->num_items;
	unsigned int sw_index;
	unsigned int write_index;
	struct hif_softc *scn = CE_state->scn;

	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx, false) >=
	    num_items) {
		struct ce_sendlist_item *item;
		int i;

		/* handle all but the last item uniformly */
		for (i = 0; i < num_items - 1; i++) {
			item = &sl->item[i];
			/* TBDXXX: Support extensible sendlist_types? */
			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
			status = ce_send_nolock_srng(copyeng,
					CE_SENDLIST_ITEM_CTXT,
				(qdf_dma_addr_t) item->data,
				item->u.nbytes, transfer_id,
				item->flags | CE_SEND_FLAG_GATHER,
				item->user_flags);
			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		}
		/* provide valid context pointer for final item */
		item = &sl->item[i];
		/* TBDXXX: Support extensible sendlist_types? */
		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
		status = ce_send_nolock_srng(copyeng, per_transfer_context,
					(qdf_dma_addr_t) item->data,
					item->u.nbytes,
					transfer_id, item->flags,
					item->user_flags);
		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
					QDF_NBUF_TX_PKT_CE);
		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
			QDF_TRACE_DEFAULT_PDEV_ID,
			(uint8_t *)(((qdf_nbuf_t)per_transfer_context)->data),
			sizeof(((qdf_nbuf_t)per_transfer_context)->data), QDF_TX));
	} else {
		/*
		 * Probably not worth the additional complexity to support
		 * partial sends with continuation or notification.  We expect
		 * to use large rings and small sendlists. If we can't handle
		 * the entire request at once, punt it back to the caller.
		 */
	}
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

#define SLOTS_PER_DATAPATH_TX 2

#ifndef AH_NEED_TX_DATA_SWAP
#define AH_NEED_TX_DATA_SWAP 0
#endif
/**
 * ce_recv_buf_enqueue_srng() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
 * @per_recv_context: virtual address of the nbuf
 * @buffer: physical address of the nbuf
 *
 * Return: QDF_STATUS_SUCCESS if the buffer is enqueued
 */
static int
ce_recv_buf_enqueue_srng(struct CE_handle *copyeng,
		    void *per_recv_context, qdf_dma_addr_t buffer)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_dest_desc *dest_desc = NULL;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return -EIO;
	}

	if (hal_srng_access_start(scn->hal_soc, dest_ring->srng_ctx)) {
		Q_TARGET_ACCESS_END(scn);
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return QDF_STATUS_E_FAILURE;
	}

	if ((hal_srng_src_num_avail(scn->hal_soc,
					dest_ring->srng_ctx, false) > 0)) {
		dest_desc = hal_srng_src_get_next(scn->hal_soc,
						  dest_ring->srng_ctx);

		if (!dest_desc) {
			status = QDF_STATUS_E_FAILURE;
		} else {

			CE_ADDR_COPY(dest_desc, dma_addr);

			dest_ring->per_transfer_context[write_index] =
				per_recv_context;

			/* Update Destination Ring Write Index */
			write_index = CE_RING_IDX_INCR(nentries_mask,
								write_index);
			status = QDF_STATUS_SUCCESS;
		}
	} else {
		dest_desc = NULL;
		status = QDF_STATUS_E_FAILURE;
	}

	dest_ring->write_index = write_index;
	hal_srng_access_end(scn->hal_soc, dest_ring->srng_ctx);
	hif_record_ce_srng_desc_event(scn, CE_state->id,
				      HIF_CE_DEST_RING_BUFFER_POST,
				      (union ce_srng_desc *)dest_desc,
				      per_recv_context,
				      dest_ring->write_index, 0,
				      dest_ring->srng_ctx);

	Q_TARGET_ACCESS_END(scn);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
	return status;
}

/*
 * Guts of ce_recv_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
static unsigned int
ce_recv_entries_done_nolock_srng(struct hif_softc *scn,
			    struct CE_state *CE_state)
{
	struct CE_ring_state *status_ring = CE_state->status_ring;

	return hal_srng_dst_num_valid(scn->hal_soc,
				status_ring->srng_ctx, false);
}

/*
 * Guts of ce_send_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
static unsigned int
ce_send_entries_done_nolock_srng(struct hif_softc *scn,
					struct CE_state *CE_state)
{
	struct CE_ring_state *src_ring = CE_state->src_ring;
	int count = 0;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx))
		return 0;

	count = hal_srng_src_done_val(scn->hal_soc, src_ring->srng_ctx);

	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

	return count;
}

/*
 * Guts of ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
static int
ce_completed_recv_next_nolock_srng(struct CE_state *CE_state,
			      void **per_CE_contextp,
			      void **per_transfer_contextp,
			      qdf_dma_addr_t *bufferp,
			      unsigned int *nbytesp,
			      unsigned int *transfer_idp,
			      unsigned int *flagsp)
{
	int status;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	struct CE_ring_state *status_ring = CE_state->status_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_dest_status_desc *dest_status;
	int nbytes;
	struct ce_srng_dest_status_desc dest_status_info;

	if (hal_srng_access_start(scn->hal_soc, status_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	dest_status = hal_srng_dst_get_next(scn->hal_soc,
						status_ring->srng_ctx);

	if (!dest_status) {
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}
	/*
	 * By copying the dest_desc_info element to local memory, we avoid
	 * an extra memory read from non-cacheable memory.
	 */
	dest_status_info = *dest_status;
	nbytes = dest_status_info.nbytes;
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	dest_status->nbytes = 0;

	*nbytesp = nbytes;
	*transfer_idp = dest_status_info.meta_data;
	*flagsp = (dest_status_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	/* NOTE: sw_index is more like a read_index in this context. It has a
	 * one-to-one mapping with the status ring.
	 * Get the per transfer context from dest_ring.
	 */
	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	status = QDF_STATUS_SUCCESS;

done:
	hal_srng_access_end(scn->hal_soc, status_ring->srng_ctx);

	if (status == QDF_STATUS_SUCCESS) {
		hif_record_ce_srng_desc_event(scn, CE_state->id,
					      HIF_CE_DEST_RING_BUFFER_REAP,
					      NULL,
					      dest_ring->
					      per_transfer_context[sw_index],
					      dest_ring->sw_index, nbytes,
					      dest_ring->srng_ctx);

		hif_record_ce_srng_desc_event(scn, CE_state->id,
					      HIF_CE_DEST_STATUS_RING_REAP,
					      (union ce_srng_desc *)dest_status,
					      NULL,
					      -1, 0,
					      status_ring->srng_ctx);
	}

	return status;
}

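/**
 * ce_revoke_recv_next_srng() - revoke a previously posted recv buffer
 * @copyeng: copy engine handle
 * @per_CE_contextp: filled with the CE's recv context, if non-NULL
 * @per_transfer_contextp: filled with the buffer's per-transfer context
 * @bufferp: unused; the buffer address is not returned
 *
 * Pops the next posted-but-not-yet-completed receive buffer from the
 * destination ring, typically during shutdown.
 *
 * Return: QDF_STATUS_SUCCESS while posted buffers remain,
 *	   QDF_STATUS_E_FAILURE once the ring is exhausted
 */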
static QDF_STATUS
ce_revoke_recv_next_srng(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int sw_index;

	if (!dest_ring)
		return QDF_STATUS_E_FAILURE;

	sw_index = dest_ring->sw_index;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	/* NOTE: sw_index is more like a read_index in this context. It has a
	 * one-to-one mapping with the status ring.
	 * Get the per transfer context from dest_ring.
	 */
	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	if (!dest_ring->per_transfer_context[sw_index])
		return QDF_STATUS_E_FAILURE;

	/* provide end condition */
	dest_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(dest_ring->nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	return QDF_STATUS_SUCCESS;
}

/*
 * Guts of ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static int
ce_completed_send_next_nolock_srng(struct CE_state *CE_state,
			      void **per_CE_contextp,
			      void **per_transfer_contextp,
			      qdf_dma_addr_t *bufferp,
			      unsigned int *nbytesp,
			      unsigned int *transfer_idp,
			      unsigned int *sw_idx,
			      unsigned int *hw_idx,
			      uint32_t *toeplitz_hash_result)
{
	int status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int swi = src_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_src_desc *src_desc;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		return status;
	}

	src_desc = hal_srng_src_reap_next(scn->hal_soc, src_ring->srng_ctx);
	if (src_desc) {
		hif_record_ce_srng_desc_event(scn, CE_state->id,
					      HIF_TX_DESC_COMPLETION,
					      (union ce_srng_desc *)src_desc,
					      src_ring->
					      per_transfer_context[swi],
					      swi, src_desc->nbytes,
					      src_ring->srng_ctx);

		/* Return data from completed source descriptor */
		*bufferp = (qdf_dma_addr_t)
			(((uint64_t)(src_desc)->buffer_addr_lo +
			  ((uint64_t)((src_desc)->buffer_addr_hi &
				  0xFF) << 32)));
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		/* sw_index is used more like read index */
		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}
	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

	return status;
}

/* NB: Modelled after ce_completed_send_next */
static QDF_STATUS
ce_cancel_send_next_srng(struct CE_handle *copyeng,
		void **per_CE_contextp,
		void **per_transfer_contextp,
		qdf_dma_addr_t *bufferp,
		unsigned int *nbytesp,
		unsigned int *transfer_idp,
		uint32_t *toeplitz_hash_result)
{
	struct CE_state *CE_state;
	int status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	struct hif_softc *scn;
	struct ce_srng_src_desc *src_desc;

	CE_state = (struct CE_state *)copyeng;
	src_ring = CE_state->src_ring;
	if (!src_ring)
		return QDF_STATUS_E_FAILURE;

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	scn = CE_state->scn;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		return status;
	}

	src_desc = hal_srng_src_pending_reap_next(scn->hal_soc,
			src_ring->srng_ctx);
	if (src_desc) {
		/* Return data from completed source descriptor */
		*bufferp = (qdf_dma_addr_t)
			(((uint64_t)(src_desc)->buffer_addr_lo +
			  ((uint64_t)((src_desc)->buffer_addr_hi &
				  0xFF) << 32)));
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		/* sw_index is used more like read index */
		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}
	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

	return status;
}

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with target_lock held.
 */
static void
ce_per_engine_handler_adjust_srng(struct CE_state *CE_state,
			     int disable_copy_compl_intr)
{
}

static bool ce_check_int_watermark_srng(struct CE_state *CE_state,
					unsigned int *flags)
{
	/*TODO*/
	return false;
}

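/**
 * ce_get_desc_size_srng() - get the HW descriptor size for a CE ring type
 * @ring_type: CE_RING_SRC, CE_RING_DEST or CE_RING_STATUS
 *
 * Return: size in bytes of one hardware descriptor, 0 for unknown types
 */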
static uint32_t ce_get_desc_size_srng(uint8_t ring_type)
{
	switch (ring_type) {
	case CE_RING_SRC:
		return sizeof(struct ce_srng_src_desc);
	case CE_RING_DEST:
		return sizeof(struct ce_srng_dest_desc);
	case CE_RING_STATUS:
		return sizeof(struct ce_srng_dest_status_desc);
	default:
		return 0;
	}
}

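/**
 * ce_srng_msi_ring_params_setup() - fill MSI parameters for a CE srng
 * @scn: hif context
 * @ce_id: copy engine id
 * @ring_params: srng parameters to be passed to hal_srng_setup()
 *
 * Queries the platform driver for the MSI assignment of the "CE" block
 * and, if one exists, programs the MSI address and data into @ring_params
 * and marks the ring for MSI interrupts. If no MSI configuration is
 * found, @ring_params is left untouched.
 */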
static void ce_srng_msi_ring_params_setup(struct hif_softc *scn, uint32_t ce_id,
			      struct hal_srng_params *ring_params)
{
	uint32_t addr_low;
	uint32_t addr_high;
	uint32_t msi_data_start;
	uint32_t msi_data_count;
	uint32_t msi_irq_start;
	int ret;

	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);

	/* msi config not found */
	if (ret)
		return;

	pld_get_msi_address(scn->qdf_dev->dev, &addr_low, &addr_high);

	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data = (ce_id % msi_data_count) + msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;

	HIF_DBG("%s: ce_id %d, msi_addr %pK, msi_data %d", __func__, ce_id,
		  (void *)ring_params->msi_addr, ring_params->msi_data);
}

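/**
 * ce_srng_src_ring_setup() - set up the srng for a CE source ring
 * @scn: hif context
 * @ce_id: copy engine id
 * @src_ring: source ring state to be attached to the srng
 * @attr: copy engine attributes for this CE
 *
 * Configures base address, number of entries and (unless interrupts are
 * disabled for this CE) MSI and interrupt-batching parameters, then
 * registers the ring with HAL via hal_srng_setup().
 */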
static void ce_srng_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
			struct CE_ring_state *src_ring,
			struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};

	hif_debug("%s: ce_id %d", __func__, ce_id);

	ring_params.ring_base_paddr = src_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = src_ring->base_addr_owner_space;
	ring_params.num_entries = src_ring->nentries;
	/*
	 * The minimum increment for the timer is 8us.
	 * A default value of 0 disables the timer.
	 * A valid (non-zero) default value caused continuous interrupts to
	 * fire with MSI enabled, so usage of the timer needs to be revisited.
	 */

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);

		ring_params.intr_timer_thres_us = 0;
		ring_params.intr_batch_cntr_thres_entries = 1;
	}

	src_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_SRC, ce_id, 0,
			&ring_params);
}

/**
 * ce_srng_initialize_dest_timer_interrupt_war() - war initialization
 * @dest_ring: ring being initialized
 * @ring_params: pointer to initialized parameters
 *
 * For Napier & Hawkeye v1, the status ring timer interrupts do not work.
 * As a workaround, the host configures the destination ring to be a proxy
 * for work needing to be done.
 *
 * The interrupts are set up such that if the destination ring is less than
 * fully posted, there is likely undone work for the status ring that the
 * host should process.
 *
 * There is a timing bug in srng based copy engines such that a fully posted
 * srng based copy engine has 2 empty entries instead of just one.  The copy
 * engine data structures work with 1 empty entry, but the software
 * frequently fails to post the last entry due to the race condition.
 */
static void ce_srng_initialize_dest_timer_interrupt_war(
		struct CE_ring_state *dest_ring,
		struct hal_srng_params *ring_params)
{
	int num_buffers_when_fully_posted = dest_ring->nentries - 2;

	ring_params->low_threshold = num_buffers_when_fully_posted - 1;
	ring_params->intr_timer_thres_us = 1024;
	ring_params->intr_batch_cntr_thres_entries = 0;
	ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
}

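/**
 * ce_srng_dest_ring_setup() - set up the srng for a CE destination ring
 * @scn: hif context
 * @ce_id: copy engine id
 * @dest_ring: destination ring state to be attached to the srng
 * @attr: copy engine attributes for this CE
 *
 * Configures base address, number of entries and maximum buffer length.
 * When interrupts are enabled, the destination ring is also configured as
 * a proxy for status ring work
 * (see ce_srng_initialize_dest_timer_interrupt_war()).
 */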
static void ce_srng_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				struct CE_ring_state *dest_ring,
				struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};
	bool status_ring_timer_thresh_work_arround = true;

	HIF_INFO("%s: ce_id %d", __func__, ce_id);

	ring_params.ring_base_paddr = dest_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = dest_ring->base_addr_owner_space;
	ring_params.num_entries = dest_ring->nentries;
	ring_params.max_buffer_length = attr->src_sz_max;

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);
		if (status_ring_timer_thresh_work_arround) {
			ce_srng_initialize_dest_timer_interrupt_war(
					dest_ring, &ring_params);
		} else {
			/* normal behavior for future chips */
			ring_params.low_threshold = dest_ring->nentries >> 3;
			ring_params.intr_timer_thres_us = 100000;
			ring_params.intr_batch_cntr_thres_entries = 0;
			ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
		}
	}

	/* The CE dest ring is a source-type srng from the host's
	 * perspective: the host produces rx buffers into it.
	 */
	dest_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST, ce_id, 0,
			&ring_params);
}

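/**
 * ce_srng_status_ring_setup() - set up the srng for a CE status ring
 * @scn: hif context
 * @ce_id: copy engine id
 * @status_ring: status ring state to be attached to the srng
 * @attr: copy engine attributes for this CE
 *
 * Configures MSI, base address, number of entries and interrupt
 * thresholds, then registers the ring with HAL as a CE_DST_STATUS srng.
 */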
static void ce_srng_status_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				struct CE_ring_state *status_ring,
				struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};

	HIF_INFO("%s: ce_id %d", __func__, ce_id);

	ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);

	ring_params.ring_base_paddr = status_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = status_ring->base_addr_owner_space;
	ring_params.num_entries = status_ring->nentries;

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		ring_params.intr_timer_thres_us = 0x1000;
		ring_params.intr_batch_cntr_thres_entries = 0x1;
	}

	status_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST_STATUS,
			ce_id, 0, &ring_params);
}

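/**
 * ce_ring_setup_srng() - srng backend for the ce_ring_setup service
 * @scn: hif context
 * @ring_type: CE_RING_SRC, CE_RING_DEST or CE_RING_STATUS
 * @ce_id: copy engine id
 * @ring: ring state to set up
 * @attr: copy engine attributes for this CE
 *
 * Dispatches to the ring-type specific setup routine.
 *
 * Return: 0 on success
 */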
static int ce_ring_setup_srng(struct hif_softc *scn, uint8_t ring_type,
		uint32_t ce_id, struct CE_ring_state *ring,
		struct CE_attr *attr)
{
	switch (ring_type) {
	case CE_RING_SRC:
		ce_srng_src_ring_setup(scn, ce_id, ring, attr);
		break;
	case CE_RING_DEST:
		ce_srng_dest_ring_setup(scn, ce_id, ring, attr);
		break;
	case CE_RING_STATUS:
		ce_srng_status_ring_setup(scn, ce_id, ring, attr);
		break;
	default:
		qdf_assert(0);
		break;
	}

	return 0;
}

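/**
 * ce_construct_shadow_config_srng() - set up shadow registers for CE srngs
 * @scn: hif context
 *
 * Requests a shadow register configuration from HAL for every CE ring
 * that is actually configured (source, destination and destination
 * status).
 */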
static void ce_construct_shadow_config_srng(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int ce_id;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (hif_state->host_ce_config[ce_id].src_nentries)
			hal_set_one_shadow_config(scn->hal_soc,
						  CE_SRC, ce_id);

		if (hif_state->host_ce_config[ce_id].dest_nentries) {
			hal_set_one_shadow_config(scn->hal_soc,
						  CE_DST, ce_id);

			hal_set_one_shadow_config(scn->hal_soc,
						  CE_DST_STATUS, ce_id);
		}
	}
}

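/**
 * ce_prepare_shadow_register_v2_cfg_srng() - get shadow register config
 * @scn: hif context
 * @shadow_config: returned shadow register configuration table
 * @num_shadow_registers_configured: returned number of entries in the table
 *
 * Constructs the HAL shadow register configuration for the CE rings if it
 * has not been constructed already, and returns the resulting table so it
 * can be passed to the platform driver.
 */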
static void ce_prepare_shadow_register_v2_cfg_srng(struct hif_softc *scn,
		struct pld_shadow_reg_v2_cfg **shadow_config,
		int *num_shadow_registers_configured)
{
	if (!scn->hal_soc) {
		HIF_ERROR("%s: hal not initialized: not initializing shadow config",
			  __func__);
		return;
	}

	hal_get_shadow_config(scn->hal_soc, shadow_config,
			      num_shadow_registers_configured);

	if (*num_shadow_registers_configured != 0) {
		HIF_ERROR("%s: hal shadow register configuration already constructed",
			  __func__);

		/* return with the original configuration */
		return;
	}

	hal_construct_shadow_config(scn->hal_soc);
	ce_construct_shadow_config_srng(scn);

	/* get updated configuration */
	hal_get_shadow_config(scn->hal_soc, shadow_config,
			      num_shadow_registers_configured);
}

static struct ce_ops ce_service_srng = {
	.ce_get_desc_size = ce_get_desc_size_srng,
	.ce_ring_setup = ce_ring_setup_srng,
	.ce_sendlist_send = ce_sendlist_send_srng,
	.ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_srng,
	.ce_revoke_recv_next = ce_revoke_recv_next_srng,
	.ce_cancel_send_next = ce_cancel_send_next_srng,
	.ce_recv_buf_enqueue = ce_recv_buf_enqueue_srng,
	.ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_srng,
	.ce_send_nolock = ce_send_nolock_srng,
	.watermark_int = ce_check_int_watermark_srng,
	.ce_completed_send_next_nolock = ce_completed_send_next_nolock_srng,
	.ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_srng,
	.ce_send_entries_done_nolock = ce_send_entries_done_nolock_srng,
	.ce_prepare_shadow_register_v2_cfg =
		ce_prepare_shadow_register_v2_cfg_srng,
};

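/**
 * ce_services_srng() - return the srng copy engine service callbacks
 *
 * Return: pointer to the static ce_ops table used for srng based CEs
 */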
struct ce_ops *ce_services_srng(void)
{
	return &ce_service_srng;
}
qdf_export_symbol(ce_services_srng);

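/**
 * ce_service_srng_init() - register the srng copy engine service
 *
 * Registers ce_services_srng() with the CE service module so it can be
 * selected for targets that use srng based copy engines.
 */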
void ce_service_srng_init(void)
{
	ce_service_register_module(CE_SVC_SRNG, &ce_services_srng);
}