/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hif.h"
#include "hif_io32.h"
#include "reg_struct.h"
#include "ce_api.h"
#include "ce_main.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "qdf_lock.h"
#include "regtable.h"
#include "hif_main.h"
#include "hif_debug.h"
#include "hal_api.h"
#include "pld_common.h"
#include "qdf_module.h"

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers.  When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem, than should strictly be needed. These are provided mainly
 * for maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 * The Target TX hash result is reported via toeplitz_hash_result.
 */

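/*
 * Illustrative only (not part of the driver): the functions below are
 * normally reached through the ce_ops table returned by ce_services_srng()
 * at the bottom of this file. A host-side TX pass, with locking and error
 * handling omitted and "copyeng"/"nbuf" assumed to have been set up by the
 * HIF layer, would roughly look like:
 *
 *	ce_send_nolock_srng(copyeng, nbuf,
 *			    qdf_nbuf_get_frag_paddr(nbuf, 0),
 *			    qdf_nbuf_len(nbuf), transfer_id, 0, 0);
 *	...
 *	ce_completed_send_next_nolock_srng(CE_state, &ce_ctx, &xfer_ctx,
 *					   &paddr, &nbytes, &id, &sw_idx,
 *					   &hw_idx, &toeplitz);
 *
 * The per-transfer context (nbuf here) is echoed back by the completion
 * call, which is how the caller recovers its buffer.
 */

/*
 * CE_ADDR_COPY() splits a DMA address of up to 40 bits across the
 * buffer_addr_lo/buffer_addr_hi fields of an SRNG destination descriptor:
 * the low 32 bits go into buffer_addr_lo and the next 8 bits into
 * buffer_addr_hi.
 */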
#define CE_ADDR_COPY(desc, dma_addr) do {\
		(desc)->buffer_addr_lo = (uint32_t)((dma_addr) &\
							  0xFFFFFFFF);\
		(desc)->buffer_addr_hi =\
			(uint32_t)(((dma_addr) >> 32) & 0xFF);\
	} while (0)

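/**
 * ce_send_nolock_srng() - post a single source descriptor to a copy engine
 * @copyeng: copy engine handle
 * @per_transfer_context: caller context echoed back on send completion
 * @buffer: physical address of the payload
 * @nbytes: payload length in bytes
 * @transfer_id: stored in the descriptor's meta_data field
 * @flags: CE_SEND_FLAG_* bits (gather, swap disable)
 * @user_flags: unused by the SRNG service
 *
 * The caller is responsible for serialization; no lock is taken here.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE if the ring
 *	   is full or hardware access cannot be started.
 */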
static int
ce_send_nolock_srng(struct CE_handle *copyeng,
			   void *per_transfer_context,
			   qdf_dma_addr_t buffer,
			   uint32_t nbytes,
			   uint32_t transfer_id,
			   uint32_t flags,
			   uint32_t user_flags)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int write_index = src_ring->write_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return QDF_STATUS_E_FAILURE;
	if (unlikely(hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx,
					false) <= 0)) {
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		Q_TARGET_ACCESS_END(scn);
		return QDF_STATUS_E_FAILURE;
	}
	{
		enum hif_ce_event_type event_type = HIF_TX_GATHER_DESC_POST;
		struct ce_srng_src_desc *src_desc;

		if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
			Q_TARGET_ACCESS_END(scn);
			return QDF_STATUS_E_FAILURE;
		}

		src_desc = hal_srng_src_get_next_reaped(scn->hal_soc,
				src_ring->srng_ctx);

		/* Update source descriptor address (low 32 + high 8 bits) */
		src_desc->buffer_addr_lo =
			(uint32_t)(dma_addr & 0xFFFFFFFF);
		src_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0xFF);

		src_desc->meta_data = transfer_id;

		/*
		 * Set the swap bit if:
		 * typical sends on this CE are swapped (host is big-endian)
		 * and this send doesn't disable the swapping
		 * (data is not bytestream)
		 */
		src_desc->byte_swap =
			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
			  != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
		src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
		src_desc->nbytes = nbytes;

		src_ring->per_transfer_context[write_index] =
			per_transfer_context;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		hal_srng_access_end(scn->hal_soc, src_ring->srng_ctx);

		/* src_ring->write_index hasn't been updated yet, even though
		 * the hardware register has already been written to.
		 */
		hif_record_ce_desc_event(scn, CE_state->id, event_type,
			(union ce_desc *) src_desc, per_transfer_context,
			src_ring->write_index, nbytes);

		src_ring->write_index = write_index;
		status = QDF_STATUS_SUCCESS;
	}
	Q_TARGET_ACCESS_END(scn);
	return status;
}

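/**
 * ce_sendlist_send_srng() - post a gather list of buffers as one transfer
 * @copyeng: copy engine handle
 * @per_transfer_context: caller context echoed back on send completion
 * @sendlist: list of simple buffers to post
 * @transfer_id: stored in each descriptor's meta_data field
 *
 * All items except the last are posted with CE_SEND_FLAG_GATHER so the
 * target treats them as a single transfer; only the final item carries the
 * per-transfer context. If the ring cannot hold the whole list, nothing is
 * posted and -ENOMEM is returned.
 *
 * Illustrative caller-side sketch, assuming the ce_sendlist_init() and
 * ce_sendlist_buf_add() helpers declared in ce_api.h:
 *
 *	struct ce_sendlist sl;
 *
 *	ce_sendlist_init(&sl);
 *	ce_sendlist_buf_add(&sl, hdr_paddr, hdr_len, 0, 0);
 *	ce_sendlist_buf_add(&sl, payload_paddr, payload_len, 0, 0);
 *	ce_sendlist_send_srng(copyeng, nbuf, &sl, transfer_id);
 *
 * Return: QDF_STATUS_SUCCESS if every item was posted
 */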
static int
ce_sendlist_send_srng(struct CE_handle *copyeng,
		 void *per_transfer_context,
		 struct ce_sendlist *sendlist, unsigned int transfer_id)
{
	int status = -ENOMEM;
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int num_items = sl->num_items;
	unsigned int sw_index;
	unsigned int write_index;
	struct hif_softc *scn = CE_state->scn;

	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx, false) >=
	    num_items) {
		struct ce_sendlist_item *item;
		int i;

		/* handle all but the last item uniformly */
		for (i = 0; i < num_items - 1; i++) {
			item = &sl->item[i];
			/* TBDXXX: Support extensible sendlist_types? */
			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
			status = ce_send_nolock_srng(copyeng,
					CE_SENDLIST_ITEM_CTXT,
				(qdf_dma_addr_t) item->data,
				item->u.nbytes, transfer_id,
				item->flags | CE_SEND_FLAG_GATHER,
				item->user_flags);
			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		}
		/* provide valid context pointer for final item */
		item = &sl->item[i];
		/* TBDXXX: Support extensible sendlist_types? */
		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
		status = ce_send_nolock_srng(copyeng, per_transfer_context,
					(qdf_dma_addr_t) item->data,
					item->u.nbytes,
					transfer_id, item->flags,
					item->user_flags);
		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
					QDF_NBUF_TX_PKT_CE);
		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
			QDF_TRACE_DEFAULT_PDEV_ID,
			(uint8_t *)(((qdf_nbuf_t)per_transfer_context)->data),
			sizeof(((qdf_nbuf_t)per_transfer_context)->data), QDF_TX));
	} else {
		/*
		 * Probably not worth the additional complexity to support
		 * partial sends with continuation or notification.  We expect
		 * to use large rings and small sendlists. If we can't handle
		 * the entire request at once, punt it back to the caller.
		 */
	}
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

#define SLOTS_PER_DATAPATH_TX 2

#ifndef AH_NEED_TX_DATA_SWAP
#define AH_NEED_TX_DATA_SWAP 0
#endif
/**
 * ce_recv_buf_enqueue_srng() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
 * @per_recv_context: virtual address of the nbuf
 * @buffer: physical address of the nbuf
 *
 * Return: QDF_STATUS_SUCCESS if the buffer is enqueued
 */
static int
ce_recv_buf_enqueue_srng(struct CE_handle *copyeng,
		    void *per_recv_context, qdf_dma_addr_t buffer)
{
	int status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return -EIO;
	}

	if (hal_srng_access_start(scn->hal_soc, dest_ring->srng_ctx)) {
		/* balance the Q_TARGET_ACCESS_BEGIN above before bailing */
		Q_TARGET_ACCESS_END(scn);
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return QDF_STATUS_E_FAILURE;
	}

	if ((hal_srng_src_num_avail(scn->hal_soc,
					dest_ring->srng_ctx, false) > 0)) {
		struct ce_srng_dest_desc *dest_desc =
				hal_srng_src_get_next(scn->hal_soc,
							dest_ring->srng_ctx);

		if (dest_desc == NULL) {
			status = QDF_STATUS_E_FAILURE;
		} else {

			CE_ADDR_COPY(dest_desc, dma_addr);

			dest_ring->per_transfer_context[write_index] =
				per_recv_context;

			/* Update Destination Ring Write Index */
			write_index = CE_RING_IDX_INCR(nentries_mask,
								write_index);
			status = QDF_STATUS_SUCCESS;
		}
	} else
		status = QDF_STATUS_E_FAILURE;

	dest_ring->write_index = write_index;
	hal_srng_access_end(scn->hal_soc, dest_ring->srng_ctx);
	Q_TARGET_ACCESS_END(scn);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
	return status;
}

/*
 * Guts of ce_recv_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
static unsigned int
ce_recv_entries_done_nolock_srng(struct hif_softc *scn,
			    struct CE_state *CE_state)
{
	struct CE_ring_state *status_ring = CE_state->status_ring;

	return hal_srng_dst_num_valid(scn->hal_soc,
				status_ring->srng_ctx, false);
}

/*
 * Guts of ce_send_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
static unsigned int
ce_send_entries_done_nolock_srng(struct hif_softc *scn,
					struct CE_state *CE_state)
{
	struct CE_ring_state *src_ring = CE_state->src_ring;
	int count = 0;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx))
		return 0;

	count = hal_srng_src_done_val(scn->hal_soc, src_ring->srng_ctx);

	hal_srng_access_end(scn->hal_soc, src_ring->srng_ctx);

	return count;
}

/*
 * Guts of ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
static int
ce_completed_recv_next_nolock_srng(struct CE_state *CE_state,
			      void **per_CE_contextp,
			      void **per_transfer_contextp,
			      qdf_dma_addr_t *bufferp,
			      unsigned int *nbytesp,
			      unsigned int *transfer_idp,
			      unsigned int *flagsp)
{
	int status;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	struct CE_ring_state *status_ring = CE_state->status_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_dest_status_desc *dest_status;
	int nbytes;
	struct ce_srng_dest_status_desc dest_status_info;

	if (hal_srng_access_start(scn->hal_soc, status_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	dest_status = hal_srng_dst_get_next(scn->hal_soc,
						status_ring->srng_ctx);

	if (dest_status == NULL) {
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}
	/*
	 * Copy the dest_desc_info element to local memory to avoid an
	 * extra read from non-cacheable memory.
	 */
	dest_status_info = *dest_status;
	nbytes = dest_status_info.nbytes;
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	dest_status->nbytes = 0;

	*nbytesp = nbytes;
	*transfer_idp = dest_status_info.meta_data;
	*flagsp = (dest_status_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	/* NOTE: sw_index is more like a read_index in this context. It has a
	 * one-to-one mapping with the status ring.
	 * Get the per-transfer context from dest_ring.
	 */
	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	status = QDF_STATUS_SUCCESS;

done:
	hal_srng_access_end(scn->hal_soc, status_ring->srng_ctx);

	return status;
}

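/**
 * ce_revoke_recv_next_srng() - reclaim a posted but not yet completed
 *	receive buffer
 * @copyeng: copy engine handle
 * @per_CE_contextp: set to the engine's recv context if non-NULL
 * @per_transfer_contextp: set to the caller context of the reclaimed buffer
 * @bufferp: unused; the caller recovers the address from its own context
 *
 * Typically called while a pipe is being torn down, once per posted buffer,
 * until it reports failure.
 *
 * Return: QDF_STATUS_SUCCESS if a buffer was reclaimed, QDF_STATUS_E_FAILURE
 *	   once no posted buffer remains at the current sw_index
 */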
static QDF_STATUS
ce_revoke_recv_next_srng(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int sw_index;

	if (!dest_ring)
		return QDF_STATUS_E_FAILURE;

	sw_index = dest_ring->sw_index;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	/* NOTE: sw_index is more like a read_index in this context. It has a
	 * one-to-one mapping with the status ring.
	 * Get the per-transfer context from dest_ring.
	 */
	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	if (dest_ring->per_transfer_context[sw_index] == NULL)
		return QDF_STATUS_E_FAILURE;

	/* provide end condition */
	dest_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(dest_ring->nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	return QDF_STATUS_SUCCESS;
}

/*
 * Guts of ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static int
ce_completed_send_next_nolock_srng(struct CE_state *CE_state,
			      void **per_CE_contextp,
			      void **per_transfer_contextp,
			      qdf_dma_addr_t *bufferp,
			      unsigned int *nbytesp,
			      unsigned int *transfer_idp,
			      unsigned int *sw_idx,
			      unsigned int *hw_idx,
			      uint32_t *toeplitz_hash_result)
{
	int status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int swi = src_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_src_desc *src_desc;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		return status;
	}

	src_desc = hal_srng_src_reap_next(scn->hal_soc, src_ring->srng_ctx);
	if (src_desc) {
		hif_record_ce_desc_event(scn, CE_state->id,
					HIF_TX_DESC_COMPLETION,
					(union ce_desc *)src_desc,
					src_ring->per_transfer_context[swi],
					swi, src_desc->nbytes);

		/* Return data from completed source descriptor */
		*bufferp = (qdf_dma_addr_t)
			(((uint64_t)(src_desc)->buffer_addr_lo +
			  ((uint64_t)((src_desc)->buffer_addr_hi &
				  0xFF) << 32)));
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		/* sw_index is used more like read index */
		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}
	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

	return status;
}

/* NB: Modelled after ce_completed_send_next */
static QDF_STATUS
ce_cancel_send_next_srng(struct CE_handle *copyeng,
		void **per_CE_contextp,
		void **per_transfer_contextp,
		qdf_dma_addr_t *bufferp,
		unsigned int *nbytesp,
		unsigned int *transfer_idp,
		uint32_t *toeplitz_hash_result)
{
	struct CE_state *CE_state;
	int status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	struct hif_softc *scn;
	struct ce_srng_src_desc *src_desc;

	CE_state = (struct CE_state *)copyeng;
	src_ring = CE_state->src_ring;
	if (!src_ring)
		return QDF_STATUS_E_FAILURE;

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	scn = CE_state->scn;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		return status;
	}

	src_desc = hal_srng_src_pending_reap_next(scn->hal_soc,
			src_ring->srng_ctx);
	if (src_desc) {
		/* Return data from completed source descriptor */
		*bufferp = (qdf_dma_addr_t)
			(((uint64_t)(src_desc)->buffer_addr_lo +
			  ((uint64_t)((src_desc)->buffer_addr_hi &
				  0xFF) << 32)));
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		/* sw_index is used more like read index */
		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}
	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

	return status;
}

/* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
#define CE_WM_SHFT 1

/*
 * Number of times to check for any pending tx/rx completion on
 * a copy engine; this count should be big enough. Once we hit
 * this threshold we will not check for any Tx/Rx completion in the
 * same interrupt handling pass. Note that this threshold is only
 * used for Rx interrupt processing; it can be used for Tx as well
 * if we suspect an infinite loop in checking for pending Tx
 * completions.
 */
#define CE_TXRX_COMP_CHECK_THRESHOLD 20

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with target_lock held.
 */
static void
ce_per_engine_handler_adjust_srng(struct CE_state *CE_state,
			     int disable_copy_compl_intr)
{
}

static bool ce_check_int_watermark_srng(struct CE_state *CE_state,
					unsigned int *flags)
{
	/*TODO*/
	return false;
}

static uint32_t ce_get_desc_size_srng(uint8_t ring_type)
{
	switch (ring_type) {
	case CE_RING_SRC:
		return sizeof(struct ce_srng_src_desc);
	case CE_RING_DEST:
		return sizeof(struct ce_srng_dest_desc);
	case CE_RING_STATUS:
		return sizeof(struct ce_srng_dest_status_desc);
	default:
		return 0;
	}
	return 0;
}

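/**
 * ce_srng_msi_ring_params_setup() - fill MSI parameters for a CE ring
 * @scn: HIF context
 * @ce_id: copy engine index, used to derive the MSI data value
 * @ring_params: SRNG parameters being built for this ring
 *
 * Queries the platform driver for the "CE" MSI assignment. If no MSI
 * configuration is found the function returns without enabling MSI;
 * otherwise it programs the MSI address, a per-CE data value and sets
 * HAL_SRNG_MSI_INTR so the SRNG generates MSIs directly.
 */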
static void ce_srng_msi_ring_params_setup(struct hif_softc *scn, uint32_t ce_id,
			      struct hal_srng_params *ring_params)
{
	uint32_t addr_low;
	uint32_t addr_high;
	uint32_t msi_data_start;
	uint32_t msi_data_count;
	uint32_t msi_irq_start;
	int ret;

	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);

	/* msi config not found */
	if (ret)
		return;

	pld_get_msi_address(scn->qdf_dev->dev, &addr_low, &addr_high);

	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data = (ce_id % msi_data_count) + msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;

	HIF_DBG("%s: ce_id %d, msi_addr %pK, msi_data %d", __func__, ce_id,
		  (void *)ring_params->msi_addr, ring_params->msi_data);
}

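/*
 * ce_srng_src_ring_setup() - configure and register the CE source ring.
 *
 * Interrupt moderation is effectively disabled here (timer threshold 0,
 * batch counter of 1) because a non-zero timer default caused continuous
 * interrupts with MSI enabled; see the note inside the function.
 */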
658 			struct CE_ring_state *src_ring,
659 			struct CE_attr *attr)
660 {
661 	struct hal_srng_params ring_params = {0};
662 
663 	HIF_INFO("%s: ce_id %d", __func__, ce_id);
664 
665 	ring_params.ring_base_paddr = src_ring->base_addr_CE_space;
666 	ring_params.ring_base_vaddr = src_ring->base_addr_owner_space;
667 	ring_params.num_entries = src_ring->nentries;
668 	/*
669 	 * The minimum increment for the timer is 8us
670 	 * A default value of 0 disables the timer
671 	 * A valid default value caused continuous interrupts to
672 	 * fire with MSI enabled. Need to revisit usage of the timer
673 	 */
674 
675 	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
676 		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);
677 
678 		ring_params.intr_timer_thres_us = 0;
679 		ring_params.intr_batch_cntr_thres_entries = 1;
680 	}
681 
682 	src_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_SRC, ce_id, 0,
683 			&ring_params);
684 }
685 
/**
 * ce_srng_initialize_dest_timer_interrupt_war() - destination ring interrupt
 *	workaround initialization
 * @dest_ring: ring being initialized
 * @ring_params: pointer to initialized parameters
 *
 * For Napier & Hawkeye v1, the status ring timer interrupts do not work.
 * As a workaround, the host configures the destination ring to be a proxy
 * for work needing to be done.
 *
 * The interrupts are set up such that if the destination ring is less than
 * fully posted, there is likely undone work for the status ring that the
 * host should process.
 *
 * There is a timing bug in srng based copy engines such that a fully posted
 * srng based copy engine has 2 empty entries instead of just one.  The copy
 * engine data structures work with 1 empty entry, but the software
 * frequently fails to post the last entry due to the race condition.
 */
static void ce_srng_initialize_dest_timer_interrupt_war(
		struct CE_ring_state *dest_ring,
		struct hal_srng_params *ring_params)
{
	int num_buffers_when_fully_posted = dest_ring->nentries - 2;

	ring_params->low_threshold = num_buffers_when_fully_posted - 1;
	ring_params->intr_timer_thres_us = 1024;
	ring_params->intr_batch_cntr_thres_entries = 0;
	ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
}

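/*
 * ce_srng_dest_ring_setup() - configure and register the CE destination
 * ring. The host fills this ring (it is an SRNG source ring from the
 * host's point of view), so interrupts are configured either through the
 * low-threshold workaround above or, for chips without the status ring
 * timer issue, a plain timer/low-threshold combination.
 */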
static void ce_srng_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				struct CE_ring_state *dest_ring,
				struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};
	bool status_ring_timer_thresh_work_arround = true;

	HIF_INFO("%s: ce_id %d", __func__, ce_id);

	ring_params.ring_base_paddr = dest_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = dest_ring->base_addr_owner_space;
	ring_params.num_entries = dest_ring->nentries;
	ring_params.max_buffer_length = attr->src_sz_max;

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);
		if (status_ring_timer_thresh_work_arround) {
			ce_srng_initialize_dest_timer_interrupt_war(
					dest_ring, &ring_params);
		} else {
			/* normal behavior for future chips */
			ring_params.low_threshold = dest_ring->nentries >> 3;
			ring_params.intr_timer_thres_us = 100000;
			ring_params.intr_batch_cntr_thres_entries = 0;
			ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
		}
	}

	/* Dest ring is also source ring */
	dest_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST, ce_id, 0,
			&ring_params);
}

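/*
 * ce_srng_status_ring_setup() - configure and register the CE status ring.
 * Completion entries are produced by the target, so the host only programs
 * a timer threshold and a one-entry batch counter when interrupts are
 * enabled for this CE.
 */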
static void ce_srng_status_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				struct CE_ring_state *status_ring,
				struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};

	HIF_INFO("%s: ce_id %d", __func__, ce_id);

	ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);

	ring_params.ring_base_paddr = status_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = status_ring->base_addr_owner_space;
	ring_params.num_entries = status_ring->nentries;

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		ring_params.intr_timer_thres_us = 0x1000;
		ring_params.intr_batch_cntr_thres_entries = 0x1;
	}

	status_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST_STATUS,
			ce_id, 0, &ring_params);
}

static int ce_ring_setup_srng(struct hif_softc *scn, uint8_t ring_type,
		uint32_t ce_id, struct CE_ring_state *ring,
		struct CE_attr *attr)
{
	switch (ring_type) {
	case CE_RING_SRC:
		ce_srng_src_ring_setup(scn, ce_id, ring, attr);
		break;
	case CE_RING_DEST:
		ce_srng_dest_ring_setup(scn, ce_id, ring, attr);
		break;
	case CE_RING_STATUS:
		ce_srng_status_ring_setup(scn, ce_id, ring, attr);
		break;
	default:
		qdf_assert(0);
		break;
	}

	return 0;
}

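/*
 * ce_construct_shadow_config_srng() - add CE rings to the shadow register
 * configuration: the source ring for every CE with src_nentries, and both
 * the destination and destination-status rings for every CE with
 * dest_nentries.
 */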
static void ce_construct_shadow_config_srng(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int ce_id;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (hif_state->host_ce_config[ce_id].src_nentries)
			hal_set_one_shadow_config(scn->hal_soc,
						  CE_SRC, ce_id);

		if (hif_state->host_ce_config[ce_id].dest_nentries) {
			hal_set_one_shadow_config(scn->hal_soc,
						  CE_DST, ce_id);

			hal_set_one_shadow_config(scn->hal_soc,
						  CE_DST_STATUS, ce_id);
		}
	}
}

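/*
 * ce_prepare_shadow_register_v2_cfg_srng() - build the shadow register v2
 * configuration handed to the platform driver. If HAL does not already
 * hold a shadow configuration, one is constructed (HAL rings plus the CE
 * rings above) and the resulting table and its size are returned through
 * the out parameters.
 */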
static void ce_prepare_shadow_register_v2_cfg_srng(struct hif_softc *scn,
		struct pld_shadow_reg_v2_cfg **shadow_config,
		int *num_shadow_registers_configured)
{
	if (scn->hal_soc == NULL) {
		HIF_ERROR("%s: hal not initialized: not initializing shadow config",
			  __func__);
		return;
	}

	hal_get_shadow_config(scn->hal_soc, shadow_config,
			      num_shadow_registers_configured);

	if (*num_shadow_registers_configured != 0) {
		HIF_ERROR("%s: hal shadow register configuration already constructed",
			  __func__);

		/* return with original configuration */
		return;
	}

	hal_construct_shadow_config(scn->hal_soc);
	ce_construct_shadow_config_srng(scn);

	/* get updated configuration */
	hal_get_shadow_config(scn->hal_soc, shadow_config,
			      num_shadow_registers_configured);
}

static struct ce_ops ce_service_srng = {
	.ce_get_desc_size = ce_get_desc_size_srng,
	.ce_ring_setup = ce_ring_setup_srng,
	.ce_sendlist_send = ce_sendlist_send_srng,
	.ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_srng,
	.ce_revoke_recv_next = ce_revoke_recv_next_srng,
	.ce_cancel_send_next = ce_cancel_send_next_srng,
	.ce_recv_buf_enqueue = ce_recv_buf_enqueue_srng,
	.ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_srng,
	.ce_send_nolock = ce_send_nolock_srng,
	.watermark_int = ce_check_int_watermark_srng,
	.ce_completed_send_next_nolock = ce_completed_send_next_nolock_srng,
	.ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_srng,
	.ce_send_entries_done_nolock = ce_send_entries_done_nolock_srng,
	.ce_prepare_shadow_register_v2_cfg =
		ce_prepare_shadow_register_v2_cfg_srng,
};

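/*
 * Entry point for the SRNG flavour of the CE service. The CE core is
 * expected to select this ce_ops table (rather than the legacy
 * descriptor-based service) for SRNG-based targets, so the static
 * functions above are only reached through these function pointers.
 */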
struct ce_ops *ce_services_srng(void)
{
	return &ce_service_srng;
}
qdf_export_symbol(ce_services_srng);