xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_service_srng.c (revision 97f44cd39e4ff816eaa1710279d28cf6b9e65ad9)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 #include "hif_io32.h"
19 #include "reg_struct.h"
20 #include "ce_api.h"
21 #include "ce_main.h"
22 #include "ce_internal.h"
23 #include "ce_reg.h"
24 #include "qdf_lock.h"
25 #include "regtable.h"
26 #include "hif_main.h"
27 #include "hif_debug.h"
28 #include "hal_api.h"
29 #include "pld_common.h"
30 #include "qdf_module.h"
31 #include "hif.h"
32 
33 /*
34  * Support for Copy Engine hardware, which is mainly used for
35  * communication between Host and Target over a PCIe interconnect.
36  */
37 
38 /*
39  * A single CopyEngine (CE) comprises two "rings":
40  *   a source ring
41  *   a destination ring
42  *
43  * Each ring consists of a number of descriptors which specify
44  * an address, length, and meta-data.
45  *
46  * Typically, one side of the PCIe interconnect (Host or Target)
47  * controls one ring and the other side controls the other ring.
48  * The source side chooses when to initiate a transfer and it
49  * chooses what to send (buffer address, length). The destination
50  * side keeps a supply of "anonymous receive buffers" available and
51  * it handles incoming data as it arrives (when the destination
52  * receives an interrupt).
53  *
54  * The sender may send a simple buffer (address/length) or it may
55  * send a small list of buffers.  When a small list is sent, hardware
56  * "gathers" these and they end up in a single destination buffer
57  * with a single interrupt.
58  *
59  * There are several "contexts" managed by this layer -- more, it
60  * may seem, than should be needed. These are provided mainly for
61  * maximum flexibility and especially to facilitate a simpler HIF
62  * implementation. There are per-CopyEngine recv, send, and watermark
63  * contexts. These are supplied by the caller when a recv, send,
64  * or watermark handler is established and they are echoed back to
65  * the caller when the respective callbacks are invoked. There is
66  * also a per-transfer context supplied by the caller when a buffer
67  * (or sendlist) is sent and when a buffer is enqueued for recv.
68  * These per-transfer contexts are echoed back to the caller when
69  * the buffer is sent/received.
70  * Target TX hash result: toeplitz_hash_result
71  */
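/*
 * Illustrative sketch (not compiled here) of how the per-transfer context
 * described above flows through the recv path, assuming the ce_api.h
 * wrappers ce_recv_buf_enqueue() and ce_completed_recv_next() with the
 * prototypes shown; treat the exact signatures as assumptions.
 *
 *	// Post an rx buffer; the nbuf pointer is the per-transfer context.
 *	ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, nbuf_paddr);
 *
 *	// On the recv completion path the same context is echoed back.
 *	void *per_ce_ctx, *per_xfer_ctx;
 *	qdf_dma_addr_t buf;
 *	unsigned int nbytes, xfer_id, flags;
 *
 *	while (ce_completed_recv_next(ce_hdl, &per_ce_ctx, &per_xfer_ctx,
 *				      &buf, &nbytes, &xfer_id, &flags) ==
 *	       QDF_STATUS_SUCCESS) {
 *		qdf_nbuf_t rx_nbuf = (qdf_nbuf_t)per_xfer_ctx;
 *		// hand rx_nbuf (nbytes valid bytes) up to the HIF client
 *	}
 */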
72 
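/*
 * CE_ADDR_COPY - write a DMA address into a CE SRNG descriptor.
 *
 * CE descriptors carry a 40-bit buffer address: the low 32 bits go into
 * buffer_addr_lo and only bits 39:32 are kept in buffer_addr_hi (hence
 * the 0xFF mask). The completion paths below rebuild the address the
 * same way:
 *
 *	paddr = (uint64_t)desc->buffer_addr_lo |
 *		((uint64_t)(desc->buffer_addr_hi & 0xFF) << 32);
 */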
73 #define CE_ADDR_COPY(desc, dma_addr) do {\
74 		(desc)->buffer_addr_lo = (uint32_t)((dma_addr) &\
75 							  0xFFFFFFFF);\
76 		(desc)->buffer_addr_hi =\
77 			(uint32_t)(((dma_addr) >> 32) & 0xFF);\
78 	} while (0)
79 
80 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
81 void hif_record_ce_srng_desc_event(struct hif_softc *scn, int ce_id,
82 				   enum hif_ce_event_type type,
83 				   union ce_srng_desc *descriptor,
84 				   void *memory, int index,
85 				   int len, void *hal_ring)
86 {
87 	int record_index;
88 	struct hif_ce_desc_event *event;
89 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
90 	struct hif_ce_desc_event *hist_ev = NULL;
91 
92 	if (ce_id >= CE_COUNT_MAX)
93 		return;
94 
95 	hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id];
99 
100 	if (!ce_hist->enable[ce_id])
101 		return;
102 
103 	if (!hist_ev)
104 		return;
105 
106 	record_index = get_next_record_index(
107 			&ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX);
108 
109 	event = &hist_ev[record_index];
110 
111 	hif_clear_ce_desc_debug_data(event);
112 
113 	event->type = type;
114 	event->time = qdf_get_log_timestamp();
115 	event->cpu_id = qdf_get_cpu();
116 
117 	if (descriptor)
118 		qdf_mem_copy(&event->descriptor, descriptor,
119 			     hal_get_entrysize_from_srng(hal_ring));
120 
121 	if (hal_ring)
122 		hal_get_sw_hptp(scn->hal_soc, hal_ring, &event->current_tp,
123 				&event->current_hp);
124 
125 	event->memory = memory;
126 	event->index = index;
127 
128 	if (event->type == HIF_CE_SRC_RING_BUFFER_POST)
129 		hif_ce_desc_record_rx_paddr(scn, event, memory);
130 
131 	if (ce_hist->data_enable[ce_id])
132 		hif_ce_desc_data_record(event, len);
133 }
134 #endif /* HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */
135 
136 static QDF_STATUS
137 ce_send_nolock_srng(struct CE_handle *copyeng,
138 			   void *per_transfer_context,
139 			   qdf_dma_addr_t buffer,
140 			   uint32_t nbytes,
141 			   uint32_t transfer_id,
142 			   uint32_t flags,
143 			   uint32_t user_flags)
144 {
145 	QDF_STATUS status;
146 	struct CE_state *CE_state = (struct CE_state *)copyeng;
147 	struct CE_ring_state *src_ring = CE_state->src_ring;
148 	unsigned int nentries_mask = src_ring->nentries_mask;
149 	unsigned int write_index = src_ring->write_index;
150 	uint64_t dma_addr = buffer;
151 	struct hif_softc *scn = CE_state->scn;
152 
153 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
154 		return QDF_STATUS_E_FAILURE;
155 	if (unlikely(hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx,
156 					false) <= 0)) {
157 		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
158 		Q_TARGET_ACCESS_END(scn);
159 		return QDF_STATUS_E_FAILURE;
160 	}
161 	{
162 		enum hif_ce_event_type event_type = HIF_CE_SRC_RING_BUFFER_POST;
163 		struct ce_srng_src_desc *src_desc;
164 
165 		if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
166 			Q_TARGET_ACCESS_END(scn);
167 			return QDF_STATUS_E_FAILURE;
168 		}
169 
170 		src_desc = hal_srng_src_get_next_reaped(scn->hal_soc,
171 				src_ring->srng_ctx);
172 		if (!src_desc) {
173 			Q_TARGET_ACCESS_END(scn);
174 			return QDF_STATUS_E_INVAL;
175 		}
176 
177 		/* Update source descriptor address (low 32 bits and upper 8 bits) */
178 		src_desc->buffer_addr_lo =
179 			(uint32_t)(dma_addr & 0xFFFFFFFF);
180 		src_desc->buffer_addr_hi =
181 			(uint32_t)((dma_addr >> 32) & 0xFF);
182 
183 		src_desc->meta_data = transfer_id;
184 
185 		/*
186 		 * Set the swap bit if:
187 		 * typical sends on this CE are swapped (host is big-endian)
188 		 * and this send doesn't disable the swapping
189 		 * (data is not bytestream)
190 		 */
191 		src_desc->byte_swap =
192 			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
193 			  != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
194 		src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
195 		src_desc->nbytes = nbytes;
196 
197 		src_ring->per_transfer_context[write_index] =
198 			per_transfer_context;
199 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
200 
201 		hal_srng_access_end(scn->hal_soc, src_ring->srng_ctx);
202 
203 		/* src_ring->write_index hasn't been updated even though
204 		 * the register has already been written to.
205 		 */
206 		hif_record_ce_srng_desc_event(scn, CE_state->id, event_type,
207 					      (union ce_srng_desc *)src_desc,
208 					      per_transfer_context,
209 					      src_ring->write_index, nbytes,
210 					      src_ring->srng_ctx);
211 
212 		src_ring->write_index = write_index;
213 		status = QDF_STATUS_SUCCESS;
214 	}
215 	Q_TARGET_ACCESS_END(scn);
216 	return status;
217 }
218 
219 static QDF_STATUS
220 ce_sendlist_send_srng(struct CE_handle *copyeng,
221 		 void *per_transfer_context,
222 		 struct ce_sendlist *sendlist, unsigned int transfer_id)
223 {
224 	QDF_STATUS status = QDF_STATUS_E_NOMEM;
225 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
226 	struct CE_state *CE_state = (struct CE_state *)copyeng;
227 	struct CE_ring_state *src_ring = CE_state->src_ring;
228 	unsigned int num_items = sl->num_items;
229 	unsigned int sw_index;
230 	unsigned int write_index;
231 	struct hif_softc *scn = CE_state->scn;
232 
233 	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));
234 
235 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
236 	sw_index = src_ring->sw_index;
237 	write_index = src_ring->write_index;
238 
239 	if (hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx, false) >=
240 	    num_items) {
241 		struct ce_sendlist_item *item;
242 		int i;
243 
244 		/* handle all but the last item uniformly */
245 		for (i = 0; i < num_items - 1; i++) {
246 			item = &sl->item[i];
247 			/* TBDXXX: Support extensible sendlist_types? */
248 			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
249 			status = ce_send_nolock_srng(copyeng,
250 					CE_SENDLIST_ITEM_CTXT,
251 				(qdf_dma_addr_t) item->data,
252 				item->u.nbytes, transfer_id,
253 				item->flags | CE_SEND_FLAG_GATHER,
254 				item->user_flags);
255 			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
256 		}
257 		/* provide valid context pointer for final item */
258 		item = &sl->item[i];
259 		/* TBDXXX: Support extensible sendlist_types? */
260 		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
261 		status = ce_send_nolock_srng(copyeng, per_transfer_context,
262 					(qdf_dma_addr_t) item->data,
263 					item->u.nbytes,
264 					transfer_id, item->flags,
265 					item->user_flags);
266 		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
267 		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
268 					QDF_NBUF_TX_PKT_CE);
269 		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
270 			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
271 			QDF_TRACE_DEFAULT_PDEV_ID,
272 			(uint8_t *)(((qdf_nbuf_t)per_transfer_context)->data),
273 			sizeof(((qdf_nbuf_t)per_transfer_context)->data), QDF_TX));
274 	} else {
275 		/*
276 		 * Probably not worth the additional complexity to support
277 		 * partial sends with continuation or notification.  We expect
278 		 * to use large rings and small sendlists. If we can't handle
279 		 * the entire request at once, punt it back to the caller.
280 		 */
281 	}
282 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
283 
284 	return status;
285 }
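/*
 * Illustrative sketch (not compiled here) of how a caller typically feeds
 * the sendlist path above, assuming the ce_api.h wrappers ce_sendlist_init(),
 * ce_sendlist_buf_add() and ce_sendlist_send(); the exact prototypes are
 * assumptions. ce_sendlist_send_srng() marks every fragment but the last
 * with CE_SEND_FLAG_GATHER, so the target reassembles the fragments into a
 * single buffer and raises a single completion.
 *
 *	struct ce_sendlist sl;
 *
 *	ce_sendlist_init(&sl);
 *	ce_sendlist_buf_add(&sl, hdr_paddr, hdr_len, 0, 0);
 *	ce_sendlist_buf_add(&sl, payload_paddr, payload_len, 0, 0);
 *	// nbuf is echoed back as the per-transfer context on send completion.
 *	ce_sendlist_send(ce_hdl, (void *)nbuf, &sl, transfer_id);
 */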
286 
287 #define SLOTS_PER_DATAPATH_TX 2
288 
289 #ifndef AH_NEED_TX_DATA_SWAP
290 #define AH_NEED_TX_DATA_SWAP 0
291 #endif
292 /**
293  * ce_recv_buf_enqueue_srng() - enqueue a recv buffer into a copy engine
294  * @copyeng: copy engine handle
295  * @per_recv_context: virtual address of the nbuf
296  * @buffer: physical address of the nbuf
297  *
298  * Return: QDF_STATUS_SUCCESS if the buffer is enqueued
299  */
300 static QDF_STATUS
301 ce_recv_buf_enqueue_srng(struct CE_handle *copyeng,
302 		    void *per_recv_context, qdf_dma_addr_t buffer)
303 {
304 	QDF_STATUS status;
305 	struct CE_state *CE_state = (struct CE_state *)copyeng;
306 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
307 	unsigned int nentries_mask = dest_ring->nentries_mask;
308 	unsigned int write_index;
309 	unsigned int sw_index;
310 	uint64_t dma_addr = buffer;
311 	struct hif_softc *scn = CE_state->scn;
312 	struct ce_srng_dest_desc *dest_desc = NULL;
313 
314 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
315 	write_index = dest_ring->write_index;
316 	sw_index = dest_ring->sw_index;
317 
318 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
319 		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
320 		return QDF_STATUS_E_IO;
321 	}
322 
323 	if (hal_srng_access_start(scn->hal_soc, dest_ring->srng_ctx)) {
		Q_TARGET_ACCESS_END(scn);	/* balance the BEGIN above */
324 		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
325 		return QDF_STATUS_E_FAILURE;
326 	}
327 
328 	if ((hal_srng_src_num_avail(scn->hal_soc,
329 					dest_ring->srng_ctx, false) > 0)) {
330 		dest_desc = hal_srng_src_get_next(scn->hal_soc,
331 						  dest_ring->srng_ctx);
332 
333 		if (!dest_desc) {
334 			status = QDF_STATUS_E_FAILURE;
335 		} else {
336 
337 			CE_ADDR_COPY(dest_desc, dma_addr);
338 
339 			dest_ring->per_transfer_context[write_index] =
340 				per_recv_context;
341 
342 			/* Update Destination Ring Write Index */
343 			write_index = CE_RING_IDX_INCR(nentries_mask,
344 								write_index);
345 			status = QDF_STATUS_SUCCESS;
346 		}
347 	} else {
348 		dest_desc = NULL;
349 		status = QDF_STATUS_E_FAILURE;
350 	}
351 
352 	dest_ring->write_index = write_index;
353 	hal_srng_access_end(scn->hal_soc, dest_ring->srng_ctx);
354 	hif_record_ce_srng_desc_event(scn, CE_state->id,
355 				      HIF_CE_DEST_RING_BUFFER_POST,
356 				      (union ce_srng_desc *)dest_desc,
357 				      per_recv_context,
358 				      dest_ring->write_index, 0,
359 				      dest_ring->srng_ctx);
360 
361 	Q_TARGET_ACCESS_END(scn);
362 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
363 	return status;
364 }
365 
366 /*
367  * Guts of ce_recv_entries_done.
368  * The caller takes responsibility for any necessary locking.
369  */
370 static unsigned int
371 ce_recv_entries_done_nolock_srng(struct hif_softc *scn,
372 			    struct CE_state *CE_state)
373 {
374 	struct CE_ring_state *status_ring = CE_state->status_ring;
375 
376 	return hal_srng_dst_num_valid(scn->hal_soc,
377 				status_ring->srng_ctx, false);
378 }
379 
380 /*
381  * Guts of ce_send_entries_done.
382  * The caller takes responsibility for any necessary locking.
383  */
384 static unsigned int
385 ce_send_entries_done_nolock_srng(struct hif_softc *scn,
386 					struct CE_state *CE_state)
387 {
388 
389 	struct CE_ring_state *src_ring = CE_state->src_ring;
390 	int count = 0;
391 
392 	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx))
393 		return 0;
394 
395 	count = hal_srng_src_done_val(scn->hal_soc, src_ring->srng_ctx);
396 
397 	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);
398 
399 	return count;
400 }
401 
402 /*
403  * Guts of ce_completed_recv_next.
404  * The caller takes responsibility for any necessary locking.
405  */
406 static QDF_STATUS
407 ce_completed_recv_next_nolock_srng(struct CE_state *CE_state,
408 			      void **per_CE_contextp,
409 			      void **per_transfer_contextp,
410 			      qdf_dma_addr_t *bufferp,
411 			      unsigned int *nbytesp,
412 			      unsigned int *transfer_idp,
413 			      unsigned int *flagsp)
414 {
415 	QDF_STATUS status;
416 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
417 	struct CE_ring_state *status_ring = CE_state->status_ring;
418 	unsigned int nentries_mask = dest_ring->nentries_mask;
419 	unsigned int sw_index = dest_ring->sw_index;
420 	struct hif_softc *scn = CE_state->scn;
421 	struct ce_srng_dest_status_desc *dest_status = NULL;
422 	int nbytes;
423 	struct ce_srng_dest_status_desc dest_status_info;
424 
425 	if (hal_srng_access_start(scn->hal_soc, status_ring->srng_ctx)) {
426 		status = QDF_STATUS_E_FAILURE;
427 		goto done;
428 	}
429 
430 	dest_status = hal_srng_dst_peek(scn->hal_soc, status_ring->srng_ctx);
431 	if (!dest_status) {
432 		status = QDF_STATUS_E_FAILURE;
433 		hal_srng_access_end_reap(scn->hal_soc, status_ring->srng_ctx);
434 		goto done;
435 	}
436 
437 	/*
438 	 * Copy the dest status descriptor (dest_status_info) to local memory
439 	 * to avoid an extra read from non-cacheable memory.
440 	 */
441 	dest_status_info = *dest_status;
442 	nbytes = dest_status_info.nbytes;
443 	if (nbytes == 0) {
444 		uint32_t hp, tp;
445 
446 		/*
447 		 * This closes a relatively unusual race where the Host
448 		 * sees the updated DRRI before the update to the
449 		 * corresponding descriptor has completed. We treat this
450 		 * as a descriptor that is not yet done.
451 		 */
452 		hal_get_sw_hptp(scn->hal_soc, status_ring->srng_ctx,
453 				&hp, &tp);
454 		hif_info("No data to reap, hp %d tp %d", hp, tp);
455 		status = QDF_STATUS_E_FAILURE;
456 		hal_srng_access_end_reap(scn->hal_soc, status_ring->srng_ctx);
457 		goto done;
458 	}
459 
460 	/*
461 	 * Move the tail pointer since nbytes is non-zero and
462 	 * this entry is processed.
463 	 */
464 	hal_srng_dst_get_next(scn->hal_soc, status_ring->srng_ctx);
465 
466 	dest_status->nbytes = 0;
467 
468 	*nbytesp = nbytes;
469 	*transfer_idp = dest_status_info.meta_data;
470 	*flagsp = (dest_status_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;
471 
472 	if (per_CE_contextp)
473 		*per_CE_contextp = CE_state->recv_context;
474 
475 	/* NOTE: sw_index is more like a read_index in this context. It has a
476 	 * one-to-one mapping with the status ring.
477 	 * Get the per-transfer context from dest_ring.
478 	 */
479 	if (per_transfer_contextp)
480 		*per_transfer_contextp =
481 			dest_ring->per_transfer_context[sw_index];
482 
483 	dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */
484 
485 	/* Update sw_index */
486 	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
487 	dest_ring->sw_index = sw_index;
488 	status = QDF_STATUS_SUCCESS;
489 
490 	hal_srng_access_end(scn->hal_soc, status_ring->srng_ctx);
491 	hif_record_ce_srng_desc_event(scn, CE_state->id,
492 				      HIF_CE_DEST_RING_BUFFER_REAP,
493 				      NULL,
494 				      dest_ring->
495 				      per_transfer_context[sw_index],
496 				      dest_ring->sw_index, nbytes,
497 				      dest_ring->srng_ctx);
498 
499 done:
500 	hif_record_ce_srng_desc_event(scn, CE_state->id,
501 				      HIF_CE_DEST_STATUS_RING_REAP,
502 				      (union ce_srng_desc *)dest_status,
503 				      NULL,
504 				      -1, 0,
505 				      status_ring->srng_ctx);
506 
507 	return status;
508 }
509 
510 static QDF_STATUS
511 ce_revoke_recv_next_srng(struct CE_handle *copyeng,
512 		    void **per_CE_contextp,
513 		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
514 {
515 	struct CE_state *CE_state = (struct CE_state *)copyeng;
516 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
517 	unsigned int sw_index;
518 
519 	if (!dest_ring)
520 		return QDF_STATUS_E_FAILURE;
521 
522 	sw_index = dest_ring->sw_index;
523 
524 	if (per_CE_contextp)
525 		*per_CE_contextp = CE_state->recv_context;
526 
527 	/* NOTE: sw_index is more like a read_index in this context. It has a
528 	 * one-to-one mapping with the status ring.
529 	 * Get the per-transfer context from dest_ring.
530 	 */
531 	if (per_transfer_contextp)
532 		*per_transfer_contextp =
533 			dest_ring->per_transfer_context[sw_index];
534 
535 	if (!dest_ring->per_transfer_context[sw_index])
536 		return QDF_STATUS_E_FAILURE;
537 
538 	/* provide end condition */
539 	dest_ring->per_transfer_context[sw_index] = NULL;
540 
541 	/* Update sw_index */
542 	sw_index = CE_RING_IDX_INCR(dest_ring->nentries_mask, sw_index);
543 	dest_ring->sw_index = sw_index;
544 	return QDF_STATUS_SUCCESS;
545 }
546 
547 /*
548  * Guts of ce_completed_send_next.
549  * The caller takes responsibility for any necessary locking.
550  */
551 static QDF_STATUS
552 ce_completed_send_next_nolock_srng(struct CE_state *CE_state,
553 			      void **per_CE_contextp,
554 			      void **per_transfer_contextp,
555 			      qdf_dma_addr_t *bufferp,
556 			      unsigned int *nbytesp,
557 			      unsigned int *transfer_idp,
558 			      unsigned int *sw_idx,
559 			      unsigned int *hw_idx,
560 			      uint32_t *toeplitz_hash_result)
561 {
562 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
563 	struct CE_ring_state *src_ring = CE_state->src_ring;
564 	unsigned int nentries_mask = src_ring->nentries_mask;
565 	unsigned int sw_index = src_ring->sw_index;
566 	unsigned int swi = src_ring->sw_index;
567 	struct hif_softc *scn = CE_state->scn;
568 	struct ce_srng_src_desc *src_desc;
569 
570 	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
571 		status = QDF_STATUS_E_FAILURE;
572 		return status;
573 	}
574 
575 	src_desc = hal_srng_src_reap_next(scn->hal_soc, src_ring->srng_ctx);
576 	if (src_desc) {
577 		hif_record_ce_srng_desc_event(scn, CE_state->id,
578 					      HIF_TX_DESC_COMPLETION,
579 					      (union ce_srng_desc *)src_desc,
580 					      src_ring->
581 					      per_transfer_context[swi],
582 					      swi, src_desc->nbytes,
583 					      src_ring->srng_ctx);
584 
585 		/* Return data from completed source descriptor */
586 		*bufferp = (qdf_dma_addr_t)
587 			(((uint64_t)(src_desc)->buffer_addr_lo +
588 			  ((uint64_t)((src_desc)->buffer_addr_hi &
589 				  0xFF) << 32)));
590 		*nbytesp = src_desc->nbytes;
591 		*transfer_idp = src_desc->meta_data;
592 		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/
593 
594 		if (per_CE_contextp)
595 			*per_CE_contextp = CE_state->send_context;
596 
597 		/* sw_index is used more like read index */
598 		if (per_transfer_contextp)
599 			*per_transfer_contextp =
600 				src_ring->per_transfer_context[sw_index];
601 
602 		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */
603 
604 		/* Update sw_index */
605 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
606 		src_ring->sw_index = sw_index;
607 		status = QDF_STATUS_SUCCESS;
608 	}
609 	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);
610 
611 	return status;
612 }
613 
614 /* NB: Modelled after ce_completed_send_next */
615 static QDF_STATUS
616 ce_cancel_send_next_srng(struct CE_handle *copyeng,
617 		void **per_CE_contextp,
618 		void **per_transfer_contextp,
619 		qdf_dma_addr_t *bufferp,
620 		unsigned int *nbytesp,
621 		unsigned int *transfer_idp,
622 		uint32_t *toeplitz_hash_result)
623 {
624 	struct CE_state *CE_state;
625 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
626 	struct CE_ring_state *src_ring;
627 	unsigned int nentries_mask;
628 	unsigned int sw_index;
629 	struct hif_softc *scn;
630 	struct ce_srng_src_desc *src_desc;
631 
632 	CE_state = (struct CE_state *)copyeng;
633 	src_ring = CE_state->src_ring;
634 	if (!src_ring)
635 		return QDF_STATUS_E_FAILURE;
636 
637 	nentries_mask = src_ring->nentries_mask;
638 	sw_index = src_ring->sw_index;
639 	scn = CE_state->scn;
640 
641 	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
642 		status = QDF_STATUS_E_FAILURE;
643 		return status;
644 	}
645 
646 	src_desc = hal_srng_src_pending_reap_next(scn->hal_soc,
647 			src_ring->srng_ctx);
648 	if (src_desc) {
649 		/* Return data from completed source descriptor */
650 		*bufferp = (qdf_dma_addr_t)
651 			(((uint64_t)(src_desc)->buffer_addr_lo +
652 			  ((uint64_t)((src_desc)->buffer_addr_hi &
653 				  0xFF) << 32)));
654 		*nbytesp = src_desc->nbytes;
655 		*transfer_idp = src_desc->meta_data;
656 		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/
657 
658 		if (per_CE_contextp)
659 			*per_CE_contextp = CE_state->send_context;
660 
661 		/* sw_index is used more like read index */
662 		if (per_transfer_contextp)
663 			*per_transfer_contextp =
664 				src_ring->per_transfer_context[sw_index];
665 
666 		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */
667 
668 		/* Update sw_index */
669 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
670 		src_ring->sw_index = sw_index;
671 		status = QDF_STATUS_SUCCESS;
672 	}
673 	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);
674 
675 	return status;
676 }
677 
678 /*
679  * Adjust interrupts for the copy complete handler.
680  * If it's needed for either send or recv, then unmask
681  * this interrupt; otherwise, mask it.
682  *
683  * Called with target_lock held.
684  */
685 static void
686 ce_per_engine_handler_adjust_srng(struct CE_state *CE_state,
687 			     int disable_copy_compl_intr)
688 {
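	/*
	 * Nothing to do for SRNG based copy engines: the interrupt
	 * thresholds are configured once via the hal_srng_setup() ring
	 * params in the ring setup routines below.
	 */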
689 }
690 
691 static bool ce_check_int_watermark_srng(struct CE_state *CE_state,
692 					unsigned int *flags)
693 {
694 	/*TODO*/
695 	return false;
696 }
697 
698 static uint32_t ce_get_desc_size_srng(uint8_t ring_type)
699 {
700 	switch (ring_type) {
701 	case CE_RING_SRC:
702 		return sizeof(struct ce_srng_src_desc);
703 	case CE_RING_DEST:
704 		return sizeof(struct ce_srng_dest_desc);
705 	case CE_RING_STATUS:
706 		return sizeof(struct ce_srng_dest_status_desc);
707 	default:
708 		return 0;
709 	}
710 	return 0;
711 }
712 
713 static void ce_srng_msi_ring_params_setup(struct hif_softc *scn, uint32_t ce_id,
714 			      struct hal_srng_params *ring_params)
715 {
716 	uint32_t addr_low;
717 	uint32_t addr_high;
718 	uint32_t msi_data_start;
719 	uint32_t msi_data_count;
720 	uint32_t msi_irq_start;
721 	int ret;
722 
723 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
724 					  &msi_data_count, &msi_data_start,
725 					  &msi_irq_start);
726 
727 	/* msi config not found */
728 	if (ret)
729 		return;
730 
731 	pld_get_msi_address(scn->qdf_dev->dev, &addr_low, &addr_high);
732 
733 	ring_params->msi_addr = addr_low;
734 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
735 	ring_params->msi_data = (ce_id % msi_data_count) + msi_data_start;
736 	ring_params->flags |= HAL_SRNG_MSI_INTR;
737 
738 	hif_debug("ce_id %d, msi_addr %pK, msi_data %d", ce_id,
739 		  (void *)ring_params->msi_addr, ring_params->msi_data);
740 }
741 
742 static void ce_srng_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
743 				   struct CE_ring_state *src_ring,
744 				   struct CE_attr *attr)
745 {
746 	struct hal_srng_params ring_params = {0};
747 
748 	hif_debug("ce_id: %d", ce_id);
749 
750 	ring_params.ring_base_paddr = src_ring->base_addr_CE_space;
751 	ring_params.ring_base_vaddr = src_ring->base_addr_owner_space;
752 	ring_params.num_entries = src_ring->nentries;
753 	/*
754 	 * The minimum increment for the timer is 8us.
755 	 * A default value of 0 disables the timer.
756 	 * A valid (non-zero) default value caused continuous interrupts to
757 	 * fire with MSI enabled; the usage of the timer needs to be revisited.
758 	 */
759 
760 	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
761 		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);
762 
763 		ring_params.intr_timer_thres_us = 0;
764 		ring_params.intr_batch_cntr_thres_entries = 1;
765 		ring_params.prefetch_timer = HAL_SRNG_PREFETCH_TIMER;
766 	}
767 
768 	src_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_SRC, ce_id, 0,
769 			&ring_params);
770 }
771 
772 /**
773  * ce_srng_initialize_dest_timer_interrupt_war() - war initialization
774  * @dest_ring: ring being initialized
775  * @ring_params: pointer to initialized parameters
776  *
777  * For Napier & Hawkeye v1, the status ring timer interrupts do not work.
778  * As a workaround, the host configures the destination rings to be a proxy
779  * for the work needing to be done.
780  *
781  * The interrupts are set up such that if the destination ring is less than
782  * fully posted, there is likely undone work for the status ring that the host
783  * should process.
784  *
785  * There is a timing bug in srng based copy engines such that a fully posted
786  * srng based copy engine has 2 empty entries instead of just one. The copy
787  * engine data structures work with 1 empty entry, but the software frequently
788  * fails to post the last entry due to the race condition.
789  */
790 static void ce_srng_initialize_dest_timer_interrupt_war(
791 					struct CE_ring_state *dest_ring,
792 					struct hal_srng_params *ring_params)
793 {
794 	int num_buffers_when_fully_posted = dest_ring->nentries - 2;
795 
796 	ring_params->low_threshold = num_buffers_when_fully_posted - 1;
797 	ring_params->intr_timer_thres_us = 1024;
798 	ring_params->intr_batch_cntr_thres_entries = 0;
799 	ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
800 }
801 
802 static void ce_srng_dest_ring_setup(struct hif_softc *scn,
803 				    uint32_t ce_id,
804 				    struct CE_ring_state *dest_ring,
805 				    struct CE_attr *attr)
806 {
807 	struct hal_srng_params ring_params = {0};
808 	bool status_ring_timer_thresh_work_arround = true;
809 
810 	hif_debug("ce_id: %d", ce_id);
811 
812 	ring_params.ring_base_paddr = dest_ring->base_addr_CE_space;
813 	ring_params.ring_base_vaddr = dest_ring->base_addr_owner_space;
814 	ring_params.num_entries = dest_ring->nentries;
815 	ring_params.max_buffer_length = attr->src_sz_max;
816 
817 	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
818 		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);
819 		if (status_ring_timer_thresh_work_arround) {
820 			ce_srng_initialize_dest_timer_interrupt_war(
821 					dest_ring, &ring_params);
822 		} else {
823 			/* normal behavior for future chips */
824 			ring_params.low_threshold = dest_ring->nentries >> 3;
825 			ring_params.intr_timer_thres_us = 100000;
826 			ring_params.intr_batch_cntr_thres_entries = 0;
827 			ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
828 		}
829 		ring_params.prefetch_timer = HAL_SRNG_PREFETCH_TIMER;
830 	}
831 
832 	/* The CE dest ring is a source ring from the host's perspective */
833 	dest_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST, ce_id, 0,
834 			&ring_params);
835 }
836 
837 #ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
838 /**
839  * ce_status_ring_config_int_threshold() - configure ce status ring interrupt
840  *                                         thresholds
841  * @scn: hif handle
842  * @ring_params: ce srng params
843  *
844  * Return: None
845  */
846 static inline
847 void ce_status_ring_config_int_threshold(struct hif_softc *scn,
848 					 struct hal_srng_params *ring_params)
849 {
850 	ring_params->intr_timer_thres_us =
851 			scn->ini_cfg.ce_status_ring_timer_threshold;
852 	ring_params->intr_batch_cntr_thres_entries =
853 			scn->ini_cfg.ce_status_ring_batch_count_threshold;
854 }
855 #else
856 static inline
857 void ce_status_ring_config_int_threshold(struct hif_softc *scn,
858 					 struct hal_srng_params *ring_params)
859 {
860 	ring_params->intr_timer_thres_us = 0x1000;
861 	ring_params->intr_batch_cntr_thres_entries = 0x1;
862 }
863 #endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */
864 
865 static void ce_srng_status_ring_setup(struct hif_softc *scn, uint32_t ce_id,
866 				struct CE_ring_state *status_ring,
867 				struct CE_attr *attr)
868 {
869 	struct hal_srng_params ring_params = {0};
870 
871 	hif_debug("ce_id: %d", ce_id);
872 
873 	ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);
874 
875 	ring_params.ring_base_paddr = status_ring->base_addr_CE_space;
876 	ring_params.ring_base_vaddr = status_ring->base_addr_owner_space;
877 	ring_params.num_entries = status_ring->nentries;
878 
879 	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
880 		ce_status_ring_config_int_threshold(scn, &ring_params);
881 	}
882 
883 	status_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST_STATUS,
884 			ce_id, 0, &ring_params);
885 }
886 
887 static int ce_ring_setup_srng(struct hif_softc *scn, uint8_t ring_type,
888 		uint32_t ce_id, struct CE_ring_state *ring,
889 		struct CE_attr *attr)
890 {
891 	switch (ring_type) {
892 	case CE_RING_SRC:
893 		ce_srng_src_ring_setup(scn, ce_id, ring, attr);
894 		break;
895 	case CE_RING_DEST:
896 		ce_srng_dest_ring_setup(scn, ce_id, ring, attr);
897 		break;
898 	case CE_RING_STATUS:
899 		ce_srng_status_ring_setup(scn, ce_id, ring, attr);
900 		break;
901 	default:
902 		qdf_assert(0);
903 		break;
904 	}
905 
906 	return 0;
907 }
908 
909 static void ce_construct_shadow_config_srng(struct hif_softc *scn)
910 {
911 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
912 	int ce_id;
913 
914 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
915 		if (hif_state->host_ce_config[ce_id].src_nentries)
916 			hal_set_one_shadow_config(scn->hal_soc,
917 						  CE_SRC, ce_id);
918 
919 		if (hif_state->host_ce_config[ce_id].dest_nentries) {
920 			hal_set_one_shadow_config(scn->hal_soc,
921 						  CE_DST, ce_id);
922 
923 			hal_set_one_shadow_config(scn->hal_soc,
924 						  CE_DST_STATUS, ce_id);
925 		}
926 	}
927 }
928 
929 static void ce_prepare_shadow_register_v2_cfg_srng(struct hif_softc *scn,
930 		struct pld_shadow_reg_v2_cfg **shadow_config,
931 		int *num_shadow_registers_configured)
932 {
933 	if (!scn->hal_soc) {
934 		hif_err("hal not initialized: not initializing shadow config");
935 		return;
936 	}
937 
938 	hal_get_shadow_config(scn->hal_soc, shadow_config,
939 			      num_shadow_registers_configured);
940 
941 	if (*num_shadow_registers_configured != 0) {
942 		hif_err("hal shadow register configuration already constructed");
943 
944 		/* return with original configuration */
945 		return;
946 	}
947 	hal_construct_srng_shadow_regs(scn->hal_soc);
948 	ce_construct_shadow_config_srng(scn);
949 	hal_set_shadow_regs(scn->hal_soc);
950 	hal_construct_shadow_regs(scn->hal_soc);
951 	/* get updated configuration */
952 	hal_get_shadow_config(scn->hal_soc, shadow_config,
953 			      num_shadow_registers_configured);
954 }
955 
956 #ifdef HIF_CE_LOG_INFO
957 /**
958  * ce_get_index_info_srng() - Get CE index info
959  * @scn: HIF Context
960  * @ce_state: CE opaque handle
961  * @info: CE info
962  *
963  * Return: 0 for success and non-zero for failure
964  */
965 static
966 int ce_get_index_info_srng(struct hif_softc *scn, void *ce_state,
967 			   struct ce_index *info)
968 {
969 	struct CE_state *CE_state = (struct CE_state *)ce_state;
970 	uint32_t tp, hp;
971 
972 	info->id = CE_state->id;
973 	if (CE_state->src_ring) {
974 		hal_get_sw_hptp(scn->hal_soc, CE_state->src_ring->srng_ctx,
975 				&tp, &hp);
976 		info->u.srng_info.tp = tp;
977 		info->u.srng_info.hp = hp;
978 	} else if (CE_state->dest_ring && CE_state->status_ring) {
979 		hal_get_sw_hptp(scn->hal_soc, CE_state->status_ring->srng_ctx,
980 				&tp, &hp);
981 		info->u.srng_info.status_tp = tp;
982 		info->u.srng_info.status_hp = hp;
983 		hal_get_sw_hptp(scn->hal_soc, CE_state->dest_ring->srng_ctx,
984 				&tp, &hp);
985 		info->u.srng_info.tp = tp;
986 		info->u.srng_info.hp = hp;
987 	}
988 
989 	return 0;
990 }
991 #endif
992 
993 static struct ce_ops ce_service_srng = {
994 	.ce_get_desc_size = ce_get_desc_size_srng,
995 	.ce_ring_setup = ce_ring_setup_srng,
996 	.ce_sendlist_send = ce_sendlist_send_srng,
997 	.ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_srng,
998 	.ce_revoke_recv_next = ce_revoke_recv_next_srng,
999 	.ce_cancel_send_next = ce_cancel_send_next_srng,
1000 	.ce_recv_buf_enqueue = ce_recv_buf_enqueue_srng,
1001 	.ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_srng,
1002 	.ce_send_nolock = ce_send_nolock_srng,
1003 	.watermark_int = ce_check_int_watermark_srng,
1004 	.ce_completed_send_next_nolock = ce_completed_send_next_nolock_srng,
1005 	.ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_srng,
1006 	.ce_send_entries_done_nolock = ce_send_entries_done_nolock_srng,
1007 	.ce_prepare_shadow_register_v2_cfg =
1008 		ce_prepare_shadow_register_v2_cfg_srng,
1009 #ifdef HIF_CE_LOG_INFO
1010 	.ce_get_index_info =
1011 		ce_get_index_info_srng,
1012 #endif
1013 };
1014 
1015 struct ce_ops *ce_services_srng(void)
1016 {
1017 	return &ce_service_srng;
1018 }
1019 qdf_export_symbol(ce_services_srng);
1020 
1021 void ce_service_srng_init(void)
1022 {
1023 	ce_service_register_module(CE_SVC_SRNG, &ce_services_srng);
1024 }
1025