xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_service_srng.c (revision 6d768494e5ce14eb1603a695c86739d12ecc6ec2)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 #include "hif_io32.h"
19 #include "reg_struct.h"
20 #include "ce_api.h"
21 #include "ce_main.h"
22 #include "ce_internal.h"
23 #include "ce_reg.h"
24 #include "qdf_lock.h"
25 #include "regtable.h"
26 #include "hif_main.h"
27 #include "hif_debug.h"
28 #include "hal_api.h"
29 #include "pld_common.h"
30 #include "qdf_module.h"
31 #include "hif.h"
32 
33 /*
34  * Support for Copy Engine hardware, which is mainly used for
35  * communication between Host and Target over a PCIe interconnect.
36  */
37 
38 /*
39  * A single CopyEngine (CE) comprises two "rings":
40  *   a source ring
41  *   a destination ring
42  *
43  * Each ring consists of a number of descriptors which specify
44  * an address, length, and meta-data.
45  *
46  * Typically, one side of the PCIe interconnect (Host or Target)
47  * controls one ring and the other side controls the other ring.
48  * The source side chooses when to initiate a transfer and it
49  * chooses what to send (buffer address, length). The destination
50  * side keeps a supply of "anonymous receive buffers" available and
51  * it handles incoming data as it arrives (when the destination
52  * receives an interrupt).
53  *
54  * The sender may send a simple buffer (address/length) or it may
55  * send a small list of buffers.  When a small list is sent, hardware
56  * "gathers" these and they end up in a single destination buffer
57  * with a single interrupt.
58  *
59  * There are several "contexts" managed by this layer -- more, it
60  * may seem -- than should be needed. These are provided mainly for
61  * maximum flexibility and especially to facilitate a simpler HIF
62  * implementation. There are per-CopyEngine recv, send, and watermark
63  * contexts. These are supplied by the caller when a recv, send,
64  * or watermark handler is established and they are echoed back to
65  * the caller when the respective callbacks are invoked. There is
66  * also a per-transfer context supplied by the caller when a buffer
67  * (or sendlist) is sent and when a buffer is enqueued for recv.
68  * These per-transfer contexts are echoed back to the caller when
69  * the buffer is sent/received.
 * Send completions may also carry a Target TX hash result (toeplitz_hash_result).
71  */
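
/*
 * Illustrative sketch (documentation only, not compiled): one plausible way
 * an HIF client drives a copy engine through the generic API declared in
 * ce_api.h.  The handle, buffer and variable names are hypothetical and the
 * exact prototypes should be taken from ce_api.h rather than from this
 * comment.
 *
 *	struct CE_handle *ce_hdl;	// from copy engine init
 *	qdf_dma_addr_t paddr;		// DMA address of a mapped buffer
 *	void *ce_ctx, *xfer_ctx;
 *	unsigned int nbytes, id, sw_idx, hw_idx;
 *	uint32_t hash;
 *
 *	// Destination side: keep anonymous receive buffers posted.
 *	ce_recv_buf_enqueue(ce_hdl, nbuf, paddr);
 *
 *	// Source side: post a simple buffer; the per-transfer context
 *	// (here nbuf) is echoed back on completion.
 *	ce_send(ce_hdl, nbuf, paddr, nbytes, transfer_id, 0, 0);
 *
 *	// Completion processing, typically from the per-CE tasklet:
 *	while (ce_completed_send_next(ce_hdl, &ce_ctx, &xfer_ctx, &paddr,
 *				      &nbytes, &id, &sw_idx, &hw_idx,
 *				      &hash) == QDF_STATUS_SUCCESS)
 *		;	// recycle the buffer referenced by xfer_ctx
 */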
72 
73 #define CE_ADDR_COPY(desc, dma_addr) do {\
74 		(desc)->buffer_addr_lo = (uint32_t)((dma_addr) &\
75 							  0xFFFFFFFF);\
76 		(desc)->buffer_addr_hi =\
77 			(uint32_t)(((dma_addr) >> 32) & 0xFF);\
78 	} while (0)
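
/*
 * CE_ADDR_COPY splits a DMA address into a 32-bit low word plus the next
 * 8 bits, i.e. it assumes a 40-bit address space.  For example (values
 * chosen purely for illustration):
 *
 *	dma_addr              = 0x0000001234567890
 *	desc->buffer_addr_lo  = 0x34567890
 *	desc->buffer_addr_hi  = 0x12
 */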
79 
80 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
81 void hif_record_ce_srng_desc_event(struct hif_softc *scn, int ce_id,
82 				   enum hif_ce_event_type type,
83 				   union ce_srng_desc *descriptor,
84 				   void *memory, int index,
85 				   int len, void *hal_ring)
86 {
87 	int record_index;
88 	struct hif_ce_desc_event *event;
89 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
90 	struct hif_ce_desc_event *hist_ev = NULL;
91 
	if (ce_id >= CE_COUNT_MAX)
		return;

	hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id];
99 
100 	if (!ce_hist->enable[ce_id])
101 		return;
102 
103 	if (!hist_ev)
104 		return;
105 
106 	record_index = get_next_record_index(
107 			&ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX);
108 
109 	event = &hist_ev[record_index];
110 
111 	hif_clear_ce_desc_debug_data(event);
112 
113 	event->type = type;
114 	event->time = qdf_get_log_timestamp();
115 
116 	if (descriptor)
117 		qdf_mem_copy(&event->descriptor, descriptor,
118 			     hal_get_entrysize_from_srng(hal_ring));
119 
120 	if (hal_ring)
121 		hal_get_sw_hptp(scn->hal_soc, hal_ring, &event->current_tp,
122 				&event->current_hp);
123 
124 	event->memory = memory;
125 	event->index = index;
126 
127 	if (event->type == HIF_CE_SRC_RING_BUFFER_POST)
128 		hif_ce_desc_record_rx_paddr(scn, event, memory);
129 
130 	if (ce_hist->data_enable[ce_id])
131 		hif_ce_desc_data_record(event, len);
132 }
133 #endif /* HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */
134 
135 static int
136 ce_send_nolock_srng(struct CE_handle *copyeng,
137 			   void *per_transfer_context,
138 			   qdf_dma_addr_t buffer,
139 			   uint32_t nbytes,
140 			   uint32_t transfer_id,
141 			   uint32_t flags,
142 			   uint32_t user_flags)
143 {
144 	int status;
145 	struct CE_state *CE_state = (struct CE_state *)copyeng;
146 	struct CE_ring_state *src_ring = CE_state->src_ring;
147 	unsigned int nentries_mask = src_ring->nentries_mask;
148 	unsigned int write_index = src_ring->write_index;
149 	uint64_t dma_addr = buffer;
150 	struct hif_softc *scn = CE_state->scn;
151 
152 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
153 		return QDF_STATUS_E_FAILURE;
154 	if (unlikely(hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx,
155 					false) <= 0)) {
156 		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
157 		Q_TARGET_ACCESS_END(scn);
158 		return QDF_STATUS_E_FAILURE;
159 	}
160 	{
161 		enum hif_ce_event_type event_type = HIF_CE_SRC_RING_BUFFER_POST;
162 		struct ce_srng_src_desc *src_desc;
163 
164 		if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
165 			Q_TARGET_ACCESS_END(scn);
166 			return QDF_STATUS_E_FAILURE;
167 		}
168 
169 		src_desc = hal_srng_src_get_next_reaped(scn->hal_soc,
170 				src_ring->srng_ctx);
171 		if (!src_desc) {
172 			Q_TARGET_ACCESS_END(scn);
173 			return QDF_STATUS_E_INVAL;
174 		}
175 
176 		/* Update low 32 bits source descriptor address */
177 		src_desc->buffer_addr_lo =
178 			(uint32_t)(dma_addr & 0xFFFFFFFF);
179 		src_desc->buffer_addr_hi =
180 			(uint32_t)((dma_addr >> 32) & 0xFF);
181 
182 		src_desc->meta_data = transfer_id;
183 
184 		/*
185 		 * Set the swap bit if:
186 		 * typical sends on this CE are swapped (host is big-endian)
187 		 * and this send doesn't disable the swapping
188 		 * (data is not bytestream)
189 		 */
190 		src_desc->byte_swap =
191 			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
192 			  != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
193 		src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
194 		src_desc->nbytes = nbytes;
195 
196 		src_ring->per_transfer_context[write_index] =
197 			per_transfer_context;
198 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
199 
200 		hal_srng_access_end(scn->hal_soc, src_ring->srng_ctx);
201 
		/* src_ring->write_index hasn't been updated even though
		 * the register has already been written to.
204 		 */
205 		hif_record_ce_srng_desc_event(scn, CE_state->id, event_type,
206 					      (union ce_srng_desc *)src_desc,
207 					      per_transfer_context,
208 					      src_ring->write_index, nbytes,
209 					      src_ring->srng_ctx);
210 
211 		src_ring->write_index = write_index;
212 		status = QDF_STATUS_SUCCESS;
213 	}
214 	Q_TARGET_ACCESS_END(scn);
215 	return status;
216 }
217 
218 static int
219 ce_sendlist_send_srng(struct CE_handle *copyeng,
220 		 void *per_transfer_context,
221 		 struct ce_sendlist *sendlist, unsigned int transfer_id)
222 {
223 	int status = -ENOMEM;
224 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
225 	struct CE_state *CE_state = (struct CE_state *)copyeng;
226 	struct CE_ring_state *src_ring = CE_state->src_ring;
227 	unsigned int num_items = sl->num_items;
228 	unsigned int sw_index;
229 	unsigned int write_index;
230 	struct hif_softc *scn = CE_state->scn;
231 
232 	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));
233 
234 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
235 	sw_index = src_ring->sw_index;
236 	write_index = src_ring->write_index;
237 
238 	if (hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx, false) >=
239 	    num_items) {
240 		struct ce_sendlist_item *item;
241 		int i;
242 
243 		/* handle all but the last item uniformly */
244 		for (i = 0; i < num_items - 1; i++) {
245 			item = &sl->item[i];
246 			/* TBDXXX: Support extensible sendlist_types? */
247 			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
248 			status = ce_send_nolock_srng(copyeng,
249 					CE_SENDLIST_ITEM_CTXT,
250 				(qdf_dma_addr_t) item->data,
251 				item->u.nbytes, transfer_id,
252 				item->flags | CE_SEND_FLAG_GATHER,
253 				item->user_flags);
254 			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
255 		}
256 		/* provide valid context pointer for final item */
257 		item = &sl->item[i];
258 		/* TBDXXX: Support extensible sendlist_types? */
259 		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
260 		status = ce_send_nolock_srng(copyeng, per_transfer_context,
261 					(qdf_dma_addr_t) item->data,
262 					item->u.nbytes,
263 					transfer_id, item->flags,
264 					item->user_flags);
265 		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
266 		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
267 					QDF_NBUF_TX_PKT_CE);
268 		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
269 			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
270 			QDF_TRACE_DEFAULT_PDEV_ID,
271 			(uint8_t *)(((qdf_nbuf_t)per_transfer_context)->data),
272 			sizeof(((qdf_nbuf_t)per_transfer_context)->data), QDF_TX));
273 	} else {
274 		/*
275 		 * Probably not worth the additional complexity to support
276 		 * partial sends with continuation or notification.  We expect
277 		 * to use large rings and small sendlists. If we can't handle
278 		 * the entire request at once, punt it back to the caller.
279 		 */
280 	}
281 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
282 
283 	return status;
284 }
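
/*
 * Illustrative sketch (documentation only): how a caller typically builds
 * and submits a sendlist so that the fragments are gathered into a single
 * destination buffer with a single interrupt.  Names are hypothetical; see
 * ce_api.h for the sendlist helpers actually exported.
 *
 *	struct ce_sendlist sl;
 *
 *	ce_sendlist_init(&sl);
 *	// every fragment but the last is posted with CE_SEND_FLAG_GATHER
 *	// internally by ce_sendlist_send()
 *	ce_sendlist_buf_add(&sl, frag0_paddr, frag0_len, 0, 0);
 *	ce_sendlist_buf_add(&sl, frag1_paddr, frag1_len, 0, 0);
 *	ce_sendlist_send(ce_hdl, nbuf, &sl, transfer_id);
 */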
285 
286 #define SLOTS_PER_DATAPATH_TX 2
287 
288 #ifndef AH_NEED_TX_DATA_SWAP
289 #define AH_NEED_TX_DATA_SWAP 0
290 #endif
291 /**
292  * ce_recv_buf_enqueue_srng() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
294  * @per_recv_context: virtual address of the nbuf
295  * @buffer: physical address of the nbuf
296  *
297  * Return: 0 if the buffer is enqueued
298  */
299 static int
300 ce_recv_buf_enqueue_srng(struct CE_handle *copyeng,
301 		    void *per_recv_context, qdf_dma_addr_t buffer)
302 {
303 	int status;
304 	struct CE_state *CE_state = (struct CE_state *)copyeng;
305 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
306 	unsigned int nentries_mask = dest_ring->nentries_mask;
307 	unsigned int write_index;
308 	unsigned int sw_index;
309 	uint64_t dma_addr = buffer;
310 	struct hif_softc *scn = CE_state->scn;
311 	struct ce_srng_dest_desc *dest_desc = NULL;
312 
313 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
314 	write_index = dest_ring->write_index;
315 	sw_index = dest_ring->sw_index;
316 
317 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
318 		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
319 		return -EIO;
320 	}
321 
	if (hal_srng_access_start(scn->hal_soc, dest_ring->srng_ctx)) {
		/* balance the Q_TARGET_ACCESS_BEGIN above before bailing out */
		Q_TARGET_ACCESS_END(scn);
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return QDF_STATUS_E_FAILURE;
	}
326 
327 	if ((hal_srng_src_num_avail(scn->hal_soc,
328 					dest_ring->srng_ctx, false) > 0)) {
329 		dest_desc = hal_srng_src_get_next(scn->hal_soc,
330 						  dest_ring->srng_ctx);
331 
332 		if (!dest_desc) {
333 			status = QDF_STATUS_E_FAILURE;
334 		} else {
335 
336 			CE_ADDR_COPY(dest_desc, dma_addr);
337 
338 			dest_ring->per_transfer_context[write_index] =
339 				per_recv_context;
340 
341 			/* Update Destination Ring Write Index */
342 			write_index = CE_RING_IDX_INCR(nentries_mask,
343 								write_index);
344 			status = QDF_STATUS_SUCCESS;
345 		}
346 	} else {
347 		dest_desc = NULL;
348 		status = QDF_STATUS_E_FAILURE;
349 	}
350 
351 	dest_ring->write_index = write_index;
352 	hal_srng_access_end(scn->hal_soc, dest_ring->srng_ctx);
353 	hif_record_ce_srng_desc_event(scn, CE_state->id,
354 				      HIF_CE_DEST_RING_BUFFER_POST,
355 				      (union ce_srng_desc *)dest_desc,
356 				      per_recv_context,
357 				      dest_ring->write_index, 0,
358 				      dest_ring->srng_ctx);
359 
360 	Q_TARGET_ACCESS_END(scn);
361 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
362 	return status;
363 }
364 
365 /*
366  * Guts of ce_recv_entries_done.
367  * The caller takes responsibility for any necessary locking.
368  */
369 static unsigned int
370 ce_recv_entries_done_nolock_srng(struct hif_softc *scn,
371 			    struct CE_state *CE_state)
372 {
373 	struct CE_ring_state *status_ring = CE_state->status_ring;
374 
375 	return hal_srng_dst_num_valid(scn->hal_soc,
376 				status_ring->srng_ctx, false);
377 }
378 
379 /*
380  * Guts of ce_send_entries_done.
381  * The caller takes responsibility for any necessary locking.
382  */
383 static unsigned int
384 ce_send_entries_done_nolock_srng(struct hif_softc *scn,
385 					struct CE_state *CE_state)
386 {
387 
388 	struct CE_ring_state *src_ring = CE_state->src_ring;
389 	int count = 0;
390 
391 	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx))
392 		return 0;
393 
394 	count = hal_srng_src_done_val(scn->hal_soc, src_ring->srng_ctx);
395 
396 	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);
397 
398 	return count;
399 }
400 
401 /*
402  * Guts of ce_completed_recv_next.
403  * The caller takes responsibility for any necessary locking.
404  */
405 static int
406 ce_completed_recv_next_nolock_srng(struct CE_state *CE_state,
407 			      void **per_CE_contextp,
408 			      void **per_transfer_contextp,
409 			      qdf_dma_addr_t *bufferp,
410 			      unsigned int *nbytesp,
411 			      unsigned int *transfer_idp,
412 			      unsigned int *flagsp)
413 {
414 	int status;
415 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
416 	struct CE_ring_state *status_ring = CE_state->status_ring;
417 	unsigned int nentries_mask = dest_ring->nentries_mask;
418 	unsigned int sw_index = dest_ring->sw_index;
419 	struct hif_softc *scn = CE_state->scn;
420 	struct ce_srng_dest_status_desc *dest_status = NULL;
421 	int nbytes;
422 	struct ce_srng_dest_status_desc dest_status_info;
423 
424 	if (hal_srng_access_start(scn->hal_soc, status_ring->srng_ctx)) {
425 		status = QDF_STATUS_E_FAILURE;
426 		goto done;
427 	}
428 
429 	dest_status = hal_srng_dst_peek(scn->hal_soc, status_ring->srng_ctx);
430 	if (!dest_status) {
431 		status = QDF_STATUS_E_FAILURE;
432 		hal_srng_access_end_reap(scn->hal_soc, status_ring->srng_ctx);
433 		goto done;
434 	}
435 
	/*
	 * Copy the descriptor to local memory (dest_status_info) so that
	 * we avoid extra reads from non-cacheable memory.
	 */
440 	dest_status_info = *dest_status;
441 	nbytes = dest_status_info.nbytes;
442 	if (nbytes == 0) {
443 		uint32_t hp, tp;
444 
445 		/*
446 		 * This closes a relatively unusual race where the Host
447 		 * sees the updated DRRI before the update to the
448 		 * corresponding descriptor has completed. We treat this
449 		 * as a descriptor that is not yet done.
450 		 */
451 		hal_get_sw_hptp(scn->hal_soc, status_ring->srng_ctx,
452 				&hp, &tp);
453 		hif_info("No data to reap, hp %d tp %d", hp, tp);
454 		status = QDF_STATUS_E_FAILURE;
455 		hal_srng_access_end_reap(scn->hal_soc, status_ring->srng_ctx);
456 		goto done;
457 	}
458 
459 	/*
460 	 * Move the tail pointer since nbytes is non-zero and
461 	 * this entry is processed.
462 	 */
463 	hal_srng_dst_get_next(scn->hal_soc, status_ring->srng_ctx);
464 
465 	dest_status->nbytes = 0;
466 
467 	*nbytesp = nbytes;
468 	*transfer_idp = dest_status_info.meta_data;
469 	*flagsp = (dest_status_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;
470 
471 	if (per_CE_contextp)
472 		*per_CE_contextp = CE_state->recv_context;
473 
474 	/* NOTE: sw_index is more like a read_index in this context. It has a
	 * one-to-one mapping with the status ring.
	 * Get the per-transfer context from dest_ring.
477 	 */
478 	if (per_transfer_contextp)
479 		*per_transfer_contextp =
480 			dest_ring->per_transfer_context[sw_index];
481 
482 	dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */
483 
484 	/* Update sw_index */
485 	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
486 	dest_ring->sw_index = sw_index;
487 	status = QDF_STATUS_SUCCESS;
488 
489 	hal_srng_access_end(scn->hal_soc, status_ring->srng_ctx);
490 	hif_record_ce_srng_desc_event(scn, CE_state->id,
491 				      HIF_CE_DEST_RING_BUFFER_REAP,
492 				      NULL,
493 				      dest_ring->
494 				      per_transfer_context[sw_index],
495 				      dest_ring->sw_index, nbytes,
496 				      dest_ring->srng_ctx);
497 
498 done:
499 	hif_record_ce_srng_desc_event(scn, CE_state->id,
500 				      HIF_CE_DEST_STATUS_RING_REAP,
501 				      (union ce_srng_desc *)dest_status,
502 				      NULL,
503 				      -1, 0,
504 				      status_ring->srng_ctx);
505 
506 	return status;
507 }
508 
509 static QDF_STATUS
510 ce_revoke_recv_next_srng(struct CE_handle *copyeng,
511 		    void **per_CE_contextp,
512 		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
513 {
514 	struct CE_state *CE_state = (struct CE_state *)copyeng;
515 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
516 	unsigned int sw_index;
517 
518 	if (!dest_ring)
519 		return QDF_STATUS_E_FAILURE;
520 
521 	sw_index = dest_ring->sw_index;
522 
523 	if (per_CE_contextp)
524 		*per_CE_contextp = CE_state->recv_context;
525 
526 	/* NOTE: sw_index is more like a read_index in this context. It has a
	 * one-to-one mapping with the status ring.
	 * Get the per-transfer context from dest_ring.
529 	 */
530 	if (per_transfer_contextp)
531 		*per_transfer_contextp =
532 			dest_ring->per_transfer_context[sw_index];
533 
534 	if (!dest_ring->per_transfer_context[sw_index])
535 		return QDF_STATUS_E_FAILURE;
536 
537 	/* provide end condition */
538 	dest_ring->per_transfer_context[sw_index] = NULL;
539 
540 	/* Update sw_index */
541 	sw_index = CE_RING_IDX_INCR(dest_ring->nentries_mask, sw_index);
542 	dest_ring->sw_index = sw_index;
543 	return QDF_STATUS_SUCCESS;
544 }
545 
546 /*
547  * Guts of ce_completed_send_next.
548  * The caller takes responsibility for any necessary locking.
549  */
550 static int
551 ce_completed_send_next_nolock_srng(struct CE_state *CE_state,
552 			      void **per_CE_contextp,
553 			      void **per_transfer_contextp,
554 			      qdf_dma_addr_t *bufferp,
555 			      unsigned int *nbytesp,
556 			      unsigned int *transfer_idp,
557 			      unsigned int *sw_idx,
558 			      unsigned int *hw_idx,
559 			      uint32_t *toeplitz_hash_result)
560 {
561 	int status = QDF_STATUS_E_FAILURE;
562 	struct CE_ring_state *src_ring = CE_state->src_ring;
563 	unsigned int nentries_mask = src_ring->nentries_mask;
564 	unsigned int sw_index = src_ring->sw_index;
565 	unsigned int swi = src_ring->sw_index;
566 	struct hif_softc *scn = CE_state->scn;
567 	struct ce_srng_src_desc *src_desc;
568 
569 	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
570 		status = QDF_STATUS_E_FAILURE;
571 		return status;
572 	}
573 
574 	src_desc = hal_srng_src_reap_next(scn->hal_soc, src_ring->srng_ctx);
575 	if (src_desc) {
576 		hif_record_ce_srng_desc_event(scn, CE_state->id,
577 					      HIF_TX_DESC_COMPLETION,
578 					      (union ce_srng_desc *)src_desc,
579 					      src_ring->
580 					      per_transfer_context[swi],
581 					      swi, src_desc->nbytes,
582 					      src_ring->srng_ctx);
583 
584 		/* Return data from completed source descriptor */
585 		*bufferp = (qdf_dma_addr_t)
586 			(((uint64_t)(src_desc)->buffer_addr_lo +
587 			  ((uint64_t)((src_desc)->buffer_addr_hi &
588 				  0xFF) << 32)));
589 		*nbytesp = src_desc->nbytes;
590 		*transfer_idp = src_desc->meta_data;
591 		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/
592 
593 		if (per_CE_contextp)
594 			*per_CE_contextp = CE_state->send_context;
595 
596 		/* sw_index is used more like read index */
597 		if (per_transfer_contextp)
598 			*per_transfer_contextp =
599 				src_ring->per_transfer_context[sw_index];
600 
601 		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */
602 
603 		/* Update sw_index */
604 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
605 		src_ring->sw_index = sw_index;
606 		status = QDF_STATUS_SUCCESS;
607 	}
608 	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);
609 
610 	return status;
611 }
612 
613 /* NB: Modelled after ce_completed_send_next */
614 static QDF_STATUS
615 ce_cancel_send_next_srng(struct CE_handle *copyeng,
616 		void **per_CE_contextp,
617 		void **per_transfer_contextp,
618 		qdf_dma_addr_t *bufferp,
619 		unsigned int *nbytesp,
620 		unsigned int *transfer_idp,
621 		uint32_t *toeplitz_hash_result)
622 {
623 	struct CE_state *CE_state;
624 	int status = QDF_STATUS_E_FAILURE;
625 	struct CE_ring_state *src_ring;
626 	unsigned int nentries_mask;
627 	unsigned int sw_index;
628 	struct hif_softc *scn;
629 	struct ce_srng_src_desc *src_desc;
630 
631 	CE_state = (struct CE_state *)copyeng;
632 	src_ring = CE_state->src_ring;
633 	if (!src_ring)
634 		return QDF_STATUS_E_FAILURE;
635 
636 	nentries_mask = src_ring->nentries_mask;
637 	sw_index = src_ring->sw_index;
638 	scn = CE_state->scn;
639 
640 	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
641 		status = QDF_STATUS_E_FAILURE;
642 		return status;
643 	}
644 
645 	src_desc = hal_srng_src_pending_reap_next(scn->hal_soc,
646 			src_ring->srng_ctx);
647 	if (src_desc) {
648 		/* Return data from completed source descriptor */
649 		*bufferp = (qdf_dma_addr_t)
650 			(((uint64_t)(src_desc)->buffer_addr_lo +
651 			  ((uint64_t)((src_desc)->buffer_addr_hi &
652 				  0xFF) << 32)));
653 		*nbytesp = src_desc->nbytes;
654 		*transfer_idp = src_desc->meta_data;
655 		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/
656 
657 		if (per_CE_contextp)
658 			*per_CE_contextp = CE_state->send_context;
659 
660 		/* sw_index is used more like read index */
661 		if (per_transfer_contextp)
662 			*per_transfer_contextp =
663 				src_ring->per_transfer_context[sw_index];
664 
665 		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */
666 
667 		/* Update sw_index */
668 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
669 		src_ring->sw_index = sw_index;
670 		status = QDF_STATUS_SUCCESS;
671 	}
672 	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);
673 
674 	return status;
675 }
676 
677 /*
678  * Adjust interrupts for the copy complete handler.
679  * If it's needed for either send or recv, then unmask
680  * this interrupt; otherwise, mask it.
681  *
682  * Called with target_lock held.
683  */
684 static void
685 ce_per_engine_handler_adjust_srng(struct CE_state *CE_state,
686 			     int disable_copy_compl_intr)
687 {
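	/*
	 * Intentionally a no-op for SRNG-based copy engines: interrupt
	 * moderation is configured through hal_srng_params at ring setup
	 * time (see the *_ring_setup routines below) rather than adjusted
	 * per engine at runtime.
	 */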
688 }
689 
690 static bool ce_check_int_watermark_srng(struct CE_state *CE_state,
691 					unsigned int *flags)
692 {
693 	/*TODO*/
694 	return false;
695 }
696 
697 static uint32_t ce_get_desc_size_srng(uint8_t ring_type)
698 {
699 	switch (ring_type) {
700 	case CE_RING_SRC:
701 		return sizeof(struct ce_srng_src_desc);
702 	case CE_RING_DEST:
703 		return sizeof(struct ce_srng_dest_desc);
704 	case CE_RING_STATUS:
705 		return sizeof(struct ce_srng_dest_status_desc);
706 	default:
707 		return 0;
708 	}
710 }
711 
712 static void ce_srng_msi_ring_params_setup(struct hif_softc *scn, uint32_t ce_id,
713 			      struct hal_srng_params *ring_params)
714 {
715 	uint32_t addr_low;
716 	uint32_t addr_high;
717 	uint32_t msi_data_start;
718 	uint32_t msi_data_count;
719 	uint32_t msi_irq_start;
720 	int ret;
721 
722 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
723 					  &msi_data_count, &msi_data_start,
724 					  &msi_irq_start);
725 
726 	/* msi config not found */
727 	if (ret)
728 		return;
729 
730 	pld_get_msi_address(scn->qdf_dev->dev, &addr_low, &addr_high);
731 
732 	ring_params->msi_addr = addr_low;
733 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
734 	ring_params->msi_data = (ce_id % msi_data_count) + msi_data_start;
735 	ring_params->flags |= HAL_SRNG_MSI_INTR;
736 
737 	HIF_DBG("%s: ce_id %d, msi_addr %pK, msi_data %d", __func__, ce_id,
738 		  (void *)ring_params->msi_addr, ring_params->msi_data);
739 }
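
/*
 * Worked example (hypothetical numbers): with msi_data_start = 1 and
 * msi_data_count = 8, the MSI data value programmed for each copy engine
 * is (ce_id % 8) + 1, so CE 0..7 map to MSI data 1..8 and CE 8 wraps back
 * to 1.  msi_addr is the 64-bit value addr_high:addr_low returned by
 * pld_get_msi_address().
 */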
740 
741 static void ce_srng_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
742 				   struct CE_ring_state *src_ring,
743 				   struct CE_attr *attr)
744 {
745 	struct hal_srng_params ring_params = {0};
746 
747 	hif_debug("%s: ce_id %d", __func__, ce_id);
748 
749 	ring_params.ring_base_paddr = src_ring->base_addr_CE_space;
750 	ring_params.ring_base_vaddr = src_ring->base_addr_owner_space;
751 	ring_params.num_entries = src_ring->nentries;
	/*
	 * The minimum increment for the timer is 8us.
	 * A value of 0 disables the timer.
	 * A non-zero default value caused continuous interrupts to fire
	 * with MSI enabled, so the timer is left disabled here; its usage
	 * needs to be revisited.
	 */
758 
759 	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
760 		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);
761 
762 		ring_params.intr_timer_thres_us = 0;
763 		ring_params.intr_batch_cntr_thres_entries = 1;
764 		ring_params.prefetch_timer = HAL_SRNG_PREFETCH_TIMER;
765 	}
766 
767 	src_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_SRC, ce_id, 0,
768 			&ring_params);
769 }
770 
771 /**
772  * ce_srng_initialize_dest_timer_interrupt_war() - war initialization
773  * @dest_ring: ring being initialized
774  * @ring_params: pointer to initialized parameters
775  *
 * For Napier and Hawkeye v1, the status ring timer interrupts do not work.
 * As a workaround, the host configures the destination ring to act as a
 * proxy for work that needs to be done on the status ring.
 *
 * The interrupts are set up such that if the destination ring is less than
 * fully posted, there is likely undone work on the status ring that the
 * host should process.
 *
 * There is a timing bug in SRNG-based copy engines such that a fully posted
 * ring has two empty entries instead of just one.  The copy engine data
 * structures work with one empty entry, but the software frequently fails
 * to post the last entry due to the race condition.  (A worked example of
 * the resulting thresholds follows the function below.)
788  */
789 static void ce_srng_initialize_dest_timer_interrupt_war(
790 					struct CE_ring_state *dest_ring,
791 					struct hal_srng_params *ring_params)
792 {
793 	int num_buffers_when_fully_posted = dest_ring->nentries - 2;
794 
795 	ring_params->low_threshold = num_buffers_when_fully_posted - 1;
796 	ring_params->intr_timer_thres_us = 1024;
797 	ring_params->intr_batch_cntr_thres_entries = 0;
798 	ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
799 }
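
/*
 * Worked example (hypothetical ring size): with dest_ring->nentries = 32,
 * num_buffers_when_fully_posted = 30 and low_threshold = 29, i.e. the
 * low-threshold interrupt is requested as soon as the ring is even one
 * buffer short of fully posted, which per the workaround above serves as
 * a proxy for pending status-ring work.  The 1024us timer batches the
 * notifications.
 */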
800 
801 static void ce_srng_dest_ring_setup(struct hif_softc *scn,
802 				    uint32_t ce_id,
803 				    struct CE_ring_state *dest_ring,
804 				    struct CE_attr *attr)
805 {
806 	struct hal_srng_params ring_params = {0};
807 	bool status_ring_timer_thresh_work_arround = true;
808 
809 	HIF_INFO("%s: ce_id %d", __func__, ce_id);
810 
811 	ring_params.ring_base_paddr = dest_ring->base_addr_CE_space;
812 	ring_params.ring_base_vaddr = dest_ring->base_addr_owner_space;
813 	ring_params.num_entries = dest_ring->nentries;
814 	ring_params.max_buffer_length = attr->src_sz_max;
815 
816 	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
817 		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);
818 		if (status_ring_timer_thresh_work_arround) {
819 			ce_srng_initialize_dest_timer_interrupt_war(
820 					dest_ring, &ring_params);
821 		} else {
822 			/* normal behavior for future chips */
823 			ring_params.low_threshold = dest_ring->nentries >> 3;
824 			ring_params.intr_timer_thres_us = 100000;
825 			ring_params.intr_batch_cntr_thres_entries = 0;
826 			ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
827 		}
828 		ring_params.prefetch_timer = HAL_SRNG_PREFETCH_TIMER;
829 	}
830 
	/* The CE dest ring is also a source ring from the host's
	 * perspective: the host produces (posts) empty buffers into it.
	 */
832 	dest_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST, ce_id, 0,
833 			&ring_params);
834 }
835 
836 #ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
837 /**
838  * ce_status_ring_config_int_threshold() - configure ce status ring interrupt
839  *                                         thresholds
840  * @scn: hif handle
841  * @ring_params: ce srng params
842  *
843  * Return: None
844  */
845 static inline
846 void ce_status_ring_config_int_threshold(struct hif_softc *scn,
847 					 struct hal_srng_params *ring_params)
848 {
849 	ring_params->intr_timer_thres_us =
850 			scn->ini_cfg.ce_status_ring_timer_threshold;
851 	ring_params->intr_batch_cntr_thres_entries =
852 			scn->ini_cfg.ce_status_ring_batch_count_threshold;
853 }
854 #else
855 static inline
856 void ce_status_ring_config_int_threshold(struct hif_softc *scn,
857 					 struct hal_srng_params *ring_params)
858 {
859 	ring_params->intr_timer_thres_us = 0x1000;
860 	ring_params->intr_batch_cntr_thres_entries = 0x1;
861 }
862 #endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */
863 
864 static void ce_srng_status_ring_setup(struct hif_softc *scn, uint32_t ce_id,
865 				struct CE_ring_state *status_ring,
866 				struct CE_attr *attr)
867 {
868 	struct hal_srng_params ring_params = {0};
869 
870 	HIF_INFO("%s: ce_id %d", __func__, ce_id);
871 
872 	ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);
873 
874 	ring_params.ring_base_paddr = status_ring->base_addr_CE_space;
875 	ring_params.ring_base_vaddr = status_ring->base_addr_owner_space;
876 	ring_params.num_entries = status_ring->nentries;
877 
	if (!(CE_ATTR_DISABLE_INTR & attr->flags))
		ce_status_ring_config_int_threshold(scn, &ring_params);
881 
882 	status_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST_STATUS,
883 			ce_id, 0, &ring_params);
884 }
885 
886 static int ce_ring_setup_srng(struct hif_softc *scn, uint8_t ring_type,
887 		uint32_t ce_id, struct CE_ring_state *ring,
888 		struct CE_attr *attr)
889 {
890 	switch (ring_type) {
891 	case CE_RING_SRC:
892 		ce_srng_src_ring_setup(scn, ce_id, ring, attr);
893 		break;
894 	case CE_RING_DEST:
895 		ce_srng_dest_ring_setup(scn, ce_id, ring, attr);
896 		break;
897 	case CE_RING_STATUS:
898 		ce_srng_status_ring_setup(scn, ce_id, ring, attr);
899 		break;
900 	default:
901 		qdf_assert(0);
902 		break;
903 	}
904 
905 	return 0;
906 }
907 
908 static void ce_construct_shadow_config_srng(struct hif_softc *scn)
909 {
910 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
911 	int ce_id;
912 
913 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
914 		if (hif_state->host_ce_config[ce_id].src_nentries)
915 			hal_set_one_shadow_config(scn->hal_soc,
916 						  CE_SRC, ce_id);
917 
918 		if (hif_state->host_ce_config[ce_id].dest_nentries) {
919 			hal_set_one_shadow_config(scn->hal_soc,
920 						  CE_DST, ce_id);
921 
922 			hal_set_one_shadow_config(scn->hal_soc,
923 						  CE_DST_STATUS, ce_id);
924 		}
925 	}
926 }
927 
928 static void ce_prepare_shadow_register_v2_cfg_srng(struct hif_softc *scn,
929 		struct pld_shadow_reg_v2_cfg **shadow_config,
930 		int *num_shadow_registers_configured)
931 {
932 	if (!scn->hal_soc) {
933 		HIF_ERROR("%s: hal not initialized: not initializing shadow config",
934 			  __func__);
935 		return;
936 	}
937 
938 	hal_get_shadow_config(scn->hal_soc, shadow_config,
939 			      num_shadow_registers_configured);
940 
941 	if (*num_shadow_registers_configured != 0) {
942 		HIF_ERROR("%s: hal shadow register configuration allready constructed",
943 			  __func__);
944 
945 		/* return with original configuration*/
946 		return;
947 	}
948 
949 	hal_construct_shadow_config(scn->hal_soc);
950 	ce_construct_shadow_config_srng(scn);
951 
952 	/* get updated configuration */
953 	hal_get_shadow_config(scn->hal_soc, shadow_config,
954 			      num_shadow_registers_configured);
955 }
956 
957 #ifdef HIF_CE_LOG_INFO
958 /**
959  * ce_get_index_info_srng(): Get CE index info
960  * @scn: HIF Context
961  * @ce_state: CE opaque handle
962  * @info: CE info
963  *
964  * Return: 0 for success and non zero for failure
965  */
966 static
967 int ce_get_index_info_srng(struct hif_softc *scn, void *ce_state,
968 			   struct ce_index *info)
969 {
970 	struct CE_state *CE_state = (struct CE_state *)ce_state;
971 	uint32_t tp, hp;
972 
973 	info->id = CE_state->id;
974 	if (CE_state->src_ring) {
975 		hal_get_sw_hptp(scn->hal_soc, CE_state->src_ring->srng_ctx,
976 				&tp, &hp);
977 		info->u.srng_info.tp = tp;
978 		info->u.srng_info.hp = hp;
979 	} else if (CE_state->dest_ring && CE_state->status_ring) {
980 		hal_get_sw_hptp(scn->hal_soc, CE_state->status_ring->srng_ctx,
981 				&tp, &hp);
982 		info->u.srng_info.status_tp = tp;
983 		info->u.srng_info.status_hp = hp;
984 		hal_get_sw_hptp(scn->hal_soc, CE_state->dest_ring->srng_ctx,
985 				&tp, &hp);
986 		info->u.srng_info.tp = tp;
987 		info->u.srng_info.hp = hp;
988 	}
989 
990 	return 0;
991 }
992 #endif
993 
994 static struct ce_ops ce_service_srng = {
995 	.ce_get_desc_size = ce_get_desc_size_srng,
996 	.ce_ring_setup = ce_ring_setup_srng,
997 	.ce_sendlist_send = ce_sendlist_send_srng,
998 	.ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_srng,
999 	.ce_revoke_recv_next = ce_revoke_recv_next_srng,
1000 	.ce_cancel_send_next = ce_cancel_send_next_srng,
1001 	.ce_recv_buf_enqueue = ce_recv_buf_enqueue_srng,
1002 	.ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_srng,
1003 	.ce_send_nolock = ce_send_nolock_srng,
1004 	.watermark_int = ce_check_int_watermark_srng,
1005 	.ce_completed_send_next_nolock = ce_completed_send_next_nolock_srng,
1006 	.ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_srng,
1007 	.ce_send_entries_done_nolock = ce_send_entries_done_nolock_srng,
1008 	.ce_prepare_shadow_register_v2_cfg =
1009 		ce_prepare_shadow_register_v2_cfg_srng,
1010 #ifdef HIF_CE_LOG_INFO
1011 	.ce_get_index_info =
1012 		ce_get_index_info_srng,
1013 #endif
1014 };
1015 
struct ce_ops *ce_services_srng(void)
1017 {
1018 	return &ce_service_srng;
1019 }
1020 qdf_export_symbol(ce_services_srng);
1021 
1022 void ce_service_srng_init(void)
1023 {
1024 	ce_service_register_module(CE_SVC_SRNG, &ce_services_srng);
1025 }
1026