xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_service_srng.c (revision f7586e623efa756e484a12a6c70ae6864eb1c1a2)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 #include "hif_io32.h"
20 #include "reg_struct.h"
21 #include "ce_api.h"
22 #include "ce_main.h"
23 #include "ce_internal.h"
24 #include "ce_reg.h"
25 #include "qdf_lock.h"
26 #include "regtable.h"
27 #include "hif_main.h"
28 #include "hif_debug.h"
29 #include "hal_api.h"
30 #include "pld_common.h"
31 #include "qdf_module.h"
32 #include "hif.h"
33 
34 /*
35  * Support for Copy Engine hardware, which is mainly used for
36  * communication between Host and Target over a PCIe interconnect.
37  */
38 
39 /*
40  * A single CopyEngine (CE) comprises two "rings":
41  *   a source ring
42  *   a destination ring
43  *
44  * Each ring consists of a number of descriptors which specify
45  * an address, length, and meta-data.
46  *
47  * Typically, one side of the PCIe interconnect (Host or Target)
48  * controls one ring and the other side controls the other ring.
49  * The source side chooses when to initiate a transfer and it
50  * chooses what to send (buffer address, length). The destination
51  * side keeps a supply of "anonymous receive buffers" available and
52  * it handles incoming data as it arrives (when the destination
53  * receives an interrupt).
54  *
55  * The sender may send a simple buffer (address/length) or it may
56  * send a small list of buffers.  When a small list is sent, hardware
57  * "gathers" these and they end up in a single destination buffer
58  * with a single interrupt.
59  *
60  * There are several "contexts" managed by this layer, perhaps more
61  * than strictly necessary. These are provided mainly for
62  * maximum flexibility and especially to facilitate a simpler HIF
63  * implementation. There are per-CopyEngine recv, send, and watermark
64  * contexts. These are supplied by the caller when a recv, send,
65  * or watermark handler is established and they are echoed back to
66  * the caller when the respective callbacks are invoked. There is
67  * also a per-transfer context supplied by the caller when a buffer
68  * (or sendlist) is sent and when a buffer is enqueued for recv.
69  * These per-transfer contexts are echoed back to the caller when
70  * the buffer is sent/received.
71  * A Toeplitz hash result (toeplitz_hash_result) may also be reported
 *  * with Target TX completions.
72  */
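/*
 * Illustrative host-side flow (a sketch only; real callers go through the
 * ce_ops dispatch table registered at the bottom of this file and the HIF
 * wrappers built on top of it, rather than calling these static helpers
 * directly):
 *
 *   ce_recv_buf_enqueue_srng(ce_hdl, rx_nbuf, rx_paddr);    post an RX buffer
 *   ce_send_nolock_srng(ce_hdl, tx_ctx, tx_paddr, nbytes,
 *                       transfer_id, flags, user_flags);    post a TX buffer
 *   ...on completion interrupts...
 *   ce_completed_send_next_nolock_srng(...);                reap TX completion
 *   ce_completed_recv_next_nolock_srng(...);                reap RX completion
 */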
73 
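/*
 * Split a DMA address into the low 32 bits and the upper 8 bits of the
 * 40-bit address carried in the CE SRNG descriptor fields.
 */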
74 #define CE_ADDR_COPY(desc, dma_addr) do {\
75 		(desc)->buffer_addr_lo = (uint32_t)((dma_addr) &\
76 							  0xFFFFFFFF);\
77 		(desc)->buffer_addr_hi =\
78 			(uint32_t)(((dma_addr) >> 32) & 0xFF);\
79 	} while (0)
80 
81 void hif_display_ctrl_traffic_pipes_state(struct hif_opaque_softc *hif_ctx)
82 {
83 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
84 	struct CE_state *CE_state;
85 	uint32_t hp = 0, tp = 0;
86 
87 	CE_state = scn->ce_id_to_state[2];
88 	hal_get_sw_hptp(scn->hal_soc,
89 			CE_state->status_ring->srng_ctx,
90 			&tp, &hp);
91 	hif_info_high("CE-2 Dest status ring current snapshot HP:%u TP:%u",
92 		      hp, tp);
93 
94 	hp = 0;
95 	tp = 0;
96 	CE_state = scn->ce_id_to_state[3];
97 	hal_get_sw_hptp(scn->hal_soc, CE_state->src_ring->srng_ctx, &tp, &hp);
98 	hif_info_high("CE-3 Source ring current snapshot HP:%u TP:%u", hp, tp);
99 }
100 
101 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
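/**
 * hif_record_ce_srng_desc_event() - record a CE descriptor event in the
 *                                   per-CE debug history
 * @scn: HIF context
 * @ce_id: copy engine id the event belongs to
 * @type: type of event being recorded
 * @descriptor: hardware descriptor to snapshot (may be NULL)
 * @memory: per-transfer virtual address or context associated with the event
 * @index: ring index at which the event occurred
 * @len: length of data to record when data logging is enabled
 * @hal_ring: HAL SRNG handle used to snapshot head/tail pointers (may be NULL)
 */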
102 void hif_record_ce_srng_desc_event(struct hif_softc *scn, int ce_id,
103 				   enum hif_ce_event_type type,
104 				   union ce_srng_desc *descriptor,
105 				   void *memory, int index,
106 				   int len, void *hal_ring)
107 {
108 	int record_index;
109 	struct hif_ce_desc_event *event;
110 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
111 	struct hif_ce_desc_event *hist_ev = NULL;
112 
113 	if (ce_id >= CE_COUNT_MAX)
114 		return;
115 
116 	hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id];
117 	if (!hist_ev)
118 		return;
119 
120 	if (!ce_hist->enable[ce_id])
121 		return;
122 
127 	record_index = get_next_record_index(
128 			&ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX);
129 
130 	event = &hist_ev[record_index];
131 
132 	hif_clear_ce_desc_debug_data(event);
133 
134 	event->type = type;
135 	event->time = qdf_get_log_timestamp();
136 	event->cpu_id = qdf_get_cpu();
137 
138 	if (descriptor)
139 		qdf_mem_copy(&event->descriptor, descriptor,
140 			     hal_get_entrysize_from_srng(hal_ring));
141 
142 	if (hal_ring)
143 		hal_get_sw_hptp(scn->hal_soc, hal_ring, &event->current_tp,
144 				&event->current_hp);
145 
146 	event->memory = memory;
147 	event->index = index;
148 
149 	if (event->type == HIF_CE_SRC_RING_BUFFER_POST)
150 		hif_ce_desc_record_rx_paddr(scn, event, memory);
151 
152 	if (ce_hist->data_enable[ce_id])
153 		hif_ce_desc_data_record(event, len);
154 
155 	hif_record_latest_evt(ce_hist, type, ce_id, event->time,
156 			      event->current_hp, event->current_tp);
157 }
158 #endif /* HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */
159 
160 static QDF_STATUS
161 ce_send_nolock_srng(struct CE_handle *copyeng,
162 			   void *per_transfer_context,
163 			   qdf_dma_addr_t buffer,
164 			   uint32_t nbytes,
165 			   uint32_t transfer_id,
166 			   uint32_t flags,
167 			   uint32_t user_flags)
168 {
169 	QDF_STATUS status;
170 	struct CE_state *CE_state = (struct CE_state *)copyeng;
171 	struct CE_ring_state *src_ring = CE_state->src_ring;
172 	unsigned int nentries_mask = src_ring->nentries_mask;
173 	unsigned int write_index = src_ring->write_index;
174 	uint64_t dma_addr = buffer;
175 	struct hif_softc *scn = CE_state->scn;
176 
177 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
178 		return QDF_STATUS_E_FAILURE;
179 	if (unlikely(hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx,
180 					false) <= 0)) {
181 		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
182 		Q_TARGET_ACCESS_END(scn);
183 		return QDF_STATUS_E_FAILURE;
184 	}
185 	{
186 		enum hif_ce_event_type event_type = HIF_CE_SRC_RING_BUFFER_POST;
187 		struct ce_srng_src_desc *src_desc;
188 
189 		if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
190 			Q_TARGET_ACCESS_END(scn);
191 			return QDF_STATUS_E_FAILURE;
192 		}
193 
194 		src_desc = hal_srng_src_get_next_reaped(scn->hal_soc,
195 				src_ring->srng_ctx);
196 		if (!src_desc) {
197 			Q_TARGET_ACCESS_END(scn);
198 			return QDF_STATUS_E_INVAL;
199 		}
200 
201 		/* Set the 40-bit source address: low 32, then upper 8 bits */
202 		src_desc->buffer_addr_lo =
203 			(uint32_t)(dma_addr & 0xFFFFFFFF);
204 		src_desc->buffer_addr_hi =
205 			(uint32_t)((dma_addr >> 32) & 0xFF);
206 
207 		src_desc->meta_data = transfer_id;
208 
209 		/*
210 		 * Set the swap bit if:
211 		 * typical sends on this CE are swapped (host is big-endian)
212 		 * and this send doesn't disable the swapping
213 		 * (data is not bytestream)
214 		 */
215 		src_desc->byte_swap =
216 			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
217 			  != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
218 		src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
219 		src_desc->nbytes = nbytes;
220 
221 		src_ring->per_transfer_context[write_index] =
222 			per_transfer_context;
223 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
224 
225 		hal_srng_access_end(scn->hal_soc, src_ring->srng_ctx);
226 
227 		/* src_ring->write_index hasn't been updated even though
228 		 * the register has already been written to.
229 		 */
230 		hif_record_ce_srng_desc_event(scn, CE_state->id, event_type,
231 					      (union ce_srng_desc *)src_desc,
232 					      per_transfer_context,
233 					      src_ring->write_index, nbytes,
234 					      src_ring->srng_ctx);
235 
236 		src_ring->write_index = write_index;
237 		status = QDF_STATUS_SUCCESS;
238 	}
239 	Q_TARGET_ACCESS_END(scn);
240 	return status;
241 }
242 
243 static QDF_STATUS
244 ce_sendlist_send_srng(struct CE_handle *copyeng,
245 		 void *per_transfer_context,
246 		 struct ce_sendlist *sendlist, unsigned int transfer_id)
247 {
248 	QDF_STATUS status = QDF_STATUS_E_NOMEM;
249 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
250 	struct CE_state *CE_state = (struct CE_state *)copyeng;
251 	struct CE_ring_state *src_ring = CE_state->src_ring;
252 	unsigned int num_items = sl->num_items;
253 	unsigned int sw_index;
254 	unsigned int write_index;
255 	struct hif_softc *scn = CE_state->scn;
256 
257 	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));
258 
259 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
260 	sw_index = src_ring->sw_index;
261 	write_index = src_ring->write_index;
262 
263 	if (hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx, false) >=
264 	    num_items) {
265 		struct ce_sendlist_item *item;
266 		int i;
267 
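		/* All items except the last are posted with CE_SEND_FLAG_GATHER
		 * so the hardware coalesces the whole sendlist into a single
		 * destination buffer; the final item closes the gather and
		 * carries the caller's per_transfer_context.
		 */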
268 		/* handle all but the last item uniformly */
269 		for (i = 0; i < num_items - 1; i++) {
270 			item = &sl->item[i];
271 			/* TBDXXX: Support extensible sendlist_types? */
272 			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
273 			status = ce_send_nolock_srng(copyeng,
274 					CE_SENDLIST_ITEM_CTXT,
275 				(qdf_dma_addr_t) item->data,
276 				item->u.nbytes, transfer_id,
277 				item->flags | CE_SEND_FLAG_GATHER,
278 				item->user_flags);
279 			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
280 		}
281 		/* provide valid context pointer for final item */
282 		item = &sl->item[i];
283 		/* TBDXXX: Support extensible sendlist_types? */
284 		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
285 		status = ce_send_nolock_srng(copyeng, per_transfer_context,
286 					(qdf_dma_addr_t) item->data,
287 					item->u.nbytes,
288 					transfer_id, item->flags,
289 					item->user_flags);
290 		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
291 		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
292 					QDF_NBUF_TX_PKT_CE);
293 		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
294 			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
295 			QDF_TRACE_DEFAULT_PDEV_ID,
296 			(uint8_t *)(((qdf_nbuf_t)per_transfer_context)->data),
297 			sizeof(((qdf_nbuf_t)per_transfer_context)->data), QDF_TX));
298 	} else {
299 		/*
300 		 * Probably not worth the additional complexity to support
301 		 * partial sends with continuation or notification.  We expect
302 		 * to use large rings and small sendlists. If we can't handle
303 		 * the entire request at once, punt it back to the caller.
304 		 */
305 	}
306 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
307 
308 	return status;
309 }
310 
311 #define SLOTS_PER_DATAPATH_TX 2
312 
313 #ifndef AH_NEED_TX_DATA_SWAP
314 #define AH_NEED_TX_DATA_SWAP 0
315 #endif
316 /**
317  * ce_recv_buf_enqueue_srng() - enqueue a recv buffer into a copy engine
318  * @copyeng: copy engine handle
319  * @per_recv_context: virtual address of the nbuf
320  * @buffer: physical address of the nbuf
321  *
322  * Return: QDF_STATUS_SUCCESS if the buffer is enqueued
323  */
324 static QDF_STATUS
325 ce_recv_buf_enqueue_srng(struct CE_handle *copyeng,
326 		    void *per_recv_context, qdf_dma_addr_t buffer)
327 {
328 	QDF_STATUS status;
329 	struct CE_state *CE_state = (struct CE_state *)copyeng;
330 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
331 	unsigned int nentries_mask = dest_ring->nentries_mask;
332 	unsigned int write_index;
333 	unsigned int sw_index;
334 	uint64_t dma_addr = buffer;
335 	struct hif_softc *scn = CE_state->scn;
336 	struct ce_srng_dest_desc *dest_desc = NULL;
337 
338 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
339 	write_index = dest_ring->write_index;
340 	sw_index = dest_ring->sw_index;
341 
342 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
343 		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
344 		return QDF_STATUS_E_IO;
345 	}
346 
347 	if (hal_srng_access_start(scn->hal_soc, dest_ring->srng_ctx)) {
348 		Q_TARGET_ACCESS_END(scn);
349 		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
350 		return QDF_STATUS_E_FAILURE;
351 	}
351 
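	/* The CE dest ring is produced by the host (RX buffers are posted
	 * into it), so the source-side HAL SRNG APIs are used here.
	 */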
352 	if ((hal_srng_src_num_avail(scn->hal_soc,
353 					dest_ring->srng_ctx, false) > 0)) {
354 		dest_desc = hal_srng_src_get_next(scn->hal_soc,
355 						  dest_ring->srng_ctx);
356 
357 		if (!dest_desc) {
358 			status = QDF_STATUS_E_FAILURE;
359 		} else {
360 
361 			CE_ADDR_COPY(dest_desc, dma_addr);
362 
363 			dest_ring->per_transfer_context[write_index] =
364 				per_recv_context;
365 
366 			/* Update Destination Ring Write Index */
367 			write_index = CE_RING_IDX_INCR(nentries_mask,
368 								write_index);
369 			status = QDF_STATUS_SUCCESS;
370 		}
371 	} else {
372 		dest_desc = NULL;
373 		status = QDF_STATUS_E_FAILURE;
374 	}
375 
376 	dest_ring->write_index = write_index;
377 	hal_srng_access_end(scn->hal_soc, dest_ring->srng_ctx);
378 	hif_record_ce_srng_desc_event(scn, CE_state->id,
379 				      HIF_CE_DEST_RING_BUFFER_POST,
380 				      (union ce_srng_desc *)dest_desc,
381 				      per_recv_context,
382 				      dest_ring->write_index, 0,
383 				      dest_ring->srng_ctx);
384 
385 	Q_TARGET_ACCESS_END(scn);
386 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
387 	return status;
388 }
389 
390 /*
391  * Guts of ce_recv_entries_done.
392  * The caller takes responsibility for any necessary locking.
393  */
394 static unsigned int
395 ce_recv_entries_done_nolock_srng(struct hif_softc *scn,
396 			    struct CE_state *CE_state)
397 {
398 	struct CE_ring_state *status_ring = CE_state->status_ring;
399 
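	/* RX completions are reported through the dest status ring, so the
	 * number of valid status entries is the number of completed receives
	 * ready to be reaped.
	 */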
400 	return hal_srng_dst_num_valid(scn->hal_soc,
401 				status_ring->srng_ctx, false);
402 }
403 
404 /*
405  * Guts of ce_send_entries_done.
406  * The caller takes responsibility for any necessary locking.
407  */
408 static unsigned int
409 ce_send_entries_done_nolock_srng(struct hif_softc *scn,
410 					struct CE_state *CE_state)
411 {
412 
413 	struct CE_ring_state *src_ring = CE_state->src_ring;
414 	int count = 0;
415 
416 	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx))
417 		return 0;
418 
419 	count = hal_srng_src_done_val(scn->hal_soc, src_ring->srng_ctx);
420 
421 	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);
422 
423 	return count;
424 }
425 
426 /*
427  * Guts of ce_completed_recv_next.
428  * The caller takes responsibility for any necessary locking.
429  */
430 static QDF_STATUS
431 ce_completed_recv_next_nolock_srng(struct CE_state *CE_state,
432 			      void **per_CE_contextp,
433 			      void **per_transfer_contextp,
434 			      qdf_dma_addr_t *bufferp,
435 			      unsigned int *nbytesp,
436 			      unsigned int *transfer_idp,
437 			      unsigned int *flagsp)
438 {
439 	QDF_STATUS status;
440 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
441 	struct CE_ring_state *status_ring = CE_state->status_ring;
442 	unsigned int nentries_mask = dest_ring->nentries_mask;
443 	unsigned int sw_index = dest_ring->sw_index;
444 	struct hif_softc *scn = CE_state->scn;
445 	struct ce_srng_dest_status_desc *dest_status = NULL;
446 	int nbytes;
447 	struct ce_srng_dest_status_desc dest_status_info;
448 
449 	if (hal_srng_access_start(scn->hal_soc, status_ring->srng_ctx)) {
450 		status = QDF_STATUS_E_FAILURE;
451 		goto done;
452 	}
453 
454 	dest_status = hal_srng_dst_peek(scn->hal_soc, status_ring->srng_ctx);
455 	if (!dest_status) {
456 		status = QDF_STATUS_E_FAILURE;
457 		hal_srng_access_end_reap(scn->hal_soc, status_ring->srng_ctx);
458 		goto done;
459 	}
460 
461 	/*
462 	 * By copying the dest_desc_info element to local memory, we
463 	 * avoid extra memory reads from non-cacheable memory.
464 	 */
465 	dest_status_info = *dest_status;
466 	nbytes = dest_status_info.nbytes;
467 	if (nbytes == 0) {
468 		uint32_t hp, tp;
469 
470 		/*
471 		 * This closes a relatively unusual race where the Host
472 		 * sees the updated DRRI before the update to the
473 		 * corresponding descriptor has completed. We treat this
474 		 * as a descriptor that is not yet done.
475 		 */
476 		hal_get_sw_hptp(scn->hal_soc, status_ring->srng_ctx,
477 				&tp, &hp);
478 		hif_info_rl("No data to reap, hp %d tp %d", hp, tp);
479 		status = QDF_STATUS_E_FAILURE;
480 		hal_srng_access_end_reap(scn->hal_soc, status_ring->srng_ctx);
481 		goto done;
482 	}
483 
484 	/*
485 	 * Move the tail pointer since nbytes is non-zero and
486 	 * this entry is processed.
487 	 */
488 	hal_srng_dst_get_next(scn->hal_soc, status_ring->srng_ctx);
489 
490 	dest_status->nbytes = 0;
491 
492 	*nbytesp = nbytes;
493 	*transfer_idp = dest_status_info.meta_data;
494 	*flagsp = (dest_status_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;
495 
496 	if (per_CE_contextp)
497 		*per_CE_contextp = CE_state->recv_context;
498 
499 	/* NOTE: sw_index is more like a read_index in this context. It has a
500 	 * one-to-one mapping with the status ring.
501 	 * Get the per transfer context from dest_ring.
502 	 */
503 	if (per_transfer_contextp)
504 		*per_transfer_contextp =
505 			dest_ring->per_transfer_context[sw_index];
506 
507 	dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */
508 
509 	/* Update sw_index */
510 	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
511 	dest_ring->sw_index = sw_index;
512 	status = QDF_STATUS_SUCCESS;
513 
514 	hal_srng_access_end(scn->hal_soc, status_ring->srng_ctx);
515 	hif_record_ce_srng_desc_event(scn, CE_state->id,
516 				      HIF_CE_DEST_RING_BUFFER_REAP,
517 				      NULL,
518 				      dest_ring->
519 				      per_transfer_context[sw_index],
520 				      dest_ring->sw_index, nbytes,
521 				      dest_ring->srng_ctx);
522 
523 done:
524 	hif_record_ce_srng_desc_event(scn, CE_state->id,
525 				      HIF_CE_DEST_STATUS_RING_REAP,
526 				      (union ce_srng_desc *)dest_status,
527 				      NULL,
528 				      -1, 0,
529 				      status_ring->srng_ctx);
530 
531 	return status;
532 }
533 
534 static QDF_STATUS
535 ce_revoke_recv_next_srng(struct CE_handle *copyeng,
536 		    void **per_CE_contextp,
537 		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
538 {
539 	struct CE_state *CE_state = (struct CE_state *)copyeng;
540 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
541 	unsigned int sw_index;
542 
543 	if (!dest_ring)
544 		return QDF_STATUS_E_FAILURE;
545 
546 	sw_index = dest_ring->sw_index;
547 
548 	if (per_CE_contextp)
549 		*per_CE_contextp = CE_state->recv_context;
550 
551 	/* NOTE: sw_index is more like a read_index in this context. It has a
552 	 * one-to-one mapping with the status ring.
553 	 * Get the per transfer context from dest_ring.
554 	 */
555 	if (per_transfer_contextp)
556 		*per_transfer_contextp =
557 			dest_ring->per_transfer_context[sw_index];
558 
559 	if (!dest_ring->per_transfer_context[sw_index])
560 		return QDF_STATUS_E_FAILURE;
561 
562 	/* provide end condition */
563 	dest_ring->per_transfer_context[sw_index] = NULL;
564 
565 	/* Update sw_index */
566 	sw_index = CE_RING_IDX_INCR(dest_ring->nentries_mask, sw_index);
567 	dest_ring->sw_index = sw_index;
568 	return QDF_STATUS_SUCCESS;
569 }
570 
571 /*
572  * Guts of ce_completed_send_next.
573  * The caller takes responsibility for any necessary locking.
574  */
575 static QDF_STATUS
576 ce_completed_send_next_nolock_srng(struct CE_state *CE_state,
577 			      void **per_CE_contextp,
578 			      void **per_transfer_contextp,
579 			      qdf_dma_addr_t *bufferp,
580 			      unsigned int *nbytesp,
581 			      unsigned int *transfer_idp,
582 			      unsigned int *sw_idx,
583 			      unsigned int *hw_idx,
584 			      uint32_t *toeplitz_hash_result)
585 {
586 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
587 	struct CE_ring_state *src_ring = CE_state->src_ring;
588 	unsigned int nentries_mask = src_ring->nentries_mask;
589 	unsigned int sw_index = src_ring->sw_index;
590 	unsigned int swi = src_ring->sw_index;
591 	struct hif_softc *scn = CE_state->scn;
592 	struct ce_srng_src_desc *src_desc;
593 
594 	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
595 		status = QDF_STATUS_E_FAILURE;
596 		return status;
597 	}
598 
599 	src_desc = hal_srng_src_reap_next(scn->hal_soc, src_ring->srng_ctx);
600 	if (src_desc) {
601 		hif_record_ce_srng_desc_event(scn, CE_state->id,
602 					      HIF_TX_DESC_COMPLETION,
603 					      (union ce_srng_desc *)src_desc,
604 					      src_ring->
605 					      per_transfer_context[swi],
606 					      swi, src_desc->nbytes,
607 					      src_ring->srng_ctx);
608 
609 		/* Return data from completed source descriptor */
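		/* Reassemble the 40-bit DMA address from buffer_addr_lo and
		 * the 8 valid bits of buffer_addr_hi.
		 */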
610 		*bufferp = (qdf_dma_addr_t)
611 			(((uint64_t)(src_desc)->buffer_addr_lo +
612 			  ((uint64_t)((src_desc)->buffer_addr_hi &
613 				  0xFF) << 32)));
614 		*nbytesp = src_desc->nbytes;
615 		*transfer_idp = src_desc->meta_data;
616 		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/
617 
618 		if (per_CE_contextp)
619 			*per_CE_contextp = CE_state->send_context;
620 
621 		/* sw_index is used more like read index */
622 		if (per_transfer_contextp)
623 			*per_transfer_contextp =
624 				src_ring->per_transfer_context[sw_index];
625 
626 		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */
627 
628 		/* Update sw_index */
629 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
630 		src_ring->sw_index = sw_index;
631 		status = QDF_STATUS_SUCCESS;
632 	}
633 	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);
634 
635 	return status;
636 }
637 
638 /* NB: Modelled after ce_completed_send_next */
639 static QDF_STATUS
640 ce_cancel_send_next_srng(struct CE_handle *copyeng,
641 		void **per_CE_contextp,
642 		void **per_transfer_contextp,
643 		qdf_dma_addr_t *bufferp,
644 		unsigned int *nbytesp,
645 		unsigned int *transfer_idp,
646 		uint32_t *toeplitz_hash_result)
647 {
648 	struct CE_state *CE_state;
649 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
650 	struct CE_ring_state *src_ring;
651 	unsigned int nentries_mask;
652 	unsigned int sw_index;
653 	struct hif_softc *scn;
654 	struct ce_srng_src_desc *src_desc;
655 
656 	CE_state = (struct CE_state *)copyeng;
657 	src_ring = CE_state->src_ring;
658 	if (!src_ring)
659 		return QDF_STATUS_E_FAILURE;
660 
661 	nentries_mask = src_ring->nentries_mask;
662 	sw_index = src_ring->sw_index;
663 	scn = CE_state->scn;
664 
665 	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
666 		status = QDF_STATUS_E_FAILURE;
667 		return status;
668 	}
669 
670 	src_desc = hal_srng_src_pending_reap_next(scn->hal_soc,
671 			src_ring->srng_ctx);
672 	if (src_desc) {
673 		/* Return data from completed source descriptor */
674 		*bufferp = (qdf_dma_addr_t)
675 			(((uint64_t)(src_desc)->buffer_addr_lo +
676 			  ((uint64_t)((src_desc)->buffer_addr_hi &
677 				  0xFF) << 32)));
678 		*nbytesp = src_desc->nbytes;
679 		*transfer_idp = src_desc->meta_data;
680 		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/
681 
682 		if (per_CE_contextp)
683 			*per_CE_contextp = CE_state->send_context;
684 
685 		/* sw_index is used more like read index */
686 		if (per_transfer_contextp)
687 			*per_transfer_contextp =
688 				src_ring->per_transfer_context[sw_index];
689 
690 		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */
691 
692 		/* Update sw_index */
693 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
694 		src_ring->sw_index = sw_index;
695 		status = QDF_STATUS_SUCCESS;
696 	}
697 	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);
698 
699 	return status;
700 }
701 
702 /*
703  * Adjust interrupts for the copy complete handler.
704  * If it's needed for either send or recv, then unmask
705  * this interrupt; otherwise, mask it.
706  *
707  * Called with target_lock held.
708  */
709 static void
710 ce_per_engine_handler_adjust_srng(struct CE_state *CE_state,
711 			     int disable_copy_compl_intr)
712 {
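	/* No-op for SRNG based copy engines: interrupt behaviour is driven by
	 * the per-ring MSI and threshold parameters programmed in
	 * ce_ring_setup_srng(), so there is nothing to adjust here.
	 */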
713 }
714 
715 static bool ce_check_int_watermark_srng(struct CE_state *CE_state,
716 					unsigned int *flags)
717 {
718 	/*TODO*/
719 	return false;
720 }
721 
722 static uint32_t ce_get_desc_size_srng(uint8_t ring_type)
723 {
724 	switch (ring_type) {
725 	case CE_RING_SRC:
726 		return sizeof(struct ce_srng_src_desc);
727 	case CE_RING_DEST:
728 		return sizeof(struct ce_srng_dest_desc);
729 	case CE_RING_STATUS:
730 		return sizeof(struct ce_srng_dest_status_desc);
731 	default:
732 		return 0;
733 	}
734 	return 0;
735 }
736 
737 static void ce_srng_msi_ring_params_setup(struct hif_softc *scn, uint32_t ce_id,
738 			      struct hal_srng_params *ring_params)
739 {
740 	uint32_t addr_low;
741 	uint32_t addr_high;
742 	uint32_t msi_data_start;
743 	uint32_t msi_data_count;
744 	uint32_t msi_irq_start;
745 	int ret;
746 	int irq_id;
747 
748 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
749 					  &msi_data_count, &msi_data_start,
750 					  &msi_irq_start);
751 
752 	/* msi config not found */
753 	if (ret)
754 		return;
755 
756 	irq_id = scn->int_assignment->msi_idx[ce_id];
757 	pld_get_msi_address(scn->qdf_dev->dev, &addr_low, &addr_high);
758 
759 	ring_params->msi_addr = addr_low;
760 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
761 	ring_params->msi_data =  irq_id + msi_data_start;
762 	ring_params->flags |= HAL_SRNG_MSI_INTR;
763 
764 	hif_debug("ce_id %d irq_id %d, msi_addr %pK, msi_data %d", ce_id,
765 		  irq_id, (void *)ring_params->msi_addr, ring_params->msi_data);
766 }
767 
768 static void ce_srng_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
769 				   struct CE_ring_state *src_ring,
770 				   struct CE_attr *attr)
771 {
772 	struct hal_srng_params ring_params = {0};
773 
774 	hif_debug("ce_id: %d", ce_id);
775 
776 	ring_params.ring_base_paddr = src_ring->base_addr_CE_space;
777 	ring_params.ring_base_vaddr = src_ring->base_addr_owner_space;
778 	ring_params.num_entries = src_ring->nentries;
779 	/*
780 	 * The minimum increment for the timer is 8us
781 	 * A default value of 0 disables the timer
782 	 * A valid default value caused continuous interrupts to
783 	 * fire with MSI enabled. Need to revisit usage of the timer
784 	 */
785 
786 	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
787 		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);
788 
789 		ring_params.intr_timer_thres_us = 0;
790 		ring_params.intr_batch_cntr_thres_entries = 1;
791 		ring_params.prefetch_timer = HAL_SRNG_PREFETCH_TIMER;
792 	}
793 
794 	src_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_SRC, ce_id, 0,
795 					    &ring_params, 0);
796 }
797 
798 #ifdef WLAN_WAR_CE_DISABLE_SRNG_TIMER_IRQ
799 static void
800 ce_srng_initialize_dest_ring_thresh(struct CE_ring_state *dest_ring,
801 				    struct hal_srng_params *ring_params)
802 {
803 	ring_params->low_threshold = dest_ring->nentries >> 3;
804 	ring_params->intr_timer_thres_us = 0;
805 	ring_params->intr_batch_cntr_thres_entries = 1;
806 	ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
807 }
808 #else
809 static void
810 ce_srng_initialize_dest_ring_thresh(struct CE_ring_state *dest_ring,
811 				    struct hal_srng_params *ring_params)
812 {
813 	ring_params->low_threshold = dest_ring->nentries >> 3;
814 	ring_params->intr_timer_thres_us = 100000;
815 	ring_params->intr_batch_cntr_thres_entries = 0;
816 	ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
817 }
818 #endif
819 
820 #ifdef WLAN_DISABLE_STATUS_RING_TIMER_WAR
821 static inline bool ce_is_status_ring_timer_thresh_war_needed(void)
822 {
823 	return false;
824 }
825 #else
826 static inline bool ce_is_status_ring_timer_thresh_war_needed(void)
827 {
828 	return true;
829 }
830 #endif
831 
832 /**
833  * ce_srng_initialize_dest_timer_interrupt_war() - workaround initialization
834  * @dest_ring: ring being initialized
835  * @ring_params: ring parameters being initialized
836  *
837  * For Napier & Hawkeye v1, the status ring timer interrupts do not work.
838  * As a workaround, the host configures the destination rings to be a proxy
839  * for work needing to be done.
840  *
841  * The interrupts are set up such that if the destination ring is less than
842  * fully posted, there is likely undone work for the status ring that the
843  * host should process.
844  *
845  * There is a timing bug in srng based copy engines such that a fully posted
846  * srng based copy engine has 2 empty entries instead of just one.  The copy
847  * engine data structures work with 1 empty entry, but the software frequently
848  * fails to post the last entry due to the race condition.
849  */
850 static void ce_srng_initialize_dest_timer_interrupt_war(
851 					struct CE_ring_state *dest_ring,
852 					struct hal_srng_params *ring_params)
853 {
854 	int num_buffers_when_fully_posted = dest_ring->nentries - 2;
855 
856 	ring_params->low_threshold = num_buffers_when_fully_posted - 1;
857 	ring_params->intr_timer_thres_us = 1024;
858 	ring_params->intr_batch_cntr_thres_entries = 0;
859 	ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
860 }
861 
862 static void ce_srng_dest_ring_setup(struct hif_softc *scn,
863 				    uint32_t ce_id,
864 				    struct CE_ring_state *dest_ring,
865 				    struct CE_attr *attr)
866 {
867 	struct hal_srng_params ring_params = {0};
868 
869 	hif_debug("ce_id: %d", ce_id);
870 
871 	ring_params.ring_base_paddr = dest_ring->base_addr_CE_space;
872 	ring_params.ring_base_vaddr = dest_ring->base_addr_owner_space;
873 	ring_params.num_entries = dest_ring->nentries;
874 	ring_params.max_buffer_length = attr->src_sz_max;
875 
876 	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
877 		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);
878 		if (ce_is_status_ring_timer_thresh_war_needed()) {
879 			ce_srng_initialize_dest_timer_interrupt_war(
880 					dest_ring, &ring_params);
881 		} else {
882 			/* normal behavior for future chips */
883 			ce_srng_initialize_dest_ring_thresh(dest_ring,
884 							    &ring_params);
885 		}
886 		ring_params.prefetch_timer = HAL_SRNG_PREFETCH_TIMER;
887 	}
888 
889 	/* Dest ring is host-producer, so it is set up like a source ring */
890 	dest_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST, ce_id, 0,
891 					     &ring_params, 0);
892 }
893 
894 #ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
895 /**
896  * ce_status_ring_config_int_threshold() - configure ce status ring interrupt
897  *                                         thresholds
898  * @scn: hif handle
899  * @ring_params: ce srng params
900  *
901  * Return: None
902  */
903 static inline
904 void ce_status_ring_config_int_threshold(struct hif_softc *scn,
905 					 struct hal_srng_params *ring_params)
906 {
907 	ring_params->intr_timer_thres_us =
908 			scn->ini_cfg.ce_status_ring_timer_threshold;
909 	ring_params->intr_batch_cntr_thres_entries =
910 			scn->ini_cfg.ce_status_ring_batch_count_threshold;
911 }
912 #else
913 static inline
914 void ce_status_ring_config_int_threshold(struct hif_softc *scn,
915 					 struct hal_srng_params *ring_params)
916 {
917 	ring_params->intr_timer_thres_us = 0x1000;
918 	ring_params->intr_batch_cntr_thres_entries = 0x1;
919 }
920 #endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */
921 
922 static void ce_srng_status_ring_setup(struct hif_softc *scn, uint32_t ce_id,
923 				struct CE_ring_state *status_ring,
924 				struct CE_attr *attr)
925 {
926 	struct hal_srng_params ring_params = {0};
927 
928 	hif_debug("ce_id: %d", ce_id);
929 
930 	ring_params.ring_base_paddr = status_ring->base_addr_CE_space;
931 	ring_params.ring_base_vaddr = status_ring->base_addr_owner_space;
932 	ring_params.num_entries = status_ring->nentries;
933 
934 	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
935 		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);
936 		ce_status_ring_config_int_threshold(scn, &ring_params);
937 	}
938 
939 	status_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST_STATUS,
940 					       ce_id, 0, &ring_params, 0);
941 }
942 
943 static int ce_ring_setup_srng(struct hif_softc *scn, uint8_t ring_type,
944 		uint32_t ce_id, struct CE_ring_state *ring,
945 		struct CE_attr *attr)
946 {
947 	switch (ring_type) {
948 	case CE_RING_SRC:
949 		ce_srng_src_ring_setup(scn, ce_id, ring, attr);
950 		break;
951 	case CE_RING_DEST:
952 		ce_srng_dest_ring_setup(scn, ce_id, ring, attr);
953 		break;
954 	case CE_RING_STATUS:
955 		ce_srng_status_ring_setup(scn, ce_id, ring, attr);
956 		break;
957 	default:
958 		qdf_assert(0);
959 		break;
960 	}
961 
962 	return 0;
963 }
964 
965 static void ce_ring_cleanup_srng(struct hif_softc *scn,
966 				 struct CE_state *CE_state,
967 				 uint8_t ring_type)
968 {
969 	hal_ring_handle_t hal_srng = NULL;
970 
971 	switch (ring_type) {
972 	case CE_RING_SRC:
973 		hal_srng = (hal_ring_handle_t)CE_state->src_ring->srng_ctx;
974 	break;
975 	case CE_RING_DEST:
976 		hal_srng = (hal_ring_handle_t)CE_state->dest_ring->srng_ctx;
977 	break;
978 	case CE_RING_STATUS:
979 		hal_srng = (hal_ring_handle_t)CE_state->status_ring->srng_ctx;
980 	break;
981 	}
982 
983 	if (hal_srng)
984 		hal_srng_cleanup(scn->hal_soc, hal_srng);
985 }
986 
987 static void ce_construct_shadow_config_srng(struct hif_softc *scn)
988 {
989 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
990 	int ce_id;
991 
992 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
993 		if (hif_state->host_ce_config[ce_id].src_nentries)
994 			hal_set_one_shadow_config(scn->hal_soc,
995 						  CE_SRC, ce_id);
996 
997 		if (hif_state->host_ce_config[ce_id].dest_nentries) {
998 			hal_set_one_shadow_config(scn->hal_soc,
999 						  CE_DST, ce_id);
1000 
1001 			hal_set_one_shadow_config(scn->hal_soc,
1002 						  CE_DST_STATUS, ce_id);
1003 		}
1004 	}
1005 }
1006 
1007 static void ce_prepare_shadow_register_v2_cfg_srng(struct hif_softc *scn,
1008 		struct pld_shadow_reg_v2_cfg **shadow_config,
1009 		int *num_shadow_registers_configured)
1010 {
1011 	if (!scn->hal_soc) {
1012 		hif_err("hal not initialized: not initializing shadow config");
1013 		return;
1014 	}
1015 
1016 	hal_get_shadow_config(scn->hal_soc, shadow_config,
1017 			      num_shadow_registers_configured);
1018 
1019 	if (*num_shadow_registers_configured != 0) {
1020 		hif_err("hal shadow register configuration already constructed");
1021 
1022 		/* return with original configuration*/
1023 		return;
1024 	}
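	/* Build the shadow register map: generic SRNG shadow registers first,
	 * then one shadow register per CE ring; the map is then programmed and
	 * the updated configuration returned to the caller.
	 */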
1025 	hal_construct_srng_shadow_regs(scn->hal_soc);
1026 	ce_construct_shadow_config_srng(scn);
1027 	hal_set_shadow_regs(scn->hal_soc);
1028 	hal_construct_shadow_regs(scn->hal_soc);
1029 	/* get updated configuration */
1030 	hal_get_shadow_config(scn->hal_soc, shadow_config,
1031 			      num_shadow_registers_configured);
1032 }
1033 
1034 #ifdef CONFIG_SHADOW_V3
1035 static void ce_prepare_shadow_register_v3_cfg_srng(struct hif_softc *scn,
1036 		struct pld_shadow_reg_v3_cfg **shadow_config,
1037 		int *num_shadow_registers_configured)
1038 {
1039 	if (!scn->hal_soc) {
1040 		hif_err("hal not initialized: not initializing shadow config");
1041 		return;
1042 	}
1043 
1044 	hal_get_shadow_v3_config(scn->hal_soc, shadow_config,
1045 				 num_shadow_registers_configured);
1046 
1047 	if (*num_shadow_registers_configured != 0) {
1048 		hif_err("hal shadow register configuration already constructed");
1049 
1050 		/* return with original configuration*/
1051 		return;
1052 	}
1053 	hal_construct_srng_shadow_regs(scn->hal_soc);
1054 	ce_construct_shadow_config_srng(scn);
1055 	hal_set_shadow_regs(scn->hal_soc);
1056 	hal_construct_shadow_regs(scn->hal_soc);
1057 	/* get updated configuration */
1058 	hal_get_shadow_v3_config(scn->hal_soc, shadow_config,
1059 				 num_shadow_registers_configured);
1060 }
1061 #endif
1062 
1063 #ifdef HIF_CE_LOG_INFO
1064 /**
1065  * ce_get_index_info_srng(): Get CE index info
1066  * @scn: HIF Context
1067  * @ce_state: CE opaque handle
1068  * @info: CE info
1069  *
1070  * Return: 0 for success and non zero for failure
1071  */
1072 static
1073 int ce_get_index_info_srng(struct hif_softc *scn, void *ce_state,
1074 			   struct ce_index *info)
1075 {
1076 	struct CE_state *CE_state = (struct CE_state *)ce_state;
1077 	uint32_t tp, hp;
1078 
1079 	info->id = CE_state->id;
1080 	if (CE_state->src_ring) {
1081 		hal_get_sw_hptp(scn->hal_soc, CE_state->src_ring->srng_ctx,
1082 				&tp, &hp);
1083 		info->u.srng_info.tp = tp;
1084 		info->u.srng_info.hp = hp;
1085 	} else if (CE_state->dest_ring && CE_state->status_ring) {
1086 		hal_get_sw_hptp(scn->hal_soc, CE_state->status_ring->srng_ctx,
1087 				&tp, &hp);
1088 		info->u.srng_info.status_tp = tp;
1089 		info->u.srng_info.status_hp = hp;
1090 		hal_get_sw_hptp(scn->hal_soc, CE_state->dest_ring->srng_ctx,
1091 				&tp, &hp);
1092 		info->u.srng_info.tp = tp;
1093 		info->u.srng_info.hp = hp;
1094 	}
1095 
1096 	return 0;
1097 }
1098 #endif
1099 
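/*
 * Dispatch table of SRNG specific CE operations. It is registered with the
 * CE service layer by ce_service_srng_init() and selected at runtime for
 * targets whose copy engines are implemented on top of SRNG rings.
 */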
1100 static struct ce_ops ce_service_srng = {
1101 	.ce_get_desc_size = ce_get_desc_size_srng,
1102 	.ce_ring_setup = ce_ring_setup_srng,
1103 	.ce_srng_cleanup = ce_ring_cleanup_srng,
1104 	.ce_sendlist_send = ce_sendlist_send_srng,
1105 	.ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_srng,
1106 	.ce_revoke_recv_next = ce_revoke_recv_next_srng,
1107 	.ce_cancel_send_next = ce_cancel_send_next_srng,
1108 	.ce_recv_buf_enqueue = ce_recv_buf_enqueue_srng,
1109 	.ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_srng,
1110 	.ce_send_nolock = ce_send_nolock_srng,
1111 	.watermark_int = ce_check_int_watermark_srng,
1112 	.ce_completed_send_next_nolock = ce_completed_send_next_nolock_srng,
1113 	.ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_srng,
1114 	.ce_send_entries_done_nolock = ce_send_entries_done_nolock_srng,
1115 	.ce_prepare_shadow_register_v2_cfg =
1116 		ce_prepare_shadow_register_v2_cfg_srng,
1117 #ifdef CONFIG_SHADOW_V3
1118 	.ce_prepare_shadow_register_v3_cfg =
1119 		ce_prepare_shadow_register_v3_cfg_srng,
1120 #endif
1121 #ifdef HIF_CE_LOG_INFO
1122 	.ce_get_index_info =
1123 		ce_get_index_info_srng,
1124 #endif
1125 };
1126 
1127 struct ce_ops *ce_services_srng(void)
1128 {
1129 	return &ce_service_srng;
1130 }
1131 qdf_export_symbol(ce_services_srng);
1132 
1133 void ce_service_srng_init(void)
1134 {
1135 	ce_service_register_module(CE_SVC_SRNG, &ce_services_srng);
1136 }
1137