xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_service_legacy.c (revision 0626a4da6c07f30da06dd6747e8cc290a60371d8)
1 /*
2  * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "ce_api.h"
20 #include "ce_internal.h"
21 #include "ce_main.h"
22 #include "ce_reg.h"
23 #include "hif.h"
24 #include "hif_debug.h"
25 #include "hif_io32.h"
26 #include "qdf_lock.h"
27 #include "hif_main.h"
28 #include "hif_napi.h"
29 #include "qdf_module.h"
30 #include "regtable.h"
31 
32 /*
33  * Support for Copy Engine hardware, which is mainly used for
34  * communication between Host and Target over a PCIe interconnect.
35  */
36 
37 /*
38  * A single CopyEngine (CE) comprises two "rings":
39  *   a source ring
40  *   a destination ring
41  *
42  * Each ring consists of a number of descriptors which specify
43  * an address, length, and meta-data.
44  *
45  * Typically, one side of the PCIe interconnect (Host or Target)
46  * controls one ring and the other side controls the other ring.
47  * The source side chooses when to initiate a transfer and it
48  * chooses what to send (buffer address, length). The destination
49  * side keeps a supply of "anonymous receive buffers" available and
50  * it handles incoming data as it arrives (when the destination
51  * receives an interrupt).
52  *
53  * The sender may send a simple buffer (address/length) or it may
54  * send a small list of buffers.  When a small list is sent, hardware
55  * "gathers" these and they end up in a single destination buffer
56  * with a single interrupt.
57  *
58  * There are several "contexts" managed by this layer -- more, it
59  * may seem, than should be needed. These are provided mainly for
60  * maximum flexibility and especially to facilitate a simpler HIF
61  * implementation. There are per-CopyEngine recv, send, and watermark
62  * contexts. These are supplied by the caller when a recv, send,
63  * or watermark handler is established and they are echoed back to
64  * the caller when the respective callbacks are invoked. There is
65  * also a per-transfer context supplied by the caller when a buffer
66  * (or sendlist) is sent and when a buffer is enqueued for recv.
67  * These per-transfer contexts are echoed back to the caller when
68  * the buffer is sent/received.
69  * Target TX hash result: toeplitz_hash_result
70  */
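
/*
 * Illustrative caller flow (a hedged sketch, not part of this file): the
 * entry points below are the locked wrappers declared in ce_api.h, whose
 * parameters mirror the *_legacy implementations further down; consult
 * that header for the authoritative prototypes.
 *
 *   // Post an anonymous receive buffer; the nbuf pointer is the
 *   // per-transfer context echoed back on receive completion.
 *   ce_recv_buf_enqueue(rx_copyeng, (void *)rx_nbuf, rx_paddr);
 *
 *   // Send one buffer with a caller-chosen transfer_id.
 *   ce_send(tx_copyeng, (void *)tx_nbuf, tx_paddr, nbytes,
 *           transfer_id, flags, user_flags);
 *
 *   // On the send-complete interrupt the same tx_nbuf comes back as the
 *   // per-transfer context via ce_completed_send_next().
 */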
71 
72 /* NB: Modeled after ce_completed_send_next */
73 /* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
74 #define CE_WM_SHFT 1
75 
76 #ifdef WLAN_FEATURE_FASTPATH
77 /**
78  * ce_is_fastpath_enabled() - returns true if fastpath mode is enabled
79  * @scn: Handle to HIF context
80  *
81  * Return: true if fastpath is enabled else false.
82  */
83 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
84 {
85 	return scn->fastpath_mode_on;
86 }
87 #else
88 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
89 {
90 	return false;
91 }
92 #endif /* WLAN_FEATURE_FASTPATH */
93 
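/**
 * ce_send_nolock_legacy() - post a single source-ring descriptor
 * @copyeng: copy engine handle
 * @per_transfer_context: caller context echoed back on send completion
 * @buffer: DMA address of the payload
 * @nbytes: number of bytes to send
 * @transfer_id: caller-chosen id stored in the descriptor meta_data
 * @flags: CE_SEND_FLAG_* bits (e.g. gather, swap disable)
 * @user_flags: descriptor user flags (consumed on QCA_WIFI_3_0 targets)
 *
 * Caller is responsible for serialization (ce_index_lock), hence the
 * _nolock suffix.
 *
 * Return: QDF_STATUS_SUCCESS if the descriptor was posted,
 * QDF_STATUS_E_FAILURE if the ring is full or target access fails.
 */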
94 static int
95 ce_send_nolock_legacy(struct CE_handle *copyeng,
96 		      void *per_transfer_context,
97 		      qdf_dma_addr_t buffer,
98 		      uint32_t nbytes,
99 		      uint32_t transfer_id,
100 		      uint32_t flags,
101 		      uint32_t user_flags)
102 {
103 	int status;
104 	struct CE_state *CE_state = (struct CE_state *)copyeng;
105 	struct CE_ring_state *src_ring = CE_state->src_ring;
106 	uint32_t ctrl_addr = CE_state->ctrl_addr;
107 	unsigned int nentries_mask = src_ring->nentries_mask;
108 	unsigned int sw_index = src_ring->sw_index;
109 	unsigned int write_index = src_ring->write_index;
110 	uint64_t dma_addr = buffer;
111 	struct hif_softc *scn = CE_state->scn;
112 
113 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
114 		return QDF_STATUS_E_FAILURE;
115 	if (unlikely(CE_RING_DELTA(nentries_mask,
116 				   write_index, sw_index - 1) <= 0)) {
117 		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
118 		Q_TARGET_ACCESS_END(scn);
119 		return QDF_STATUS_E_FAILURE;
120 	}
121 	{
122 		enum hif_ce_event_type event_type;
123 		struct CE_src_desc *src_ring_base =
124 			(struct CE_src_desc *)src_ring->base_addr_owner_space;
125 		struct CE_src_desc *shadow_base =
126 			(struct CE_src_desc *)src_ring->shadow_base;
127 		struct CE_src_desc *src_desc =
128 			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
129 		struct CE_src_desc *shadow_src_desc =
130 			CE_SRC_RING_TO_DESC(shadow_base, write_index);
131 
132 		/* Update the low 32 bits of the source descriptor address */
133 		shadow_src_desc->buffer_addr =
134 			(uint32_t)(dma_addr & 0xFFFFFFFF);
135 #ifdef QCA_WIFI_3_0
136 		shadow_src_desc->buffer_addr_hi =
137 			(uint32_t)((dma_addr >> 32) & 0x1F);
138 		user_flags |= shadow_src_desc->buffer_addr_hi;
139 		memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
140 		       sizeof(uint32_t));
141 #endif
142 		shadow_src_desc->target_int_disable = 0;
143 		shadow_src_desc->host_int_disable = 0;
144 
145 		shadow_src_desc->meta_data = transfer_id;
146 
147 		/*
148 		 * Set the swap bit if:
149 		 * typical sends on this CE are swapped (host is big-endian)
150 		 * and this send doesn't disable the swapping
151 		 * (data is not bytestream)
152 		 */
153 		shadow_src_desc->byte_swap =
154 			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
155 			 != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
156 		shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
157 		shadow_src_desc->nbytes = nbytes;
158 		ce_validate_nbytes(nbytes, CE_state);
159 
160 		*src_desc = *shadow_src_desc;
161 
162 		src_ring->per_transfer_context[write_index] =
163 			per_transfer_context;
164 
165 		/* Update Source Ring Write Index */
166 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
167 
168 		/* WORKAROUND */
169 		if (shadow_src_desc->gather) {
170 			event_type = HIF_TX_GATHER_DESC_POST;
171 		} else if (qdf_unlikely(CE_state->state != CE_RUNNING)) {
172 			event_type = HIF_TX_DESC_SOFTWARE_POST;
173 			CE_state->state = CE_PENDING;
174 		} else {
175 			event_type = HIF_TX_DESC_POST;
176 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
177 						      write_index);
178 		}
179 
180 		/* src_ring->write_index hasn't been updated yet even though
181 		 * the register has already been written to.
182 		 */
183 		hif_record_ce_desc_event(scn, CE_state->id, event_type,
184 			(union ce_desc *)shadow_src_desc, per_transfer_context,
185 			src_ring->write_index, nbytes);
186 
187 		src_ring->write_index = write_index;
188 		status = QDF_STATUS_SUCCESS;
189 	}
190 	Q_TARGET_ACCESS_END(scn);
191 	return status;
192 }
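
/*
 * Ring-occupancy arithmetic used above, for reference.  Assuming the
 * usual power-of-two definition of CE_RING_DELTA in ce_internal.h,
 * i.e. (toidx - fromidx) & nentries_mask:
 *
 *   nentries = 8, nentries_mask = 7, sw_index = 2, write_index = 5
 *   CE_RING_DELTA(7, 5, 2 - 1) = (1 - 5) & 7 = 4
 *
 * Three descriptors (indices 2..4) are still in flight, one slot is kept
 * empty so that write_index == sw_index unambiguously means "ring empty",
 * and the remaining four slots are free for new sends.
 */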
193 
194 static int
195 ce_sendlist_send_legacy(struct CE_handle *copyeng,
196 			void *per_transfer_context,
197 			struct ce_sendlist *sendlist, unsigned int transfer_id)
198 {
199 	int status = -ENOMEM;
200 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
201 	struct CE_state *CE_state = (struct CE_state *)copyeng;
202 	struct CE_ring_state *src_ring = CE_state->src_ring;
203 	unsigned int nentries_mask = src_ring->nentries_mask;
204 	unsigned int num_items = sl->num_items;
205 	unsigned int sw_index;
206 	unsigned int write_index;
207 	struct hif_softc *scn = CE_state->scn;
208 
209 	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));
210 
211 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
212 
213 	if (CE_state->scn->fastpath_mode_on && CE_state->htt_tx_data &&
214 	    Q_TARGET_ACCESS_BEGIN(scn) == 0) {
215 		src_ring->sw_index = CE_SRC_RING_READ_IDX_GET_FROM_DDR(
216 					       scn, CE_state->ctrl_addr);
217 		Q_TARGET_ACCESS_END(scn);
218 	}
219 
220 	sw_index = src_ring->sw_index;
221 	write_index = src_ring->write_index;
222 
223 	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >=
224 	    num_items) {
225 		struct ce_sendlist_item *item;
226 		int i;
227 
228 		/* handle all but the last item uniformly */
229 		for (i = 0; i < num_items - 1; i++) {
230 			item = &sl->item[i];
231 			/* TBDXXX: Support extensible sendlist_types? */
232 			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
233 			status = ce_send_nolock_legacy(copyeng,
234 				CE_SENDLIST_ITEM_CTXT,
235 				(qdf_dma_addr_t)item->data,
236 				item->u.nbytes, transfer_id,
237 				item->flags | CE_SEND_FLAG_GATHER,
238 				item->user_flags);
239 			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
240 		}
241 		/* provide valid context pointer for final item */
242 		item = &sl->item[i];
243 		/* TBDXXX: Support extensible sendlist_types? */
244 		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
245 		status = ce_send_nolock_legacy(copyeng, per_transfer_context,
246 					       (qdf_dma_addr_t) item->data,
247 					       item->u.nbytes,
248 					       transfer_id, item->flags,
249 					       item->user_flags);
250 		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
251 		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
252 					     QDF_NBUF_TX_PKT_CE);
253 		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
254 			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
255 			QDF_TRACE_DEFAULT_PDEV_ID,
256 			(uint8_t *)&(((qdf_nbuf_t)per_transfer_context)->data),
257 			sizeof(((qdf_nbuf_t)per_transfer_context)->data),
258 			QDF_TX));
259 	} else {
260 		/*
261 		 * Probably not worth the additional complexity to support
262 		 * partial sends with continuation or notification.  We expect
263 		 * to use large rings and small sendlists. If we can't handle
264 		 * the entire request at once, punt it back to the caller.
265 		 */
266 	}
267 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
268 
269 	return status;
270 }
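
/*
 * Typical caller-side use of the sendlist path (a hedged sketch; the
 * helpers are the ce_api.h wrappers that build the struct ce_sendlist
 * consumed above):
 *
 *   struct ce_sendlist sendlist;
 *
 *   ce_sendlist_init(&sendlist);
 *   ce_sendlist_buf_add(&sendlist, frag0_paddr, frag0_len, 0, 0);
 *   ce_sendlist_buf_add(&sendlist, frag1_paddr, frag1_len, 0, 0);
 *   ce_sendlist_send(copyeng, (void *)msdu_nbuf, &sendlist, transfer_id);
 *
 * All but the last item are posted with CE_SEND_FLAG_GATHER, so the
 * target sees them as one logical transfer that completes with a single
 * interrupt.
 */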
271 
272 /**
273  * ce_recv_buf_enqueue_legacy() - enqueue a recv buffer into a copy engine
274  * @copyeng: copy engine handle
275  * @per_recv_context: virtual address of the nbuf
276  * @buffer: physical address of the nbuf
277  *
278  * Return: QDF_STATUS_SUCCESS if the buffer is enqueued, else an error
279  */
280 static int
281 ce_recv_buf_enqueue_legacy(struct CE_handle *copyeng,
282 			   void *per_recv_context, qdf_dma_addr_t buffer)
283 {
284 	int status;
285 	struct CE_state *CE_state = (struct CE_state *)copyeng;
286 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
287 	uint32_t ctrl_addr = CE_state->ctrl_addr;
288 	unsigned int nentries_mask = dest_ring->nentries_mask;
289 	unsigned int write_index;
290 	unsigned int sw_index;
291 	uint64_t dma_addr = buffer;
292 	struct hif_softc *scn = CE_state->scn;
293 
294 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
295 	write_index = dest_ring->write_index;
296 	sw_index = dest_ring->sw_index;
297 
298 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
299 		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
300 		return -EIO;
301 	}
302 
303 	if ((CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) ||
304 	    (ce_is_fastpath_enabled(scn) && CE_state->htt_rx_data)) {
305 		struct CE_dest_desc *dest_ring_base =
306 			(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
307 		struct CE_dest_desc *dest_desc =
308 			CE_DEST_RING_TO_DESC(dest_ring_base, write_index);
309 
310 		/* Update the low 32 bits of the destination descriptor address */
311 		dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
312 #ifdef QCA_WIFI_3_0
313 		dest_desc->buffer_addr_hi =
314 			(uint32_t)((dma_addr >> 32) & 0x1F);
315 #endif
316 		dest_desc->nbytes = 0;
317 
318 		dest_ring->per_transfer_context[write_index] =
319 			per_recv_context;
320 
321 		hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_POST,
322 				(union ce_desc *)dest_desc, per_recv_context,
323 				write_index, 0);
324 
325 		/* Update Destination Ring Write Index */
326 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
327 		if (write_index != sw_index) {
328 			CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
329 			dest_ring->write_index = write_index;
330 		}
331 		status = QDF_STATUS_SUCCESS;
332 	} else
333 		status = QDF_STATUS_E_FAILURE;
334 
335 	Q_TARGET_ACCESS_END(scn);
336 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
337 	return status;
338 }
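
/*
 * Typical replenish loop built on top of this enqueue (a hedged sketch;
 * rx_buffers_needed() and get_mapped_rx_nbuf() are hypothetical caller
 * helpers standing in for refill accounting and nbuf allocation/DMA
 * mapping):
 *
 *   while (rx_buffers_needed(pipe)) {
 *           nbuf = get_mapped_rx_nbuf(&paddr);    // hypothetical helper
 *           if (!nbuf)
 *                   break;
 *           if (ce_recv_buf_enqueue(copyeng, (void *)nbuf, paddr) !=
 *               QDF_STATUS_SUCCESS) {
 *                   // ring full: unmap and free nbuf, retry later
 *                   break;
 *           }
 *   }
 */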
339 
340 static unsigned int
341 ce_send_entries_done_nolock_legacy(struct hif_softc *scn,
342 				   struct CE_state *CE_state)
343 {
344 	struct CE_ring_state *src_ring = CE_state->src_ring;
345 	uint32_t ctrl_addr = CE_state->ctrl_addr;
346 	unsigned int nentries_mask = src_ring->nentries_mask;
347 	unsigned int sw_index;
348 	unsigned int read_index;
349 
350 	sw_index = src_ring->sw_index;
351 	read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
352 
353 	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
354 }
355 
356 static unsigned int
357 ce_recv_entries_done_nolock_legacy(struct hif_softc *scn,
358 				   struct CE_state *CE_state)
359 {
360 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
361 	uint32_t ctrl_addr = CE_state->ctrl_addr;
362 	unsigned int nentries_mask = dest_ring->nentries_mask;
363 	unsigned int sw_index;
364 	unsigned int read_index;
365 
366 	sw_index = dest_ring->sw_index;
367 	read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
368 
369 	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
370 }
371 
372 static int
373 ce_completed_recv_next_nolock_legacy(struct CE_state *CE_state,
374 				     void **per_CE_contextp,
375 				     void **per_transfer_contextp,
376 				     qdf_dma_addr_t *bufferp,
377 				     unsigned int *nbytesp,
378 				     unsigned int *transfer_idp,
379 				     unsigned int *flagsp)
380 {
381 	int status;
382 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
383 	unsigned int nentries_mask = dest_ring->nentries_mask;
384 	unsigned int sw_index = dest_ring->sw_index;
385 	struct hif_softc *scn = CE_state->scn;
386 	struct CE_dest_desc *dest_ring_base =
387 		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
388 	struct CE_dest_desc *dest_desc =
389 		CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
390 	int nbytes;
391 	struct CE_dest_desc dest_desc_info;
392 	/*
393 	 * By copying the descriptor into dest_desc_info in local memory, we
394 	 * avoid an extra read from non-cacheable memory.
395 	 */
396 	dest_desc_info =  *dest_desc;
397 	nbytes = dest_desc_info.nbytes;
398 	if (nbytes == 0) {
399 		/*
400 		 * This closes a relatively unusual race where the Host
401 		 * sees the updated DRRI before the update to the
402 		 * corresponding descriptor has completed. We treat this
403 		 * as a descriptor that is not yet done.
404 		 */
405 		status = QDF_STATUS_E_FAILURE;
406 		goto done;
407 	}
408 
409 	hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_COMPLETION,
410 				 (union ce_desc *)dest_desc,
411 				 dest_ring->per_transfer_context[sw_index],
412 				 sw_index, 0);
413 
414 	dest_desc->nbytes = 0;
415 
416 	/* Return data from completed destination descriptor */
417 	*bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info);
418 	*nbytesp = nbytes;
419 	*transfer_idp = dest_desc_info.meta_data;
420 	*flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;
421 
422 	if (per_CE_contextp)
423 		*per_CE_contextp = CE_state->recv_context;
424 
425 	if (per_transfer_contextp) {
426 		*per_transfer_contextp =
427 			dest_ring->per_transfer_context[sw_index];
428 	}
429 	dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */
430 
431 	/* Update sw_index */
432 	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
433 	dest_ring->sw_index = sw_index;
434 	status = QDF_STATUS_SUCCESS;
435 
436 done:
437 	return status;
438 }
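
/*
 * Interrupt-time consumers normally drain receive completions through the
 * locked ce_api.h wrapper that resolves to the function above (a hedged
 * sketch):
 *
 *   while (ce_completed_recv_next(copyeng, &ce_ctx, (void **)&nbuf,
 *                                 &paddr, &nbytes, &id, &flags) ==
 *          QDF_STATUS_SUCCESS) {
 *           // nbuf now holds nbytes of target data: hand it up and post
 *           // a replacement buffer via ce_recv_buf_enqueue()
 *   }
 */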
439 
440 /* NB: Modeled after ce_completed_recv_next_nolock */
441 static QDF_STATUS
442 ce_revoke_recv_next_legacy(struct CE_handle *copyeng,
443 			   void **per_CE_contextp,
444 			   void **per_transfer_contextp,
445 			   qdf_dma_addr_t *bufferp)
446 {
447 	struct CE_state *CE_state;
448 	struct CE_ring_state *dest_ring;
449 	unsigned int nentries_mask;
450 	unsigned int sw_index;
451 	unsigned int write_index;
452 	QDF_STATUS status;
453 	struct hif_softc *scn;
454 
455 	CE_state = (struct CE_state *)copyeng;
456 	dest_ring = CE_state->dest_ring;
457 	if (!dest_ring)
458 		return QDF_STATUS_E_FAILURE;
459 
460 	scn = CE_state->scn;
461 	qdf_spin_lock(&CE_state->ce_index_lock);
462 	nentries_mask = dest_ring->nentries_mask;
463 	sw_index = dest_ring->sw_index;
464 	write_index = dest_ring->write_index;
465 	if (write_index != sw_index) {
466 		struct CE_dest_desc *dest_ring_base =
467 			(struct CE_dest_desc *)dest_ring->
468 			    base_addr_owner_space;
469 		struct CE_dest_desc *dest_desc =
470 			CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
471 
472 		/* Return data from completed destination descriptor */
473 		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc);
474 
475 		if (per_CE_contextp)
476 			*per_CE_contextp = CE_state->recv_context;
477 
478 		if (per_transfer_contextp) {
479 			*per_transfer_contextp =
480 				dest_ring->per_transfer_context[sw_index];
481 		}
482 		dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */
483 
484 		/* Update sw_index */
485 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
486 		dest_ring->sw_index = sw_index;
487 		status = QDF_STATUS_SUCCESS;
488 	} else {
489 		status = QDF_STATUS_E_FAILURE;
490 	}
491 	qdf_spin_unlock(&CE_state->ce_index_lock);
492 
493 	return status;
494 }
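
/*
 * At shutdown the HIF layer typically reclaims receive buffers that were
 * posted but never filled through this path (a hedged sketch using the
 * ce_api.h wrapper that resolves to the function above):
 *
 *   while (ce_revoke_recv_next(copyeng, NULL, (void **)&nbuf, &paddr) ==
 *          QDF_STATUS_SUCCESS) {
 *           // unmap paddr and free nbuf; the CE never wrote into it
 *   }
 */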
495 
496 /*
497  * Guts of ce_completed_send_next.
498  * The caller takes responsibility for any necessary locking.
499  */
500 static int
501 ce_completed_send_next_nolock_legacy(struct CE_state *CE_state,
502 				     void **per_CE_contextp,
503 				     void **per_transfer_contextp,
504 				     qdf_dma_addr_t *bufferp,
505 				     unsigned int *nbytesp,
506 				     unsigned int *transfer_idp,
507 				     unsigned int *sw_idx,
508 				     unsigned int *hw_idx,
509 				     uint32_t *toeplitz_hash_result)
510 {
511 	int status = QDF_STATUS_E_FAILURE;
512 	struct CE_ring_state *src_ring = CE_state->src_ring;
513 	uint32_t ctrl_addr = CE_state->ctrl_addr;
514 	unsigned int nentries_mask = src_ring->nentries_mask;
515 	unsigned int sw_index = src_ring->sw_index;
516 	unsigned int read_index;
517 	struct hif_softc *scn = CE_state->scn;
518 
519 	if (src_ring->hw_index == sw_index) {
520 		/*
521 		 * The SW completion index has caught up with the cached
522 		 * version of the HW completion index.
523 		 * Update the cached HW completion index to see whether
524 		 * the SW has really caught up to the HW, or if the cached
525 		 * value of the HW index has become stale.
526 		 */
527 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
528 			return QDF_STATUS_E_FAILURE;
529 		src_ring->hw_index =
530 			CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
531 		if (Q_TARGET_ACCESS_END(scn) < 0)
532 			return QDF_STATUS_E_FAILURE;
533 	}
534 	read_index = src_ring->hw_index;
535 
536 	if (sw_idx)
537 		*sw_idx = sw_index;
538 
539 	if (hw_idx)
540 		*hw_idx = read_index;
541 
542 	if ((read_index != sw_index) && (read_index != 0xffffffff)) {
543 		struct CE_src_desc *shadow_base =
544 			(struct CE_src_desc *)src_ring->shadow_base;
545 		struct CE_src_desc *shadow_src_desc =
546 			CE_SRC_RING_TO_DESC(shadow_base, sw_index);
547 #ifdef QCA_WIFI_3_0
548 		struct CE_src_desc *src_ring_base =
549 			(struct CE_src_desc *)src_ring->base_addr_owner_space;
550 		struct CE_src_desc *src_desc =
551 			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
552 #endif
553 		hif_record_ce_desc_event(scn, CE_state->id,
554 				HIF_TX_DESC_COMPLETION,
555 				(union ce_desc *)shadow_src_desc,
556 				src_ring->per_transfer_context[sw_index],
557 				sw_index, shadow_src_desc->nbytes);
558 
559 		/* Return data from completed source descriptor */
560 		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc);
561 		*nbytesp = shadow_src_desc->nbytes;
562 		*transfer_idp = shadow_src_desc->meta_data;
563 #ifdef QCA_WIFI_3_0
564 		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
565 #else
566 		*toeplitz_hash_result = 0;
567 #endif
568 		if (per_CE_contextp)
569 			*per_CE_contextp = CE_state->send_context;
570 
571 		if (per_transfer_contextp) {
572 			*per_transfer_contextp =
573 				src_ring->per_transfer_context[sw_index];
574 		}
575 		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */
576 
577 		/* Update sw_index */
578 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
579 		src_ring->sw_index = sw_index;
580 		status = QDF_STATUS_SUCCESS;
581 	}
582 
583 	return status;
584 }
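
/*
 * The matching reap loop on the send side (a hedged sketch of the locked
 * ce_api.h wrapper that resolves to the function above):
 *
 *   while (ce_completed_send_next(copyeng, &ce_ctx, (void **)&nbuf,
 *                                 &paddr, &nbytes, &id, &sw_idx,
 *                                 &hw_idx, &toeplitz_hash) ==
 *          QDF_STATUS_SUCCESS) {
 *           // the target has consumed the descriptor for nbuf: unmap it
 *           // and complete or free the frame
 *   }
 */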
585 
586 static QDF_STATUS
587 ce_cancel_send_next_legacy(struct CE_handle *copyeng,
588 			   void **per_CE_contextp,
589 			   void **per_transfer_contextp,
590 			   qdf_dma_addr_t *bufferp,
591 			   unsigned int *nbytesp,
592 			   unsigned int *transfer_idp,
593 			   uint32_t *toeplitz_hash_result)
594 {
595 	struct CE_state *CE_state;
596 	struct CE_ring_state *src_ring;
597 	unsigned int nentries_mask;
598 	unsigned int sw_index;
599 	unsigned int write_index;
600 	QDF_STATUS status;
601 	struct hif_softc *scn;
602 
603 	CE_state = (struct CE_state *)copyeng;
604 	src_ring = CE_state->src_ring;
605 	if (!src_ring)
606 		return QDF_STATUS_E_FAILURE;
607 
608 	scn = CE_state->scn;
609 	qdf_spin_lock(&CE_state->ce_index_lock);
610 	nentries_mask = src_ring->nentries_mask;
611 	sw_index = src_ring->sw_index;
612 	write_index = src_ring->write_index;
613 
614 	if (write_index != sw_index) {
615 		struct CE_src_desc *src_ring_base =
616 			(struct CE_src_desc *)src_ring->base_addr_owner_space;
617 		struct CE_src_desc *src_desc =
618 			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
619 
620 		/* Return data from completed source descriptor */
621 		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc);
622 		*nbytesp = src_desc->nbytes;
623 		*transfer_idp = src_desc->meta_data;
624 #ifdef QCA_WIFI_3_0
625 		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
626 #else
627 		*toeplitz_hash_result = 0;
628 #endif
629 
630 		if (per_CE_contextp)
631 			*per_CE_contextp = CE_state->send_context;
632 
633 		if (per_transfer_contextp) {
634 			*per_transfer_contextp =
635 				src_ring->per_transfer_context[sw_index];
636 		}
637 		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */
638 
639 		/* Update sw_index */
640 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
641 		src_ring->sw_index = sw_index;
642 		status = QDF_STATUS_SUCCESS;
643 	} else {
644 		status = QDF_STATUS_E_FAILURE;
645 	}
646 	qdf_spin_unlock(&CE_state->ce_index_lock);
647 
648 	return status;
649 }
650 
651 /*
652  * Adjust interrupts for the copy complete handler.
653  * If it's needed for either send or recv, then unmask
654  * this interrupt; otherwise, mask it.
655  *
656  * Called with target_lock held.
657  */
658 static void
659 ce_per_engine_handler_adjust_legacy(struct CE_state *CE_state,
660 				    int disable_copy_compl_intr)
661 {
662 	uint32_t ctrl_addr = CE_state->ctrl_addr;
663 	struct hif_softc *scn = CE_state->scn;
664 
665 	CE_state->disable_copy_compl_intr = disable_copy_compl_intr;
666 
667 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
668 		return;
669 
670 	if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
671 		hif_err_rl("%s: target access is not allowed", __func__);
		/* balance the Q_TARGET_ACCESS_BEGIN() taken above */
		Q_TARGET_ACCESS_END(scn);
672 		return;
673 	}
674 
675 	if ((!disable_copy_compl_intr) &&
676 	    (CE_state->send_cb || CE_state->recv_cb))
677 		CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
678 	else
679 		CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
680 
681 	if (CE_state->watermark_cb)
682 		CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
683 	else
684 		CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
685 	Q_TARGET_ACCESS_END(scn);
686 }
687 
688 static void ce_legacy_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
689 				     struct CE_ring_state *src_ring,
690 				     struct CE_attr *attr)
691 {
692 	uint32_t ctrl_addr;
693 	uint64_t dma_addr;
694 
695 	QDF_ASSERT(ce_id < scn->ce_count);
696 	ctrl_addr = CE_BASE_ADDRESS(ce_id);
697 
698 	src_ring->hw_index =
699 		CE_SRC_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
700 	src_ring->sw_index = src_ring->hw_index;
701 	src_ring->write_index =
702 		CE_SRC_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
703 	dma_addr = src_ring->base_addr_CE_space;
704 	CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
705 				  (uint32_t)(dma_addr & 0xFFFFFFFF));
706 
707 	/* if SR_BA_ADDRESS_HIGH register exists */
708 	if (is_register_supported(SR_BA_ADDRESS_HIGH)) {
709 		uint32_t tmp;
710 
711 		tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET(
712 				scn, ctrl_addr);
713 		tmp &= ~0x1F;
714 		dma_addr = ((dma_addr >> 32) & 0x1F) | tmp;
715 		CE_SRC_RING_BASE_ADDR_HIGH_SET(scn,
716 					ctrl_addr, (uint32_t)dma_addr);
717 	}
718 	CE_SRC_RING_SZ_SET(scn, ctrl_addr, src_ring->nentries);
719 	CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max);
720 #ifdef BIG_ENDIAN_HOST
721 	/* Enable source ring byte swap for big endian host */
722 	CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
723 #endif
724 	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
725 	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, src_ring->nentries);
726 }
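
/*
 * Worked example of the base-address split programmed above.  The 0x1F
 * mask carries the five address bits above the 32-bit low word, i.e. a
 * 37-bit ring base address:
 *
 *   base_addr_CE_space              = 0x1_2345_6789
 *   CE_SRC_RING_BASE_ADDR_SET       writes 0x23456789   (bits 31:0)
 *   CE_SRC_RING_BASE_ADDR_HIGH_SET  writes bits 36:32 (0x1) while
 *                                   preserving the rest of the register
 */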
727 
728 static void ce_legacy_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id,
729 				struct CE_ring_state *dest_ring,
730 				struct CE_attr *attr)
731 {
732 	uint32_t ctrl_addr;
733 	uint64_t dma_addr;
734 
735 	QDF_ASSERT(ce_id < scn->ce_count);
736 	ctrl_addr = CE_BASE_ADDRESS(ce_id);
737 	dest_ring->sw_index =
738 		CE_DEST_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
739 	dest_ring->write_index =
740 		CE_DEST_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
741 	dma_addr = dest_ring->base_addr_CE_space;
742 	CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
743 				   (uint32_t)(dma_addr & 0xFFFFFFFF));
744 
745 	/* if DR_BA_ADDRESS_HIGH exists */
746 	if (is_register_supported(DR_BA_ADDRESS_HIGH)) {
747 		uint32_t tmp;
748 
749 		tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn,
750 						      ctrl_addr);
751 		tmp &= ~0x1F;
752 		dma_addr = ((dma_addr >> 32) & 0x1F) | tmp;
753 		CE_DEST_RING_BASE_ADDR_HIGH_SET(scn,
754 				ctrl_addr, (uint32_t)dma_addr);
755 	}
756 
757 	CE_DEST_RING_SZ_SET(scn, ctrl_addr, dest_ring->nentries);
758 #ifdef BIG_ENDIAN_HOST
759 	/* Enable Dest ring byte swap for big endian host */
760 	CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
761 #endif
762 	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
763 	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, dest_ring->nentries);
764 }
765 
766 static uint32_t ce_get_desc_size_legacy(uint8_t ring_type)
767 {
768 	switch (ring_type) {
769 	case CE_RING_SRC:
770 		return sizeof(struct CE_src_desc);
771 	case CE_RING_DEST:
772 		return sizeof(struct CE_dest_desc);
773 	case CE_RING_STATUS:
774 		qdf_assert(0);
775 		return 0;
776 	default:
777 		return 0;
778 	}
781 }
782 
783 static int ce_ring_setup_legacy(struct hif_softc *scn, uint8_t ring_type,
784 				uint32_t ce_id, struct CE_ring_state *ring,
785 				struct CE_attr *attr)
786 {
787 	int status = Q_TARGET_ACCESS_BEGIN(scn);
788 
789 	if (status < 0)
790 		goto out;
791 
792 	switch (ring_type) {
793 	case CE_RING_SRC:
794 		ce_legacy_src_ring_setup(scn, ce_id, ring, attr);
795 		break;
796 	case CE_RING_DEST:
797 		ce_legacy_dest_ring_setup(scn, ce_id, ring, attr);
798 		break;
799 	case CE_RING_STATUS:
800 	default:
801 		qdf_assert(0);
802 		break;
803 	}
804 
805 	Q_TARGET_ACCESS_END(scn);
806 out:
807 	return status;
808 }
809 
810 static void ce_prepare_shadow_register_v2_cfg_legacy(struct hif_softc *scn,
811 			    struct pld_shadow_reg_v2_cfg **shadow_config,
812 			    int *num_shadow_registers_configured)
813 {
814 	*num_shadow_registers_configured = 0;
815 	*shadow_config = NULL;
816 }
817 
818 static bool ce_check_int_watermark(struct CE_state *CE_state,
819 				   unsigned int *flags)
820 {
821 	uint32_t ce_int_status;
822 	uint32_t ctrl_addr = CE_state->ctrl_addr;
823 	struct hif_softc *scn = CE_state->scn;
824 
825 	ce_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
826 	if (ce_int_status & CE_WATERMARK_MASK) {
827 		/* Convert HW IS bits to software flags */
828 		*flags =
829 			(ce_int_status & CE_WATERMARK_MASK) >>
830 			CE_WM_SHFT;
831 		return true;
832 	}
833 
834 	return false;
835 }
836 
837 struct ce_ops ce_service_legacy = {
838 	.ce_get_desc_size = ce_get_desc_size_legacy,
839 	.ce_ring_setup = ce_ring_setup_legacy,
840 	.ce_sendlist_send = ce_sendlist_send_legacy,
841 	.ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_legacy,
842 	.ce_revoke_recv_next = ce_revoke_recv_next_legacy,
843 	.ce_cancel_send_next = ce_cancel_send_next_legacy,
844 	.ce_recv_buf_enqueue = ce_recv_buf_enqueue_legacy,
845 	.ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_legacy,
846 	.ce_send_nolock = ce_send_nolock_legacy,
847 	.watermark_int = ce_check_int_watermark,
848 	.ce_completed_send_next_nolock = ce_completed_send_next_nolock_legacy,
849 	.ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_legacy,
850 	.ce_send_entries_done_nolock = ce_send_entries_done_nolock_legacy,
851 	.ce_prepare_shadow_register_v2_cfg =
852 		ce_prepare_shadow_register_v2_cfg_legacy,
853 };
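
/*
 * The CE core selects one struct ce_ops per target at attach time and
 * dispatches through it, e.g. (a hedged sketch):
 *
 *   struct ce_ops *ops = ce_services_legacy();
 *
 *   ops->ce_send_nolock(copyeng, ctx, paddr, nbytes, transfer_id,
 *                       flags, user_flags);
 */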
854 
855 struct ce_ops *ce_services_legacy(void)
856 {
857 	return &ce_service_legacy;
858 }
859 
860 qdf_export_symbol(ce_services_legacy);
861 
862 void ce_service_legacy_init(void)
863 {
864 	ce_service_register_module(CE_SVC_LEGACY, &ce_services_legacy);
865 }
866