xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_service_legacy.c (revision 503663c6daafffe652fa360bde17243568cd6d2a)
1 /*
2  * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "ce_api.h"
20 #include "ce_internal.h"
21 #include "ce_main.h"
22 #include "ce_reg.h"
23 #include "hif.h"
24 #include "hif_debug.h"
25 #include "hif_io32.h"
26 #include "qdf_lock.h"
27 #include "hif_main.h"
28 #include "hif_napi.h"
29 #include "qdf_module.h"
30 #include "regtable.h"
31 
32 /*
33  * Support for Copy Engine hardware, which is mainly used for
34  * communication between Host and Target over a PCIe interconnect.
35  */
36 
37 /*
38  * A single CopyEngine (CE) comprises two "rings":
39  *   a source ring
40  *   a destination ring
41  *
42  * Each ring consists of a number of descriptors which specify
43  * an address, length, and meta-data.
44  *
45  * Typically, one side of the PCIe interconnect (Host or Target)
46  * controls one ring and the other side controls the other ring.
47  * The source side chooses when to initiate a transfer and it
48  * chooses what to send (buffer address, length). The destination
49  * side keeps a supply of "anonymous receive buffers" available and
50  * it handles incoming data as it arrives (when the destination
51  * receives an interrupt).
52  *
53  * The sender may send a simple buffer (address/length) or it may
54  * send a small list of buffers.  When a small list is sent, hardware
55  * "gathers" these and they end up in a single destination buffer
56  * with a single interrupt.
57  *
58  * There are several "contexts" managed by this layer -- more, it
59  * may seem, than should be needed. These are provided mainly for
60  * maximum flexibility and especially to facilitate a simpler HIF
61  * implementation. There are per-CopyEngine recv, send, and watermark
62  * contexts. These are supplied by the caller when a recv, send,
63  * or watermark handler is established and they are echoed back to
64  * the caller when the respective callbacks are invoked. There is
65  * also a per-transfer context supplied by the caller when a buffer
66  * (or sendlist) is sent and when a buffer is enqueued for recv.
67  * These per-transfer contexts are echoed back to the caller when
68  * the buffer is sent/received.
69  * Target TX hash result: toeplitz_hash_result
70  */
71 
72 /* NB: Modeled after ce_completed_send_next */
73 /* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
74 #define CE_WM_SHFT 1
75 
76 #ifdef WLAN_FEATURE_FASTPATH
77 #ifdef QCA_WIFI_3_0
78 static inline void
79 ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
80 		      uint64_t dma_addr,
81 		      uint32_t user_flags)
82 {
83 	shadow_src_desc->buffer_addr_hi =
84 			(uint32_t)((dma_addr >> 32) & 0x1F);
85 	user_flags |= shadow_src_desc->buffer_addr_hi;
86 	memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
87 	       sizeof(uint32_t));
88 }
89 #else
90 static inline void
91 ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
92 		      uint64_t dma_addr,
93 		      uint32_t user_flags)
94 {
95 }
96 #endif
97 
98 #define SLOTS_PER_DATAPATH_TX 2
99 
100 /**
101  * ce_send_fast() - CE layer Tx buffer posting function
102  * @copyeng: copy engine handle
103  * @msdu: msdu to be sent
104  * @transfer_id: transfer_id
105  * @download_len: packet download length
106  *
107  * Assumption: the msdu carries the HTC/HTT header in fragment 0 and
108  * the data payload in fragment 1
109  * Function:
110  * 1. Check no. of available source ring entries
111  * 2. Create src ring entries (allocated in consistent memory)
112  * 3. Write index to h/w
113  *
114  * Return: No. of packets that could be sent (0 or 1)
115  */
116 int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
117 		 unsigned int transfer_id, uint32_t download_len)
118 {
119 	struct CE_state *ce_state = (struct CE_state *)copyeng;
120 	struct hif_softc *scn = ce_state->scn;
121 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
122 	struct CE_ring_state *src_ring = ce_state->src_ring;
123 	u_int32_t ctrl_addr = ce_state->ctrl_addr;
124 	unsigned int nentries_mask = src_ring->nentries_mask;
125 	unsigned int write_index;
126 	unsigned int sw_index;
127 	unsigned int frag_len;
128 	uint64_t dma_addr;
129 	uint32_t user_flags;
130 	enum hif_ce_event_type type = FAST_TX_SOFTWARE_INDEX_UPDATE;
131 	bool ok_to_send = true;
132 
133 	/*
134 	 * Create a log assuming the call will go through; if it does not,
135 	 * an error trace is added as well.
136 	 * Please add the same failure log for any additional error paths.
137 	 */
138 	DPTRACE(qdf_dp_trace(msdu,
139 			     QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
140 			     QDF_TRACE_DEFAULT_PDEV_ID,
141 			     qdf_nbuf_data_addr(msdu),
142 			     sizeof(qdf_nbuf_data(msdu)), QDF_TX));
143 
144 	qdf_spin_lock_bh(&ce_state->ce_index_lock);
145 
146 	/*
147 	 * Request runtime PM resume; if the bus is still suspended, make
148 	 * sure there is no PCIe link access.
149 	 */
150 	if (hif_pm_runtime_get(hif_hdl) != 0)
151 		ok_to_send = false;
152 
153 	if (ok_to_send) {
154 		Q_TARGET_ACCESS_BEGIN(scn);
155 		DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
156 	}
157 
158 	write_index = src_ring->write_index;
159 	sw_index = src_ring->sw_index;
160 	hif_record_ce_desc_event(scn, ce_state->id,
161 				 FAST_TX_SOFTWARE_INDEX_UPDATE,
162 				 NULL, NULL, sw_index, 0);
163 
164 	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1)
165 			 < SLOTS_PER_DATAPATH_TX)) {
166 		hif_err_rl("Source ring full, required %d, available %d",
167 			   SLOTS_PER_DATAPATH_TX,
168 			   CE_RING_DELTA(nentries_mask, write_index,
169 					 sw_index - 1));
170 		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
171 		if (ok_to_send)
172 			Q_TARGET_ACCESS_END(scn);
173 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
174 
175 		DPTRACE(qdf_dp_trace(NULL,
176 				     QDF_DP_TRACE_CE_FAST_PACKET_ERR_RECORD,
177 				     QDF_TRACE_DEFAULT_PDEV_ID,
178 				     NULL, 0, QDF_TX));
179 
180 		return 0;
181 	}
182 
183 	{
184 		struct CE_src_desc *src_ring_base =
185 			(struct CE_src_desc *)src_ring->base_addr_owner_space;
186 		struct CE_src_desc *shadow_base =
187 			(struct CE_src_desc *)src_ring->shadow_base;
188 		struct CE_src_desc *src_desc =
189 			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
190 		struct CE_src_desc *shadow_src_desc =
191 			CE_SRC_RING_TO_DESC(shadow_base, write_index);
192 
193 		hif_pm_runtime_get_noresume(hif_hdl);
194 
195 		/*
196 		 * First fill out the ring descriptor for the HTC HTT frame
197 		 * header. These are uncached writes. Should we use a local
198 		 * structure instead?
199 		 */
200 		/* HTT/HTC header can be passed as an argument */
201 		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 0);
202 		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
203 							  0xFFFFFFFF);
204 		user_flags = qdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
205 		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
206 		shadow_src_desc->meta_data = transfer_id;
207 		shadow_src_desc->nbytes = qdf_nbuf_get_frag_len(msdu, 0);
208 		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
209 		download_len -= shadow_src_desc->nbytes;
210 		/*
211 		 * HTC HTT header is a word stream, so byte swap if CE byte
212 		 * swap enabled
213 		 */
214 		shadow_src_desc->byte_swap = ((ce_state->attr_flags &
215 					CE_ATTR_BYTE_SWAP_DATA) != 0);
216 		/* For the first descriptor, the HW write index need not be updated yet */
217 		shadow_src_desc->gather = 1;
218 		*src_desc = *shadow_src_desc;
219 		/* The HTC/HTT header descriptor uses the sendlist-item
220 		 * placeholder as its transfer context
221 		 */
222 		src_ring->per_transfer_context[write_index] =
223 			CE_SENDLIST_ITEM_CTXT;
224 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
225 
226 		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
227 		shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
228 		/*
229 		 * Now fill out the ring descriptor for the actual data
230 		 * packet
231 		 */
232 		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 1);
233 		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
234 							  0xFFFFFFFF);
235 		/*
236 		 * Clear packet offset for all but the first CE desc.
237 		 */
238 		user_flags &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
239 		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
240 		shadow_src_desc->meta_data = transfer_id;
241 
242 		/* get actual packet length */
243 		frag_len = qdf_nbuf_get_frag_len(msdu, 1);
244 
245 		/* download remaining bytes of payload */
246 		shadow_src_desc->nbytes =  download_len;
247 		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
248 		if (shadow_src_desc->nbytes > frag_len)
249 			shadow_src_desc->nbytes = frag_len;
250 
251 		/*  Data packet is a byte stream, so disable byte swap */
252 		shadow_src_desc->byte_swap = 0;
253 		/* For the last one, gather is not set */
254 		shadow_src_desc->gather    = 0;
255 		*src_desc = *shadow_src_desc;
256 		src_ring->per_transfer_context[write_index] = msdu;
257 
258 		hif_record_ce_desc_event(scn, ce_state->id, type,
259 					 (union ce_desc *)src_desc,
260 				src_ring->per_transfer_context[write_index],
261 				write_index, shadow_src_desc->nbytes);
262 
263 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
264 
265 		DPTRACE(qdf_dp_trace(msdu,
266 				     QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
267 				     QDF_TRACE_DEFAULT_PDEV_ID,
268 				     qdf_nbuf_data_addr(msdu),
269 				     sizeof(qdf_nbuf_data(msdu)), QDF_TX));
270 	}
271 
272 	src_ring->write_index = write_index;
273 
274 	if (ok_to_send) {
275 		if (qdf_likely(ce_state->state == CE_RUNNING)) {
276 			type = FAST_TX_WRITE_INDEX_UPDATE;
277 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
278 						      write_index);
279 			Q_TARGET_ACCESS_END(scn);
280 		} else {
281 			ce_state->state = CE_PENDING;
282 		}
283 		hif_pm_runtime_put(hif_hdl);
284 	}
285 
286 	qdf_spin_unlock_bh(&ce_state->ce_index_lock);
287 
288 	/* sent 1 packet */
289 	return 1;
290 }
291 
292 /**
293  * ce_fastpath_rx_handle() - Updates write_index and calls fastpath msg handler
294  * @ce_state: handle to copy engine state
295  * @cmpl_msdus: Rx msdus
296  * @num_cmpls: number of Rx msdus
297  * @ctrl_addr: CE control address
298  *
299  * Return: None
300  */
301 static void ce_fastpath_rx_handle(struct CE_state *ce_state,
302 				  qdf_nbuf_t *cmpl_msdus, uint32_t num_cmpls,
303 				  uint32_t ctrl_addr)
304 {
305 	struct hif_softc *scn = ce_state->scn;
306 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
307 	uint32_t nentries_mask = dest_ring->nentries_mask;
308 	uint32_t write_index;
309 
310 	qdf_spin_unlock(&ce_state->ce_index_lock);
311 	ce_state->fastpath_handler(ce_state->context, cmpl_msdus, num_cmpls);
312 	qdf_spin_lock(&ce_state->ce_index_lock);
313 
314 	/* Update Destination Ring Write Index */
315 	write_index = dest_ring->write_index;
316 	write_index = CE_RING_IDX_ADD(nentries_mask, write_index, num_cmpls);
317 
318 	hif_record_ce_desc_event(scn, ce_state->id,
319 				 FAST_RX_WRITE_INDEX_UPDATE,
320 				 NULL, NULL, write_index, 0);
321 
322 	CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
323 	dest_ring->write_index = write_index;
324 }
325 
326 /**
327  * ce_per_engine_service_fast() - CE handler routine to service fastpath msgs
328  * @scn: hif_context
329  * @ce_id: Copy engine ID
330  * 1) Go through the CE ring, and find the completions
331  * 2) For valid completions retrieve context (nbuf) for per_transfer_context[]
332  * 3) Unmap buffer & accumulate in an array.
333  * 4) Call message handler when array is full or when exiting the handler
334  *
335  * Return: void
336  */
337 
338 void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
339 {
340 	struct CE_state *ce_state = scn->ce_id_to_state[ce_id];
341 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
342 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
343 	struct CE_dest_desc *dest_ring_base =
344 		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
345 
346 	uint32_t nentries_mask = dest_ring->nentries_mask;
347 	uint32_t sw_index = dest_ring->sw_index;
348 	uint32_t nbytes;
349 	qdf_nbuf_t nbuf;
350 	dma_addr_t paddr;
351 	struct CE_dest_desc *dest_desc;
352 	qdf_nbuf_t cmpl_msdus[MSG_FLUSH_NUM];
353 	uint32_t ctrl_addr = ce_state->ctrl_addr;
354 	uint32_t nbuf_cmpl_idx = 0;
355 	unsigned int more_comp_cnt = 0;
356 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
357 	struct ce_ops *ce_services = hif_state->ce_services;
358 
359 more_data:
360 	for (;;) {
361 		dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base,
362 						 sw_index);
363 
364 		/*
365 		 * The following read of nbytes is from non-cached memory
366 		 */
367 		nbytes = dest_desc->nbytes;
368 
369 		/* If completion is invalid, break */
370 		if (qdf_unlikely(nbytes == 0))
371 			break;
372 
373 		/*
374 		 * Build the nbuf list from valid completions
375 		 */
376 		nbuf = dest_ring->per_transfer_context[sw_index];
377 
378 		/*
379 		 * No lock is needed here, since this is the only thread
380 		 * that accesses the sw_index
381 		 */
382 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
383 
384 		/*
385 		 * CAREFUL : Uncached write, but still less expensive,
386 		 * since most modern caches use "write-combining" to
387 		 * flush multiple cache-writes all at once.
388 		 */
389 		dest_desc->nbytes = 0;
390 
391 		/*
392 		 * Per our understanding this is not required on our
393 		 * platform, since we are doing the same cache invalidation
394 		 * operation on the same buffer twice in succession,
395 		 * without any modification to this buffer by the CPU in
396 		 * between.
397 		 * However, this code with 2 syncs in succession has
398 		 * been undergoing some testing at a customer site,
399 		 * and has shown no problems so far. We would like to
400 		 * confirm with the customer that this sync is really
401 		 * not required, before we remove this line
402 		 * completely.
403 		 */
404 		paddr = QDF_NBUF_CB_PADDR(nbuf);
405 
406 		qdf_mem_dma_sync_single_for_cpu(scn->qdf_dev, paddr,
407 						(skb_end_pointer(nbuf) -
408 						(nbuf)->data),
409 						DMA_FROM_DEVICE);
410 
411 		qdf_nbuf_put_tail(nbuf, nbytes);
412 
413 		qdf_assert_always(nbuf->data);
414 
415 		QDF_NBUF_CB_RX_CTX_ID(nbuf) =
416 				hif_get_rx_ctx_id(ce_state->id, hif_hdl);
417 		cmpl_msdus[nbuf_cmpl_idx++] = nbuf;
418 
419 		/*
420 		 * We are not posting the buffers back; instead we are
421 		 * reusing the buffers.
422 		 */
423 		if (nbuf_cmpl_idx == scn->ce_service_max_rx_ind_flush) {
424 			hif_record_ce_desc_event(scn, ce_state->id,
425 						 FAST_RX_SOFTWARE_INDEX_UPDATE,
426 						 NULL, NULL, sw_index, 0);
427 			dest_ring->sw_index = sw_index;
428 			ce_fastpath_rx_handle(ce_state, cmpl_msdus,
429 					      nbuf_cmpl_idx, ctrl_addr);
430 
431 			ce_state->receive_count += nbuf_cmpl_idx;
432 			if (qdf_unlikely(hif_ce_service_should_yield(
433 						scn, ce_state))) {
434 				ce_state->force_break = 1;
435 				qdf_atomic_set(&ce_state->rx_pending, 1);
436 				return;
437 			}
438 
439 			nbuf_cmpl_idx = 0;
440 			more_comp_cnt = 0;
441 		}
442 	}
443 
444 	hif_record_ce_desc_event(scn, ce_state->id,
445 				 FAST_RX_SOFTWARE_INDEX_UPDATE,
446 				 NULL, NULL, sw_index, 0);
447 
448 	dest_ring->sw_index = sw_index;
449 
450 	/*
451 	 * If there are not enough completions to fill the array,
452 	 * just call the message handler here
453 	 */
454 	if (nbuf_cmpl_idx) {
455 		ce_fastpath_rx_handle(ce_state, cmpl_msdus,
456 				      nbuf_cmpl_idx, ctrl_addr);
457 
458 		ce_state->receive_count += nbuf_cmpl_idx;
459 		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
460 			ce_state->force_break = 1;
461 			qdf_atomic_set(&ce_state->rx_pending, 1);
462 			return;
463 		}
464 
465 		/* check for more packets after upper layer processing */
466 		nbuf_cmpl_idx = 0;
467 		more_comp_cnt = 0;
468 		goto more_data;
469 	}
470 
471 	hif_update_napi_max_poll_time(ce_state, ce_id, qdf_get_cpu());
472 
473 	qdf_atomic_set(&ce_state->rx_pending, 0);
474 	if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
475 		CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
476 					   HOST_IS_COPY_COMPLETE_MASK);
477 	} else {
478 		hif_err_rl("%s: target access is not allowed", __func__);
479 		return;
480 	}
481 
482 	if (ce_services->ce_recv_entries_done_nolock(scn, ce_state)) {
483 		if (more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
484 			goto more_data;
485 		} else {
486 			HIF_ERROR("%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
487 				  __func__, nentries_mask,
488 				  ce_state->dest_ring->sw_index,
489 				  CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr));
490 		}
491 	}
492 #ifdef NAPI_YIELD_BUDGET_BASED
493 	/*
494 	 * Caution: Before you modify this code, please refer to the
495 	 * hif_napi_poll function to understand how napi_complete gets
496 	 * called and make the necessary changes. Force break has to be
497 	 * done till WIN disables the interrupt at the source.
498 	 */
499 	ce_state->force_break = 1;
500 #endif
501 }
502 
503 /**
504  * ce_is_fastpath_enabled() - returns true if fastpath mode is enabled
505  * @scn: Handle to HIF context
506  *
507  * Return: true if fastpath is enabled else false.
508  */
509 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
510 {
511 	return scn->fastpath_mode_on;
512 }
513 #else
514 void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
515 {
516 }
517 
518 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
519 {
520 	return false;
521 }
522 #endif /* WLAN_FEATURE_FASTPATH */
523 
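/**
 * ce_send_nolock_legacy() - post a send descriptor to a CE source ring
 * @copyeng: copy engine handle
 * @per_transfer_context: context echoed back on send completion
 * @buffer: DMA address of the buffer to send
 * @nbytes: number of bytes to send
 * @transfer_id: transfer ID placed in the descriptor meta data
 * @flags: CE_SEND_FLAG_* flags (gather, byte-swap disable)
 * @user_flags: user flags copied into the descriptor (QCA_WIFI_3_0 only)
 *
 * The caller takes responsibility for any necessary locking.
 *
 * Return: QDF_STATUS_SUCCESS if the descriptor was posted,
 *	   QDF_STATUS_E_FAILURE if the ring is full or target access fails
 */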
524 static int
525 ce_send_nolock_legacy(struct CE_handle *copyeng,
526 		      void *per_transfer_context,
527 		      qdf_dma_addr_t buffer,
528 		      uint32_t nbytes,
529 		      uint32_t transfer_id,
530 		      uint32_t flags,
531 		      uint32_t user_flags)
532 {
533 	int status;
534 	struct CE_state *CE_state = (struct CE_state *)copyeng;
535 	struct CE_ring_state *src_ring = CE_state->src_ring;
536 	uint32_t ctrl_addr = CE_state->ctrl_addr;
537 	unsigned int nentries_mask = src_ring->nentries_mask;
538 	unsigned int sw_index = src_ring->sw_index;
539 	unsigned int write_index = src_ring->write_index;
540 	uint64_t dma_addr = buffer;
541 	struct hif_softc *scn = CE_state->scn;
542 
543 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
544 		return QDF_STATUS_E_FAILURE;
545 	if (unlikely(CE_RING_DELTA(nentries_mask,
546 				   write_index, sw_index - 1) <= 0)) {
547 		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
548 		Q_TARGET_ACCESS_END(scn);
549 		return QDF_STATUS_E_FAILURE;
550 	}
551 	{
552 		enum hif_ce_event_type event_type;
553 		struct CE_src_desc *src_ring_base =
554 			(struct CE_src_desc *)src_ring->base_addr_owner_space;
555 		struct CE_src_desc *shadow_base =
556 			(struct CE_src_desc *)src_ring->shadow_base;
557 		struct CE_src_desc *src_desc =
558 			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
559 		struct CE_src_desc *shadow_src_desc =
560 			CE_SRC_RING_TO_DESC(shadow_base, write_index);
561 
562 		/* Update low 32 bits source descriptor address */
563 		shadow_src_desc->buffer_addr =
564 			(uint32_t)(dma_addr & 0xFFFFFFFF);
565 #ifdef QCA_WIFI_3_0
566 		shadow_src_desc->buffer_addr_hi =
567 			(uint32_t)((dma_addr >> 32) & 0x1F);
568 		user_flags |= shadow_src_desc->buffer_addr_hi;
569 		memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
570 		       sizeof(uint32_t));
571 #endif
572 		shadow_src_desc->target_int_disable = 0;
573 		shadow_src_desc->host_int_disable = 0;
574 
575 		shadow_src_desc->meta_data = transfer_id;
576 
577 		/*
578 		 * Set the swap bit if:
579 		 * typical sends on this CE are swapped (host is big-endian)
580 		 * and this send doesn't disable the swapping
581 		 * (data is not bytestream)
582 		 */
583 		shadow_src_desc->byte_swap =
584 			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
585 			 != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
586 		shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
587 		shadow_src_desc->nbytes = nbytes;
588 		ce_validate_nbytes(nbytes, CE_state);
589 
590 		*src_desc = *shadow_src_desc;
591 
592 		src_ring->per_transfer_context[write_index] =
593 			per_transfer_context;
594 
595 		/* Update Source Ring Write Index */
596 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
597 
598 		/* WORKAROUND */
599 		if (shadow_src_desc->gather) {
600 			event_type = HIF_TX_GATHER_DESC_POST;
601 		} else if (qdf_unlikely(CE_state->state != CE_RUNNING)) {
602 			event_type = HIF_TX_DESC_SOFTWARE_POST;
603 			CE_state->state = CE_PENDING;
604 		} else {
605 			event_type = HIF_TX_DESC_POST;
606 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
607 						      write_index);
608 		}
609 
610 		/* src_ring->write_index hasn't been updated even though
611 		 * the register has already been written to.
612 		 */
613 		hif_record_ce_desc_event(scn, CE_state->id, event_type,
614 			(union ce_desc *)shadow_src_desc, per_transfer_context,
615 			src_ring->write_index, nbytes);
616 
617 		src_ring->write_index = write_index;
618 		status = QDF_STATUS_SUCCESS;
619 	}
620 	Q_TARGET_ACCESS_END(scn);
621 	return status;
622 }
623 
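/**
 * ce_sendlist_send_legacy() - post a sendlist as a single gather transfer
 * @copyeng: copy engine handle
 * @per_transfer_context: context echoed back when the final item completes
 * @sendlist: list of simple buffers to send
 * @transfer_id: transfer ID placed in each descriptor's meta data
 *
 * All but the last item are posted with CE_SEND_FLAG_GATHER and the
 * CE_SENDLIST_ITEM_CTXT placeholder context. If the source ring cannot
 * hold the entire list, nothing is posted and the request is punted back
 * to the caller.
 *
 * Return: QDF_STATUS_SUCCESS when all items are posted, -ENOMEM if the
 *	   ring cannot hold the entire list
 */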
624 static int
625 ce_sendlist_send_legacy(struct CE_handle *copyeng,
626 			void *per_transfer_context,
627 			struct ce_sendlist *sendlist, unsigned int transfer_id)
628 {
629 	int status = -ENOMEM;
630 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
631 	struct CE_state *CE_state = (struct CE_state *)copyeng;
632 	struct CE_ring_state *src_ring = CE_state->src_ring;
633 	unsigned int nentries_mask = src_ring->nentries_mask;
634 	unsigned int num_items = sl->num_items;
635 	unsigned int sw_index;
636 	unsigned int write_index;
637 	struct hif_softc *scn = CE_state->scn;
638 
639 	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));
640 
641 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
642 
643 	if (CE_state->scn->fastpath_mode_on && CE_state->htt_tx_data &&
644 	    Q_TARGET_ACCESS_BEGIN(scn) == 0) {
645 		src_ring->sw_index = CE_SRC_RING_READ_IDX_GET_FROM_DDR(
646 					       scn, CE_state->ctrl_addr);
647 		Q_TARGET_ACCESS_END(scn);
648 	}
649 
650 	sw_index = src_ring->sw_index;
651 	write_index = src_ring->write_index;
652 
653 	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >=
654 	    num_items) {
655 		struct ce_sendlist_item *item;
656 		int i;
657 
658 		/* handle all but the last item uniformly */
659 		for (i = 0; i < num_items - 1; i++) {
660 			item = &sl->item[i];
661 			/* TBDXXX: Support extensible sendlist_types? */
662 			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
663 			status = ce_send_nolock_legacy(copyeng,
664 				CE_SENDLIST_ITEM_CTXT,
665 				(qdf_dma_addr_t)item->data,
666 				item->u.nbytes, transfer_id,
667 				item->flags | CE_SEND_FLAG_GATHER,
668 				item->user_flags);
669 			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
670 		}
671 		/* provide valid context pointer for final item */
672 		item = &sl->item[i];
673 		/* TBDXXX: Support extensible sendlist_types? */
674 		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
675 		status = ce_send_nolock_legacy(copyeng, per_transfer_context,
676 					       (qdf_dma_addr_t) item->data,
677 					       item->u.nbytes,
678 					       transfer_id, item->flags,
679 					       item->user_flags);
680 		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
681 		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
682 					     QDF_NBUF_TX_PKT_CE);
683 		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
684 			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
685 			QDF_TRACE_DEFAULT_PDEV_ID,
686 			(uint8_t *)&(((qdf_nbuf_t)per_transfer_context)->data),
687 			sizeof(((qdf_nbuf_t)per_transfer_context)->data),
688 			QDF_TX));
689 	} else {
690 		/*
691 		 * Probably not worth the additional complexity to support
692 		 * partial sends with continuation or notification.  We expect
693 		 * to use large rings and small sendlists. If we can't handle
694 		 * the entire request at once, punt it back to the caller.
695 		 */
696 	}
697 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
698 
699 	return status;
700 }
701 
702 /**
703  * ce_recv_buf_enqueue_legacy() - enqueue a recv buffer into a copy engine
704  * @copyeng: copy engine handle
705  * @per_recv_context: virtual address of the nbuf
706  * @buffer: physical address of the nbuf
707  *
708  * Return: 0 if the buffer is enqueued
709  */
710 static int
711 ce_recv_buf_enqueue_legacy(struct CE_handle *copyeng,
712 			   void *per_recv_context, qdf_dma_addr_t buffer)
713 {
714 	int status;
715 	struct CE_state *CE_state = (struct CE_state *)copyeng;
716 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
717 	uint32_t ctrl_addr = CE_state->ctrl_addr;
718 	unsigned int nentries_mask = dest_ring->nentries_mask;
719 	unsigned int write_index;
720 	unsigned int sw_index;
721 	uint64_t dma_addr = buffer;
722 	struct hif_softc *scn = CE_state->scn;
723 
724 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
725 	write_index = dest_ring->write_index;
726 	sw_index = dest_ring->sw_index;
727 
728 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
729 		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
730 		return -EIO;
731 	}
732 
733 	if ((CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) ||
734 	    (ce_is_fastpath_enabled(scn) && CE_state->htt_rx_data)) {
735 		struct CE_dest_desc *dest_ring_base =
736 			(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
737 		struct CE_dest_desc *dest_desc =
738 			CE_DEST_RING_TO_DESC(dest_ring_base, write_index);
739 
740 		/* Update low 32 bit destination descriptor */
741 		dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
742 #ifdef QCA_WIFI_3_0
743 		dest_desc->buffer_addr_hi =
744 			(uint32_t)((dma_addr >> 32) & 0x1F);
745 #endif
746 		dest_desc->nbytes = 0;
747 
748 		dest_ring->per_transfer_context[write_index] =
749 			per_recv_context;
750 
751 		hif_record_ce_desc_event(scn, CE_state->id,
752 					 HIF_RX_DESC_POST,
753 					 (union ce_desc *)dest_desc,
754 					 per_recv_context,
755 					 write_index, 0);
756 
757 		/* Update Destination Ring Write Index */
758 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
759 		if (write_index != sw_index) {
760 			CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
761 			dest_ring->write_index = write_index;
762 		}
763 		status = QDF_STATUS_SUCCESS;
764 	} else
765 		status = QDF_STATUS_E_FAILURE;
766 
767 	Q_TARGET_ACCESS_END(scn);
768 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
769 	return status;
770 }
771 
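/**
 * ce_send_entries_done_nolock_legacy() - count completed send entries
 * @scn: HIF context
 * @CE_state: copy engine state
 *
 * The caller takes responsibility for any necessary locking.
 *
 * Return: number of sends completed by the hardware that software has
 *	   not yet processed
 */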
772 static unsigned int
773 ce_send_entries_done_nolock_legacy(struct hif_softc *scn,
774 				   struct CE_state *CE_state)
775 {
776 	struct CE_ring_state *src_ring = CE_state->src_ring;
777 	uint32_t ctrl_addr = CE_state->ctrl_addr;
778 	unsigned int nentries_mask = src_ring->nentries_mask;
779 	unsigned int sw_index;
780 	unsigned int read_index;
781 
782 	sw_index = src_ring->sw_index;
783 	read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
784 
785 	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
786 }
787 
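/**
 * ce_recv_entries_done_nolock_legacy() - count completed recv entries
 * @scn: HIF context
 * @CE_state: copy engine state
 *
 * The caller takes responsibility for any necessary locking.
 *
 * Return: number of receive completions posted by the hardware that
 *	   software has not yet processed
 */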
788 static unsigned int
789 ce_recv_entries_done_nolock_legacy(struct hif_softc *scn,
790 				   struct CE_state *CE_state)
791 {
792 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
793 	uint32_t ctrl_addr = CE_state->ctrl_addr;
794 	unsigned int nentries_mask = dest_ring->nentries_mask;
795 	unsigned int sw_index;
796 	unsigned int read_index;
797 
798 	sw_index = dest_ring->sw_index;
799 	read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
800 
801 	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
802 }
803 
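/*
 * Guts of ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */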
804 static int
805 ce_completed_recv_next_nolock_legacy(struct CE_state *CE_state,
806 				     void **per_CE_contextp,
807 				     void **per_transfer_contextp,
808 				     qdf_dma_addr_t *bufferp,
809 				     unsigned int *nbytesp,
810 				     unsigned int *transfer_idp,
811 				     unsigned int *flagsp)
812 {
813 	int status;
814 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
815 	unsigned int nentries_mask = dest_ring->nentries_mask;
816 	unsigned int sw_index = dest_ring->sw_index;
817 	struct hif_softc *scn = CE_state->scn;
818 	struct CE_dest_desc *dest_ring_base =
819 		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
820 	struct CE_dest_desc *dest_desc =
821 		CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
822 	int nbytes;
823 	struct CE_dest_desc dest_desc_info;
824 	/*
825 	 * By copying the dest_desc_info element to local memory, we
826 	 * avoid extra reads from non-cacheable memory.
827 	 */
828 	dest_desc_info =  *dest_desc;
829 	nbytes = dest_desc_info.nbytes;
830 	if (nbytes == 0) {
831 		/*
832 		 * This closes a relatively unusual race where the Host
833 		 * sees the updated DRRI before the update to the
834 		 * corresponding descriptor has completed. We treat this
835 		 * as a descriptor that is not yet done.
836 		 */
837 		status = QDF_STATUS_E_FAILURE;
838 		goto done;
839 	}
840 
841 	hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_COMPLETION,
842 				 (union ce_desc *)dest_desc,
843 				 dest_ring->per_transfer_context[sw_index],
844 				 sw_index, 0);
845 
846 	dest_desc->nbytes = 0;
847 
848 	/* Return data from completed destination descriptor */
849 	*bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info);
850 	*nbytesp = nbytes;
851 	*transfer_idp = dest_desc_info.meta_data;
852 	*flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;
853 
854 	if (per_CE_contextp)
855 		*per_CE_contextp = CE_state->recv_context;
856 
857 	if (per_transfer_contextp) {
858 		*per_transfer_contextp =
859 			dest_ring->per_transfer_context[sw_index];
860 	}
861 	dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */
862 
863 	/* Update sw_index */
864 	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
865 	dest_ring->sw_index = sw_index;
866 	status = QDF_STATUS_SUCCESS;
867 
868 done:
869 	return status;
870 }
871 
872 /* NB: Modeled after ce_completed_recv_next_nolock */
873 static QDF_STATUS
874 ce_revoke_recv_next_legacy(struct CE_handle *copyeng,
875 			   void **per_CE_contextp,
876 			   void **per_transfer_contextp,
877 			   qdf_dma_addr_t *bufferp)
878 {
879 	struct CE_state *CE_state;
880 	struct CE_ring_state *dest_ring;
881 	unsigned int nentries_mask;
882 	unsigned int sw_index;
883 	unsigned int write_index;
884 	QDF_STATUS status;
885 	struct hif_softc *scn;
886 
887 	CE_state = (struct CE_state *)copyeng;
888 	dest_ring = CE_state->dest_ring;
889 	if (!dest_ring)
890 		return QDF_STATUS_E_FAILURE;
891 
892 	scn = CE_state->scn;
893 	qdf_spin_lock(&CE_state->ce_index_lock);
894 	nentries_mask = dest_ring->nentries_mask;
895 	sw_index = dest_ring->sw_index;
896 	write_index = dest_ring->write_index;
897 	if (write_index != sw_index) {
898 		struct CE_dest_desc *dest_ring_base =
899 			(struct CE_dest_desc *)dest_ring->
900 			    base_addr_owner_space;
901 		struct CE_dest_desc *dest_desc =
902 			CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
903 
904 		/* Return data from completed destination descriptor */
905 		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc);
906 
907 		if (per_CE_contextp)
908 			*per_CE_contextp = CE_state->recv_context;
909 
910 		if (per_transfer_contextp) {
911 			*per_transfer_contextp =
912 				dest_ring->per_transfer_context[sw_index];
913 		}
914 		dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */
915 
916 		/* Update sw_index */
917 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
918 		dest_ring->sw_index = sw_index;
919 		status = QDF_STATUS_SUCCESS;
920 	} else {
921 		status = QDF_STATUS_E_FAILURE;
922 	}
923 	qdf_spin_unlock(&CE_state->ce_index_lock);
924 
925 	return status;
926 }
927 
928 /*
929  * Guts of ce_completed_send_next.
930  * The caller takes responsibility for any necessary locking.
931  */
932 static int
933 ce_completed_send_next_nolock_legacy(struct CE_state *CE_state,
934 				     void **per_CE_contextp,
935 				     void **per_transfer_contextp,
936 				     qdf_dma_addr_t *bufferp,
937 				     unsigned int *nbytesp,
938 				     unsigned int *transfer_idp,
939 				     unsigned int *sw_idx,
940 				     unsigned int *hw_idx,
941 				     uint32_t *toeplitz_hash_result)
942 {
943 	int status = QDF_STATUS_E_FAILURE;
944 	struct CE_ring_state *src_ring = CE_state->src_ring;
945 	uint32_t ctrl_addr = CE_state->ctrl_addr;
946 	unsigned int nentries_mask = src_ring->nentries_mask;
947 	unsigned int sw_index = src_ring->sw_index;
948 	unsigned int read_index;
949 	struct hif_softc *scn = CE_state->scn;
950 
951 	if (src_ring->hw_index == sw_index) {
952 		/*
953 		 * The SW completion index has caught up with the cached
954 		 * version of the HW completion index.
955 		 * Update the cached HW completion index to see whether
956 		 * the SW has really caught up to the HW, or if the cached
957 		 * value of the HW index has become stale.
958 		 */
959 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
960 			return QDF_STATUS_E_FAILURE;
961 		src_ring->hw_index =
962 			CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
963 		if (Q_TARGET_ACCESS_END(scn) < 0)
964 			return QDF_STATUS_E_FAILURE;
965 	}
966 	read_index = src_ring->hw_index;
967 
968 	if (sw_idx)
969 		*sw_idx = sw_index;
970 
971 	if (hw_idx)
972 		*hw_idx = read_index;
973 
974 	if ((read_index != sw_index) && (read_index != 0xffffffff)) {
975 		struct CE_src_desc *shadow_base =
976 			(struct CE_src_desc *)src_ring->shadow_base;
977 		struct CE_src_desc *shadow_src_desc =
978 			CE_SRC_RING_TO_DESC(shadow_base, sw_index);
979 #ifdef QCA_WIFI_3_0
980 		struct CE_src_desc *src_ring_base =
981 			(struct CE_src_desc *)src_ring->base_addr_owner_space;
982 		struct CE_src_desc *src_desc =
983 			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
984 #endif
985 		hif_record_ce_desc_event(scn, CE_state->id,
986 				HIF_TX_DESC_COMPLETION,
987 				(union ce_desc *)shadow_src_desc,
988 				src_ring->per_transfer_context[sw_index],
989 				sw_index, shadow_src_desc->nbytes);
990 
991 		/* Return data from completed source descriptor */
992 		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc);
993 		*nbytesp = shadow_src_desc->nbytes;
994 		*transfer_idp = shadow_src_desc->meta_data;
995 #ifdef QCA_WIFI_3_0
996 		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
997 #else
998 		*toeplitz_hash_result = 0;
999 #endif
1000 		if (per_CE_contextp)
1001 			*per_CE_contextp = CE_state->send_context;
1002 
1003 		if (per_transfer_contextp) {
1004 			*per_transfer_contextp =
1005 				src_ring->per_transfer_context[sw_index];
1006 		}
1007 		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */
1008 
1009 		/* Update sw_index */
1010 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1011 		src_ring->sw_index = sw_index;
1012 		status = QDF_STATUS_SUCCESS;
1013 	}
1014 
1015 	return status;
1016 }
1017 
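/*
 * NB: Modeled after ce_completed_send_next.
 * Reclaims the next posted (but not yet processed) send descriptor,
 * e.g. when a copy engine is being cleaned up, returning its buffer,
 * length and transfer ID to the caller.
 */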
1018 static QDF_STATUS
1019 ce_cancel_send_next_legacy(struct CE_handle *copyeng,
1020 			   void **per_CE_contextp,
1021 			   void **per_transfer_contextp,
1022 			   qdf_dma_addr_t *bufferp,
1023 			   unsigned int *nbytesp,
1024 			   unsigned int *transfer_idp,
1025 			   uint32_t *toeplitz_hash_result)
1026 {
1027 	struct CE_state *CE_state;
1028 	struct CE_ring_state *src_ring;
1029 	unsigned int nentries_mask;
1030 	unsigned int sw_index;
1031 	unsigned int write_index;
1032 	QDF_STATUS status;
1033 	struct hif_softc *scn;
1034 
1035 	CE_state = (struct CE_state *)copyeng;
1036 	src_ring = CE_state->src_ring;
1037 	if (!src_ring)
1038 		return QDF_STATUS_E_FAILURE;
1039 
1040 	scn = CE_state->scn;
1041 	qdf_spin_lock(&CE_state->ce_index_lock);
1042 	nentries_mask = src_ring->nentries_mask;
1043 	sw_index = src_ring->sw_index;
1044 	write_index = src_ring->write_index;
1045 
1046 	if (write_index != sw_index) {
1047 		struct CE_src_desc *src_ring_base =
1048 			(struct CE_src_desc *)src_ring->base_addr_owner_space;
1049 		struct CE_src_desc *src_desc =
1050 			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
1051 
1052 		/* Return data from completed source descriptor */
1053 		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc);
1054 		*nbytesp = src_desc->nbytes;
1055 		*transfer_idp = src_desc->meta_data;
1056 #ifdef QCA_WIFI_3_0
1057 		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
1058 #else
1059 		*toeplitz_hash_result = 0;
1060 #endif
1061 
1062 		if (per_CE_contextp)
1063 			*per_CE_contextp = CE_state->send_context;
1064 
1065 		if (per_transfer_contextp) {
1066 			*per_transfer_contextp =
1067 				src_ring->per_transfer_context[sw_index];
1068 		}
1069 		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */
1070 
1071 		/* Update sw_index */
1072 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1073 		src_ring->sw_index = sw_index;
1074 		status = QDF_STATUS_SUCCESS;
1075 	} else {
1076 		status = QDF_STATUS_E_FAILURE;
1077 	}
1078 	qdf_spin_unlock(&CE_state->ce_index_lock);
1079 
1080 	return status;
1081 }
1082 
1083 /*
1084  * Adjust interrupts for the copy complete handler.
1085  * If it's needed for either send or recv, then unmask
1086  * this interrupt; otherwise, mask it.
1087  *
1088  * Called with target_lock held.
1089  */
1090 static void
1091 ce_per_engine_handler_adjust_legacy(struct CE_state *CE_state,
1092 				    int disable_copy_compl_intr)
1093 {
1094 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1095 	struct hif_softc *scn = CE_state->scn;
1096 
1097 	CE_state->disable_copy_compl_intr = disable_copy_compl_intr;
1098 
1099 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1100 		return;
1101 
1102 	if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
1103 		hif_err_rl("%s: target access is not allowed", __func__);
1104 		return;
1105 	}
1106 
1107 	if ((!disable_copy_compl_intr) &&
1108 	    (CE_state->send_cb || CE_state->recv_cb))
1109 		CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1110 	else
1111 		CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1112 
1113 	if (CE_state->watermark_cb)
1114 		CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1115 	else
1116 		CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1117 	Q_TARGET_ACCESS_END(scn);
1118 }
1119 
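/**
 * ce_legacy_src_ring_setup() - program the HW registers of a source ring
 * @scn: HIF context
 * @ce_id: copy engine ID
 * @src_ring: source ring state
 * @attr: CE attributes for this copy engine
 *
 * Writes the ring base address, size, DMAX and watermarks to the CE
 * control registers and syncs the cached ring indices with the hardware.
 *
 * Return: None
 */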
1120 static void ce_legacy_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
1121 				     struct CE_ring_state *src_ring,
1122 				     struct CE_attr *attr)
1123 {
1124 	uint32_t ctrl_addr;
1125 	uint64_t dma_addr;
1126 
1127 	QDF_ASSERT(ce_id < scn->ce_count);
1128 	ctrl_addr = CE_BASE_ADDRESS(ce_id);
1129 
1130 	src_ring->hw_index =
1131 		CE_SRC_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
1132 	src_ring->sw_index = src_ring->hw_index;
1133 	src_ring->write_index =
1134 		CE_SRC_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
1135 	dma_addr = src_ring->base_addr_CE_space;
1136 	CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
1137 				  (uint32_t)(dma_addr & 0xFFFFFFFF));
1138 
1139 	/* if SR_BA_ADDRESS_HIGH register exists */
1140 	if (is_register_supported(SR_BA_ADDRESS_HIGH)) {
1141 		uint32_t tmp;
1142 
1143 		tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET(
1144 				scn, ctrl_addr);
1145 		tmp &= ~0x1F;
1146 		dma_addr = ((dma_addr >> 32) & 0x1F) | tmp;
1147 		CE_SRC_RING_BASE_ADDR_HIGH_SET(scn,
1148 					ctrl_addr, (uint32_t)dma_addr);
1149 	}
1150 	CE_SRC_RING_SZ_SET(scn, ctrl_addr, src_ring->nentries);
1151 	CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max);
1152 #ifdef BIG_ENDIAN_HOST
1153 	/* Enable source ring byte swap for big endian host */
1154 	CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
1155 #endif
1156 	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
1157 	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, src_ring->nentries);
1158 }
1159 
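/**
 * ce_legacy_dest_ring_setup() - program the HW registers of a dest ring
 * @scn: HIF context
 * @ce_id: copy engine ID
 * @dest_ring: destination ring state
 * @attr: CE attributes for this copy engine
 *
 * Writes the ring base address, size and watermarks to the CE control
 * registers and syncs the cached ring indices with the hardware.
 *
 * Return: None
 */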
1160 static void ce_legacy_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id,
1161 				struct CE_ring_state *dest_ring,
1162 				struct CE_attr *attr)
1163 {
1164 	uint32_t ctrl_addr;
1165 	uint64_t dma_addr;
1166 
1167 	QDF_ASSERT(ce_id < scn->ce_count);
1168 	ctrl_addr = CE_BASE_ADDRESS(ce_id);
1169 	dest_ring->sw_index =
1170 		CE_DEST_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
1171 	dest_ring->write_index =
1172 		CE_DEST_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
1173 	dma_addr = dest_ring->base_addr_CE_space;
1174 	CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
1175 				   (uint32_t)(dma_addr & 0xFFFFFFFF));
1176 
1177 	/* if DR_BA_ADDRESS_HIGH exists */
1178 	if (is_register_supported(DR_BA_ADDRESS_HIGH)) {
1179 		uint32_t tmp;
1180 
1181 		tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn,
1182 						      ctrl_addr);
1183 		tmp &= ~0x1F;
1184 		dma_addr = ((dma_addr >> 32) & 0x1F) | tmp;
1185 		CE_DEST_RING_BASE_ADDR_HIGH_SET(scn,
1186 				ctrl_addr, (uint32_t)dma_addr);
1187 	}
1188 
1189 	CE_DEST_RING_SZ_SET(scn, ctrl_addr, dest_ring->nentries);
1190 #ifdef BIG_ENDIAN_HOST
1191 	/* Enable Dest ring byte swap for big endian host */
1192 	CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
1193 #endif
1194 	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
1195 	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, dest_ring->nentries);
1196 }
1197 
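/**
 * ce_get_desc_size_legacy() - get the HW descriptor size for a ring type
 * @ring_type: CE_RING_SRC, CE_RING_DEST or CE_RING_STATUS
 *
 * Return: size of one hardware descriptor in bytes, 0 for unsupported
 *	   ring types (legacy CEs have no status ring)
 */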
1198 static uint32_t ce_get_desc_size_legacy(uint8_t ring_type)
1199 {
1200 	switch (ring_type) {
1201 	case CE_RING_SRC:
1202 		return sizeof(struct CE_src_desc);
1203 	case CE_RING_DEST:
1204 		return sizeof(struct CE_dest_desc);
1205 	case CE_RING_STATUS:
1206 		qdf_assert(0);
1207 		return 0;
1208 	default:
1209 		return 0;
1210 	}
1211 
1212 	return 0;
1213 }
1214 
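/**
 * ce_ring_setup_legacy() - hardware setup entry point for a CE ring
 * @scn: HIF context
 * @ring_type: CE_RING_SRC or CE_RING_DEST
 * @ce_id: copy engine ID
 * @ring: ring state to program into the hardware
 * @attr: CE attributes for this copy engine
 *
 * Return: non-negative on success, negative value if target access fails
 */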
1215 static int ce_ring_setup_legacy(struct hif_softc *scn, uint8_t ring_type,
1216 				uint32_t ce_id, struct CE_ring_state *ring,
1217 				struct CE_attr *attr)
1218 {
1219 	int status = Q_TARGET_ACCESS_BEGIN(scn);
1220 
1221 	if (status < 0)
1222 		goto out;
1223 
1224 	switch (ring_type) {
1225 	case CE_RING_SRC:
1226 		ce_legacy_src_ring_setup(scn, ce_id, ring, attr);
1227 		break;
1228 	case CE_RING_DEST:
1229 		ce_legacy_dest_ring_setup(scn, ce_id, ring, attr);
1230 		break;
1231 	case CE_RING_STATUS:
1232 	default:
1233 		qdf_assert(0);
1234 		break;
1235 	}
1236 
1237 	Q_TARGET_ACCESS_END(scn);
1238 out:
1239 	return status;
1240 }
1241 
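/**
 * ce_prepare_shadow_register_v2_cfg_legacy() - shadow register v2 config
 * @scn: HIF context
 * @shadow_config: returned shadow register configuration (set to NULL)
 * @num_shadow_registers_configured: returned count (set to 0)
 *
 * Legacy copy engines do not use a shadow register v2 configuration.
 *
 * Return: None
 */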
1242 static void ce_prepare_shadow_register_v2_cfg_legacy(struct hif_softc *scn,
1243 			    struct pld_shadow_reg_v2_cfg **shadow_config,
1244 			    int *num_shadow_registers_configured)
1245 {
1246 	*num_shadow_registers_configured = 0;
1247 	*shadow_config = NULL;
1248 }
1249 
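/**
 * ce_check_int_watermark() - check for a pending CE watermark interrupt
 * @CE_state: copy engine state
 * @flags: set to the CE_WM_FLAG_* flags derived from the interrupt status
 *
 * Return: true if a watermark interrupt is pending, false otherwise
 */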
1250 static bool ce_check_int_watermark(struct CE_state *CE_state,
1251 				   unsigned int *flags)
1252 {
1253 	uint32_t ce_int_status;
1254 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1255 	struct hif_softc *scn = CE_state->scn;
1256 
1257 	ce_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
1258 	if (ce_int_status & CE_WATERMARK_MASK) {
1259 		/* Convert HW IS bits to software flags */
1260 		*flags =
1261 			(ce_int_status & CE_WATERMARK_MASK) >>
1262 			CE_WM_SHFT;
1263 		return true;
1264 	}
1265 
1266 	return false;
1267 }
1268 
1269 struct ce_ops ce_service_legacy = {
1270 	.ce_get_desc_size = ce_get_desc_size_legacy,
1271 	.ce_ring_setup = ce_ring_setup_legacy,
1272 	.ce_sendlist_send = ce_sendlist_send_legacy,
1273 	.ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_legacy,
1274 	.ce_revoke_recv_next = ce_revoke_recv_next_legacy,
1275 	.ce_cancel_send_next = ce_cancel_send_next_legacy,
1276 	.ce_recv_buf_enqueue = ce_recv_buf_enqueue_legacy,
1277 	.ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_legacy,
1278 	.ce_send_nolock = ce_send_nolock_legacy,
1279 	.watermark_int = ce_check_int_watermark,
1280 	.ce_completed_send_next_nolock = ce_completed_send_next_nolock_legacy,
1281 	.ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_legacy,
1282 	.ce_send_entries_done_nolock = ce_send_entries_done_nolock_legacy,
1283 	.ce_prepare_shadow_register_v2_cfg =
1284 		ce_prepare_shadow_register_v2_cfg_legacy,
1285 };
1286 
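/**
 * ce_services_legacy() - return the copy engine ops table for legacy CEs
 *
 * Return: pointer to the legacy struct ce_ops
 */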
1287 struct ce_ops *ce_services_legacy(void)
1288 {
1289 	return &ce_service_legacy;
1290 }
1291 
1292 qdf_export_symbol(ce_services_legacy);
1293 
1294 void ce_service_legacy_init(void)
1295 {
1296 	ce_service_register_module(CE_SVC_LEGACY, &ce_services_legacy);
1297 }
1298