xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_service.c (revision e1d3d092f61a07549ab97f6f1f0c86554e0c642f)
1 /*
2  * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
3  *
4  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5  *
6  *
7  * Permission to use, copy, modify, and/or distribute this software for
8  * any purpose with or without fee is hereby granted, provided that the
9  * above copyright notice and this permission notice appear in all
10  * copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19  * PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*
23  * This file was originally distributed by Qualcomm Atheros, Inc.
24  * under proprietary terms before Copyright ownership was assigned
25  * to the Linux Foundation.
26  */
27 
28 #include "hif.h"
29 #include "hif_io32.h"
30 #include "ce_api.h"
31 #include "ce_main.h"
32 #include "ce_internal.h"
33 #include "ce_reg.h"
34 #include "qdf_lock.h"
35 #include "regtable.h"
36 #include "hif_main.h"
37 #include "hif_debug.h"
38 #include "hif_napi.h"
39 
40 #ifdef IPA_OFFLOAD
41 #ifdef QCA_WIFI_3_0
42 #define CE_IPA_RING_INIT(ce_desc)                       \
43 	do {                                            \
44 		ce_desc->gather = 0;                    \
45 		ce_desc->enable_11h = 0;                \
46 		ce_desc->meta_data_low = 0;             \
47 		ce_desc->packet_result_offset = 64;     \
48 		ce_desc->toeplitz_hash_enable = 0;      \
49 		ce_desc->addr_y_search_disable = 0;     \
50 		ce_desc->addr_x_search_disable = 0;     \
51 		ce_desc->misc_int_disable = 0;          \
52 		ce_desc->target_int_disable = 0;        \
53 		ce_desc->host_int_disable = 0;          \
54 		ce_desc->dest_byte_swap = 0;            \
55 		ce_desc->byte_swap = 0;                 \
56 		ce_desc->type = 2;                      \
57 		ce_desc->tx_classify = 1;               \
58 		ce_desc->buffer_addr_hi = 0;            \
59 		ce_desc->meta_data = 0;                 \
60 		ce_desc->nbytes = 128;                  \
61 	} while (0)
62 #else
63 #define CE_IPA_RING_INIT(ce_desc)                       \
64 	do {                                            \
65 		ce_desc->byte_swap = 0;                 \
66 		ce_desc->nbytes = 60;                   \
67 		ce_desc->gather = 0;                    \
68 	} while (0)
69 #endif /* QCA_WIFI_3_0 */
70 #endif /* IPA_OFFLOAD */
71 
72 #ifndef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
73 #define DATA_CE_UPDATE_SWINDEX(x, scn, addr)				\
74 	do {                                            		\
75 		x = CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, addr); 	\
76 	} while (0);
77 #else
78 #define DATA_CE_UPDATE_SWINDEX(x, scn, addr)
79 #endif
80 
81 static int war1_allow_sleep;
82 /* io32 write workaround */
83 static int hif_ce_war1;
84 
85 /**
86  * hif_ce_war_disable() - disable ce war globally
87  */
88 void hif_ce_war_disable(void)
89 {
90 	hif_ce_war1 = 0;
91 }
92 
93 /**
94  * hif_ce_war_enable() - enable ce war globally
95  */
96 void hif_ce_war_enable(void)
97 {
98 	hif_ce_war1 = 1;
99 }
100 
101 #ifdef HIF_CONFIG_SLUB_DEBUG_ON
102 
103 /**
104  * struct hif_ce_event - structure for detailing a ce event
105  * @type: what the event was
106  * @time: when it happened
107  * @descriptor: descriptor enqueued or dequeued
108  * @memory: virtual address that was used
109  * @index: location of the descriptor in the ce ring
110  */
111 struct hif_ce_desc_event {
112 	uint16_t index;
113 	enum hif_ce_event_type type;
114 	uint64_t time;
115 	union ce_desc descriptor;
116 	void *memory;
117 };
118 
119 /* max history to record per copy engine */
120 #define HIF_CE_HISTORY_MAX 512
121 qdf_atomic_t hif_ce_desc_history_index[CE_COUNT_MAX];
122 struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX];
123 
124 
125 /**
126  * get_next_record_index() - get the next record index
127  * @table_index: atomic index variable to increment
128  * @array_size: array size of the circular buffer
129  *
130  * Increment the atomic index and reserve the value.
131  * Takes care of buffer wrap.
132  * Guaranteed to be thread safe as long as fewer than array_size contexts
133  * try to access the array.  If there are more than array_size contexts
134  * trying to access the array, full locking of the recording process would
135  * be needed to have sane logging.
136  */
137 static int get_next_record_index(qdf_atomic_t *table_index, int array_size)
138 {
139 	int record_index = qdf_atomic_inc_return(table_index);
140 
141 	if (record_index == array_size)
142 		qdf_atomic_sub(array_size, table_index);
143 
144 	while (record_index >= array_size)
145 		record_index -= array_size;
146 	return record_index;
147 }
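/*
 * Worked example (illustrative, not part of the driver logic): with
 * array_size == HIF_CE_HISTORY_MAX (512), the caller whose
 * qdf_atomic_inc_return() yields exactly 512 subtracts 512 from the
 * shared counter and folds its local value back to slot 0; callers that
 * raced ahead to 513, 514, ... skip the subtraction but still fold into
 * slots 1, 2, ..., so each concurrent caller reserves a distinct slot
 * without taking a lock.
 */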
148 
149 /**
150  * hif_record_ce_desc_event() - record ce descriptor events
151  * @scn: hif_softc
152  * @ce_id: which ce is the event occurring on
153  * @type: what happened
154  * @descriptor: pointer to the descriptor posted/completed
155  * @memory: virtual address of buffer related to the descriptor
156  * @index: index that the descriptor was/will be at.
157  */
158 void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
159 				enum hif_ce_event_type type,
160 				union ce_desc *descriptor,
161 				void *memory, int index)
162 {
163 	int record_index = get_next_record_index(
164 			&hif_ce_desc_history_index[ce_id], HIF_CE_HISTORY_MAX);
165 
166 	struct hif_ce_desc_event *event =
167 		&hif_ce_desc_history[ce_id][record_index];
168 	event->type = type;
169 	event->time = qdf_get_log_timestamp();
170 
171 	if (descriptor != NULL)
172 		event->descriptor = *descriptor;
173 	else
174 		memset(&event->descriptor, 0, sizeof(union ce_desc));
175 	event->memory = memory;
176 	event->index = index;
177 }
178 
179 /**
180  * ce_init_ce_desc_event_log() - initialize the ce event log
181  * @ce_id: copy engine id for which we are initializing the log
182  * @size: size of array to dedicate
183  *
184  * Currently the passed size is ignored in favor of a precompiled value.
185  */
186 void ce_init_ce_desc_event_log(int ce_id, int size)
187 {
188 	qdf_atomic_init(&hif_ce_desc_history_index[ce_id]);
189 }
190 #else
191 void hif_record_ce_desc_event(struct hif_softc *scn,
192 		int ce_id, enum hif_ce_event_type type,
193 		union ce_desc *descriptor, void *memory,
194 		int index)
195 {
196 }
197 
198 inline void ce_init_ce_desc_event_log(int ce_id, int size)
199 {
200 }
201 #endif
202 
203 #ifdef NAPI_YIELD_BUDGET_BASED
204 bool hif_ce_service_should_yield(struct hif_softc *scn,
205 				 struct CE_state *ce_state)
206 {
207 	bool yield =  hif_max_num_receives_reached(scn, ce_state->receive_count);
208 	return yield;
209 }
210 #else
211 /**
212  * hif_ce_service_should_yield() - return true if the service is hogging the cpu
213  * @scn: hif context
214  * @ce_state: context of the copy engine being serviced
215  *
216  * Return: true if the service should yield
217  */
218 bool hif_ce_service_should_yield(struct hif_softc *scn,
219 				 struct CE_state *ce_state)
220 {
221 	bool yield, time_limit_reached, rxpkt_thresh_reached = 0;
222 
223 	time_limit_reached =
224 		sched_clock() > ce_state->ce_service_yield_time ? 1 : 0;
225 
226 	if (!time_limit_reached)
227 		rxpkt_thresh_reached = hif_max_num_receives_reached
228 					(scn, ce_state->receive_count);
229 
230 	yield =  time_limit_reached || rxpkt_thresh_reached;
231 
232 	if (yield)
233 		hif_napi_update_yield_stats(ce_state,
234 					    time_limit_reached,
235 					    rxpkt_thresh_reached);
236 	return yield;
237 }
238 #endif
239 /*
240  * Support for Copy Engine hardware, which is mainly used for
241  * communication between Host and Target over a PCIe interconnect.
242  */
243 
244 /*
245  * A single CopyEngine (CE) comprises two "rings":
246  *   a source ring
247  *   a destination ring
248  *
249  * Each ring consists of a number of descriptors which specify
250  * an address, length, and meta-data.
251  *
252  * Typically, one side of the PCIe interconnect (Host or Target)
253  * controls one ring and the other side controls the other ring.
254  * The source side chooses when to initiate a transfer and it
255  * chooses what to send (buffer address, length). The destination
256  * side keeps a supply of "anonymous receive buffers" available and
257  * it handles incoming data as it arrives (when the destination
258  * receives an interrupt).
259  *
260  * The sender may send a simple buffer (address/length) or it may
261  * send a small list of buffers.  When a small list is sent, hardware
262  * "gathers" these and they end up in a single destination buffer
263  * with a single interrupt.
264  *
265  * There are several "contexts" managed by this layer -- more, it
266  * may seem, than should be needed. These are provided mainly for
267  * maximum flexibility and especially to facilitate a simpler HIF
268  * implementation. There are per-CopyEngine recv, send, and watermark
269  * contexts. These are supplied by the caller when a recv, send,
270  * or watermark handler is established and they are echoed back to
271  * the caller when the respective callbacks are invoked. There is
272  * also a per-transfer context supplied by the caller when a buffer
273  * (or sendlist) is sent and when a buffer is enqueued for recv.
274  * These per-transfer contexts are echoed back to the caller when
275  * the buffer is sent/received.
276  * Target TX hash result: toeplitz_hash_result
277  */
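/*
 * Illustrative usage sketch (hypothetical caller code; copyeng_handle,
 * my_ctxt, paddr, nbytes and transfer_id are assumptions of the example,
 * only meant to show how the per-transfer context is echoed back):
 *
 *	struct my_ctxt *ctxt = ...;	// caller-owned cookie
 *	qdf_dma_addr_t paddr = ...;	// DMA address of the payload
 *
 *	// Post the buffer; ctxt is stored alongside the descriptor.
 *	ce_send(copyeng_handle, ctxt, paddr, nbytes, transfer_id, 0, 0);
 *
 *	// Later, the same ctxt pointer comes back either through the
 *	// registered send-complete callback or via
 *	// ce_completed_send_next(), letting the caller free or recycle
 *	// the buffer it posted.
 */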
278 
279 /*
280  * Guts of ce_send, used by both ce_send and ce_sendlist_send.
281  * The caller takes responsibility for any needed locking.
282  */
283 
284 static
285 void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
286 				   u32 ctrl_addr, unsigned int write_index)
287 {
288 	if (hif_ce_war1) {
289 		void __iomem *indicator_addr;
290 
291 		indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;
292 
293 		if (!war1_allow_sleep
294 		    && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
295 			hif_write32_mb(indicator_addr,
296 				      (CDC_WAR_MAGIC_STR | write_index));
297 		} else {
298 			unsigned long irq_flags;
299 
300 			local_irq_save(irq_flags);
301 			hif_write32_mb(indicator_addr, 1);
302 
303 			/*
304 			 * PCIE write waits for ACK in IPQ8K, there is no
305 			 * need to read back value.
306 			 */
307 			(void)hif_read32_mb(indicator_addr);
308 			(void)hif_read32_mb(indicator_addr); /* conservative */
309 
310 			CE_SRC_RING_WRITE_IDX_SET(scn,
311 						  ctrl_addr, write_index);
312 
313 			hif_write32_mb(indicator_addr, 0);
314 			local_irq_restore(irq_flags);
315 		}
316 	} else {
317 		CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
318 	}
319 }
320 
321 #ifdef HIF_CONFIG_SLUB_DEBUG_ON
322 /**
323  * ce_validate_nbytes() - validate nbytes for slub builds on tx descriptors
324  * @nbytes: nbytes value being written into a send descriptor
325  * @ce_state: context of the copy engine
326  *
327  * nbytes should be non-zero and less than max configured for the copy engine
328  *
329  * Return: none
330  */
331 static void ce_validate_nbytes(uint32_t nbytes, struct CE_state *ce_state)
332 {
333 	if (nbytes <= 0 || nbytes > ce_state->src_sz_max)
334 		QDF_BUG(0);
335 }
336 #else
337 static void ce_validate_nbytes(uint32_t nbytes, struct CE_state *ce_state)
338 {
339 }
340 #endif
341 
342 static int
343 ce_send_nolock_legacy(struct CE_handle *copyeng,
344 			   void *per_transfer_context,
345 			   qdf_dma_addr_t buffer,
346 			   uint32_t nbytes,
347 			   uint32_t transfer_id,
348 			   uint32_t flags,
349 			   uint32_t user_flags)
350 {
351 	int status;
352 	struct CE_state *CE_state = (struct CE_state *)copyeng;
353 	struct CE_ring_state *src_ring = CE_state->src_ring;
354 	uint32_t ctrl_addr = CE_state->ctrl_addr;
355 	unsigned int nentries_mask = src_ring->nentries_mask;
356 	unsigned int sw_index = src_ring->sw_index;
357 	unsigned int write_index = src_ring->write_index;
358 	uint64_t dma_addr = buffer;
359 	struct hif_softc *scn = CE_state->scn;
360 
361 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
362 		return QDF_STATUS_E_FAILURE;
363 	if (unlikely(CE_RING_DELTA(nentries_mask,
364 				write_index, sw_index - 1) <= 0)) {
365 		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
366 		Q_TARGET_ACCESS_END(scn);
367 		return QDF_STATUS_E_FAILURE;
368 	}
369 	{
370 		enum hif_ce_event_type event_type;
371 		struct CE_src_desc *src_ring_base =
372 			(struct CE_src_desc *)src_ring->base_addr_owner_space;
373 		struct CE_src_desc *shadow_base =
374 			(struct CE_src_desc *)src_ring->shadow_base;
375 		struct CE_src_desc *src_desc =
376 			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
377 		struct CE_src_desc *shadow_src_desc =
378 			CE_SRC_RING_TO_DESC(shadow_base, write_index);
379 
380 		/* Update low 32 bits source descriptor address */
381 		shadow_src_desc->buffer_addr =
382 			(uint32_t)(dma_addr & 0xFFFFFFFF);
383 #ifdef QCA_WIFI_3_0
384 		shadow_src_desc->buffer_addr_hi =
385 			(uint32_t)((dma_addr >> 32) & 0x1F);
386 		user_flags |= shadow_src_desc->buffer_addr_hi;
387 		memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
388 			   sizeof(uint32_t));
389 #endif
390 		shadow_src_desc->target_int_disable = 0;
391 		shadow_src_desc->host_int_disable = 0;
392 
393 		shadow_src_desc->meta_data = transfer_id;
394 
395 		/*
396 		 * Set the swap bit if:
397 		 * typical sends on this CE are swapped (host is big-endian)
398 		 * and this send doesn't disable the swapping
399 		 * (data is not bytestream)
400 		 */
401 		shadow_src_desc->byte_swap =
402 			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
403 			 != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
404 		shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
405 		shadow_src_desc->nbytes = nbytes;
406 		ce_validate_nbytes(nbytes, CE_state);
407 
408 		*src_desc = *shadow_src_desc;
409 
410 		src_ring->per_transfer_context[write_index] =
411 			per_transfer_context;
412 
413 		/* Update Source Ring Write Index */
414 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
415 
416 		/* WORKAROUND */
417 		if (shadow_src_desc->gather) {
418 			event_type = HIF_TX_GATHER_DESC_POST;
419 		} else if (qdf_unlikely(CE_state->state != CE_RUNNING)) {
420 			event_type = HIF_TX_DESC_SOFTWARE_POST;
421 			CE_state->state = CE_PENDING;
422 		} else {
423 			event_type = HIF_TX_DESC_POST;
424 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
425 						      write_index);
426 		}
427 
428 		/* src_ring->write_index hasn't been updated even though
429 		 * the register has already been written to.
430 		 */
431 		hif_record_ce_desc_event(scn, CE_state->id, event_type,
432 			(union ce_desc *) shadow_src_desc, per_transfer_context,
433 			src_ring->write_index);
434 
435 		src_ring->write_index = write_index;
436 		status = QDF_STATUS_SUCCESS;
437 	}
438 	Q_TARGET_ACCESS_END(scn);
439 	return status;
440 }
441 
442 int
443 ce_send(struct CE_handle *copyeng,
444 		void *per_transfer_context,
445 		qdf_dma_addr_t buffer,
446 		uint32_t nbytes,
447 		uint32_t transfer_id,
448 		uint32_t flags,
449 		uint32_t user_flag)
450 {
451 	struct CE_state *CE_state = (struct CE_state *)copyeng;
452 	int status;
453 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
454 
455 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
456 	status = hif_state->ce_services->ce_send_nolock(copyeng,
457 			per_transfer_context, buffer, nbytes,
458 			transfer_id, flags, user_flag);
459 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
460 
461 	return status;
462 }
463 
464 unsigned int ce_sendlist_sizeof(void)
465 {
466 	return sizeof(struct ce_sendlist);
467 }
468 
469 void ce_sendlist_init(struct ce_sendlist *sendlist)
470 {
471 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
472 
473 	sl->num_items = 0;
474 }
475 
476 int
477 ce_sendlist_buf_add(struct ce_sendlist *sendlist,
478 					qdf_dma_addr_t buffer,
479 					uint32_t nbytes,
480 					uint32_t flags,
481 					uint32_t user_flags)
482 {
483 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
484 	unsigned int num_items = sl->num_items;
485 	struct ce_sendlist_item *item;
486 
487 	if (num_items >= CE_SENDLIST_ITEMS_MAX) {
488 		QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
489 		return QDF_STATUS_E_RESOURCES;
490 	}
491 
492 	item = &sl->item[num_items];
493 	item->send_type = CE_SIMPLE_BUFFER_TYPE;
494 	item->data = buffer;
495 	item->u.nbytes = nbytes;
496 	item->flags = flags;
497 	item->user_flags = user_flags;
498 	sl->num_items = num_items + 1;
499 	return QDF_STATUS_SUCCESS;
500 }
501 
502 int
503 ce_sendlist_send(struct CE_handle *copyeng,
504 		 void *per_transfer_context,
505 		 struct ce_sendlist *sendlist, unsigned int transfer_id)
506 {
507 	struct CE_state *CE_state = (struct CE_state *)copyeng;
508 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
509 
510 	return hif_state->ce_services->ce_sendlist_send(copyeng,
511 			per_transfer_context, sendlist, transfer_id);
512 }
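/*
 * Illustrative sendlist usage sketch (hypothetical caller code; assumes
 * hdr_paddr/data_paddr are DMA addresses the caller already mapped and
 * copyeng_handle/per_xfer_ctxt are caller-supplied):
 *
 *	struct ce_sendlist sl;
 *
 *	ce_sendlist_init(&sl);
 *	ce_sendlist_buf_add(&sl, hdr_paddr, hdr_len, 0, 0);
 *	ce_sendlist_buf_add(&sl, data_paddr, data_len, 0, 0);
 *	if (ce_sendlist_send(copyeng_handle, per_xfer_ctxt, &sl,
 *			     transfer_id) != QDF_STATUS_SUCCESS) {
 *		// Ring could not take the whole list; the caller keeps
 *		// ownership of the buffers and may retry later.
 *	}
 *
 * All items except the last are posted with CE_SEND_FLAG_GATHER set, so
 * the hardware treats them as a single gather transfer that lands in one
 * destination buffer.
 */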
513 
514 static int
515 ce_sendlist_send_legacy(struct CE_handle *copyeng,
516 		 void *per_transfer_context,
517 		 struct ce_sendlist *sendlist, unsigned int transfer_id)
518 {
519 	int status = -ENOMEM;
520 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
521 	struct CE_state *CE_state = (struct CE_state *)copyeng;
522 	struct CE_ring_state *src_ring = CE_state->src_ring;
523 	unsigned int nentries_mask = src_ring->nentries_mask;
524 	unsigned int num_items = sl->num_items;
525 	unsigned int sw_index;
526 	unsigned int write_index;
527 	struct hif_softc *scn = CE_state->scn;
528 
529 	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));
530 
531 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
532 
533 	if (CE_state->scn->fastpath_mode_on && CE_state->htt_tx_data &&
534 	    Q_TARGET_ACCESS_BEGIN(scn) == 0) {
535 		src_ring->sw_index = CE_SRC_RING_READ_IDX_GET_FROM_DDR(
536 					       scn, CE_state->ctrl_addr);
537 		Q_TARGET_ACCESS_END(scn);
538 	}
539 
540 	sw_index = src_ring->sw_index;
541 	write_index = src_ring->write_index;
542 
543 	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >=
544 	    num_items) {
545 		struct ce_sendlist_item *item;
546 		int i;
547 
548 		/* handle all but the last item uniformly */
549 		for (i = 0; i < num_items - 1; i++) {
550 			item = &sl->item[i];
551 			/* TBDXXX: Support extensible sendlist_types? */
552 			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
553 			status = ce_send_nolock_legacy(copyeng,
554 				CE_SENDLIST_ITEM_CTXT,
555 				(qdf_dma_addr_t) item->data,
556 				item->u.nbytes, transfer_id,
557 				item->flags | CE_SEND_FLAG_GATHER,
558 				item->user_flags);
559 			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
560 		}
561 		/* provide valid context pointer for final item */
562 		item = &sl->item[i];
563 		/* TBDXXX: Support extensible sendlist_types? */
564 		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
565 		status = ce_send_nolock_legacy(copyeng, per_transfer_context,
566 					(qdf_dma_addr_t) item->data,
567 					item->u.nbytes,
568 					transfer_id, item->flags,
569 					item->user_flags);
570 		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
571 		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
572 					QDF_NBUF_TX_PKT_CE);
573 		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
574 			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
575 			(uint8_t *)&(((qdf_nbuf_t)per_transfer_context)->data),
576 			sizeof(((qdf_nbuf_t)per_transfer_context)->data),
577 			QDF_TX));
578 	} else {
579 		/*
580 		 * Probably not worth the additional complexity to support
581 		 * partial sends with continuation or notification.  We expect
582 		 * to use large rings and small sendlists. If we can't handle
583 		 * the entire request at once, punt it back to the caller.
584 		 */
585 	}
586 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
587 
588 	return status;
589 }
590 
591 #ifdef WLAN_FEATURE_FASTPATH
592 #ifdef QCA_WIFI_3_0
593 static inline void
594 ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
595 		      uint64_t dma_addr,
596 		      uint32_t user_flags)
597 {
598 	shadow_src_desc->buffer_addr_hi =
599 			(uint32_t)((dma_addr >> 32) & 0x1F);
600 	user_flags |= shadow_src_desc->buffer_addr_hi;
601 	memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
602 			sizeof(uint32_t));
603 }
604 #else
605 static inline void
606 ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
607 		      uint64_t dma_addr,
608 		      uint32_t user_flags)
609 {
610 }
611 #endif
612 
613 #define SLOTS_PER_DATAPATH_TX 2
614 
615 /**
616  * ce_send_fast() - CE layer Tx buffer posting function
617  * @copyeng: copy engine handle
618  * @msdu: msdu to be sent
619  * @transfer_id: transfer_id
620  * @download_len: packet download length
621  *
622  * Assumption: Called with an array of MSDUs
623  * Function:
624  * For each msdu in the array
625  * 1. Check no. of available entries
626  * 2. Create src ring entries (allocated in consistent memory)
627  * 3. Write index to h/w
628  *
629  * Return: No. of packets that could be sent
630  */
631 int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
632 		 unsigned int transfer_id, uint32_t download_len)
633 {
634 	struct CE_state *ce_state = (struct CE_state *)copyeng;
635 	struct hif_softc *scn = ce_state->scn;
636 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
637 	struct CE_ring_state *src_ring = ce_state->src_ring;
638 	u_int32_t ctrl_addr = ce_state->ctrl_addr;
639 	unsigned int nentries_mask = src_ring->nentries_mask;
640 	unsigned int write_index;
641 	unsigned int sw_index;
642 	unsigned int frag_len;
643 	uint64_t dma_addr;
644 	uint32_t user_flags;
645 	enum hif_ce_event_type type = FAST_TX_SOFTWARE_INDEX_UPDATE;
646 	bool ok_to_send = true;
647 
648 	qdf_spin_lock_bh(&ce_state->ce_index_lock);
649 
650 	/*
651 	 * Request runtime PM resume if it has already suspended and make
652 	 * sure there is no PCIe link access.
653 	 */
654 	if (hif_pm_runtime_get(hif_hdl) != 0)
655 		ok_to_send = false;
656 
657 	if (ok_to_send) {
658 		Q_TARGET_ACCESS_BEGIN(scn);
659 		DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
660 	}
661 
662 	write_index = src_ring->write_index;
663 	sw_index = src_ring->sw_index;
664 
665 	hif_record_ce_desc_event(scn, ce_state->id,
666 				FAST_TX_SOFTWARE_INDEX_UPDATE,
667 				NULL, NULL, sw_index);
668 
669 	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1)
670 			 < SLOTS_PER_DATAPATH_TX)) {
671 		HIF_ERROR("Source ring full, required %d, available %d",
672 		      SLOTS_PER_DATAPATH_TX,
673 		      CE_RING_DELTA(nentries_mask, write_index, sw_index - 1));
674 		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
675 		if (ok_to_send)
676 			Q_TARGET_ACCESS_END(scn);
677 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
678 		return 0;
679 	}
680 
681 	{
682 		struct CE_src_desc *src_ring_base =
683 			(struct CE_src_desc *)src_ring->base_addr_owner_space;
684 		struct CE_src_desc *shadow_base =
685 			(struct CE_src_desc *)src_ring->shadow_base;
686 		struct CE_src_desc *src_desc =
687 			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
688 		struct CE_src_desc *shadow_src_desc =
689 			CE_SRC_RING_TO_DESC(shadow_base, write_index);
690 
691 		hif_pm_runtime_get_noresume(hif_hdl);
692 
693 		/*
694 		 * First fill out the ring descriptor for the HTC HTT frame
695 		 * header. These are uncached writes. Should we use a local
696 		 * structure instead?
697 		 */
698 		/* HTT/HTC header can be passed as an argument */
699 		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 0);
700 		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
701 							  0xFFFFFFFF);
702 		user_flags = qdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
703 		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
704 		shadow_src_desc->meta_data = transfer_id;
705 		shadow_src_desc->nbytes = qdf_nbuf_get_frag_len(msdu, 0);
706 		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
707 		download_len -= shadow_src_desc->nbytes;
708 		/*
709 		 * HTC HTT header is a word stream, so byte swap if CE byte
710 		 * swap enabled
711 		 */
712 		shadow_src_desc->byte_swap = ((ce_state->attr_flags &
713 					CE_ATTR_BYTE_SWAP_DATA) != 0);
714 		/* For the first one, it still does not need to write */
715 		shadow_src_desc->gather = 1;
716 		*src_desc = *shadow_src_desc;
717 		/* By default we could initialize the transfer context to this
718 		 * value
719 		 */
720 		src_ring->per_transfer_context[write_index] =
721 			CE_SENDLIST_ITEM_CTXT;
722 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
723 
724 		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
725 		shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
726 		/*
727 		 * Now fill out the ring descriptor for the actual data
728 		 * packet
729 		 */
730 		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 1);
731 		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
732 							  0xFFFFFFFF);
733 		/*
734 		 * Clear packet offset for all but the first CE desc.
735 		 */
736 		user_flags &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
737 		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
738 		shadow_src_desc->meta_data = transfer_id;
739 
740 		/* get actual packet length */
741 		frag_len = qdf_nbuf_get_frag_len(msdu, 1);
742 
743 		/* download remaining bytes of payload */
744 		shadow_src_desc->nbytes =  download_len;
745 		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
746 		if (shadow_src_desc->nbytes > frag_len)
747 			shadow_src_desc->nbytes = frag_len;
748 
749 		/*  Data packet is a byte stream, so disable byte swap */
750 		shadow_src_desc->byte_swap = 0;
751 		/* For the last one, gather is not set */
752 		shadow_src_desc->gather    = 0;
753 		*src_desc = *shadow_src_desc;
754 		src_ring->per_transfer_context[write_index] = msdu;
755 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
756 
757 		DPTRACE(qdf_dp_trace(msdu,
758 			QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
759 			qdf_nbuf_data_addr(msdu),
760 			sizeof(qdf_nbuf_data(msdu)), QDF_TX));
761 	}
762 
763 	src_ring->write_index = write_index;
764 
765 	if (ok_to_send) {
766 		if (qdf_likely(ce_state->state == CE_RUNNING)) {
767 			type = FAST_TX_WRITE_INDEX_UPDATE;
768 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
769 				write_index);
770 			Q_TARGET_ACCESS_END(scn);
771 		} else
772 			ce_state->state = CE_PENDING;
773 		hif_pm_runtime_put(hif_hdl);
774 	}
775 
776 	hif_record_ce_desc_event(scn, ce_state->id, type,
777 				 NULL, NULL, write_index);
778 
779 	qdf_spin_unlock_bh(&ce_state->ce_index_lock);
780 
781 	/* sent 1 packet */
782 	return 1;
783 }
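/*
 * Note for callers (illustrative; ce_tx_hdl and msdu are hypothetical
 * names): ce_send_fast() returns the number of packets actually queued,
 * i.e. 1 on success and 0 when the source ring lacks the two required
 * slots, so a caller would typically do:
 *
 *	if (ce_send_fast(ce_tx_hdl, msdu, transfer_id, download_len) == 0) {
 *		// msdu was not consumed; requeue or drop it here
 *	}
 */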
784 
785 /**
786  * ce_is_fastpath_enabled() - returns true if fastpath mode is enabled
787  * @scn: Handle to HIF context
788  *
789  * Return: true if fastpath is enabled else false.
790  */
791 static bool ce_is_fastpath_enabled(struct hif_softc *scn)
792 {
793 	return scn->fastpath_mode_on;
794 }
795 
796 /**
797  * ce_is_fastpath_handler_registered() - check whether a fastpath handler
798  * has been registered for this (datapath) copy engine
799  * @ce_state: handle to copy engine
800  *
801  * Return: true if fastpath handler is registered for datapath CE.
802  */
803 static bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
804 {
805 	if (ce_state->fastpath_handler)
806 		return true;
807 	else
808 		return false;
809 }
810 
811 
812 #else
813 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
814 {
815 	return false;
816 }
817 
818 static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
819 {
820 	return false;
821 }
822 #endif /* WLAN_FEATURE_FASTPATH */
823 
824 #ifndef AH_NEED_TX_DATA_SWAP
825 #define AH_NEED_TX_DATA_SWAP 0
826 #endif
827 
828 /**
829  * ce_batch_send() - sends a bunch of msdus at once
830  * @ce_tx_hdl : pointer to CE handle
831  * @msdu : list of msdus to be sent
832  * @transfer_id : transfer id
833  * @len : Downloaded length
834  * @sendhead : sendhead
835  *
836  * Assumption: Called with an array of MSDUs
837  * Function:
838  * For each msdu in the array
839  * 1. Send each msdu
840  * 2. Increment write index accordingly.
841  *
842  * Return: list of msdus not sent
843  */
844 qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,  qdf_nbuf_t msdu,
845 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
846 {
847 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
848 	struct hif_softc *scn = ce_state->scn;
849 	struct CE_ring_state *src_ring = ce_state->src_ring;
850 	u_int32_t ctrl_addr = ce_state->ctrl_addr;
851 	/*  A_target_id_t targid = TARGID(scn);*/
852 
853 	uint32_t nentries_mask = src_ring->nentries_mask;
854 	uint32_t sw_index, write_index;
855 
856 	struct CE_src_desc *src_desc_base =
857 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
858 	uint32_t *src_desc;
859 
860 	struct CE_src_desc lsrc_desc = {0};
861 	int deltacount = 0;
862 	qdf_nbuf_t freelist = NULL, hfreelist = NULL, tempnext;
863 
864 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
865 	sw_index = src_ring->sw_index;
866 	write_index = src_ring->write_index;
867 
868 	deltacount = CE_RING_DELTA(nentries_mask, write_index, sw_index-1);
869 
870 	while (msdu) {
871 		tempnext = qdf_nbuf_next(msdu);
872 
873 		if (deltacount < 2) {
874 			if (sendhead)
875 				return msdu;
876 			HIF_ERROR("%s: Out of descriptors", __func__);
877 			src_ring->write_index = write_index;
878 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
879 					write_index);
880 
881 			sw_index = src_ring->sw_index;
882 			write_index = src_ring->write_index;
883 
884 			deltacount = CE_RING_DELTA(nentries_mask, write_index,
885 					sw_index-1);
886 			if (freelist == NULL) {
887 				freelist = msdu;
888 				hfreelist = msdu;
889 			} else {
890 				qdf_nbuf_set_next(freelist, msdu);
891 				freelist = msdu;
892 			}
893 			qdf_nbuf_set_next(msdu, NULL);
894 			msdu = tempnext;
895 			continue;
896 		}
897 
898 		src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base,
899 				write_index);
900 
901 		src_desc[0]   = qdf_nbuf_get_frag_paddr(msdu, 0);
902 
903 		lsrc_desc.meta_data = transfer_id;
904 		if (len  > msdu->len)
905 			len =  msdu->len;
906 		lsrc_desc.nbytes = len;
907 		/*  Data packet is a byte stream, so disable byte swap */
908 		lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
909 		lsrc_desc.gather    = 0; /*For the last one, gather is not set*/
910 
911 		src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
912 
913 
914 		src_ring->per_transfer_context[write_index] = msdu;
915 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
916 
917 		if (sendhead)
918 			break;
919 		qdf_nbuf_set_next(msdu, NULL);
920 		msdu = tempnext;
921 
922 	}
923 
924 
925 	src_ring->write_index = write_index;
926 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
927 
928 	return hfreelist;
929 }
930 
931 /**
932  * ce_update_tx_ring() - Advance sw index.
933  * @ce_tx_hdl : pointer to CE handle
934  * @num_htt_cmpls : htt completions received.
935  *
936  * Function:
937  * Increment the value of sw index of src ring
938  * according to number of htt completions
939  * received.
940  *
941  * Return: void
942  */
943 #ifdef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
944 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
945 {
946 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
947 	struct CE_ring_state *src_ring = ce_state->src_ring;
948 	uint32_t nentries_mask = src_ring->nentries_mask;
949 	/*
950 	 * Advance the s/w index:
951 	 * This effectively simulates completing the CE ring descriptors
952 	 */
953 	src_ring->sw_index =
954 		CE_RING_IDX_ADD(nentries_mask, src_ring->sw_index,
955 				num_htt_cmpls);
956 }
957 #else
958 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
959 {}
960 #endif
961 
962 /**
963  * ce_send_single() - send a single msdu
964  * @ce_tx_hdl : pointer to CE handle
965  * @msdu : msdu to be sent
966  * @transfer_id : transfer id
967  * @len : Downloaded length
968  *
969  * Function:
970  * 1. Send one msdu
971  * 2. Increment write index of src ring accordingly.
972  *
973  * Return: int: CE sent status
974  */
975 int ce_send_single(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
976 		uint32_t transfer_id, u_int32_t len)
977 {
978 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
979 	struct hif_softc *scn = ce_state->scn;
980 	struct CE_ring_state *src_ring = ce_state->src_ring;
981 	uint32_t ctrl_addr = ce_state->ctrl_addr;
982 	/*A_target_id_t targid = TARGID(scn);*/
983 
984 	uint32_t nentries_mask = src_ring->nentries_mask;
985 	uint32_t sw_index, write_index;
986 
987 	struct CE_src_desc *src_desc_base =
988 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
989 	uint32_t *src_desc;
990 
991 	struct CE_src_desc lsrc_desc = {0};
992 
993 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
994 	sw_index = src_ring->sw_index;
995 	write_index = src_ring->write_index;
996 
997 	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index,
998 					sw_index-1) < 1)) {
999 		/* ol_tx_stats_inc_ring_error(sc->scn->pdev_txrx_handle, 1); */
1000 		HIF_ERROR("%s: ce send fail %d %d %d", __func__, nentries_mask,
1001 			  write_index, sw_index);
1002 		return 1;
1003 	}
1004 
1005 	src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base, write_index);
1006 
1007 	src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0);
1008 
1009 	lsrc_desc.meta_data = transfer_id;
1010 	lsrc_desc.nbytes = len;
1011 	/*  Data packet is a byte stream, so disable byte swap */
1012 	lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
1013 	lsrc_desc.gather    = 0; /* For the last one, gather is not set */
1014 
1015 	src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
1016 
1017 
1018 	src_ring->per_transfer_context[write_index] = msdu;
1019 	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
1020 
1021 	src_ring->write_index = write_index;
1022 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
1023 
1024 	return QDF_STATUS_SUCCESS;
1025 }
1026 
1027 /**
1028  * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine
1029  * @copyeng: copy engine handle
1030  * @per_recv_context: virtual address of the nbuf
1031  * @buffer: physical address of the nbuf
1032  *
1033  * Return: 0 if the buffer is enqueued
1034  */
1035 int
1036 ce_recv_buf_enqueue(struct CE_handle *copyeng,
1037 		    void *per_recv_context, qdf_dma_addr_t buffer)
1038 {
1039 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1040 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
1041 
1042 	return hif_state->ce_services->ce_recv_buf_enqueue(copyeng,
1043 			per_recv_context, buffer);
1044 }
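/*
 * Illustrative rx-buffer replenish sketch (hypothetical caller; assumes
 * the nbuf has already been allocated and DMA-mapped for reception, and
 * copyeng_handle is a valid CE handle):
 *
 *	qdf_nbuf_t nbuf = ...;
 *	qdf_dma_addr_t paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
 *
 *	if (ce_recv_buf_enqueue(copyeng_handle, nbuf, paddr) !=
 *	    QDF_STATUS_SUCCESS) {
 *		// Destination ring is full (or target access failed);
 *		// unmap and free the nbuf, or keep it for a later retry.
 *	}
 *
 * The nbuf pointer passed as per_recv_context is echoed back by
 * ce_completed_recv_next() once the target has filled the buffer.
 */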
1045 
1046 /**
1047  * ce_recv_buf_enqueue_legacy() - enqueue a recv buffer into a copy engine
1048  * @copyeng: copy engine handle
1049  * @per_recv_context: virtual address of the nbuf
1050  * @buffer: physical address of the nbuf
1051  *
1052  * Return: 0 if the buffer is enqueued
1053  */
1054 static int
1055 ce_recv_buf_enqueue_legacy(struct CE_handle *copyeng,
1056 		    void *per_recv_context, qdf_dma_addr_t buffer)
1057 {
1058 	int status;
1059 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1060 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
1061 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1062 	unsigned int nentries_mask = dest_ring->nentries_mask;
1063 	unsigned int write_index;
1064 	unsigned int sw_index;
1065 	uint64_t dma_addr = buffer;
1066 	struct hif_softc *scn = CE_state->scn;
1067 
1068 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
1069 	write_index = dest_ring->write_index;
1070 	sw_index = dest_ring->sw_index;
1071 
1072 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
1073 		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
1074 		return -EIO;
1075 	}
1076 
1077 	if ((CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) ||
1078 	    (ce_is_fastpath_enabled(scn) && CE_state->htt_rx_data)) {
1079 		struct CE_dest_desc *dest_ring_base =
1080 			(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
1081 		struct CE_dest_desc *dest_desc =
1082 			CE_DEST_RING_TO_DESC(dest_ring_base, write_index);
1083 
1084 		/* Update low 32 bit destination descriptor */
1085 		dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
1086 #ifdef QCA_WIFI_3_0
1087 		dest_desc->buffer_addr_hi =
1088 			(uint32_t)((dma_addr >> 32) & 0x1F);
1089 #endif
1090 		dest_desc->nbytes = 0;
1091 
1092 		dest_ring->per_transfer_context[write_index] =
1093 			per_recv_context;
1094 
1095 		hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_POST,
1096 				(union ce_desc *) dest_desc, per_recv_context,
1097 				write_index);
1098 
1099 		/* Update Destination Ring Write Index */
1100 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
1101 		if (write_index != sw_index) {
1102 			CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
1103 			dest_ring->write_index = write_index;
1104 		}
1105 		status = QDF_STATUS_SUCCESS;
1106 	} else
1107 		status = QDF_STATUS_E_FAILURE;
1108 
1109 	Q_TARGET_ACCESS_END(scn);
1110 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
1111 	return status;
1112 }
1113 
1114 void
1115 ce_send_watermarks_set(struct CE_handle *copyeng,
1116 		       unsigned int low_alert_nentries,
1117 		       unsigned int high_alert_nentries)
1118 {
1119 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1120 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1121 	struct hif_softc *scn = CE_state->scn;
1122 
1123 	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
1124 	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
1125 }
1126 
1127 void
1128 ce_recv_watermarks_set(struct CE_handle *copyeng,
1129 		       unsigned int low_alert_nentries,
1130 		       unsigned int high_alert_nentries)
1131 {
1132 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1133 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1134 	struct hif_softc *scn = CE_state->scn;
1135 
1136 	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
1137 				low_alert_nentries);
1138 	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
1139 				high_alert_nentries);
1140 }
1141 
1142 unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
1143 {
1144 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1145 	struct CE_ring_state *src_ring = CE_state->src_ring;
1146 	unsigned int nentries_mask = src_ring->nentries_mask;
1147 	unsigned int sw_index;
1148 	unsigned int write_index;
1149 
1150 	qdf_spin_lock(&CE_state->ce_index_lock);
1151 	sw_index = src_ring->sw_index;
1152 	write_index = src_ring->write_index;
1153 	qdf_spin_unlock(&CE_state->ce_index_lock);
1154 
1155 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
1156 }
1157 
1158 unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
1159 {
1160 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1161 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
1162 	unsigned int nentries_mask = dest_ring->nentries_mask;
1163 	unsigned int sw_index;
1164 	unsigned int write_index;
1165 
1166 	qdf_spin_lock(&CE_state->ce_index_lock);
1167 	sw_index = dest_ring->sw_index;
1168 	write_index = dest_ring->write_index;
1169 	qdf_spin_unlock(&CE_state->ce_index_lock);
1170 
1171 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
1172 }
1173 
1174 /*
1175  * Guts of ce_send_entries_done.
1176  * The caller takes responsibility for any necessary locking.
1177  */
1178 static unsigned int
1179 ce_send_entries_done_nolock_legacy(struct hif_softc *scn,
1180 			    struct CE_state *CE_state)
1181 {
1182 	struct CE_ring_state *src_ring = CE_state->src_ring;
1183 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1184 	unsigned int nentries_mask = src_ring->nentries_mask;
1185 	unsigned int sw_index;
1186 	unsigned int read_index;
1187 
1188 	sw_index = src_ring->sw_index;
1189 	read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
1190 
1191 	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
1192 }
1193 
1194 unsigned int ce_send_entries_done(struct CE_handle *copyeng)
1195 {
1196 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1197 	unsigned int nentries;
1198 	struct hif_softc *scn = CE_state->scn;
1199 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1200 
1201 	qdf_spin_lock(&CE_state->ce_index_lock);
1202 	nentries = hif_state->ce_services->ce_send_entries_done_nolock(
1203 						CE_state->scn, CE_state);
1204 	qdf_spin_unlock(&CE_state->ce_index_lock);
1205 
1206 	return nentries;
1207 }
1208 
1209 /*
1210  * Guts of ce_recv_entries_done.
1211  * The caller takes responsibility for any necessary locking.
1212  */
1213 static unsigned int
1214 ce_recv_entries_done_nolock_legacy(struct hif_softc *scn,
1215 			    struct CE_state *CE_state)
1216 {
1217 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
1218 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1219 	unsigned int nentries_mask = dest_ring->nentries_mask;
1220 	unsigned int sw_index;
1221 	unsigned int read_index;
1222 
1223 	sw_index = dest_ring->sw_index;
1224 	read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
1225 
1226 	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
1227 }
1228 
1229 unsigned int ce_recv_entries_done(struct CE_handle *copyeng)
1230 {
1231 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1232 	unsigned int nentries;
1233 	struct hif_softc *scn = CE_state->scn;
1234 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1235 
1236 	qdf_spin_lock(&CE_state->ce_index_lock);
1237 	nentries = hif_state->ce_services->ce_recv_entries_done_nolock(
1238 						CE_state->scn, CE_state);
1239 	qdf_spin_unlock(&CE_state->ce_index_lock);
1240 
1241 	return nentries;
1242 }
1243 
1244 /*
1245  * Guts of ce_completed_recv_next.
1246  * The caller takes responsibility for any necessary locking.
1247  */
1248 static int
1249 ce_completed_recv_next_nolock_legacy(struct CE_state *CE_state,
1250 			      void **per_CE_contextp,
1251 			      void **per_transfer_contextp,
1252 			      qdf_dma_addr_t *bufferp,
1253 			      unsigned int *nbytesp,
1254 			      unsigned int *transfer_idp,
1255 			      unsigned int *flagsp)
1256 {
1257 	int status;
1258 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
1259 	unsigned int nentries_mask = dest_ring->nentries_mask;
1260 	unsigned int sw_index = dest_ring->sw_index;
1261 	struct hif_softc *scn = CE_state->scn;
1262 	struct CE_dest_desc *dest_ring_base =
1263 		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
1264 	struct CE_dest_desc *dest_desc =
1265 		CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
1266 	int nbytes;
1267 	struct CE_dest_desc dest_desc_info;
1268 	/*
1269 	 * By copying the dest_desc_info element to local memory, we
1270 	 * avoid an extra memory read from non-cacheable memory.
1271 	 */
1272 	dest_desc_info =  *dest_desc;
1273 	nbytes = dest_desc_info.nbytes;
1274 	if (nbytes == 0) {
1275 		/*
1276 		 * This closes a relatively unusual race where the Host
1277 		 * sees the updated DRRI before the update to the
1278 		 * corresponding descriptor has completed. We treat this
1279 		 * as a descriptor that is not yet done.
1280 		 */
1281 		status = QDF_STATUS_E_FAILURE;
1282 		goto done;
1283 	}
1284 
1285 	hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_COMPLETION,
1286 			(union ce_desc *) dest_desc,
1287 			dest_ring->per_transfer_context[sw_index],
1288 			sw_index);
1289 
1290 	dest_desc->nbytes = 0;
1291 
1292 	/* Return data from completed destination descriptor */
1293 	*bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info);
1294 	*nbytesp = nbytes;
1295 	*transfer_idp = dest_desc_info.meta_data;
1296 	*flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;
1297 
1298 	if (per_CE_contextp)
1299 		*per_CE_contextp = CE_state->recv_context;
1300 
1301 	if (per_transfer_contextp) {
1302 		*per_transfer_contextp =
1303 			dest_ring->per_transfer_context[sw_index];
1304 	}
1305 	dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */
1306 
1307 	/* Update sw_index */
1308 	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1309 	dest_ring->sw_index = sw_index;
1310 	status = QDF_STATUS_SUCCESS;
1311 
1312 done:
1313 	return status;
1314 }
1315 
1316 int
1317 ce_completed_recv_next(struct CE_handle *copyeng,
1318 		       void **per_CE_contextp,
1319 		       void **per_transfer_contextp,
1320 		       qdf_dma_addr_t *bufferp,
1321 		       unsigned int *nbytesp,
1322 		       unsigned int *transfer_idp, unsigned int *flagsp)
1323 {
1324 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1325 	int status;
1326 	struct hif_softc *scn = CE_state->scn;
1327 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1328 	struct ce_ops *ce_services;
1329 
1330 	ce_services = hif_state->ce_services;
1331 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
1332 	status =
1333 		ce_services->ce_completed_recv_next_nolock(CE_state,
1334 				per_CE_contextp, per_transfer_contextp, bufferp,
1335 					      nbytesp, transfer_idp, flagsp);
1336 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
1337 
1338 	return status;
1339 }
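/*
 * Illustrative completion drain sketch (hypothetical polling caller;
 * copyeng_handle is assumed to be a valid CE handle):
 *
 *	void *ce_ctxt, *xfer_ctxt;
 *	qdf_dma_addr_t buf;
 *	unsigned int nbytes, id, flags;
 *
 *	while (ce_completed_recv_next(copyeng_handle, &ce_ctxt, &xfer_ctxt,
 *				      &buf, &nbytes, &id, &flags) ==
 *	       QDF_STATUS_SUCCESS) {
 *		// xfer_ctxt is the nbuf posted via ce_recv_buf_enqueue();
 *		// hand it to the upper layer and post a replacement buffer.
 *	}
 */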
1340 
1341 QDF_STATUS
1342 ce_revoke_recv_next(struct CE_handle *copyeng,
1343 		    void **per_CE_contextp,
1344 		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
1345 {
1346 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1347 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
1348 
1349 	return hif_state->ce_services->ce_revoke_recv_next(copyeng,
1350 			per_CE_contextp, per_transfer_contextp, bufferp);
1351 }
1352 /* NB: Modeled after ce_completed_recv_next_nolock */
1353 static QDF_STATUS
1354 ce_revoke_recv_next_legacy(struct CE_handle *copyeng,
1355 		    void **per_CE_contextp,
1356 		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
1357 {
1358 	struct CE_state *CE_state;
1359 	struct CE_ring_state *dest_ring;
1360 	unsigned int nentries_mask;
1361 	unsigned int sw_index;
1362 	unsigned int write_index;
1363 	QDF_STATUS status;
1364 	struct hif_softc *scn;
1365 
1366 	CE_state = (struct CE_state *)copyeng;
1367 	dest_ring = CE_state->dest_ring;
1368 	if (!dest_ring)
1369 		return QDF_STATUS_E_FAILURE;
1370 
1371 	scn = CE_state->scn;
1372 	qdf_spin_lock(&CE_state->ce_index_lock);
1373 	nentries_mask = dest_ring->nentries_mask;
1374 	sw_index = dest_ring->sw_index;
1375 	write_index = dest_ring->write_index;
1376 	if (write_index != sw_index) {
1377 		struct CE_dest_desc *dest_ring_base =
1378 			(struct CE_dest_desc *)dest_ring->
1379 			    base_addr_owner_space;
1380 		struct CE_dest_desc *dest_desc =
1381 			CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
1382 
1383 		/* Return data from completed destination descriptor */
1384 		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc);
1385 
1386 		if (per_CE_contextp)
1387 			*per_CE_contextp = CE_state->recv_context;
1388 
1389 		if (per_transfer_contextp) {
1390 			*per_transfer_contextp =
1391 				dest_ring->per_transfer_context[sw_index];
1392 		}
1393 		dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */
1394 
1395 		/* Update sw_index */
1396 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1397 		dest_ring->sw_index = sw_index;
1398 		status = QDF_STATUS_SUCCESS;
1399 	} else {
1400 		status = QDF_STATUS_E_FAILURE;
1401 	}
1402 	qdf_spin_unlock(&CE_state->ce_index_lock);
1403 
1404 	return status;
1405 }
1406 
1407 /*
1408  * Guts of ce_completed_send_next.
1409  * The caller takes responsibility for any necessary locking.
1410  */
1411 static int
1412 ce_completed_send_next_nolock_legacy(struct CE_state *CE_state,
1413 			      void **per_CE_contextp,
1414 			      void **per_transfer_contextp,
1415 			      qdf_dma_addr_t *bufferp,
1416 			      unsigned int *nbytesp,
1417 			      unsigned int *transfer_idp,
1418 			      unsigned int *sw_idx,
1419 			      unsigned int *hw_idx,
1420 			      uint32_t *toeplitz_hash_result)
1421 {
1422 	int status = QDF_STATUS_E_FAILURE;
1423 	struct CE_ring_state *src_ring = CE_state->src_ring;
1424 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1425 	unsigned int nentries_mask = src_ring->nentries_mask;
1426 	unsigned int sw_index = src_ring->sw_index;
1427 	unsigned int read_index;
1428 	struct hif_softc *scn = CE_state->scn;
1429 
1430 	if (src_ring->hw_index == sw_index) {
1431 		/*
1432 		 * The SW completion index has caught up with the cached
1433 		 * version of the HW completion index.
1434 		 * Update the cached HW completion index to see whether
1435 		 * the SW has really caught up to the HW, or if the cached
1436 		 * value of the HW index has become stale.
1437 		 */
1438 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1439 			return QDF_STATUS_E_FAILURE;
1440 		src_ring->hw_index =
1441 			CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
1442 		if (Q_TARGET_ACCESS_END(scn) < 0)
1443 			return QDF_STATUS_E_FAILURE;
1444 	}
1445 	read_index = src_ring->hw_index;
1446 
1447 	if (sw_idx)
1448 		*sw_idx = sw_index;
1449 
1450 	if (hw_idx)
1451 		*hw_idx = read_index;
1452 
1453 	if ((read_index != sw_index) && (read_index != 0xffffffff)) {
1454 		struct CE_src_desc *shadow_base =
1455 			(struct CE_src_desc *)src_ring->shadow_base;
1456 		struct CE_src_desc *shadow_src_desc =
1457 			CE_SRC_RING_TO_DESC(shadow_base, sw_index);
1458 #ifdef QCA_WIFI_3_0
1459 		struct CE_src_desc *src_ring_base =
1460 			(struct CE_src_desc *)src_ring->base_addr_owner_space;
1461 		struct CE_src_desc *src_desc =
1462 			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
1463 #endif
1464 		hif_record_ce_desc_event(scn, CE_state->id,
1465 				HIF_TX_DESC_COMPLETION,
1466 				(union ce_desc *) shadow_src_desc,
1467 				src_ring->per_transfer_context[sw_index],
1468 				sw_index);
1469 
1470 		/* Return data from completed source descriptor */
1471 		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc);
1472 		*nbytesp = shadow_src_desc->nbytes;
1473 		*transfer_idp = shadow_src_desc->meta_data;
1474 #ifdef QCA_WIFI_3_0
1475 		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
1476 #else
1477 		*toeplitz_hash_result = 0;
1478 #endif
1479 		if (per_CE_contextp)
1480 			*per_CE_contextp = CE_state->send_context;
1481 
1482 		if (per_transfer_contextp) {
1483 			*per_transfer_contextp =
1484 				src_ring->per_transfer_context[sw_index];
1485 		}
1486 		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */
1487 
1488 		/* Update sw_index */
1489 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1490 		src_ring->sw_index = sw_index;
1491 		status = QDF_STATUS_SUCCESS;
1492 	}
1493 
1494 	return status;
1495 }
1496 
1497 QDF_STATUS
1498 ce_cancel_send_next(struct CE_handle *copyeng,
1499 		void **per_CE_contextp,
1500 		void **per_transfer_contextp,
1501 		qdf_dma_addr_t *bufferp,
1502 		unsigned int *nbytesp,
1503 		unsigned int *transfer_idp,
1504 		uint32_t *toeplitz_hash_result)
1505 {
1506 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1507 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
1508 
1509 	return hif_state->ce_services->ce_cancel_send_next
1510 		(copyeng, per_CE_contextp, per_transfer_contextp,
1511 		 bufferp, nbytesp, transfer_idp, toeplitz_hash_result);
1512 }
1513 
1514 /* NB: Modeled after ce_completed_send_next */
1515 static QDF_STATUS
1516 ce_cancel_send_next_legacy(struct CE_handle *copyeng,
1517 		void **per_CE_contextp,
1518 		void **per_transfer_contextp,
1519 		qdf_dma_addr_t *bufferp,
1520 		unsigned int *nbytesp,
1521 		unsigned int *transfer_idp,
1522 		uint32_t *toeplitz_hash_result)
1523 {
1524 	struct CE_state *CE_state;
1525 	struct CE_ring_state *src_ring;
1526 	unsigned int nentries_mask;
1527 	unsigned int sw_index;
1528 	unsigned int write_index;
1529 	QDF_STATUS status;
1530 	struct hif_softc *scn;
1531 
1532 	CE_state = (struct CE_state *)copyeng;
1533 	src_ring = CE_state->src_ring;
1534 	if (!src_ring)
1535 		return QDF_STATUS_E_FAILURE;
1536 
1537 	scn = CE_state->scn;
1538 	qdf_spin_lock(&CE_state->ce_index_lock);
1539 	nentries_mask = src_ring->nentries_mask;
1540 	sw_index = src_ring->sw_index;
1541 	write_index = src_ring->write_index;
1542 
1543 	if (write_index != sw_index) {
1544 		struct CE_src_desc *src_ring_base =
1545 			(struct CE_src_desc *)src_ring->base_addr_owner_space;
1546 		struct CE_src_desc *src_desc =
1547 			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
1548 
1549 		/* Return data from completed source descriptor */
1550 		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc);
1551 		*nbytesp = src_desc->nbytes;
1552 		*transfer_idp = src_desc->meta_data;
1553 #ifdef QCA_WIFI_3_0
1554 		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
1555 #else
1556 		*toeplitz_hash_result = 0;
1557 #endif
1558 
1559 		if (per_CE_contextp)
1560 			*per_CE_contextp = CE_state->send_context;
1561 
1562 		if (per_transfer_contextp) {
1563 			*per_transfer_contextp =
1564 				src_ring->per_transfer_context[sw_index];
1565 		}
1566 		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */
1567 
1568 		/* Update sw_index */
1569 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1570 		src_ring->sw_index = sw_index;
1571 		status = QDF_STATUS_SUCCESS;
1572 	} else {
1573 		status = QDF_STATUS_E_FAILURE;
1574 	}
1575 	qdf_spin_unlock(&CE_state->ce_index_lock);
1576 
1577 	return status;
1578 }
1579 
1580 /* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
1581 #define CE_WM_SHFT 1
1582 
1583 int
1584 ce_completed_send_next(struct CE_handle *copyeng,
1585 		       void **per_CE_contextp,
1586 		       void **per_transfer_contextp,
1587 		       qdf_dma_addr_t *bufferp,
1588 		       unsigned int *nbytesp,
1589 		       unsigned int *transfer_idp,
1590 		       unsigned int *sw_idx,
1591 		       unsigned int *hw_idx,
1592 		       unsigned int *toeplitz_hash_result)
1593 {
1594 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1595 	struct hif_softc *scn = CE_state->scn;
1596 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1597 	struct ce_ops *ce_services;
1598 	int status;
1599 
1600 	ce_services = hif_state->ce_services;
1601 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
1602 	status =
1603 		ce_services->ce_completed_send_next_nolock(CE_state,
1604 					per_CE_contextp, per_transfer_contextp,
1605 					bufferp, nbytesp, transfer_idp, sw_idx,
1606 					      hw_idx, toeplitz_hash_result);
1607 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
1608 
1609 	return status;
1610 }
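/*
 * Illustrative Tx-completion reap sketch (hypothetical caller;
 * copyeng_handle is assumed to be a valid CE handle):
 *
 *	void *ce_ctxt, *xfer_ctxt;
 *	qdf_dma_addr_t buf;
 *	unsigned int nbytes, id, sw_idx, hw_idx, hash;
 *
 *	while (ce_completed_send_next(copyeng_handle, &ce_ctxt, &xfer_ctxt,
 *				      &buf, &nbytes, &id, &sw_idx, &hw_idx,
 *				      &hash) == QDF_STATUS_SUCCESS) {
 *		// xfer_ctxt is the per-transfer context given to ce_send();
 *		// the buffer can now be unmapped and freed.
 *	}
 */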
1611 
1612 #ifdef ATH_11AC_TXCOMPACT
1613 /* CE engine descriptor reap
1614  * Similar to ce_per_engine_service; the only difference is that
1615  * ce_per_engine_service does both receive processing and reaping of
1616  * completed descriptors, while this function only handles reaping of
1617  * Tx complete descriptors. It is called from the threshold reap poll
1618  * routine hif_send_complete_check, so it should not contain any
1619  * receive functionality.
1620  */
1621 
1622 void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
1623 {
1624 	void *CE_context;
1625 	void *transfer_context;
1626 	qdf_dma_addr_t buf;
1627 	unsigned int nbytes;
1628 	unsigned int id;
1629 	unsigned int sw_idx, hw_idx;
1630 	uint32_t toeplitz_hash_result;
1631 	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
1632 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1633 
1634 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1635 		return;
1636 
1637 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
1638 			NULL, NULL, 0);
1639 
1640 	/* Since this function is called from both user context and
1641 	 * tasklet context, the spinlock has to disable the bottom halves.
1642 	 * This fix assumes that the ATH_11AC_TXCOMPACT flag is always
1643 	 * enabled in TX polling mode. If this is not the case, more
1644 	 * bottom-half spinlock changes are needed. Due to data path
1645 	 * performance concerns, after internal discussion we've decided
1646 	 * to make the minimum change, i.e., only address the issue that
1647 	 * occurred in this function. The possible downside of this minimum
1648 	 * change is that, in the future, if some other function is also
1649 	 * opened up for use from user context, those cases will need to be
1650 	 * addressed by changing spin_lock to spin_lock_bh as well.
1651 	 */
1652 
1653 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
1654 
1655 	if (CE_state->send_cb) {
1656 		{
1657 			struct ce_ops *ce_services = hif_state->ce_services;
1658 			/* Pop completed send buffers and call the
1659 			 * registered send callback for each
1660 			 */
1661 			while (ce_services->ce_completed_send_next_nolock
1662 				 (CE_state, &CE_context,
1663 				  &transfer_context, &buf,
1664 				  &nbytes, &id, &sw_idx, &hw_idx,
1665 				  &toeplitz_hash_result) ==
1666 				  QDF_STATUS_SUCCESS) {
1667 				if (ce_id != CE_HTT_H2T_MSG) {
1668 					qdf_spin_unlock_bh(
1669 						&CE_state->ce_index_lock);
1670 					CE_state->send_cb(
1671 						(struct CE_handle *)
1672 						CE_state, CE_context,
1673 						transfer_context, buf,
1674 						nbytes, id, sw_idx, hw_idx,
1675 						toeplitz_hash_result);
1676 					qdf_spin_lock_bh(
1677 						&CE_state->ce_index_lock);
1678 				} else {
1679 					struct HIF_CE_pipe_info *pipe_info =
1680 						(struct HIF_CE_pipe_info *)
1681 						CE_context;
1682 
1683 					qdf_spin_lock_bh(&pipe_info->
1684 						 completion_freeq_lock);
1685 					pipe_info->num_sends_allowed++;
1686 					qdf_spin_unlock_bh(&pipe_info->
1687 						   completion_freeq_lock);
1688 				}
1689 			}
1690 		}
1691 	}
1692 
1693 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
1694 
1695 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
1696 			NULL, NULL, 0);
1697 	Q_TARGET_ACCESS_END(scn);
1698 }
1699 
1700 #endif /*ATH_11AC_TXCOMPACT */
1701 
1702 /*
1703  * Number of times to check for any pending Tx/Rx completion on
1704  * a copy engine; this count should be big enough. Once we hit
1705  * this threshold we stop checking for Tx/Rx completions within the
1706  * same interrupt handling pass. Note that this threshold is only
1707  * used for Rx interrupt processing; it can be applied to Tx as well
1708  * if we suspect an infinite loop in checking for pending Tx completions.
1709  */
1710 #define CE_TXRX_COMP_CHECK_THRESHOLD 20
1711 
1712 #ifdef WLAN_FEATURE_FASTPATH
1713 /**
1714  * ce_fastpath_rx_handle() - Updates write_index and calls fastpath msg handler
1715  * @ce_state: handle to copy engine state
1716  * @cmpl_msdus: Rx msdus
1717  * @num_cmpls: number of Rx msdus
1718  * @ctrl_addr: CE control address
1719  *
1720  * Return: None
1721  */
1722 static void ce_fastpath_rx_handle(struct CE_state *ce_state,
1723 				  qdf_nbuf_t *cmpl_msdus, uint32_t num_cmpls,
1724 				  uint32_t ctrl_addr)
1725 {
1726 	struct hif_softc *scn = ce_state->scn;
1727 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
1728 	uint32_t nentries_mask = dest_ring->nentries_mask;
1729 	uint32_t write_index;
1730 
1731 	qdf_spin_unlock(&ce_state->ce_index_lock);
1732 	(ce_state->fastpath_handler)(ce_state->context, cmpl_msdus, num_cmpls);
1733 	qdf_spin_lock(&ce_state->ce_index_lock);
1734 
1735 	/* Update Destination Ring Write Index */
1736 	write_index = dest_ring->write_index;
1737 	write_index = CE_RING_IDX_ADD(nentries_mask, write_index, num_cmpls);
1738 
1739 	hif_record_ce_desc_event(scn, ce_state->id,
1740 			FAST_RX_WRITE_INDEX_UPDATE,
1741 			NULL, NULL, write_index);
1742 
1743 	CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
1744 	dest_ring->write_index = write_index;
1745 }
1746 
1747 #define MSG_FLUSH_NUM 32
1748 /**
1749  * ce_per_engine_service_fast() - CE handler routine to service fastpath msgs
1750  * @scn: HIF context
1751  * @ce_id: copy engine ID
1752  * 1) Go through the CE ring and find the completions
1753  * 2) For valid completions, retrieve the context (nbuf) from per_transfer_context[]
1754  * 3) Unmap the buffer and accumulate it in an array
1755  * 4) Call the message handler when the array is full or when exiting the handler
1756  *
1757  * Return: void
1758  */
1759 
1760 static void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
1761 {
1762 	struct CE_state *ce_state = scn->ce_id_to_state[ce_id];
1763 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1764 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
1765 	struct CE_dest_desc *dest_ring_base =
1766 		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
1767 
1768 	uint32_t nentries_mask = dest_ring->nentries_mask;
1769 	uint32_t sw_index = dest_ring->sw_index;
1770 	uint32_t nbytes;
1771 	qdf_nbuf_t nbuf;
1772 	dma_addr_t paddr;
1773 	struct CE_dest_desc *dest_desc;
1774 	qdf_nbuf_t cmpl_msdus[MSG_FLUSH_NUM];
1775 	uint32_t ctrl_addr = ce_state->ctrl_addr;
1776 	uint32_t nbuf_cmpl_idx = 0;
1777 	unsigned int more_comp_cnt = 0;
1778 
1779 more_data:
1780 	for (;;) {
1781 
1782 		dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base,
1783 						 sw_index);
1784 
1785 		/*
1786 		 * The following 2 reads are from non-cached memory
1787 		 */
1788 		nbytes = dest_desc->nbytes;
1789 
1790 		/* If completion is invalid, break */
1791 		if (qdf_unlikely(nbytes == 0))
1792 			break;
1793 
1794 
1795 		/*
1796 		 * Build the nbuf list from valid completions
1797 		 */
1798 		nbuf = dest_ring->per_transfer_context[sw_index];
1799 
1800 		/*
1801 		 * No lock is needed here, since this is the only thread
1802 		 * that accesses the sw_index
1803 		 */
1804 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1805 
1806 		/*
1807 		 * CAREFUL : Uncached write, but still less expensive,
1808 		 * since most modern caches use "write-combining" to
1809 		 * flush multiple cache-writes all at once.
1810 		 */
1811 		dest_desc->nbytes = 0;
1812 
1813 		/*
1814 		 * Per our understanding this is not required on our
1815 		 * platform, since we are doing the same cache
1816 		 * invalidation operation on the same buffer twice in
1817 		 * succession, without any modification to this buffer
1818 		 * by the CPU in between.
1819 		 * However, this code with 2 syncs in succession has
1820 		 * been undergoing some testing at a customer site,
1821 		 * and has shown no problems so far. We would like to
1822 		 * confirm with the customer that this sync is really
1823 		 * not required before we remove this line
1824 		 * completely.
1825 		 */
1826 		paddr = QDF_NBUF_CB_PADDR(nbuf);
1827 
1828 		qdf_mem_dma_sync_single_for_cpu(scn->qdf_dev, paddr,
1829 				(skb_end_pointer(nbuf) - (nbuf)->data),
1830 				DMA_FROM_DEVICE);
1831 
1832 		qdf_nbuf_put_tail(nbuf, nbytes);
1833 
1834 		qdf_assert_always(nbuf->data != NULL);
1835 
1836 		QDF_NBUF_CB_RX_CTX_ID(nbuf) =
1837 				hif_get_rx_ctx_id(ce_state->id, hif_hdl);
1838 		cmpl_msdus[nbuf_cmpl_idx++] = nbuf;
1839 
1840 		/*
1841 		 * We are not posting the buffers back; instead the
1842 		 * buffers are reused.
1843 		 */
1844 		if (nbuf_cmpl_idx == MSG_FLUSH_NUM) {
1845 			hif_record_ce_desc_event(scn, ce_state->id,
1846 						 FAST_RX_SOFTWARE_INDEX_UPDATE,
1847 						 NULL, NULL, sw_index);
1848 			dest_ring->sw_index = sw_index;
1849 			ce_fastpath_rx_handle(ce_state, cmpl_msdus,
1850 					      MSG_FLUSH_NUM, ctrl_addr);
1851 
1852 			ce_state->receive_count += MSG_FLUSH_NUM;
1853 			if (qdf_unlikely(hif_ce_service_should_yield(
1854 						scn, ce_state))) {
1855 				ce_state->force_break = 1;
1856 				qdf_atomic_set(&ce_state->rx_pending, 1);
1857 				return;
1858 			}
1859 
1860 			nbuf_cmpl_idx = 0;
1861 			more_comp_cnt = 0;
1862 		}
1863 	}
1864 
1865 	hif_record_ce_desc_event(scn, ce_state->id,
1866 				 FAST_RX_SOFTWARE_INDEX_UPDATE,
1867 				 NULL, NULL, sw_index);
1868 
1869 	dest_ring->sw_index = sw_index;
1870 
1871 	/*
1872 	 * If there are not enough completions to fill the array,
1873 	 * just call the message handler here
1874 	 */
1875 	if (nbuf_cmpl_idx) {
1876 		ce_fastpath_rx_handle(ce_state, cmpl_msdus,
1877 				      nbuf_cmpl_idx, ctrl_addr);
1878 
1879 		ce_state->receive_count += nbuf_cmpl_idx;
1880 		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
1881 			ce_state->force_break = 1;
1882 			qdf_atomic_set(&ce_state->rx_pending, 1);
1883 			return;
1884 		}
1885 
1886 		/* check for more packets after upper layer processing */
1887 		nbuf_cmpl_idx = 0;
1888 		more_comp_cnt = 0;
1889 		goto more_data;
1890 	}
1891 	qdf_atomic_set(&ce_state->rx_pending, 0);
1892 	CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1893 				   HOST_IS_COPY_COMPLETE_MASK);
1894 
1895 	if (ce_recv_entries_done_nolock_legacy(scn, ce_state)) {
1896 		if (more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1897 			goto more_data;
1898 		} else {
1899 			HIF_ERROR("%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1900 				  __func__, nentries_mask,
1901 				  ce_state->dest_ring->sw_index,
1902 				  CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr));
1903 		}
1904 	}
1905 #ifdef NAPI_YIELD_BUDGET_BASED
1906 	/* Caution: before modifying this code, refer to hif_napi_poll to see
1907 	 * how napi_complete gets called, and make the necessary changes.
1908 	 * Force break has to be done till WIN disables the interrupt at source */
1909 	ce_state->force_break = 1;
1910 #endif
1911 }
1912 
1913 #else
1914 static void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
1915 {
1916 }
1917 #endif /* WLAN_FEATURE_FASTPATH */
1918 
1919 /* Maximum amount of time in nanoseconds that the CE per-engine service
1920  * may run before it should yield. Roughly one jiffy.
1921  */
1922 #define CE_PER_ENGINE_SERVICE_MAX_YIELD_TIME_NS (10 * 1000 * 1000)
1923 
1924 /*
1925  * Guts of interrupt handler for per-engine interrupts on a particular CE.
1926  *
1927  * Invokes registered callbacks for recv_complete,
1928  * send_complete, and watermarks.
1929  *
1930  * Returns: number of messages processed
1931  */
1932 int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
1933 {
1934 	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1935 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1936 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1937 	void *CE_context;
1938 	void *transfer_context;
1939 	qdf_dma_addr_t buf;
1940 	unsigned int nbytes;
1941 	unsigned int id;
1942 	unsigned int flags;
1943 	unsigned int more_comp_cnt = 0;
1944 	unsigned int more_snd_comp_cnt = 0;
1945 	unsigned int sw_idx, hw_idx;
1946 	uint32_t toeplitz_hash_result;
1947 	uint32_t mode = hif_get_conparam(scn);
1948 
1949 	if (hif_is_nss_wifi_enabled(scn) && (CE_state->htt_rx_data))
1950 		return CE_state->receive_count;
1951 
1952 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
1953 		HIF_ERROR("[premature rc=0]");
1954 		return 0; /* no work done */
1955 	}
1956 
1957 	/* Clear force_break flag and re-initialize receive_count to 0 */
1958 	CE_state->receive_count = 0;
1959 	CE_state->force_break = 0;
1960 	CE_state->ce_service_yield_time =
1961 		sched_clock() +
1962 		(unsigned long long)CE_PER_ENGINE_SERVICE_MAX_YIELD_TIME_NS;
1963 
1964 
1965 	qdf_spin_lock(&CE_state->ce_index_lock);
1966 	/*
1967 	 * The check below makes sure the CE we are handling is a datapath CE
1968 	 * and that fastpath is enabled.
1969 	 */
1970 	if (ce_is_fastpath_handler_registered(CE_state)) {
1971 		/* For datapath only Rx CEs */
1972 		ce_per_engine_service_fast(scn, CE_id);
1973 		goto unlock_end;
1974 	}
1975 
1976 more_completions:
1977 	if (CE_state->recv_cb) {
1978 
1979 		/* Pop completed recv buffers and call
1980 		 * the registered recv callback for each
1981 		 */
1982 		while (hif_state->ce_services->ce_completed_recv_next_nolock
1983 				(CE_state, &CE_context, &transfer_context,
1984 				&buf, &nbytes, &id, &flags) ==
1985 				QDF_STATUS_SUCCESS) {
1986 			qdf_spin_unlock(&CE_state->ce_index_lock);
1987 			CE_state->recv_cb((struct CE_handle *)CE_state,
1988 					  CE_context, transfer_context, buf,
1989 					  nbytes, id, flags);
1990 
1991 			/*
1992 			 * EV #112693 -
1993 			 * [Peregrine][ES1][WB342][Win8x86][Performance]
1994 			 * BSoD_0x133 occurred in VHT80 UDP_DL.
1995 			 * Break out of the DPC by force if the number of
1996 			 * loops in hif_pci_ce_recv_data reaches
1997 			 * MAX_NUM_OF_RECEIVES, to avoid spending too much
1998 			 * time in the DPC for each interrupt. Schedule
1999 			 * another DPC to avoid data loss if a force-break
2000 			 * action was taken. This currently applies to
2001 			 * Windows OS only; Linux/Mac OS can adopt it for
2002 			 * their platforms if necessary.
2003 			 */
2004 
2005 			/* Break out of receive processing by
2006 			 * force if force_break is set
2007 			 */
2008 			if (qdf_unlikely(CE_state->force_break)) {
2009 				qdf_atomic_set(&CE_state->rx_pending, 1);
2010 				goto target_access_end;
2011 			}
2012 			qdf_spin_lock(&CE_state->ce_index_lock);
2013 		}
2014 	}
2015 
2016 	/*
2017 	 * Attention: we may experience a potential infinite loop in the
2018 	 * while loop below during a send stress test.
2019 	 * Resolve it the same way as the receive case (refer to EV #112693).
2020 	 */
2021 
2022 	if (CE_state->send_cb) {
2023 		/* Pop completed send buffers and call
2024 		 * the registered send callback for each
2025 		 */
2026 
2027 #ifdef ATH_11AC_TXCOMPACT
2028 		while (hif_state->ce_services->ce_completed_send_next_nolock
2029 			 (CE_state, &CE_context,
2030 			 &transfer_context, &buf, &nbytes,
2031 			 &id, &sw_idx, &hw_idx,
2032 			 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
2033 
2034 			if (CE_id != CE_HTT_H2T_MSG ||
2035 			    QDF_IS_EPPING_ENABLED(mode)) {
2036 				qdf_spin_unlock(&CE_state->ce_index_lock);
2037 				CE_state->send_cb((struct CE_handle *)CE_state,
2038 						  CE_context, transfer_context,
2039 						  buf, nbytes, id, sw_idx,
2040 						  hw_idx, toeplitz_hash_result);
2041 				qdf_spin_lock(&CE_state->ce_index_lock);
2042 			} else {
2043 				struct HIF_CE_pipe_info *pipe_info =
2044 					(struct HIF_CE_pipe_info *)CE_context;
2045 
2046 				qdf_spin_lock(&pipe_info->
2047 					      completion_freeq_lock);
2048 				pipe_info->num_sends_allowed++;
2049 				qdf_spin_unlock(&pipe_info->
2050 						completion_freeq_lock);
2051 			}
2052 		}
2053 #else                           /*ATH_11AC_TXCOMPACT */
2054 		while (hif_state->ce_services->ce_completed_send_next_nolock
2055 			 (CE_state, &CE_context,
2056 			  &transfer_context, &buf, &nbytes,
2057 			  &id, &sw_idx, &hw_idx,
2058 			  &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
2059 			qdf_spin_unlock(&CE_state->ce_index_lock);
2060 			CE_state->send_cb((struct CE_handle *)CE_state,
2061 				  CE_context, transfer_context, buf,
2062 				  nbytes, id, sw_idx, hw_idx,
2063 				  toeplitz_hash_result);
2064 			qdf_spin_lock(&CE_state->ce_index_lock);
2065 		}
2066 #endif /*ATH_11AC_TXCOMPACT */
2067 	}
2068 
2069 more_watermarks:
2070 	if (CE_state->misc_cbs) {
2071 		if (CE_state->watermark_cb &&
2072 				hif_state->ce_services->watermark_int(CE_state,
2073 					&flags)) {
2074 			qdf_spin_unlock(&CE_state->ce_index_lock);
2075 			/* Convert HW IS bits to software flags */
2076 			CE_state->watermark_cb((struct CE_handle *)CE_state,
2077 					CE_state->wm_context, flags);
2078 			qdf_spin_lock(&CE_state->ce_index_lock);
2079 		}
2080 	}
2081 
2082 	/*
2083 	 * Clear the misc interrupts (watermark) that were handled above,
2084 	 * and that will be checked again below.
2085 	 * Clear and check for copy-complete interrupts again, just in case
2086 	 * more copy completions happened while the misc interrupts were being
2087 	 * handled.
2088 	 */
2089 	if (!ce_srng_based(scn))
2090 		CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
2091 				   CE_WATERMARK_MASK |
2092 				   HOST_IS_COPY_COMPLETE_MASK);
2093 
2094 	/*
2095 	 * Now that per-engine interrupts are cleared, verify that
2096 	 * no recv interrupts arrive while processing send interrupts,
2097 	 * and no recv or send interrupts happened while processing
2098 	 * misc interrupts. Go back and check again. Keep checking until
2099 	 * we find no more events to process.
2100 	 */
2101 	if (CE_state->recv_cb &&
2102 		hif_state->ce_services->ce_recv_entries_done_nolock(scn,
2103 				CE_state)) {
2104 		if (QDF_IS_EPPING_ENABLED(mode) ||
2105 		    more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
2106 			goto more_completions;
2107 		} else {
2108 			HIF_ERROR(
2109 				"%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
2110 				__func__, CE_state->dest_ring->nentries_mask,
2111 				CE_state->dest_ring->sw_index,
2112 				CE_DEST_RING_READ_IDX_GET(scn,
2113 							  CE_state->ctrl_addr));
2114 		}
2115 	}
2116 
2117 	if (CE_state->send_cb &&
2118 		hif_state->ce_services->ce_send_entries_done_nolock(scn,
2119 				CE_state)) {
2120 		if (QDF_IS_EPPING_ENABLED(mode) ||
2121 		    more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
2122 			goto more_completions;
2123 		} else {
2124 			HIF_ERROR(
2125 				"%s:Potential infinite loop detected during send completion nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
2126 				__func__, CE_state->src_ring->nentries_mask,
2127 				CE_state->src_ring->sw_index,
2128 				CE_SRC_RING_READ_IDX_GET(scn,
2129 							 CE_state->ctrl_addr));
2130 		}
2131 	}
2132 
2133 	if (CE_state->misc_cbs && CE_state->watermark_cb) {
2134 		if (hif_state->ce_services->watermark_int(CE_state, &flags))
2135 			goto more_watermarks;
2136 	}
2137 
2138 	qdf_atomic_set(&CE_state->rx_pending, 0);
2139 
2140 unlock_end:
2141 	qdf_spin_unlock(&CE_state->ce_index_lock);
2142 target_access_end:
2143 	if (Q_TARGET_ACCESS_END(scn) < 0)
2144 		HIF_ERROR("<--[premature rc=%d]", CE_state->receive_count);
2145 	return CE_state->receive_count;
2146 }
2147 
2148 /*
2149  * Handler for per-engine interrupts on ALL active CEs.
2150  * This is used in cases where the system is sharing a
2151  * single interrupt for all CEs.
2152  */
2153 
2154 void ce_per_engine_service_any(int irq, struct hif_softc *scn)
2155 {
2156 	int CE_id;
2157 	uint32_t intr_summary;
2158 
2159 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2160 		return;
2161 
2162 	if (!qdf_atomic_read(&scn->tasklet_from_intr)) {
2163 		for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
2164 			struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
2165 
2166 			if (qdf_atomic_read(&CE_state->rx_pending)) {
2167 				qdf_atomic_set(&CE_state->rx_pending, 0);
2168 				ce_per_engine_service(scn, CE_id);
2169 			}
2170 		}
2171 
2172 		Q_TARGET_ACCESS_END(scn);
2173 		return;
2174 	}
2175 
2176 	intr_summary = CE_INTERRUPT_SUMMARY(scn);
2177 
2178 	for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
2179 		if (intr_summary & (1 << CE_id))
2180 			intr_summary &= ~(1 << CE_id);
2181 		else
2182 			continue;       /* no intr pending on this CE */
2183 
2184 		ce_per_engine_service(scn, CE_id);
2185 	}
2186 
2187 	Q_TARGET_ACCESS_END(scn);
2188 }
2189 
2190 /*
2191  * Adjust interrupts for the copy complete handler.
2192  * If it's needed for either send or recv, then unmask
2193  * this interrupt; otherwise, mask it.
2194  *
2195  * Called with target_lock held.
2196  */
2197 static void
2198 ce_per_engine_handler_adjust_legacy(struct CE_state *CE_state,
2199 			     int disable_copy_compl_intr)
2200 {
2201 	uint32_t ctrl_addr = CE_state->ctrl_addr;
2202 	struct hif_softc *scn = CE_state->scn;
2203 
2204 	CE_state->disable_copy_compl_intr = disable_copy_compl_intr;
2205 
2206 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2207 		return;
2208 
2209 	if ((!disable_copy_compl_intr) &&
2210 	    (CE_state->send_cb || CE_state->recv_cb))
2211 		CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
2212 	else
2213 		CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
2214 
2215 	if (CE_state->watermark_cb)
2216 		CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
2217 	else
2218 		CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
2219 	Q_TARGET_ACCESS_END(scn);
2220 }
2221 
2222 /* Iterate the CE_state list and disable the copy complete interrupt
2223  * if it has already been registered.
2224  */
2225 void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn)
2226 {
2227 	int CE_id;
2228 
2229 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2230 		return;
2231 
2232 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
2233 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
2234 		uint32_t ctrl_addr = CE_state->ctrl_addr;
2235 
2236 		/* if the interrupt is currently enabled, disable it */
2237 		if (!CE_state->disable_copy_compl_intr
2238 		    && (CE_state->send_cb || CE_state->recv_cb))
2239 			CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
2240 
2241 		if (CE_state->watermark_cb)
2242 			CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
2243 	}
2244 	Q_TARGET_ACCESS_END(scn);
2245 }
2246 
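/* Iterate the CE_state list and re-enable the copy complete interrupt
 * for each CE that has a send or recv callback registered and does not
 * have the "disable" flag set. Watermark interrupts are re-enabled for
 * CEs with a watermark callback.
 */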
2247 void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn)
2248 {
2249 	int CE_id;
2250 
2251 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2252 		return;
2253 
2254 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
2255 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
2256 		uint32_t ctrl_addr = CE_state->ctrl_addr;
2257 
2258 		/*
2259 		 * If the CE is supposed to have copy complete interrupts
2260 		 * enabled (i.e. there is a callback registered, and the
2261 		 * "disable" flag is not set), then re-enable the interrupt.
2262 		 */
2263 		if (!CE_state->disable_copy_compl_intr
2264 		    && (CE_state->send_cb || CE_state->recv_cb))
2265 			CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
2266 
2267 		if (CE_state->watermark_cb)
2268 			CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
2269 	}
2270 	Q_TARGET_ACCESS_END(scn);
2271 }
2272 
2273 /**
2274  * ce_send_cb_register(): register completion handler
2275  * @copyeng: CE_state representing the ce we are adding the behavior to
2276  * @fn_ptr: callback that the ce should use when processing tx completions
2277  * @disable_interrupts: whether copy complete interrupts should be disabled
2278  *
2279  * Caller should guarantee that no transactions are in progress before
2280  * switching the callback function.
2281  *
2282  * Registers the send context before the fn pointer so that if the cb is valid
2283  * the context should be valid.
2284  *
2285  * Beware that currently this function will enable completion interrupts.
2286  */
2287 void
2288 ce_send_cb_register(struct CE_handle *copyeng,
2289 		    ce_send_cb fn_ptr,
2290 		    void *ce_send_context, int disable_interrupts)
2291 {
2292 	struct CE_state *CE_state = (struct CE_state *)copyeng;
2293 	struct hif_softc *scn;
2294 	struct HIF_CE_state *hif_state;
2295 
2296 	if (CE_state == NULL) {
2297 		HIF_ERROR("%s: Error CE state = NULL", __func__);
2298 		return;
2299 	}
2300 	scn = CE_state->scn;
2301 	hif_state = HIF_GET_CE_STATE(scn);
2302 	if (hif_state == NULL) {
2303 		HIF_ERROR("%s: Error HIF state = NULL", __func__);
2304 		return;
2305 	}
2306 	CE_state->send_context = ce_send_context;
2307 	CE_state->send_cb = fn_ptr;
2308 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
2309 							disable_interrupts);
2310 }
2311 
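/*
 * Illustrative usage sketch only (the callback and pipe names below are
 * hypothetical, not part of this file): a HIF pipe that wants Tx-completion
 * notifications would typically register a handler roughly like this:
 *
 *	ce_send_cb_register(pipe_info->ce_hdl, my_pipe_send_done_cb,
 *			    pipe_info, disable_interrupts);
 *
 * ce_recv_cb_register() below is used analogously for Rx completions.
 */
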
2312 /**
2313  * ce_recv_cb_register(): register completion handler
2314  * @copyeng: CE_state representing the ce we are adding the behavior to
2315  * @fn_ptr: callback that the ce should use when processing rx completions
2316  * @disable_interrupts: whether copy complete interrupts should be disabled
2317  *
2318  * Registers the recv context before the fn pointer so that if the cb is valid
2319  * the context should be valid.
2320  *
2321  * Caller should guarantee that no transactions are in progress before
2322  * switching the callback function.
2323  */
2324 void
2325 ce_recv_cb_register(struct CE_handle *copyeng,
2326 		    CE_recv_cb fn_ptr,
2327 		    void *CE_recv_context, int disable_interrupts)
2328 {
2329 	struct CE_state *CE_state = (struct CE_state *)copyeng;
2330 	struct hif_softc *scn;
2331 	struct HIF_CE_state *hif_state;
2332 
2333 	if (CE_state == NULL) {
2334 		HIF_ERROR("%s: ERROR CE state = NULL", __func__);
2335 		return;
2336 	}
2337 	scn = CE_state->scn;
2338 	hif_state = HIF_GET_CE_STATE(scn);
2339 	if (hif_state == NULL) {
2340 		HIF_ERROR("%s: Error HIF state = NULL", __func__);
2341 		return;
2342 	}
2343 	CE_state->recv_context = CE_recv_context;
2344 	CE_state->recv_cb = fn_ptr;
2345 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
2346 							disable_interrupts);
2347 }
2348 
2349 /**
2350  * ce_watermark_cb_register(): register completion handler
2351  * @copyeng: CE_state representing the ce we are adding the behavior to
2352  * @fn_ptr: callback that the ce should use when processing watermark events
2353  *
2354  * Caller should guarantee that no watermark events are being processed before
2355  * switching the callback function.
2356  */
2357 void
2358 ce_watermark_cb_register(struct CE_handle *copyeng,
2359 			 CE_watermark_cb fn_ptr, void *CE_wm_context)
2360 {
2361 	struct CE_state *CE_state = (struct CE_state *)copyeng;
2362 	struct hif_softc *scn = CE_state->scn;
2363 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2364 
2365 	CE_state->watermark_cb = fn_ptr;
2366 	CE_state->wm_context = CE_wm_context;
2367 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
2368 							0);
2369 	if (fn_ptr)
2370 		CE_state->misc_cbs = 1;
2371 }
2372 
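/**
 * ce_get_rx_pending() - check if any copy engine has rx pending
 * @scn: HIF context
 *
 * Return: true if any CE has its rx_pending flag set, false otherwise
 */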
2373 bool ce_get_rx_pending(struct hif_softc *scn)
2374 {
2375 	int CE_id;
2376 
2377 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
2378 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
2379 
2380 		if (qdf_atomic_read(&CE_state->rx_pending))
2381 			return true;
2382 	}
2383 
2384 	return false;
2385 }
2386 
2387 /**
2388  * ce_check_rx_pending() - ce_check_rx_pending
2389  * @CE_state: context of the copy engine to check
2390  *
2391  * Return: true if the per_engine_service
2392  *	didn't process all the rx descriptors.
2393  */
2394 bool ce_check_rx_pending(struct CE_state *CE_state)
2395 {
2396 	if (qdf_atomic_read(&CE_state->rx_pending))
2397 		return true;
2398 	else
2399 		return false;
2400 }
2401 
2402 #ifdef IPA_OFFLOAD
2403 /**
2404  * ce_ipa_get_resource() - get uc resource on copyengine
2405  * @ce: copyengine context
2406  * @ce_sr_base_paddr: copyengine source ring base physical address
2407  * @ce_sr_ring_size: copyengine source ring size
2408  * @ce_reg_paddr: copyengine register physical address
2409  *
2410  * The copy engine should release these resources to the micro controller.
2411  * The micro controller needs:
2412  *  - Copy engine source descriptor base address
2413  *  - Copy engine source descriptor size
2414  *  - PCI BAR address to access the copy engine register
2415  *
2416  * Return: None
2417  */
2418 void ce_ipa_get_resource(struct CE_handle *ce,
2419 			 qdf_dma_addr_t *ce_sr_base_paddr,
2420 			 uint32_t *ce_sr_ring_size,
2421 			 qdf_dma_addr_t *ce_reg_paddr)
2422 {
2423 	struct CE_state *CE_state = (struct CE_state *)ce;
2424 	uint32_t ring_loop;
2425 	struct CE_src_desc *ce_desc;
2426 	qdf_dma_addr_t phy_mem_base;
2427 	struct hif_softc *scn = CE_state->scn;
2428 
2429 	if (CE_UNUSED == CE_state->state) {
2430 		*ce_sr_base_paddr = 0;
2431 		*ce_sr_ring_size = 0;
2432 		return;
2433 	}
2434 
2435 	/* Update default value for descriptor */
2436 	for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
2437 	     ring_loop++) {
2438 		ce_desc = (struct CE_src_desc *)
2439 			  ((char *)CE_state->src_ring->base_addr_owner_space +
2440 			   ring_loop * (sizeof(struct CE_src_desc)));
2441 		CE_IPA_RING_INIT(ce_desc);
2442 	}
2443 
2444 	/* Get BAR address */
2445 	hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);
2446 
2447 	*ce_sr_base_paddr = CE_state->src_ring->base_addr_CE_space;
2448 	*ce_sr_ring_size = (uint32_t) (CE_state->src_ring->nentries *
2449 		sizeof(struct CE_src_desc));
2450 	*ce_reg_paddr = phy_mem_base + CE_BASE_ADDRESS(CE_state->id) +
2451 			SR_WR_INDEX_ADDRESS;
2452 }
2453 #endif /* IPA_OFFLOAD */
2454 
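/**
 * ce_check_int_watermark() - check for a pending watermark interrupt
 * @CE_state: copy engine state
 * @flags: set to the CE_WM_FLAG_* software flags derived from the
 *	hardware interrupt status bits
 *
 * Return: true if a watermark interrupt is pending on this CE
 */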
2455 static bool ce_check_int_watermark(struct CE_state *CE_state,
2456 				   unsigned int *flags)
2457 {
2458 	uint32_t ce_int_status;
2459 	uint32_t ctrl_addr = CE_state->ctrl_addr;
2460 	struct hif_softc *scn = CE_state->scn;
2461 
2462 	ce_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
2463 	if (ce_int_status & CE_WATERMARK_MASK) {
2464 		/* Convert HW IS bits to software flags */
2465 		*flags =
2466 			(ce_int_status & CE_WATERMARK_MASK) >>
2467 			CE_WM_SHFT;
2468 		return true;
2469 	}
2470 
2471 	return false;
2472 }
2473 
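/**
 * ce_legacy_src_ring_setup() - program a legacy CE source ring
 * @scn: HIF context
 * @ce_id: copy engine ID
 * @src_ring: source ring state to initialize
 * @attr: CE attributes (used for the maximum source buffer size)
 *
 * Reads back the hardware indices and programs the ring base address,
 * size, DMAX, byte swap (big endian hosts) and watermark registers.
 */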
2474 static void ce_legacy_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
2475 			struct CE_ring_state *src_ring,
2476 			struct CE_attr *attr)
2477 {
2478 	uint32_t ctrl_addr;
2479 	uint64_t dma_addr;
2480 
2481 	QDF_ASSERT(ce_id < scn->ce_count);
2482 	ctrl_addr = CE_BASE_ADDRESS(ce_id);
2483 
2484 	src_ring->hw_index =
2485 		CE_SRC_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
2486 	src_ring->sw_index = src_ring->hw_index;
2487 	src_ring->write_index =
2488 		CE_SRC_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
2489 	dma_addr = src_ring->base_addr_CE_space;
2490 	CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
2491 			(uint32_t)(dma_addr & 0xFFFFFFFF));
2492 
2493 	/* if SR_BA_ADDRESS_HIGH register exists */
2494 	if (is_register_supported(SR_BA_ADDRESS_HIGH)) {
2495 		uint32_t tmp;
2496 
2497 		tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET(
2498 				scn, ctrl_addr);
2499 		tmp &= ~0x1F;
2500 		dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
2501 		CE_SRC_RING_BASE_ADDR_HIGH_SET(scn,
2502 				ctrl_addr, (uint32_t)dma_addr);
2503 	}
2504 	CE_SRC_RING_SZ_SET(scn, ctrl_addr, src_ring->nentries);
2505 	CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max);
2506 #ifdef BIG_ENDIAN_HOST
2507 	/* Enable source ring byte swap for big endian host */
2508 	CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
2509 #endif
2510 	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
2511 	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, src_ring->nentries);
2512 
2513 }
2514 
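/**
 * ce_legacy_dest_ring_setup() - program a legacy CE destination ring
 * @scn: HIF context
 * @ce_id: copy engine ID
 * @dest_ring: destination ring state to initialize
 * @attr: CE attributes
 *
 * Reads back the hardware indices and programs the ring base address,
 * size, byte swap (big endian hosts) and watermark registers.
 */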
2515 static void ce_legacy_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id,
2516 				struct CE_ring_state *dest_ring,
2517 				struct CE_attr *attr)
2518 {
2519 	uint32_t ctrl_addr;
2520 	uint64_t dma_addr;
2521 
2522 	QDF_ASSERT(ce_id < scn->ce_count);
2523 	ctrl_addr = CE_BASE_ADDRESS(ce_id);
2524 	dest_ring->sw_index =
2525 		CE_DEST_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
2526 	dest_ring->write_index =
2527 		CE_DEST_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
2528 	dma_addr = dest_ring->base_addr_CE_space;
2529 	CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
2530 			(uint32_t)(dma_addr & 0xFFFFFFFF));
2531 
2532 	/* if DR_BA_ADDRESS_HIGH exists */
2533 	if (is_register_supported(DR_BA_ADDRESS_HIGH)) {
2534 		uint32_t tmp;
2535 
2536 		tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn,
2537 				ctrl_addr);
2538 		tmp &= ~0x1F;
2539 		dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
2540 		CE_DEST_RING_BASE_ADDR_HIGH_SET(scn,
2541 				ctrl_addr, (uint32_t)dma_addr);
2542 	}
2543 
2544 	CE_DEST_RING_SZ_SET(scn, ctrl_addr, dest_ring->nentries);
2545 #ifdef BIG_ENDIAN_HOST
2546 	/* Enable Dest ring byte swap for big endian host */
2547 	CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
2548 #endif
2549 	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
2550 	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, dest_ring->nentries);
2551 }
2552 
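/**
 * ce_get_desc_size_legacy() - descriptor size for a legacy CE ring type
 * @ring_type: CE_RING_SRC, CE_RING_DEST or CE_RING_STATUS
 *
 * Return: size of one descriptor in bytes; 0 for unsupported ring types
 * (CE_RING_STATUS is not supported for legacy copy engines)
 */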
2553 static uint32_t ce_get_desc_size_legacy(uint8_t ring_type)
2554 {
2555 	switch (ring_type) {
2556 	case CE_RING_SRC:
2557 		return sizeof(struct CE_src_desc);
2558 	case CE_RING_DEST:
2559 		return sizeof(struct CE_dest_desc);
2560 	case CE_RING_STATUS:
2561 		qdf_assert(0);
2562 		return 0;
2563 	default:
2564 		return 0;
2565 	}
2566 
2567 	return 0;
2568 }
2569 
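/**
 * ce_ring_setup_legacy() - dispatch ring setup based on ring type
 * @scn: HIF context
 * @ring_type: CE_RING_SRC or CE_RING_DEST
 * @ce_id: copy engine ID
 * @ring: ring state to initialize
 * @attr: CE attributes
 */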
2570 static void ce_ring_setup_legacy(struct hif_softc *scn, uint8_t ring_type,
2571 		uint32_t ce_id, struct CE_ring_state *ring,
2572 		struct CE_attr *attr)
2573 {
2574 	switch (ring_type) {
2575 	case CE_RING_SRC:
2576 		ce_legacy_src_ring_setup(scn, ce_id, ring, attr);
2577 		break;
2578 	case CE_RING_DEST:
2579 		ce_legacy_dest_ring_setup(scn, ce_id, ring, attr);
2580 		break;
2581 	case CE_RING_STATUS:
2582 	default:
2583 		qdf_assert(0);
2584 		break;
2585 	}
2586 }
2587 
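/* Legacy targets do not use the shadow register v2 configuration,
 * so report an empty configuration to the caller.
 */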
2588 static void ce_prepare_shadow_register_v2_cfg_legacy(struct hif_softc *scn,
2589 			    struct pld_shadow_reg_v2_cfg **shadow_config,
2590 			    int *num_shadow_registers_configured)
2591 {
2592 	*num_shadow_registers_configured = 0;
2593 	*shadow_config = NULL;
2594 }
2595 
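/* Service callbacks used for legacy (non-SRNG based) copy engine targets */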
2596 struct ce_ops ce_service_legacy = {
2597 	.ce_get_desc_size = ce_get_desc_size_legacy,
2598 	.ce_ring_setup = ce_ring_setup_legacy,
2599 	.ce_sendlist_send = ce_sendlist_send_legacy,
2600 	.ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_legacy,
2601 	.ce_revoke_recv_next = ce_revoke_recv_next_legacy,
2602 	.ce_cancel_send_next = ce_cancel_send_next_legacy,
2603 	.ce_recv_buf_enqueue = ce_recv_buf_enqueue_legacy,
2604 	.ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_legacy,
2605 	.ce_send_nolock = ce_send_nolock_legacy,
2606 	.watermark_int = ce_check_int_watermark,
2607 	.ce_completed_send_next_nolock = ce_completed_send_next_nolock_legacy,
2608 	.ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_legacy,
2609 	.ce_send_entries_done_nolock = ce_send_entries_done_nolock_legacy,
2610 	.ce_prepare_shadow_register_v2_cfg =
2611 		ce_prepare_shadow_register_v2_cfg_legacy,
2612 };
2613 
2614 
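/**
 * ce_services_legacy() - return the CE service table for legacy targets
 *
 * Return: pointer to the legacy ce_ops table
 */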
2615 struct ce_ops *ce_services_legacy(void)
2616 {
2617 	return &ce_service_legacy;
2618 }
2619