xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_service.c (revision 302a1d9701784af5f4797b1a9fe07ae820b51907)
1 /*
2  * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "hif.h"
20 #include "hif_io32.h"
21 #include "ce_api.h"
22 #include "ce_main.h"
23 #include "ce_internal.h"
24 #include "ce_reg.h"
25 #include "qdf_lock.h"
26 #include "regtable.h"
27 #include "hif_main.h"
28 #include "hif_debug.h"
29 #include "hif_napi.h"
30 #include "qdf_module.h"
31 
32 #ifdef IPA_OFFLOAD
33 #ifdef QCA_WIFI_3_0
34 #define CE_IPA_RING_INIT(ce_desc)                       \
35 	do {                                            \
36 		ce_desc->gather = 0;                    \
37 		ce_desc->enable_11h = 0;                \
38 		ce_desc->meta_data_low = 0;             \
39 		ce_desc->packet_result_offset = 64;     \
40 		ce_desc->toeplitz_hash_enable = 0;      \
41 		ce_desc->addr_y_search_disable = 0;     \
42 		ce_desc->addr_x_search_disable = 0;     \
43 		ce_desc->misc_int_disable = 0;          \
44 		ce_desc->target_int_disable = 0;        \
45 		ce_desc->host_int_disable = 0;          \
46 		ce_desc->dest_byte_swap = 0;            \
47 		ce_desc->byte_swap = 0;                 \
48 		ce_desc->type = 2;                      \
49 		ce_desc->tx_classify = 1;               \
50 		ce_desc->buffer_addr_hi = 0;            \
51 		ce_desc->meta_data = 0;                 \
52 		ce_desc->nbytes = 128;                  \
53 	} while (0)
54 #else
55 #define CE_IPA_RING_INIT(ce_desc)                       \
56 	do {                                            \
57 		ce_desc->byte_swap = 0;                 \
58 		ce_desc->nbytes = 60;                   \
59 		ce_desc->gather = 0;                    \
60 	} while (0)
61 #endif /* QCA_WIFI_3_0 */
62 #endif /* IPA_OFFLOAD */
63 
64 #ifndef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
65 #define DATA_CE_UPDATE_SWINDEX(x, scn, addr)				\
66 	do {                                            		\
67 		x = CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, addr); 	\
68 	} while (0)
69 #else
70 #define DATA_CE_UPDATE_SWINDEX(x, scn, addr)
71 #endif
72 
73 static int war1_allow_sleep;
74 /* io32 write workaround */
75 static int hif_ce_war1;
76 
77 /**
78  * hif_ce_war_disable() - disable ce war globally
79  */
80 void hif_ce_war_disable(void)
81 {
82 	hif_ce_war1 = 0;
83 }
84 
85 /**
86  * hif_ce_war_enable() - enable ce war globally
87  */
88 void hif_ce_war_enable(void)
89 {
90 	hif_ce_war1 = 1;
91 }
92 
93 /*
94  * Note: For MCL, only #if defined(HIF_CONFIG_SLUB_DEBUG_ON) needs to be
95  * checked here
96  */
97 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
98 
99 #define CE_DEBUG_PRINT_BUF_SIZE(x) (((x) * 3) - 1)
100 #define CE_DEBUG_DATA_PER_ROW 16
101 
102 qdf_mutex_t ce_dbg_datamem_lock[CE_COUNT_MAX];
103 static const char *ce_event_type_to_str(enum hif_ce_event_type type);
104 
105 /**
106  * get_next_record_index() - get the next record index
107  * @table_index: atomic index variable to increment
108  * @array_size: array size of the circular buffer
109  *
110  * Increment the atomic index and reserve the value.
111  * Takes care of buffer wrap.
112  * Guaranteed to be thread safe as long as fewer than array_size contexts
113  * try to access the array.  If there are more than array_size contexts
114  * trying to access the array, full locking of the recording process would
115  * be needed to have sane logging.
116  */
117 static int get_next_record_index(qdf_atomic_t *table_index, int array_size)
118 {
119 	int record_index = qdf_atomic_inc_return(table_index);
120 
121 	if (record_index == array_size)
122 		qdf_atomic_sub(array_size, table_index);
123 
124 	while (record_index >= array_size)
125 		record_index -= array_size;
126 	return record_index;
127 }
128 
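/*
 * Worked example (editorial, not part of the driver): with array_size = 4
 * and the shared table_index currently at 3, qdf_atomic_inc_return() hands
 * this caller 4.  Since 4 == array_size, the shared index is pulled back by
 * 4 (so the next caller is handed 1 again) and the local value is reduced
 * modulo 4, giving record_index = 0.  The buffer therefore wraps without a
 * lock as long as fewer than array_size contexts race here at once.
 */
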
129 #if HIF_CE_DEBUG_DATA_BUF
130 /**
131  * hif_ce_desc_data_record() - Record data pointed by the CE descriptor
132  * @event: structure detailing a ce event
133  * @len: length of the data
134  * Return: none
135  */
136 static void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len)
137 {
138 	uint8_t *data = NULL;
139 
140 	if (!event->data)
141 		return;
142 
143 	if (event->memory && len > 0)
144 		data = qdf_nbuf_data((qdf_nbuf_t)event->memory);
145 
146 	event->actual_data_len = 0;
147 	qdf_mem_zero(event->data, CE_DEBUG_MAX_DATA_BUF_SIZE);
148 
149 	if (data && len > 0) {
150 		qdf_mem_copy(event->data, data,
151 				((len < CE_DEBUG_MAX_DATA_BUF_SIZE) ?
152 				 len : CE_DEBUG_MAX_DATA_BUF_SIZE));
153 		event->actual_data_len = len;
154 	}
155 }
156 #endif
157 
158 /**
159  * hif_record_ce_desc_event() - record ce descriptor events
160  * @scn: hif_softc
161  * @ce_id: which ce is the event occurring on
162  * @type: what happened
163  * @descriptor: pointer to the descriptor posted/completed
164  * @memory: virtual address of buffer related to the descriptor
165  * @index: index that the descriptor was/will be at.
 * @len: length of the data pointed to by the descriptor
166  */
167 void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
168 				enum hif_ce_event_type type,
169 				union ce_desc *descriptor,
170 				void *memory, int index,
171 				int len)
172 {
173 	int record_index;
174 	struct hif_ce_desc_event *event;
175 
176 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
177 	struct hif_ce_desc_event *hist_ev = NULL;
178 
179 	if (ce_id < CE_COUNT_MAX)
180 		hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id];
181 	else
182 		return;
183 
187 	if (!ce_hist->enable[ce_id])
188 		return;
189 
190 	if (!hist_ev)
191 		return;
192 
193 	record_index = get_next_record_index(
194 			&ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX);
195 
196 	event = &hist_ev[record_index];
197 
198 	event->type = type;
199 	event->time = qdf_get_log_timestamp();
200 
201 	if (descriptor != NULL) {
202 		qdf_mem_copy(&event->descriptor, descriptor, sizeof(union ce_desc));
203 	} else {
204 		qdf_mem_zero(&event->descriptor, sizeof(union ce_desc));
205 	}
206 
207 	event->memory = memory;
208 	event->index = index;
209 
210 #if HIF_CE_DEBUG_DATA_BUF
211 	if (ce_hist->data_enable[ce_id])
212 		hif_ce_desc_data_record(event, len);
213 #endif
214 }
215 qdf_export_symbol(hif_record_ce_desc_event);
216 
217 /**
218  * ce_init_ce_desc_event_log() - initialize the ce event log
 * @scn: HIF context
219  * @ce_id: copy engine id for which we are initializing the log
220  * @size: size of array to dedicate
221  *
222  * Currently the passed size is ignored in favor of a precompiled value.
223  */
224 void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size)
225 {
226 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
227 	qdf_atomic_init(&ce_hist->history_index[ce_id]);
228 	qdf_mutex_create(&ce_dbg_datamem_lock[ce_id]);
229 }
230 
231 /**
232  * ce_deinit_ce_desc_event_log() - deinitialize the ce event log
 * @scn: HIF context
233  * @ce_id: copy engine id for which we are deinitializing the log
234  *
235  */
236 inline void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
237 {
238 	qdf_mutex_destroy(&ce_dbg_datamem_lock[ce_id]);
239 }
240 
241 #else /* Note: For MCL, (HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
242 void hif_record_ce_desc_event(struct hif_softc *scn,
243 		int ce_id, enum hif_ce_event_type type,
244 		union ce_desc *descriptor, void *memory,
245 		int index, int len)
246 {
247 }
248 qdf_export_symbol(hif_record_ce_desc_event);
249 
250 inline void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id,
251 					int size)
252 {
253 }
254 
255 void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
256 {
257 }
258 #endif /* Note: for MCL, HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */
259 
260 #ifdef NAPI_YIELD_BUDGET_BASED
261 bool hif_ce_service_should_yield(struct hif_softc *scn,
262 				 struct CE_state *ce_state)
263 {
264 	bool yield =  hif_max_num_receives_reached(scn, ce_state->receive_count);
265 	return yield;
266 }
267 #else
268 /**
269  * hif_ce_service_should_yield() - return true if the service is hogging the cpu
270  * @scn: hif context
271  * @ce_state: context of the copy engine being serviced
272  *
273  * Return: true if the service should yield
274  */
275 bool hif_ce_service_should_yield(struct hif_softc *scn,
276 				 struct CE_state *ce_state)
277 {
278 	bool yield, time_limit_reached, rxpkt_thresh_reached = 0;
279 
280 	time_limit_reached =
281 		sched_clock() > ce_state->ce_service_yield_time ? 1 : 0;
282 
283 	if (!time_limit_reached)
284 		rxpkt_thresh_reached = hif_max_num_receives_reached
285 					(scn, ce_state->receive_count);
286 
287 	yield =  time_limit_reached || rxpkt_thresh_reached;
288 
289 	if (yield && ce_state->htt_rx_data)
290 		hif_napi_update_yield_stats(ce_state,
291 					    time_limit_reached,
292 					    rxpkt_thresh_reached);
293 	return yield;
294 }
295 #endif
296 /*
297  * Support for Copy Engine hardware, which is mainly used for
298  * communication between Host and Target over a PCIe interconnect.
299  */
300 
301 /*
302  * A single CopyEngine (CE) comprises two "rings":
303  *   a source ring
304  *   a destination ring
305  *
306  * Each ring consists of a number of descriptors which specify
307  * an address, length, and meta-data.
308  *
309  * Typically, one side of the PCIe interconnect (Host or Target)
310  * controls one ring and the other side controls the other ring.
311  * The source side chooses when to initiate a transfer and it
312  * chooses what to send (buffer address, length). The destination
313  * side keeps a supply of "anonymous receive buffers" available and
314  * it handles incoming data as it arrives (when the destination
315  * receives an interrupt).
316  *
317  * The sender may send a simple buffer (address/length) or it may
318  * send a small list of buffers.  When a small list is sent, hardware
319  * "gathers" these and they end up in a single destination buffer
320  * with a single interrupt.
321  *
322  * There are several "contexts" managed by this layer -- more, it may
323  * seem, than should be needed. These are provided mainly for
324  * maximum flexibility and especially to facilitate a simpler HIF
325  * implementation. There are per-CopyEngine recv, send, and watermark
326  * contexts. These are supplied by the caller when a recv, send,
327  * or watermark handler is established and they are echoed back to
328  * the caller when the respective callbacks are invoked. There is
329  * also a per-transfer context supplied by the caller when a buffer
330  * (or sendlist) is sent and when a buffer is enqueued for recv.
331  * These per-transfer contexts are echoed back to the caller when
332  * the buffer is sent/received.
333  * Target TX hash result: toeplitz_hash_result
334  */
335 
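/*
 * Illustrative sketch (editorial, not part of the driver): a minimal
 * host-side send using the per-transfer context described above.  The
 * function name example_send_one() is hypothetical; the nbuf is assumed
 * to be DMA mapped already.  The nbuf itself is passed as the
 * per-transfer context so that ce_completed_send_next() hands it back to
 * the completion handler for freeing or recycling.
 */
static int example_send_one(struct CE_handle *ce_hdl, qdf_nbuf_t nbuf,
			    uint32_t transfer_id)
{
	qdf_dma_addr_t paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
	uint32_t nbytes = (uint32_t)qdf_nbuf_len(nbuf);

	/* flags/user_flag left at 0: bytestream data, no gather */
	return ce_send(ce_hdl, nbuf, paddr, nbytes, transfer_id, 0, 0);
}
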
336 /*
337  * Guts of ce_send, used by both ce_send and ce_sendlist_send.
338  * The caller takes responsibility for any needed locking.
339  */
340 
341 static
342 void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
343 				   u32 ctrl_addr, unsigned int write_index)
344 {
345 	if (hif_ce_war1) {
346 		void __iomem *indicator_addr;
347 
348 		indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;
349 
350 		if (!war1_allow_sleep
351 		    && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
352 			hif_write32_mb(scn, indicator_addr,
353 				       (CDC_WAR_MAGIC_STR | write_index));
354 		} else {
355 			unsigned long irq_flags;
356 
357 			local_irq_save(irq_flags);
358 			hif_write32_mb(scn, indicator_addr, 1);
359 
360 			/*
361 			 * PCIE write waits for ACK in IPQ8K, there is no
362 			 * need to read back value.
363 			 */
364 			(void)hif_read32_mb(scn, indicator_addr);
365 			/* conservative */
366 			(void)hif_read32_mb(scn, indicator_addr);
367 
368 			CE_SRC_RING_WRITE_IDX_SET(scn,
369 						  ctrl_addr, write_index);
370 
371 			hif_write32_mb(scn, indicator_addr, 0);
372 			local_irq_restore(irq_flags);
373 		}
374 	} else {
375 		CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
376 	}
377 }
378 
379 #ifdef HIF_CONFIG_SLUB_DEBUG_ON
380 /**
381  * ce_validate_nbytes() - validate nbytes for slub builds on tx descriptors
382  * @nbytes: nbytes value being written into a send descriptor
383  * @ce_state: context of the copy engine
384 
385  *
386  *
387  * Return: none
388  */
389 static void ce_validate_nbytes(uint32_t nbytes, struct CE_state *ce_state)
390 {
391 	if (nbytes <= 0 || nbytes > ce_state->src_sz_max)
392 		QDF_BUG(0);
393 }
394 #else
395 static void ce_validate_nbytes(uint32_t nbytes, struct CE_state *ce_state)
396 {
397 }
398 #endif
399 
400 static int
401 ce_send_nolock_legacy(struct CE_handle *copyeng,
402 			   void *per_transfer_context,
403 			   qdf_dma_addr_t buffer,
404 			   uint32_t nbytes,
405 			   uint32_t transfer_id,
406 			   uint32_t flags,
407 			   uint32_t user_flags)
408 {
409 	int status;
410 	struct CE_state *CE_state = (struct CE_state *)copyeng;
411 	struct CE_ring_state *src_ring = CE_state->src_ring;
412 	uint32_t ctrl_addr = CE_state->ctrl_addr;
413 	unsigned int nentries_mask = src_ring->nentries_mask;
414 	unsigned int sw_index = src_ring->sw_index;
415 	unsigned int write_index = src_ring->write_index;
416 	uint64_t dma_addr = buffer;
417 	struct hif_softc *scn = CE_state->scn;
418 
419 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
420 		return QDF_STATUS_E_FAILURE;
421 	if (unlikely(CE_RING_DELTA(nentries_mask,
422 				write_index, sw_index - 1) <= 0)) {
423 		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
424 		Q_TARGET_ACCESS_END(scn);
425 		return QDF_STATUS_E_FAILURE;
426 	}
427 	{
428 		enum hif_ce_event_type event_type;
429 		struct CE_src_desc *src_ring_base =
430 			(struct CE_src_desc *)src_ring->base_addr_owner_space;
431 		struct CE_src_desc *shadow_base =
432 			(struct CE_src_desc *)src_ring->shadow_base;
433 		struct CE_src_desc *src_desc =
434 			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
435 		struct CE_src_desc *shadow_src_desc =
436 			CE_SRC_RING_TO_DESC(shadow_base, write_index);
437 
438 		/* Update low 32 bits source descriptor address */
439 		shadow_src_desc->buffer_addr =
440 			(uint32_t)(dma_addr & 0xFFFFFFFF);
441 #ifdef QCA_WIFI_3_0
442 		shadow_src_desc->buffer_addr_hi =
443 			(uint32_t)((dma_addr >> 32) & 0x1F);
444 		user_flags |= shadow_src_desc->buffer_addr_hi;
445 		memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
446 			   sizeof(uint32_t));
447 #endif
448 		shadow_src_desc->target_int_disable = 0;
449 		shadow_src_desc->host_int_disable = 0;
450 
451 		shadow_src_desc->meta_data = transfer_id;
452 
453 		/*
454 		 * Set the swap bit if:
455 		 * typical sends on this CE are swapped (host is big-endian)
456 		 * and this send doesn't disable the swapping
457 		 * (data is not bytestream)
458 		 */
459 		shadow_src_desc->byte_swap =
460 			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
461 			 != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
462 		shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
463 		shadow_src_desc->nbytes = nbytes;
464 		ce_validate_nbytes(nbytes, CE_state);
465 
466 		*src_desc = *shadow_src_desc;
467 
468 		src_ring->per_transfer_context[write_index] =
469 			per_transfer_context;
470 
471 		/* Update Source Ring Write Index */
472 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
473 
474 		/* WORKAROUND */
475 		if (shadow_src_desc->gather) {
476 			event_type = HIF_TX_GATHER_DESC_POST;
477 		} else if (qdf_unlikely(CE_state->state != CE_RUNNING)) {
478 			event_type = HIF_TX_DESC_SOFTWARE_POST;
479 			CE_state->state = CE_PENDING;
480 		} else {
481 			event_type = HIF_TX_DESC_POST;
482 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
483 						      write_index);
484 		}
485 
486 		/* src_ring->write_index hasn't been updated even though
487 		 * the register has already been written to.
488 		 */
489 		hif_record_ce_desc_event(scn, CE_state->id, event_type,
490 			(union ce_desc *) shadow_src_desc, per_transfer_context,
491 			src_ring->write_index, nbytes);
492 
493 		src_ring->write_index = write_index;
494 		status = QDF_STATUS_SUCCESS;
495 	}
496 	Q_TARGET_ACCESS_END(scn);
497 	return status;
498 }
499 
500 int
501 ce_send(struct CE_handle *copyeng,
502 		void *per_transfer_context,
503 		qdf_dma_addr_t buffer,
504 		uint32_t nbytes,
505 		uint32_t transfer_id,
506 		uint32_t flags,
507 		uint32_t user_flag)
508 {
509 	struct CE_state *CE_state = (struct CE_state *)copyeng;
510 	int status;
511 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
512 
513 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
514 	status = hif_state->ce_services->ce_send_nolock(copyeng,
515 			per_transfer_context, buffer, nbytes,
516 			transfer_id, flags, user_flag);
517 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
518 
519 	return status;
520 }
521 
522 unsigned int ce_sendlist_sizeof(void)
523 {
524 	return sizeof(struct ce_sendlist);
525 }
526 
527 void ce_sendlist_init(struct ce_sendlist *sendlist)
528 {
529 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
530 
531 	sl->num_items = 0;
532 }
533 
534 int
535 ce_sendlist_buf_add(struct ce_sendlist *sendlist,
536 					qdf_dma_addr_t buffer,
537 					uint32_t nbytes,
538 					uint32_t flags,
539 					uint32_t user_flags)
540 {
541 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
542 	unsigned int num_items = sl->num_items;
543 	struct ce_sendlist_item *item;
544 
545 	if (num_items >= CE_SENDLIST_ITEMS_MAX) {
546 		QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
547 		return QDF_STATUS_E_RESOURCES;
548 	}
549 
550 	item = &sl->item[num_items];
551 	item->send_type = CE_SIMPLE_BUFFER_TYPE;
552 	item->data = buffer;
553 	item->u.nbytes = nbytes;
554 	item->flags = flags;
555 	item->user_flags = user_flags;
556 	sl->num_items = num_items + 1;
557 	return QDF_STATUS_SUCCESS;
558 }
559 
560 int
561 ce_sendlist_send(struct CE_handle *copyeng,
562 		 void *per_transfer_context,
563 		 struct ce_sendlist *sendlist, unsigned int transfer_id)
564 {
565 	struct CE_state *CE_state = (struct CE_state *)copyeng;
566 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
567 
568 	return hif_state->ce_services->ce_sendlist_send(copyeng,
569 			per_transfer_context, sendlist, transfer_id);
570 }
571 
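/*
 * Illustrative sketch (editorial, not part of the driver): gathering a
 * two-fragment frame with the sendlist API above.  The name
 * example_send_two_frags() is hypothetical; both fragments are assumed
 * to be DMA mapped, and the hardware delivers them as a single
 * destination buffer with one interrupt.
 */
static int example_send_two_frags(struct CE_handle *ce_hdl, void *ctx,
				  qdf_dma_addr_t hdr_paddr, uint32_t hdr_len,
				  qdf_dma_addr_t pkt_paddr, uint32_t pkt_len,
				  unsigned int transfer_id)
{
	struct ce_sendlist sendlist;

	ce_sendlist_init(&sendlist);

	if (ce_sendlist_buf_add(&sendlist, hdr_paddr, hdr_len, 0, 0) !=
	    QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_RESOURCES;

	if (ce_sendlist_buf_add(&sendlist, pkt_paddr, pkt_len, 0, 0) !=
	    QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_RESOURCES;

	/* ctx is echoed back with the final (non-gather) descriptor */
	return ce_sendlist_send(ce_hdl, ctx, &sendlist, transfer_id);
}
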
572 static int
573 ce_sendlist_send_legacy(struct CE_handle *copyeng,
574 		 void *per_transfer_context,
575 		 struct ce_sendlist *sendlist, unsigned int transfer_id)
576 {
577 	int status = -ENOMEM;
578 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
579 	struct CE_state *CE_state = (struct CE_state *)copyeng;
580 	struct CE_ring_state *src_ring = CE_state->src_ring;
581 	unsigned int nentries_mask = src_ring->nentries_mask;
582 	unsigned int num_items = sl->num_items;
583 	unsigned int sw_index;
584 	unsigned int write_index;
585 	struct hif_softc *scn = CE_state->scn;
586 
587 	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));
588 
589 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
590 
591 	if (CE_state->scn->fastpath_mode_on && CE_state->htt_tx_data &&
592 	    Q_TARGET_ACCESS_BEGIN(scn) == 0) {
593 		src_ring->sw_index = CE_SRC_RING_READ_IDX_GET_FROM_DDR(
594 					       scn, CE_state->ctrl_addr);
595 		Q_TARGET_ACCESS_END(scn);
596 	}
597 
598 	sw_index = src_ring->sw_index;
599 	write_index = src_ring->write_index;
600 
601 	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >=
602 	    num_items) {
603 		struct ce_sendlist_item *item;
604 		int i;
605 
606 		/* handle all but the last item uniformly */
607 		for (i = 0; i < num_items - 1; i++) {
608 			item = &sl->item[i];
609 			/* TBDXXX: Support extensible sendlist_types? */
610 			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
611 			status = ce_send_nolock_legacy(copyeng,
612 				CE_SENDLIST_ITEM_CTXT,
613 				(qdf_dma_addr_t) item->data,
614 				item->u.nbytes, transfer_id,
615 				item->flags | CE_SEND_FLAG_GATHER,
616 				item->user_flags);
617 			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
618 		}
619 		/* provide valid context pointer for final item */
620 		item = &sl->item[i];
621 		/* TBDXXX: Support extensible sendlist_types? */
622 		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
623 		status = ce_send_nolock_legacy(copyeng, per_transfer_context,
624 					(qdf_dma_addr_t) item->data,
625 					item->u.nbytes,
626 					transfer_id, item->flags,
627 					item->user_flags);
628 		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
629 		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
630 					QDF_NBUF_TX_PKT_CE);
631 		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
632 			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
633 			QDF_TRACE_DEFAULT_PDEV_ID,
634 			(uint8_t *)&(((qdf_nbuf_t)per_transfer_context)->data),
635 			sizeof(((qdf_nbuf_t)per_transfer_context)->data),
636 			QDF_TX));
637 	} else {
638 		/*
639 		 * Probably not worth the additional complexity to support
640 		 * partial sends with continuation or notification.  We expect
641 		 * to use large rings and small sendlists. If we can't handle
642 		 * the entire request at once, punt it back to the caller.
643 		 */
644 	}
645 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
646 
647 	return status;
648 }
649 
650 #ifdef WLAN_FEATURE_FASTPATH
651 #ifdef QCA_WIFI_3_0
652 static inline void
653 ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
654 		      uint64_t dma_addr,
655 		      uint32_t user_flags)
656 {
657 	shadow_src_desc->buffer_addr_hi =
658 			(uint32_t)((dma_addr >> 32) & 0x1F);
659 	user_flags |= shadow_src_desc->buffer_addr_hi;
660 	memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
661 			sizeof(uint32_t));
662 }
663 #else
664 static inline void
665 ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
666 		      uint64_t dma_addr,
667 		      uint32_t user_flags)
668 {
669 }
670 #endif
671 
672 #define SLOTS_PER_DATAPATH_TX 2
673 
674 /**
675  * ce_send_fast() - CE layer Tx buffer posting function
676  * @copyeng: copy engine handle
677  * @msdu: msdu to be sent
678  * @transfer_id: transfer_id
679  * @download_len: packet download length
680  *
681  * Assumption : Called with an array of MSDU's
682  * Function:
683  * For each msdu in the array
684  * 1. Check no. of available entries
685  * 2. Create src ring entries (allocated in consistent memory)
686  * 3. Write index to h/w
687  *
688  * Return: No. of packets that could be sent
689  */
690 int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
691 		 unsigned int transfer_id, uint32_t download_len)
692 {
693 	struct CE_state *ce_state = (struct CE_state *)copyeng;
694 	struct hif_softc *scn = ce_state->scn;
695 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
696 	struct CE_ring_state *src_ring = ce_state->src_ring;
697 	u_int32_t ctrl_addr = ce_state->ctrl_addr;
698 	unsigned int nentries_mask = src_ring->nentries_mask;
699 	unsigned int write_index;
700 	unsigned int sw_index;
701 	unsigned int frag_len;
702 	uint64_t dma_addr;
703 	uint32_t user_flags;
704 	enum hif_ce_event_type type = FAST_TX_SOFTWARE_INDEX_UPDATE;
705 	bool ok_to_send = true;
706 
707 	/*
708 	 * Create a log assuming the call will go through; if it does not, an
709 	 * error trace is added as well.
710 	 * Please add the same failure log for any additional error paths.
711 	 */
712 	DPTRACE(qdf_dp_trace(msdu,
713 			QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
714 			QDF_TRACE_DEFAULT_PDEV_ID,
715 			qdf_nbuf_data_addr(msdu),
716 			sizeof(qdf_nbuf_data(msdu)), QDF_TX));
717 
718 	qdf_spin_lock_bh(&ce_state->ce_index_lock);
719 
720 	/*
721 	 * Request runtime PM resume if it has already suspended and make
722 	 * sure there is no PCIe link access.
723 	 */
724 	if (hif_pm_runtime_get(hif_hdl) != 0)
725 		ok_to_send = false;
726 
727 	if (ok_to_send) {
728 		Q_TARGET_ACCESS_BEGIN(scn);
729 		DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
730 	}
731 
732 	write_index = src_ring->write_index;
733 	sw_index = src_ring->sw_index;
734 	hif_record_ce_desc_event(scn, ce_state->id,
735 				FAST_TX_SOFTWARE_INDEX_UPDATE,
736 				NULL, NULL, sw_index, 0);
737 
738 	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1)
739 			 < SLOTS_PER_DATAPATH_TX)) {
740 		hif_err_rl("Source ring full, required %d, available %d",
741 			   SLOTS_PER_DATAPATH_TX,
742 			   CE_RING_DELTA(nentries_mask, write_index,
743 					 sw_index - 1));
744 		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
745 		if (ok_to_send)
746 			Q_TARGET_ACCESS_END(scn);
747 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
748 
749 		DPTRACE(qdf_dp_trace(NULL,
750 				QDF_DP_TRACE_CE_FAST_PACKET_ERR_RECORD,
751 				QDF_TRACE_DEFAULT_PDEV_ID,
752 				NULL, 0, QDF_TX));
753 
754 		return 0;
755 	}
756 
757 	{
758 		struct CE_src_desc *src_ring_base =
759 			(struct CE_src_desc *)src_ring->base_addr_owner_space;
760 		struct CE_src_desc *shadow_base =
761 			(struct CE_src_desc *)src_ring->shadow_base;
762 		struct CE_src_desc *src_desc =
763 			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
764 		struct CE_src_desc *shadow_src_desc =
765 			CE_SRC_RING_TO_DESC(shadow_base, write_index);
766 
767 		hif_pm_runtime_get_noresume(hif_hdl);
768 
769 		/*
770 		 * First fill out the ring descriptor for the HTC HTT frame
771 		 * header. These are uncached writes. Should we use a local
772 		 * structure instead?
773 		 */
774 		/* HTT/HTC header can be passed as an argument */
775 		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 0);
776 		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
777 							  0xFFFFFFFF);
778 		user_flags = qdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
779 		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
780 			shadow_src_desc->meta_data = transfer_id;
781 		shadow_src_desc->nbytes = qdf_nbuf_get_frag_len(msdu, 0);
782 		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
783 		download_len -= shadow_src_desc->nbytes;
784 		/*
785 		 * HTC HTT header is a word stream, so byte swap if CE byte
786 		 * swap enabled
787 		 */
788 		shadow_src_desc->byte_swap = ((ce_state->attr_flags &
789 					CE_ATTR_BYTE_SWAP_DATA) != 0);
790 		/* For the first (gather) fragment, no need to write the index to HW yet */
791 		shadow_src_desc->gather = 1;
792 		*src_desc = *shadow_src_desc;
793 		/* By default we could initialize the transfer context to this
794 		 * value
795 		 */
796 		src_ring->per_transfer_context[write_index] =
797 			CE_SENDLIST_ITEM_CTXT;
798 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
799 
800 		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
801 		shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
802 		/*
803 		 * Now fill out the ring descriptor for the actual data
804 		 * packet
805 		 */
806 		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 1);
807 		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
808 							  0xFFFFFFFF);
809 		/*
810 		 * Clear packet offset for all but the first CE desc.
811 		 */
812 		user_flags &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
813 		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
814 		shadow_src_desc->meta_data = transfer_id;
815 
816 		/* get actual packet length */
817 		frag_len = qdf_nbuf_get_frag_len(msdu, 1);
818 
819 		/* download remaining bytes of payload */
820 		shadow_src_desc->nbytes =  download_len;
821 		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
822 		if (shadow_src_desc->nbytes > frag_len)
823 			shadow_src_desc->nbytes = frag_len;
824 
825 		/*  Data packet is a byte stream, so disable byte swap */
826 		shadow_src_desc->byte_swap = 0;
827 		/* For the last one, gather is not set */
828 		shadow_src_desc->gather    = 0;
829 		*src_desc = *shadow_src_desc;
830 		src_ring->per_transfer_context[write_index] = msdu;
831 
832 		hif_record_ce_desc_event(scn, ce_state->id, type,
833 					(union ce_desc *)src_desc,
834 				src_ring->per_transfer_context[write_index],
835 					write_index, shadow_src_desc->nbytes);
836 
837 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
838 
839 		DPTRACE(qdf_dp_trace(msdu,
840 			QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
841 			QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(msdu),
842 			sizeof(qdf_nbuf_data(msdu)), QDF_TX));
843 	}
844 
845 	src_ring->write_index = write_index;
846 
847 	if (ok_to_send) {
848 		if (qdf_likely(ce_state->state == CE_RUNNING)) {
849 			type = FAST_TX_WRITE_INDEX_UPDATE;
850 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
851 				write_index);
852 			Q_TARGET_ACCESS_END(scn);
853 		} else
854 			ce_state->state = CE_PENDING;
855 		hif_pm_runtime_put(hif_hdl);
856 	}
857 
858 	qdf_spin_unlock_bh(&ce_state->ce_index_lock);
859 
860 	/* sent 1 packet */
861 	return 1;
862 }
863 
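/*
 * Illustrative sketch (editorial, not part of the driver): posting a
 * single MSDU through the fastpath.  example_fastpath_xmit() is a
 * hypothetical name; download_len would normally be supplied by the HTT
 * layer.  ce_send_fast() returns the number of packets actually posted,
 * so 0 means the caller still owns the msdu.
 */
static int example_fastpath_xmit(struct CE_handle *ce_hdl, qdf_nbuf_t msdu,
				 unsigned int transfer_id,
				 uint32_t download_len)
{
	if (ce_send_fast(ce_hdl, msdu, transfer_id, download_len) == 0)
		return QDF_STATUS_E_RESOURCES;	/* ring full, msdu not consumed */

	return QDF_STATUS_SUCCESS;
}
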
864 /**
865  * ce_is_fastpath_enabled() - returns true if fastpath mode is enabled
866  * @scn: Handle to HIF context
867  *
868  * Return: true if fastpath is enabled else false.
869  */
870 static bool ce_is_fastpath_enabled(struct hif_softc *scn)
871 {
872 	return scn->fastpath_mode_on;
873 }
874 
875 /**
876  * ce_is_fastpath_handler_registered() - check whether a fastpath handler
877  * has been registered for this copy engine
878  * @ce_state: handle to copy engine
879  *
880  * Return: true if fastpath handler is registered for datapath CE.
881  */
882 static bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
883 {
884 	if (ce_state->fastpath_handler)
885 		return true;
886 	else
887 		return false;
888 }
889 
890 
891 #else
892 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
893 {
894 	return false;
895 }
896 
897 static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
898 {
899 	return false;
900 }
901 #endif /* WLAN_FEATURE_FASTPATH */
902 
903 #ifndef AH_NEED_TX_DATA_SWAP
904 #define AH_NEED_TX_DATA_SWAP 0
905 #endif
906 
907 /**
908  * ce_batch_send() - sends a bunch of msdus at once
909  * @ce_tx_hdl : pointer to CE handle
910  * @msdu : list of msdus to be sent
911  * @transfer_id : transfer id
912  * @len : Downloaded length
913  * @sendhead : sendhead
914  *
915  * Assumption : Called with an array of MSDU's
916  * Function:
917  * For each msdu in the array
918  * 1. Send each msdu
919  * 2. Increment write index accordingly.
920  *
921  * Return: list of msdus not sent
922  */
923 qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,  qdf_nbuf_t msdu,
924 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
925 {
926 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
927 	struct hif_softc *scn = ce_state->scn;
928 	struct CE_ring_state *src_ring = ce_state->src_ring;
929 	u_int32_t ctrl_addr = ce_state->ctrl_addr;
930 	/*  A_target_id_t targid = TARGID(scn);*/
931 
932 	uint32_t nentries_mask = src_ring->nentries_mask;
933 	uint32_t sw_index, write_index;
934 
935 	struct CE_src_desc *src_desc_base =
936 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
937 	uint32_t *src_desc;
938 
939 	struct CE_src_desc lsrc_desc = {0};
940 	int deltacount = 0;
941 	qdf_nbuf_t freelist = NULL, hfreelist = NULL, tempnext;
942 
943 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
944 	sw_index = src_ring->sw_index;
945 	write_index = src_ring->write_index;
946 
947 	deltacount = CE_RING_DELTA(nentries_mask, write_index, sw_index-1);
948 
949 	while (msdu) {
950 		tempnext = qdf_nbuf_next(msdu);
951 
952 		if (deltacount < 2) {
953 			if (sendhead)
954 				return msdu;
955 			HIF_ERROR("%s: Out of descriptors", __func__);
956 			src_ring->write_index = write_index;
957 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
958 					write_index);
959 
960 			sw_index = src_ring->sw_index;
961 			write_index = src_ring->write_index;
962 
963 			deltacount = CE_RING_DELTA(nentries_mask, write_index,
964 					sw_index-1);
965 			if (freelist == NULL) {
966 				freelist = msdu;
967 				hfreelist = msdu;
968 			} else {
969 				qdf_nbuf_set_next(freelist, msdu);
970 				freelist = msdu;
971 			}
972 			qdf_nbuf_set_next(msdu, NULL);
973 			msdu = tempnext;
974 			continue;
975 		}
976 
977 		src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base,
978 				write_index);
979 
980 		src_desc[0]   = qdf_nbuf_get_frag_paddr(msdu, 0);
981 
982 		lsrc_desc.meta_data = transfer_id;
983 		if (len  > msdu->len)
984 			len =  msdu->len;
985 		lsrc_desc.nbytes = len;
986 		/*  Data packet is a byte stream, so disable byte swap */
987 		lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
988 		lsrc_desc.gather    = 0; /*For the last one, gather is not set*/
989 
990 		src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
991 
992 
993 		src_ring->per_transfer_context[write_index] = msdu;
994 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
995 
996 		if (sendhead)
997 			break;
998 		qdf_nbuf_set_next(msdu, NULL);
999 		msdu = tempnext;
1000 
1001 	}
1002 
1003 
1004 	src_ring->write_index = write_index;
1005 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
1006 
1007 	return hfreelist;
1008 }
1009 
1010 /**
1011  * ce_update_tx_ring() - Advance sw index.
1012  * @ce_tx_hdl : pointer to CE handle
1013  * @num_htt_cmpls : htt completions received.
1014  *
1015  * Function:
1016  * Increment the value of sw index of src ring
1017  * according to number of htt completions
1018  * received.
1019  *
1020  * Return: void
1021  */
1022 #ifdef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
1023 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
1024 {
1025 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
1026 	struct CE_ring_state *src_ring = ce_state->src_ring;
1027 	uint32_t nentries_mask = src_ring->nentries_mask;
1028 	/*
1029 	 * Advance the s/w index:
1030 	 * This effectively simulates completing the CE ring descriptors
1031 	 */
1032 	src_ring->sw_index =
1033 		CE_RING_IDX_ADD(nentries_mask, src_ring->sw_index,
1034 				num_htt_cmpls);
1035 }
1036 #else
1037 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
1038 {}
1039 #endif
1040 
1041 /**
1042  * ce_send_single() - send a single msdu
1043  * @ce_tx_hdl : pointer to CE handle
1044  * @msdu : msdu to be sent
1045  * @transfer_id : transfer id
1046  * @len : Downloaded length
1047  *
1048  * Function:
1049  * 1. Send one msdu
1050  * 2. Increment write index of src ring accordingly.
1051  *
1052  * Return: int: CE sent status
1053  */
1054 int ce_send_single(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
1055 		uint32_t transfer_id, u_int32_t len)
1056 {
1057 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
1058 	struct hif_softc *scn = ce_state->scn;
1059 	struct CE_ring_state *src_ring = ce_state->src_ring;
1060 	uint32_t ctrl_addr = ce_state->ctrl_addr;
1061 	/*A_target_id_t targid = TARGID(scn);*/
1062 
1063 	uint32_t nentries_mask = src_ring->nentries_mask;
1064 	uint32_t sw_index, write_index;
1065 
1066 	struct CE_src_desc *src_desc_base =
1067 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
1068 	uint32_t *src_desc;
1069 
1070 	struct CE_src_desc lsrc_desc = {0};
1071 	enum hif_ce_event_type event_type;
1072 
1073 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
1074 	sw_index = src_ring->sw_index;
1075 	write_index = src_ring->write_index;
1076 
1077 	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index,
1078 					sw_index-1) < 1)) {
1079 		/* ol_tx_stats_inc_ring_error(sc->scn->pdev_txrx_handle, 1); */
1080 		HIF_ERROR("%s: ce send fail %d %d %d", __func__, nentries_mask,
1081 			  write_index, sw_index);
1082 		return 1;
1083 	}
1084 
1085 	src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base, write_index);
1086 
1087 	src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0);
1088 
1089 	lsrc_desc.meta_data = transfer_id;
1090 	lsrc_desc.nbytes = len;
1091 	/*  Data packet is a byte stream, so disable byte swap */
1092 	lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
1093 	lsrc_desc.gather    = 0; /* For the last one, gather is not set */
1094 
1095 	src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
1096 
1097 
1098 	src_ring->per_transfer_context[write_index] = msdu;
1099 
1100 	if (((struct CE_src_desc *)src_desc)->gather)
1101 		event_type = HIF_TX_GATHER_DESC_POST;
1102 	else if (qdf_unlikely(ce_state->state != CE_RUNNING))
1103 		event_type = HIF_TX_DESC_SOFTWARE_POST;
1104 	else
1105 		event_type = HIF_TX_DESC_POST;
1106 
1107 	hif_record_ce_desc_event(scn, ce_state->id, event_type,
1108 				(union ce_desc *)src_desc, msdu,
1109 				write_index, len);
1110 
1111 	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
1112 
1113 	src_ring->write_index = write_index;
1114 
1115 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
1116 
1117 	return QDF_STATUS_SUCCESS;
1118 }
1119 
1120 /**
1121  * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine
1122  * @copyeng: copy engine handle
1123  * @per_recv_context: virtual address of the nbuf
1124  * @buffer: physical address of the nbuf
1125  *
1126  * Return: 0 if the buffer is enqueued
1127  */
1128 int
1129 ce_recv_buf_enqueue(struct CE_handle *copyeng,
1130 		    void *per_recv_context, qdf_dma_addr_t buffer)
1131 {
1132 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1133 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
1134 
1135 	return hif_state->ce_services->ce_recv_buf_enqueue(copyeng,
1136 			per_recv_context, buffer);
1137 }
1138 
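/*
 * Illustrative sketch (editorial, not part of the driver): replenishing
 * a destination ring with anonymous receive buffers.  The function name
 * example_post_rx_buffers() and the nbufs[]/paddrs[] arrays are
 * hypothetical; each buffer is assumed to be DMA mapped already.
 */
static int example_post_rx_buffers(struct CE_handle *ce_hdl,
				   qdf_nbuf_t *nbufs,
				   qdf_dma_addr_t *paddrs,
				   int count)
{
	int i;

	for (i = 0; i < count; i++) {
		/* The nbuf is echoed back as the per-transfer context */
		if (ce_recv_buf_enqueue(ce_hdl, nbufs[i], paddrs[i]) !=
		    QDF_STATUS_SUCCESS)
			break;	/* destination ring is full */
	}

	return i;	/* number of buffers actually posted */
}
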
1139 /**
1140  * ce_recv_buf_enqueue_legacy() - enqueue a recv buffer into a copy engine
1141  * @copyeng: copy engine handle
1142  * @per_recv_context: virtual address of the nbuf
1143  * @buffer: physical address of the nbuf
1144  *
1145  * Return: 0 if the buffer is enqueued
1146  */
1147 static int
1148 ce_recv_buf_enqueue_legacy(struct CE_handle *copyeng,
1149 		    void *per_recv_context, qdf_dma_addr_t buffer)
1150 {
1151 	int status;
1152 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1153 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
1154 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1155 	unsigned int nentries_mask = dest_ring->nentries_mask;
1156 	unsigned int write_index;
1157 	unsigned int sw_index;
1158 	uint64_t dma_addr = buffer;
1159 	struct hif_softc *scn = CE_state->scn;
1160 
1161 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
1162 	write_index = dest_ring->write_index;
1163 	sw_index = dest_ring->sw_index;
1164 
1165 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
1166 		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
1167 		return -EIO;
1168 	}
1169 
1170 	if ((CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) ||
1171 	    (ce_is_fastpath_enabled(scn) && CE_state->htt_rx_data)) {
1172 		struct CE_dest_desc *dest_ring_base =
1173 			(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
1174 		struct CE_dest_desc *dest_desc =
1175 			CE_DEST_RING_TO_DESC(dest_ring_base, write_index);
1176 
1177 		/* Update low 32 bit destination descriptor */
1178 		dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
1179 #ifdef QCA_WIFI_3_0
1180 		dest_desc->buffer_addr_hi =
1181 			(uint32_t)((dma_addr >> 32) & 0x1F);
1182 #endif
1183 		dest_desc->nbytes = 0;
1184 
1185 		dest_ring->per_transfer_context[write_index] =
1186 			per_recv_context;
1187 
1188 		hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_POST,
1189 				(union ce_desc *) dest_desc, per_recv_context,
1190 				write_index, 0);
1191 
1192 		/* Update Destination Ring Write Index */
1193 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
1194 		if (write_index != sw_index) {
1195 			CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
1196 			dest_ring->write_index = write_index;
1197 		}
1198 		status = QDF_STATUS_SUCCESS;
1199 	} else
1200 		status = QDF_STATUS_E_FAILURE;
1201 
1202 	Q_TARGET_ACCESS_END(scn);
1203 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
1204 	return status;
1205 }
1206 
1207 void
1208 ce_send_watermarks_set(struct CE_handle *copyeng,
1209 		       unsigned int low_alert_nentries,
1210 		       unsigned int high_alert_nentries)
1211 {
1212 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1213 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1214 	struct hif_softc *scn = CE_state->scn;
1215 
1216 	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
1217 	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
1218 }
1219 
1220 void
1221 ce_recv_watermarks_set(struct CE_handle *copyeng,
1222 		       unsigned int low_alert_nentries,
1223 		       unsigned int high_alert_nentries)
1224 {
1225 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1226 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1227 	struct hif_softc *scn = CE_state->scn;
1228 
1229 	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
1230 				low_alert_nentries);
1231 	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
1232 				high_alert_nentries);
1233 }
1234 
1235 unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
1236 {
1237 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1238 	struct CE_ring_state *src_ring = CE_state->src_ring;
1239 	unsigned int nentries_mask = src_ring->nentries_mask;
1240 	unsigned int sw_index;
1241 	unsigned int write_index;
1242 
1243 	qdf_spin_lock(&CE_state->ce_index_lock);
1244 	sw_index = src_ring->sw_index;
1245 	write_index = src_ring->write_index;
1246 	qdf_spin_unlock(&CE_state->ce_index_lock);
1247 
1248 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
1249 }
1250 
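/*
 * Worked example (editorial, not part of the driver): for an 8-entry
 * source ring nentries_mask is 7.  With write_index = 6 and sw_index = 2,
 * and assuming the usual definition CE_RING_DELTA(mask, from, to) ==
 * (((int)(to) - (int)(from)) & mask), the value returned above is
 * ((2 - 1) - 6) & 7 = 3: four descriptors are still pending (indices
 * 2..5) and one slot is always kept empty to distinguish full from
 * empty, leaving three entries free for new sends.
 */
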
1251 unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
1252 {
1253 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1254 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
1255 	unsigned int nentries_mask = dest_ring->nentries_mask;
1256 	unsigned int sw_index;
1257 	unsigned int write_index;
1258 
1259 	qdf_spin_lock(&CE_state->ce_index_lock);
1260 	sw_index = dest_ring->sw_index;
1261 	write_index = dest_ring->write_index;
1262 	qdf_spin_unlock(&CE_state->ce_index_lock);
1263 
1264 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
1265 }
1266 
1267 /*
1268  * Guts of ce_send_entries_done.
1269  * The caller takes responsibility for any necessary locking.
1270  */
1271 static unsigned int
1272 ce_send_entries_done_nolock_legacy(struct hif_softc *scn,
1273 			    struct CE_state *CE_state)
1274 {
1275 	struct CE_ring_state *src_ring = CE_state->src_ring;
1276 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1277 	unsigned int nentries_mask = src_ring->nentries_mask;
1278 	unsigned int sw_index;
1279 	unsigned int read_index;
1280 
1281 	sw_index = src_ring->sw_index;
1282 	read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
1283 
1284 	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
1285 }
1286 
1287 unsigned int ce_send_entries_done(struct CE_handle *copyeng)
1288 {
1289 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1290 	unsigned int nentries;
1291 	struct hif_softc *scn = CE_state->scn;
1292 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1293 
1294 	qdf_spin_lock(&CE_state->ce_index_lock);
1295 	nentries = hif_state->ce_services->ce_send_entries_done_nolock(
1296 						CE_state->scn, CE_state);
1297 	qdf_spin_unlock(&CE_state->ce_index_lock);
1298 
1299 	return nentries;
1300 }
1301 
1302 /*
1303  * Guts of ce_recv_entries_done.
1304  * The caller takes responsibility for any necessary locking.
1305  */
1306 static unsigned int
1307 ce_recv_entries_done_nolock_legacy(struct hif_softc *scn,
1308 			    struct CE_state *CE_state)
1309 {
1310 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
1311 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1312 	unsigned int nentries_mask = dest_ring->nentries_mask;
1313 	unsigned int sw_index;
1314 	unsigned int read_index;
1315 
1316 	sw_index = dest_ring->sw_index;
1317 	read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
1318 
1319 	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
1320 }
1321 
1322 unsigned int ce_recv_entries_done(struct CE_handle *copyeng)
1323 {
1324 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1325 	unsigned int nentries;
1326 	struct hif_softc *scn = CE_state->scn;
1327 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1328 
1329 	qdf_spin_lock(&CE_state->ce_index_lock);
1330 	nentries = hif_state->ce_services->ce_recv_entries_done_nolock(
1331 						CE_state->scn, CE_state);
1332 	qdf_spin_unlock(&CE_state->ce_index_lock);
1333 
1334 	return nentries;
1335 }
1336 
1337 /*
1338  * Guts of ce_completed_recv_next.
1339  * The caller takes responsibility for any necessary locking.
1340  */
1341 static int
1342 ce_completed_recv_next_nolock_legacy(struct CE_state *CE_state,
1343 			      void **per_CE_contextp,
1344 			      void **per_transfer_contextp,
1345 			      qdf_dma_addr_t *bufferp,
1346 			      unsigned int *nbytesp,
1347 			      unsigned int *transfer_idp,
1348 			      unsigned int *flagsp)
1349 {
1350 	int status;
1351 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
1352 	unsigned int nentries_mask = dest_ring->nentries_mask;
1353 	unsigned int sw_index = dest_ring->sw_index;
1354 	struct hif_softc *scn = CE_state->scn;
1355 	struct CE_dest_desc *dest_ring_base =
1356 		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
1357 	struct CE_dest_desc *dest_desc =
1358 		CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
1359 	int nbytes;
1360 	struct CE_dest_desc dest_desc_info;
1361 	/*
1362 	 * By copying the dest_desc_info element to local memory, we can
1363 	 * avoid an extra memory read from non-cacheable memory.
1364 	 */
1365 	dest_desc_info =  *dest_desc;
1366 	nbytes = dest_desc_info.nbytes;
1367 	if (nbytes == 0) {
1368 		/*
1369 		 * This closes a relatively unusual race where the Host
1370 		 * sees the updated DRRI before the update to the
1371 		 * corresponding descriptor has completed. We treat this
1372 		 * as a descriptor that is not yet done.
1373 		 */
1374 		status = QDF_STATUS_E_FAILURE;
1375 		goto done;
1376 	}
1377 
1378 	hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_COMPLETION,
1379 			(union ce_desc *) dest_desc,
1380 			dest_ring->per_transfer_context[sw_index],
1381 			sw_index, 0);
1382 
1383 	dest_desc->nbytes = 0;
1384 
1385 	/* Return data from completed destination descriptor */
1386 	*bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info);
1387 	*nbytesp = nbytes;
1388 	*transfer_idp = dest_desc_info.meta_data;
1389 	*flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;
1390 
1391 	if (per_CE_contextp)
1392 		*per_CE_contextp = CE_state->recv_context;
1393 
1394 	if (per_transfer_contextp) {
1395 		*per_transfer_contextp =
1396 			dest_ring->per_transfer_context[sw_index];
1397 	}
1398 	dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */
1399 
1400 	/* Update sw_index */
1401 	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1402 	dest_ring->sw_index = sw_index;
1403 	status = QDF_STATUS_SUCCESS;
1404 
1405 done:
1406 	return status;
1407 }
1408 
1409 int
1410 ce_completed_recv_next(struct CE_handle *copyeng,
1411 		       void **per_CE_contextp,
1412 		       void **per_transfer_contextp,
1413 		       qdf_dma_addr_t *bufferp,
1414 		       unsigned int *nbytesp,
1415 		       unsigned int *transfer_idp, unsigned int *flagsp)
1416 {
1417 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1418 	int status;
1419 	struct hif_softc *scn = CE_state->scn;
1420 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1421 	struct ce_ops *ce_services;
1422 
1423 	ce_services = hif_state->ce_services;
1424 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
1425 	status =
1426 		ce_services->ce_completed_recv_next_nolock(CE_state,
1427 				per_CE_contextp, per_transfer_contextp, bufferp,
1428 					      nbytesp, transfer_idp, flagsp);
1429 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
1430 
1431 	return status;
1432 }
1433 
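/*
 * Illustrative sketch (editorial, not part of the driver): draining
 * completed receive descriptors.  example_drain_rx() is a hypothetical
 * name; a real handler would hand each nbuf to the upper layer and then
 * replenish the destination ring.
 */
static void example_drain_rx(struct CE_handle *ce_hdl)
{
	void *ce_ctx, *xfer_ctx;
	qdf_dma_addr_t paddr;
	unsigned int nbytes, id, flags;

	while (ce_completed_recv_next(ce_hdl, &ce_ctx, &xfer_ctx, &paddr,
				      &nbytes, &id, &flags) ==
	       QDF_STATUS_SUCCESS) {
		/* xfer_ctx is the nbuf posted via ce_recv_buf_enqueue() */
		qdf_nbuf_t nbuf = (qdf_nbuf_t)xfer_ctx;

		qdf_nbuf_set_pktlen(nbuf, nbytes);
		/* hand nbuf to the protocol layer here */
	}
}
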
1434 QDF_STATUS
1435 ce_revoke_recv_next(struct CE_handle *copyeng,
1436 		    void **per_CE_contextp,
1437 		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
1438 {
1439 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1440 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
1441 
1442 	return hif_state->ce_services->ce_revoke_recv_next(copyeng,
1443 			per_CE_contextp, per_transfer_contextp, bufferp);
1444 }
1445 /* NB: Modeled after ce_completed_recv_next_nolock */
1446 static QDF_STATUS
1447 ce_revoke_recv_next_legacy(struct CE_handle *copyeng,
1448 		    void **per_CE_contextp,
1449 		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
1450 {
1451 	struct CE_state *CE_state;
1452 	struct CE_ring_state *dest_ring;
1453 	unsigned int nentries_mask;
1454 	unsigned int sw_index;
1455 	unsigned int write_index;
1456 	QDF_STATUS status;
1457 	struct hif_softc *scn;
1458 
1459 	CE_state = (struct CE_state *)copyeng;
1460 	dest_ring = CE_state->dest_ring;
1461 	if (!dest_ring)
1462 		return QDF_STATUS_E_FAILURE;
1463 
1464 	scn = CE_state->scn;
1465 	qdf_spin_lock(&CE_state->ce_index_lock);
1466 	nentries_mask = dest_ring->nentries_mask;
1467 	sw_index = dest_ring->sw_index;
1468 	write_index = dest_ring->write_index;
1469 	if (write_index != sw_index) {
1470 		struct CE_dest_desc *dest_ring_base =
1471 			(struct CE_dest_desc *)dest_ring->
1472 			    base_addr_owner_space;
1473 		struct CE_dest_desc *dest_desc =
1474 			CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
1475 
1476 		/* Return data from completed destination descriptor */
1477 		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc);
1478 
1479 		if (per_CE_contextp)
1480 			*per_CE_contextp = CE_state->recv_context;
1481 
1482 		if (per_transfer_contextp) {
1483 			*per_transfer_contextp =
1484 				dest_ring->per_transfer_context[sw_index];
1485 		}
1486 		dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */
1487 
1488 		/* Update sw_index */
1489 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1490 		dest_ring->sw_index = sw_index;
1491 		status = QDF_STATUS_SUCCESS;
1492 	} else {
1493 		status = QDF_STATUS_E_FAILURE;
1494 	}
1495 	qdf_spin_unlock(&CE_state->ce_index_lock);
1496 
1497 	return status;
1498 }
1499 
1500 /*
1501  * Guts of ce_completed_send_next.
1502  * The caller takes responsibility for any necessary locking.
1503  */
1504 static int
1505 ce_completed_send_next_nolock_legacy(struct CE_state *CE_state,
1506 			      void **per_CE_contextp,
1507 			      void **per_transfer_contextp,
1508 			      qdf_dma_addr_t *bufferp,
1509 			      unsigned int *nbytesp,
1510 			      unsigned int *transfer_idp,
1511 			      unsigned int *sw_idx,
1512 			      unsigned int *hw_idx,
1513 			      uint32_t *toeplitz_hash_result)
1514 {
1515 	int status = QDF_STATUS_E_FAILURE;
1516 	struct CE_ring_state *src_ring = CE_state->src_ring;
1517 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1518 	unsigned int nentries_mask = src_ring->nentries_mask;
1519 	unsigned int sw_index = src_ring->sw_index;
1520 	unsigned int read_index;
1521 	struct hif_softc *scn = CE_state->scn;
1522 
1523 	if (src_ring->hw_index == sw_index) {
1524 		/*
1525 		 * The SW completion index has caught up with the cached
1526 		 * version of the HW completion index.
1527 		 * Update the cached HW completion index to see whether
1528 		 * the SW has really caught up to the HW, or if the cached
1529 		 * value of the HW index has become stale.
1530 		 */
1531 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1532 			return QDF_STATUS_E_FAILURE;
1533 		src_ring->hw_index =
1534 			CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
1535 		if (Q_TARGET_ACCESS_END(scn) < 0)
1536 			return QDF_STATUS_E_FAILURE;
1537 	}
1538 	read_index = src_ring->hw_index;
1539 
1540 	if (sw_idx)
1541 		*sw_idx = sw_index;
1542 
1543 	if (hw_idx)
1544 		*hw_idx = read_index;
1545 
1546 	if ((read_index != sw_index) && (read_index != 0xffffffff)) {
1547 		struct CE_src_desc *shadow_base =
1548 			(struct CE_src_desc *)src_ring->shadow_base;
1549 		struct CE_src_desc *shadow_src_desc =
1550 			CE_SRC_RING_TO_DESC(shadow_base, sw_index);
1551 #ifdef QCA_WIFI_3_0
1552 		struct CE_src_desc *src_ring_base =
1553 			(struct CE_src_desc *)src_ring->base_addr_owner_space;
1554 		struct CE_src_desc *src_desc =
1555 			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
1556 #endif
1557 		hif_record_ce_desc_event(scn, CE_state->id,
1558 				HIF_TX_DESC_COMPLETION,
1559 				(union ce_desc *) shadow_src_desc,
1560 				src_ring->per_transfer_context[sw_index],
1561 				sw_index, shadow_src_desc->nbytes);
1562 
1563 		/* Return data from completed source descriptor */
1564 		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc);
1565 		*nbytesp = shadow_src_desc->nbytes;
1566 		*transfer_idp = shadow_src_desc->meta_data;
1567 #ifdef QCA_WIFI_3_0
1568 		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
1569 #else
1570 		*toeplitz_hash_result = 0;
1571 #endif
1572 		if (per_CE_contextp)
1573 			*per_CE_contextp = CE_state->send_context;
1574 
1575 		if (per_transfer_contextp) {
1576 			*per_transfer_contextp =
1577 				src_ring->per_transfer_context[sw_index];
1578 		}
1579 		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */
1580 
1581 		/* Update sw_index */
1582 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1583 		src_ring->sw_index = sw_index;
1584 		status = QDF_STATUS_SUCCESS;
1585 	}
1586 
1587 	return status;
1588 }
1589 
1590 QDF_STATUS
1591 ce_cancel_send_next(struct CE_handle *copyeng,
1592 		void **per_CE_contextp,
1593 		void **per_transfer_contextp,
1594 		qdf_dma_addr_t *bufferp,
1595 		unsigned int *nbytesp,
1596 		unsigned int *transfer_idp,
1597 		uint32_t *toeplitz_hash_result)
1598 {
1599 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1600 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
1601 
1602 	return hif_state->ce_services->ce_cancel_send_next
1603 		(copyeng, per_CE_contextp, per_transfer_contextp,
1604 		 bufferp, nbytesp, transfer_idp, toeplitz_hash_result);
1605 }
1606 
1607 /* NB: Modeled after ce_completed_send_next */
1608 static QDF_STATUS
1609 ce_cancel_send_next_legacy(struct CE_handle *copyeng,
1610 		void **per_CE_contextp,
1611 		void **per_transfer_contextp,
1612 		qdf_dma_addr_t *bufferp,
1613 		unsigned int *nbytesp,
1614 		unsigned int *transfer_idp,
1615 		uint32_t *toeplitz_hash_result)
1616 {
1617 	struct CE_state *CE_state;
1618 	struct CE_ring_state *src_ring;
1619 	unsigned int nentries_mask;
1620 	unsigned int sw_index;
1621 	unsigned int write_index;
1622 	QDF_STATUS status;
1623 	struct hif_softc *scn;
1624 
1625 	CE_state = (struct CE_state *)copyeng;
1626 	src_ring = CE_state->src_ring;
1627 	if (!src_ring)
1628 		return QDF_STATUS_E_FAILURE;
1629 
1630 	scn = CE_state->scn;
1631 	qdf_spin_lock(&CE_state->ce_index_lock);
1632 	nentries_mask = src_ring->nentries_mask;
1633 	sw_index = src_ring->sw_index;
1634 	write_index = src_ring->write_index;
1635 
1636 	if (write_index != sw_index) {
1637 		struct CE_src_desc *src_ring_base =
1638 			(struct CE_src_desc *)src_ring->base_addr_owner_space;
1639 		struct CE_src_desc *src_desc =
1640 			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
1641 
1642 		/* Return data from completed source descriptor */
1643 		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc);
1644 		*nbytesp = src_desc->nbytes;
1645 		*transfer_idp = src_desc->meta_data;
1646 #ifdef QCA_WIFI_3_0
1647 		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
1648 #else
1649 		*toeplitz_hash_result = 0;
1650 #endif
1651 
1652 		if (per_CE_contextp)
1653 			*per_CE_contextp = CE_state->send_context;
1654 
1655 		if (per_transfer_contextp) {
1656 			*per_transfer_contextp =
1657 				src_ring->per_transfer_context[sw_index];
1658 		}
1659 		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */
1660 
1661 		/* Update sw_index */
1662 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1663 		src_ring->sw_index = sw_index;
1664 		status = QDF_STATUS_SUCCESS;
1665 	} else {
1666 		status = QDF_STATUS_E_FAILURE;
1667 	}
1668 	qdf_spin_unlock(&CE_state->ce_index_lock);
1669 
1670 	return status;
1671 }
1672 
1673 /* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
1674 #define CE_WM_SHFT 1
1675 
1676 int
1677 ce_completed_send_next(struct CE_handle *copyeng,
1678 		       void **per_CE_contextp,
1679 		       void **per_transfer_contextp,
1680 		       qdf_dma_addr_t *bufferp,
1681 		       unsigned int *nbytesp,
1682 		       unsigned int *transfer_idp,
1683 		       unsigned int *sw_idx,
1684 		       unsigned int *hw_idx,
1685 		       unsigned int *toeplitz_hash_result)
1686 {
1687 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1688 	struct hif_softc *scn = CE_state->scn;
1689 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1690 	struct ce_ops *ce_services;
1691 	int status;
1692 
1693 	ce_services = hif_state->ce_services;
1694 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
1695 	status =
1696 		ce_services->ce_completed_send_next_nolock(CE_state,
1697 					per_CE_contextp, per_transfer_contextp,
1698 					bufferp, nbytesp, transfer_idp, sw_idx,
1699 					      hw_idx, toeplitz_hash_result);
1700 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
1701 
1702 	return status;
1703 }
1704 
1705 #ifdef ATH_11AC_TXCOMPACT
1706 /* CE engine descriptor reap
1707  * Similar to ce_per_engine_service. The only difference is that
1708  * ce_per_engine_service handles both receive and the reaping of completed
1709  * descriptors, whereas this function only reaps Tx complete descriptors.
1710  * It is called from the threshold reap poll routine
1711  * hif_send_complete_check, so it must not contain any receive
1712  * functionality.
1713  */
1714 
1715 void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
1716 {
1717 	void *CE_context;
1718 	void *transfer_context;
1719 	qdf_dma_addr_t buf;
1720 	unsigned int nbytes;
1721 	unsigned int id;
1722 	unsigned int sw_idx, hw_idx;
1723 	uint32_t toeplitz_hash_result;
1724 	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
1725 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1726 
1727 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1728 		return;
1729 
1730 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
1731 			NULL, NULL, 0, 0);
1732 
1733 	/* Since this function is called from both user context and
1734 	 * tasklet context, the spinlock has to lock out the bottom halves.
1735 	 * This fix assumes that the ATH_11AC_TXCOMPACT flag is always
1736 	 * enabled in TX polling mode. If this is not the case, more
1737 	 * bottom-half spinlock changes are needed. Due to data path
1738 	 * performance concerns, after internal discussion we decided
1739 	 * to make the minimum change, i.e., only address the issue seen
1740 	 * in this function. The possible negative effect of this minimal
1741 	 * change is that, if some other function is later opened up for
1742 	 * use from user context, those cases will also need to be
1743 	 * addressed by changing spin_lock to spin_lock_bh.
1744 	 */
1745 
1746 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
1747 
1748 	if (CE_state->send_cb) {
1749 		{
1750 			struct ce_ops *ce_services = hif_state->ce_services;
1751 			/* Pop completed send buffers and call the
1752 			 * registered send callback for each
1753 			 */
1754 			while (ce_services->ce_completed_send_next_nolock
1755 				 (CE_state, &CE_context,
1756 				  &transfer_context, &buf,
1757 				  &nbytes, &id, &sw_idx, &hw_idx,
1758 				  &toeplitz_hash_result) ==
1759 				  QDF_STATUS_SUCCESS) {
1760 				if (ce_id != CE_HTT_H2T_MSG) {
1761 					qdf_spin_unlock_bh(
1762 						&CE_state->ce_index_lock);
1763 					CE_state->send_cb(
1764 						(struct CE_handle *)
1765 						CE_state, CE_context,
1766 						transfer_context, buf,
1767 						nbytes, id, sw_idx, hw_idx,
1768 						toeplitz_hash_result);
1769 					qdf_spin_lock_bh(
1770 						&CE_state->ce_index_lock);
1771 				} else {
1772 					struct HIF_CE_pipe_info *pipe_info =
1773 						(struct HIF_CE_pipe_info *)
1774 						CE_context;
1775 
1776 					qdf_spin_lock_bh(&pipe_info->
1777 						 completion_freeq_lock);
1778 					pipe_info->num_sends_allowed++;
1779 					qdf_spin_unlock_bh(&pipe_info->
1780 						   completion_freeq_lock);
1781 				}
1782 			}
1783 		}
1784 	}
1785 
1786 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
1787 
1788 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
1789 			NULL, NULL, 0, 0);
1790 	Q_TARGET_ACCESS_END(scn);
1791 }
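/*
 * Illustrative caller sketch (hypothetical names, not compiled): a Tx
 * reap poll would walk the copy engines roughly as below; the real entry
 * point referenced in the comment above is hif_send_complete_check().
 *
 *	static void example_tx_reap_poll(struct hif_softc *scn)
 *	{
 *		unsigned int ce_id;
 *
 *		for (ce_id = 0; ce_id < scn->ce_count; ce_id++)
 *			ce_per_engine_servicereap(scn, ce_id);
 *	}
 */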
1792 
1793 #endif /*ATH_11AC_TXCOMPACT */
1794 
1795 /*
1796  * Number of times to check for any pending tx/rx completion on
1797  * a copy engine; this count should be big enough. Once we hit
1798  * this threshold we no longer check for any Tx/Rx completion in the same
1799  * interrupt handling pass. Note that this threshold is only used for
1800  * Rx interrupt processing; it can be used for Tx as well if we
1801  * suspect an infinite loop in checking for pending Tx completions.
1802  */
1803 #define CE_TXRX_COMP_CHECK_THRESHOLD 20
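/*
 * Sketch of the bounded re-check pattern this threshold guards (condensed
 * from ce_per_engine_service() below); it keeps the interrupt handler from
 * looping forever if completions keep arriving:
 *
 *	if (hif_state->ce_services->ce_recv_entries_done_nolock(scn, CE_state)) {
 *		if (more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD)
 *			goto more_completions;
 *		else
 *			HIF_ERROR("Potential infinite loop detected ...");
 *	}
 */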
1804 
1805 #ifdef WLAN_FEATURE_FASTPATH
1806 /**
1807  * ce_fastpath_rx_handle() - Updates write_index and calls fastpath msg handler
1808  * @ce_state: handle to copy engine state
1809  * @cmpl_msdus: Rx msdus
1810  * @num_cmpls: number of Rx msdus
1811  * @ctrl_addr: CE control address
1812  *
1813  * Return: None
1814  */
1815 static void ce_fastpath_rx_handle(struct CE_state *ce_state,
1816 				  qdf_nbuf_t *cmpl_msdus, uint32_t num_cmpls,
1817 				  uint32_t ctrl_addr)
1818 {
1819 	struct hif_softc *scn = ce_state->scn;
1820 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
1821 	uint32_t nentries_mask = dest_ring->nentries_mask;
1822 	uint32_t write_index;
1823 
1824 	qdf_spin_unlock(&ce_state->ce_index_lock);
1825 	(ce_state->fastpath_handler)(ce_state->context, cmpl_msdus, num_cmpls);
1826 	qdf_spin_lock(&ce_state->ce_index_lock);
1827 
1828 	/* Update Destination Ring Write Index */
1829 	write_index = dest_ring->write_index;
1830 	write_index = CE_RING_IDX_ADD(nentries_mask, write_index, num_cmpls);
1831 
1832 	hif_record_ce_desc_event(scn, ce_state->id,
1833 			FAST_RX_WRITE_INDEX_UPDATE,
1834 			NULL, NULL, write_index, 0);
1835 
1836 	CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
1837 	dest_ring->write_index = write_index;
1838 }
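/*
 * Note on the index arithmetic above: CE_RING_IDX_ADD() advances the write
 * index modulo the ring size (nentries is a power of two, so nentries_mask
 * is nentries - 1). A small worked example, assuming a 512-entry ring
 * (nentries_mask = 511): advancing write_index 510 by num_cmpls = 4 gives
 * (510 + 4) & 511 = 2, i.e. the index wraps back to the start of the ring.
 */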
1839 
1840 /**
1841  * ce_per_engine_service_fast() - CE handler routine to service fastpath msgs
1842  * @scn: hif_context
1843  * @ce_id: Copy engine ID
1844  * 1) Go through the CE ring, and find the completions
1845  * 2) For valid completions retrieve context (nbuf) for per_transfer_context[]
1846  * 3) Unmap buffer & accumulate in an array.
1847  * 4) Call message handler when array is full or when exiting the handler
1848  *
1849  * Return: void
1850  */
1851 
1852 static void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
1853 {
1854 	struct CE_state *ce_state = scn->ce_id_to_state[ce_id];
1855 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1856 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
1857 	struct CE_dest_desc *dest_ring_base =
1858 		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
1859 
1860 	uint32_t nentries_mask = dest_ring->nentries_mask;
1861 	uint32_t sw_index = dest_ring->sw_index;
1862 	uint32_t nbytes;
1863 	qdf_nbuf_t nbuf;
1864 	dma_addr_t paddr;
1865 	struct CE_dest_desc *dest_desc;
1866 	qdf_nbuf_t cmpl_msdus[MSG_FLUSH_NUM];
1867 	uint32_t ctrl_addr = ce_state->ctrl_addr;
1868 	uint32_t nbuf_cmpl_idx = 0;
1869 	unsigned int more_comp_cnt = 0;
1870 
1871 more_data:
1872 	for (;;) {
1873 
1874 		dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base,
1875 						 sw_index);
1876 
1877 		/*
1878 		 * The following 2 reads are from non-cached memory
1879 		 */
1880 		nbytes = dest_desc->nbytes;
1881 
1882 		/* If completion is invalid, break */
1883 		if (qdf_unlikely(nbytes == 0))
1884 			break;
1885 
1886 
1887 		/*
1888 		 * Build the nbuf list from valid completions
1889 		 */
1890 		nbuf = dest_ring->per_transfer_context[sw_index];
1891 
1892 		/*
1893 		 * No lock is needed here, since this is the only thread
1894 		 * that accesses the sw_index
1895 		 */
1896 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1897 
1898 		/*
1899 		 * CAREFUL : Uncached write, but still less expensive,
1900 		 * since most modern caches use "write-combining" to
1901 		 * flush multiple cache-writes all at once.
1902 		 */
1903 		dest_desc->nbytes = 0;
1904 
1905 		/*
1906 		 * Per our understanding this sync is not required on our
1907 		 * platform, since we are doing the same cache invalidation
1908 		 * operation on the same buffer twice in succession,
1909 		 * without any modification to this buffer by the CPU in
1910 		 * between.
1911 		 * However, this code with two syncs in succession has
1912 		 * been undergoing some testing at a customer site,
1913 		 * and has shown no problems so far. We would
1914 		 * like the customer to confirm that this sync
1915 		 * is really not required, before we remove this line
1916 		 * completely.
1917 		 */
1918 		paddr = QDF_NBUF_CB_PADDR(nbuf);
1919 
1920 		qdf_mem_dma_sync_single_for_cpu(scn->qdf_dev, paddr,
1921 				(skb_end_pointer(nbuf) - (nbuf)->data),
1922 				DMA_FROM_DEVICE);
1923 
1924 		qdf_nbuf_put_tail(nbuf, nbytes);
1925 
1926 		qdf_assert_always(nbuf->data != NULL);
1927 
1928 		QDF_NBUF_CB_RX_CTX_ID(nbuf) =
1929 				hif_get_rx_ctx_id(ce_state->id, hif_hdl);
1930 		cmpl_msdus[nbuf_cmpl_idx++] = nbuf;
1931 
1932 		/*
1933 		 * we are not posting the buffers back instead
1934 		 * reusing the buffers
1935 		 */
1936 		if (nbuf_cmpl_idx == scn->ce_service_max_rx_ind_flush) {
1937 			hif_record_ce_desc_event(scn, ce_state->id,
1938 						 FAST_RX_SOFTWARE_INDEX_UPDATE,
1939 						 NULL, NULL, sw_index, 0);
1940 			dest_ring->sw_index = sw_index;
1941 			ce_fastpath_rx_handle(ce_state, cmpl_msdus,
1942 						nbuf_cmpl_idx, ctrl_addr);
1943 
1944 			ce_state->receive_count += nbuf_cmpl_idx;
1945 			if (qdf_unlikely(hif_ce_service_should_yield(
1946 						scn, ce_state))) {
1947 				ce_state->force_break = 1;
1948 				qdf_atomic_set(&ce_state->rx_pending, 1);
1949 				return;
1950 			}
1951 
1952 			nbuf_cmpl_idx = 0;
1953 			more_comp_cnt = 0;
1954 		}
1955 	}
1956 
1957 	hif_record_ce_desc_event(scn, ce_state->id,
1958 				 FAST_RX_SOFTWARE_INDEX_UPDATE,
1959 				 NULL, NULL, sw_index, 0);
1960 
1961 	dest_ring->sw_index = sw_index;
1962 
1963 	/*
1964 	 * If there are not enough completions to fill the array,
1965 	 * just call the message handler here
1966 	 */
1967 	if (nbuf_cmpl_idx) {
1968 		ce_fastpath_rx_handle(ce_state, cmpl_msdus,
1969 				      nbuf_cmpl_idx, ctrl_addr);
1970 
1971 		ce_state->receive_count += nbuf_cmpl_idx;
1972 		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
1973 			ce_state->force_break = 1;
1974 			qdf_atomic_set(&ce_state->rx_pending, 1);
1975 			return;
1976 		}
1977 
1978 		/* check for more packets after upper layer processing */
1979 		nbuf_cmpl_idx = 0;
1980 		more_comp_cnt = 0;
1981 		goto more_data;
1982 	}
1983 
1984 	hif_update_napi_max_poll_time(ce_state, ce_id, qdf_get_cpu());
1985 
1986 	qdf_atomic_set(&ce_state->rx_pending, 0);
1987 	if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
1988 		CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1989 					   HOST_IS_COPY_COMPLETE_MASK);
1990 	} else {
1991 		hif_err_rl("%s: target access is not allowed", __func__);
1992 		return;
1993 	}
1994 
1995 	if (ce_recv_entries_done_nolock_legacy(scn, ce_state)) {
1996 		if (more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1997 			goto more_data;
1998 		} else {
1999 			HIF_ERROR("%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
2000 				  __func__, nentries_mask,
2001 				  ce_state->dest_ring->sw_index,
2002 				  CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr));
2003 		}
2004 	}
2005 #ifdef NAPI_YIELD_BUDGET_BASED
2006 	/* Caution: before modifying this code, refer to hif_napi_poll to see how
2007 	 * napi_complete gets called, and make the necessary changes there. Force
2008 	 * break has to be done till WIN disables the interrupt at source. */
2009 	ce_state->force_break = 1;
2010 #endif
2011 }
2012 
2013 #else
2014 static void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
2015 {
2016 }
2017 #endif /* WLAN_FEATURE_FASTPATH */
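/*
 * Batching sketch (illustrative, condensed from ce_per_engine_service_fast()
 * above): completed rx buffers are collected into cmpl_msdus[] and handed to
 * the registered fastpath handler in bursts rather than one at a time:
 *
 *	cmpl_msdus[nbuf_cmpl_idx++] = nbuf;
 *	if (nbuf_cmpl_idx == scn->ce_service_max_rx_ind_flush) {
 *		dest_ring->sw_index = sw_index;
 *		ce_fastpath_rx_handle(ce_state, cmpl_msdus,
 *				      nbuf_cmpl_idx, ctrl_addr);
 *		nbuf_cmpl_idx = 0;
 *	}
 *
 * Any partially filled batch is flushed once the ring has no more valid
 * completions.
 */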
2018 
2019 /*
2020  * Guts of interrupt handler for per-engine interrupts on a particular CE.
2021  *
2022  * Invokes registered callbacks for recv_complete,
2023  * send_complete, and watermarks.
2024  *
2025  * Returns: number of messages processed
2026  */
2027 int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
2028 {
2029 	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
2030 	uint32_t ctrl_addr = CE_state->ctrl_addr;
2031 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2032 	void *CE_context;
2033 	void *transfer_context;
2034 	qdf_dma_addr_t buf;
2035 	unsigned int nbytes;
2036 	unsigned int id;
2037 	unsigned int flags;
2038 	unsigned int more_comp_cnt = 0;
2039 	unsigned int more_snd_comp_cnt = 0;
2040 	unsigned int sw_idx, hw_idx;
2041 	uint32_t toeplitz_hash_result;
2042 	uint32_t mode = hif_get_conparam(scn);
2043 
2044 	if (hif_is_nss_wifi_enabled(scn) && (CE_state->htt_rx_data))
2045 		return CE_state->receive_count;
2046 
2047 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
2048 		HIF_ERROR("[premature rc=0]");
2049 		return 0; /* no work done */
2050 	}
2051 
2052 	/* Clear force_break flag and re-initialize receive_count to 0 */
2053 	CE_state->receive_count = 0;
2054 	CE_state->force_break = 0;
2055 	CE_state->ce_service_start_time = sched_clock();
2056 	CE_state->ce_service_yield_time =
2057 		CE_state->ce_service_start_time +
2058 		hif_get_ce_service_max_yield_time(
2059 			(struct hif_opaque_softc *)scn);
2060 
2061 	qdf_spin_lock(&CE_state->ce_index_lock);
2062 	/*
2063 	 * The below check makes sure the CE we are handling is a datapath CE
2064 	 * and that fastpath is enabled.
2065 	 */
2066 	if (ce_is_fastpath_handler_registered(CE_state)) {
2067 		/* For datapath only Rx CEs */
2068 		ce_per_engine_service_fast(scn, CE_id);
2069 		goto unlock_end;
2070 	}
2071 
2072 more_completions:
2073 	if (CE_state->recv_cb) {
2074 
2075 		/* Pop completed recv buffers and call
2076 		 * the registered recv callback for each
2077 		 */
2078 		while (hif_state->ce_services->ce_completed_recv_next_nolock
2079 				(CE_state, &CE_context, &transfer_context,
2080 				&buf, &nbytes, &id, &flags) ==
2081 				QDF_STATUS_SUCCESS) {
2082 			qdf_spin_unlock(&CE_state->ce_index_lock);
2083 			CE_state->recv_cb((struct CE_handle *)CE_state,
2084 					  CE_context, transfer_context, buf,
2085 					  nbytes, id, flags);
2086 
2087 			/*
2088 			 * EV #112693 -
2089 			 * [Peregrine][ES1][WB342][Win8x86][Performance]
2090 			 * BSoD_0x133 occurred in VHT80 UDP_DL
2091 			 * Break out of the DPC by force if the number of
2092 			 * loops in hif_pci_ce_recv_data reaches
2093 			 * MAX_NUM_OF_RECEIVES, to avoid spending too long
2094 			 * in the DPC for each interrupt. Schedule another
2095 			 * DPC to avoid data loss if a force-break action
2096 			 * was taken. This currently applies to Windows OS
2097 			 * only; Linux/MAC OS can extend it to their
2098 			 * platform if necessary.
2099 			 */
2100 
2101 			/* Break the receive processes by
2102 			 * force if force_break set up
2103 			 */
2104 			if (qdf_unlikely(CE_state->force_break)) {
2105 				qdf_atomic_set(&CE_state->rx_pending, 1);
2106 				goto target_access_end;
2107 			}
2108 			qdf_spin_lock(&CE_state->ce_index_lock);
2109 		}
2110 	}
2111 
2112 	/*
2113 	 * Attention: we may experience a potential infinite loop in the
2114 	 * below while loop during a send stress test.
2115 	 * Resolve it the same way as the receive case (refer to EV #112693).
2116 	 */
2117 
2118 	if (CE_state->send_cb) {
2119 		/* Pop completed send buffers and call
2120 		 * the registered send callback for each
2121 		 */
2122 
2123 #ifdef ATH_11AC_TXCOMPACT
2124 		while (hif_state->ce_services->ce_completed_send_next_nolock
2125 			 (CE_state, &CE_context,
2126 			 &transfer_context, &buf, &nbytes,
2127 			 &id, &sw_idx, &hw_idx,
2128 			 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
2129 
2130 			if (CE_id != CE_HTT_H2T_MSG ||
2131 			    QDF_IS_EPPING_ENABLED(mode)) {
2132 				qdf_spin_unlock(&CE_state->ce_index_lock);
2133 				CE_state->send_cb((struct CE_handle *)CE_state,
2134 						  CE_context, transfer_context,
2135 						  buf, nbytes, id, sw_idx,
2136 						  hw_idx, toeplitz_hash_result);
2137 				qdf_spin_lock(&CE_state->ce_index_lock);
2138 			} else {
2139 				struct HIF_CE_pipe_info *pipe_info =
2140 					(struct HIF_CE_pipe_info *)CE_context;
2141 
2142 				qdf_spin_lock_bh(&pipe_info->
2143 					      completion_freeq_lock);
2144 				pipe_info->num_sends_allowed++;
2145 				qdf_spin_unlock_bh(&pipe_info->
2146 						completion_freeq_lock);
2147 			}
2148 		}
2149 #else                           /*ATH_11AC_TXCOMPACT */
2150 		while (hif_state->ce_services->ce_completed_send_next_nolock
2151 			 (CE_state, &CE_context,
2152 			  &transfer_context, &buf, &nbytes,
2153 			  &id, &sw_idx, &hw_idx,
2154 			  &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
2155 			qdf_spin_unlock(&CE_state->ce_index_lock);
2156 			CE_state->send_cb((struct CE_handle *)CE_state,
2157 				  CE_context, transfer_context, buf,
2158 				  nbytes, id, sw_idx, hw_idx,
2159 				  toeplitz_hash_result);
2160 			qdf_spin_lock(&CE_state->ce_index_lock);
2161 		}
2162 #endif /*ATH_11AC_TXCOMPACT */
2163 	}
2164 
2165 more_watermarks:
2166 	if (CE_state->misc_cbs) {
2167 		if (CE_state->watermark_cb &&
2168 				hif_state->ce_services->watermark_int(CE_state,
2169 					&flags)) {
2170 			qdf_spin_unlock(&CE_state->ce_index_lock);
2171 			/* Convert HW IS bits to software flags */
2172 			CE_state->watermark_cb((struct CE_handle *)CE_state,
2173 					CE_state->wm_context, flags);
2174 			qdf_spin_lock(&CE_state->ce_index_lock);
2175 		}
2176 	}
2177 
2178 	/*
2179 	 * Clear the misc interrupts (watermark) that were handled above,
2180 	 * and that will be checked again below.
2181 	 * Clear and check for copy-complete interrupts again, just in case
2182 	 * more copy completions happened while the misc interrupts were being
2183 	 * handled.
2184 	 */
2185 	if (!ce_srng_based(scn)) {
2186 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
2187 			CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
2188 					   CE_WATERMARK_MASK |
2189 					   HOST_IS_COPY_COMPLETE_MASK);
2190 		} else {
2191 			hif_err_rl("%s: target access is not allowed",
2192 				   __func__);
2193 			goto unlock_end;
2194 		}
2195 	}
2196 
2197 	/*
2198 	 * Now that per-engine interrupts are cleared, verify that
2199 	 * no recv interrupts arrived while processing send interrupts,
2200 	 * and that no recv or send interrupts happened while processing
2201 	 * misc interrupts. Go back and check again. Keep checking until
2202 	 * we find no more events to process.
2203 	 */
2204 	if (CE_state->recv_cb &&
2205 		hif_state->ce_services->ce_recv_entries_done_nolock(scn,
2206 				CE_state)) {
2207 		if (QDF_IS_EPPING_ENABLED(mode) ||
2208 		    more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
2209 			goto more_completions;
2210 		} else {
2211 			if (!ce_srng_based(scn)) {
2212 				HIF_ERROR(
2213 					"%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
2214 					__func__,
2215 					CE_state->dest_ring->nentries_mask,
2216 					CE_state->dest_ring->sw_index,
2217 					CE_DEST_RING_READ_IDX_GET(scn,
2218 							  CE_state->ctrl_addr));
2219 			}
2220 		}
2221 	}
2222 
2223 	if (CE_state->send_cb &&
2224 		hif_state->ce_services->ce_send_entries_done_nolock(scn,
2225 				CE_state)) {
2226 		if (QDF_IS_EPPING_ENABLED(mode) ||
2227 		    more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
2228 			goto more_completions;
2229 		} else {
2230 			if (!ce_srng_based(scn)) {
2231 				HIF_ERROR(
2232 					"%s:Potential infinite loop detected during send completion nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
2233 					__func__,
2234 					CE_state->src_ring->nentries_mask,
2235 					CE_state->src_ring->sw_index,
2236 					CE_SRC_RING_READ_IDX_GET(scn,
2237 							 CE_state->ctrl_addr));
2238 			}
2239 		}
2240 	}
2241 
2242 	if (CE_state->misc_cbs && CE_state->watermark_cb) {
2243 		if (hif_state->ce_services->watermark_int(CE_state, &flags))
2244 			goto more_watermarks;
2245 	}
2246 
2247 	qdf_atomic_set(&CE_state->rx_pending, 0);
2248 
2249 unlock_end:
2250 	qdf_spin_unlock(&CE_state->ce_index_lock);
2251 target_access_end:
2252 	if (Q_TARGET_ACCESS_END(scn) < 0)
2253 		HIF_ERROR("<--[premature rc=%d]", CE_state->receive_count);
2254 	return CE_state->receive_count;
2255 }
2256 qdf_export_symbol(ce_per_engine_service);
2257 
2258 /*
2259  * Handler for per-engine interrupts on ALL active CEs.
2260  * This is used in cases where the system is sharing a
2261  * single interrupt for all CEs.
2262  */
2263 
2264 void ce_per_engine_service_any(int irq, struct hif_softc *scn)
2265 {
2266 	int CE_id;
2267 	uint32_t intr_summary;
2268 
2269 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2270 		return;
2271 
2272 	if (!qdf_atomic_read(&scn->tasklet_from_intr)) {
2273 		for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
2274 			struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
2275 
2276 			if (qdf_atomic_read(&CE_state->rx_pending)) {
2277 				qdf_atomic_set(&CE_state->rx_pending, 0);
2278 				ce_per_engine_service(scn, CE_id);
2279 			}
2280 		}
2281 
2282 		Q_TARGET_ACCESS_END(scn);
2283 		return;
2284 	}
2285 
2286 	intr_summary = CE_INTERRUPT_SUMMARY(scn);
2287 
2288 	for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
2289 		if (intr_summary & (1 << CE_id))
2290 			intr_summary &= ~(1 << CE_id);
2291 		else
2292 			continue;       /* no intr pending on this CE */
2293 
2294 		ce_per_engine_service(scn, CE_id);
2295 	}
2296 
2297 	Q_TARGET_ACCESS_END(scn);
2298 }
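/*
 * Worked example for the interrupt-summary loop above: if
 * CE_INTERRUPT_SUMMARY() returns 0x05, bits 0 and 2 are set, so only CE 0
 * and CE 2 are serviced. intr_summary is cleared bit by bit, and the loop
 * exits early once no pending bits remain.
 */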
2299 
2300 /*
2301  * Adjust interrupts for the copy complete handler.
2302  * If it's needed for either send or recv, then unmask
2303  * this interrupt; otherwise, mask it.
2304  *
2305  * Called with target_lock held.
2306  */
2307 static void
2308 ce_per_engine_handler_adjust_legacy(struct CE_state *CE_state,
2309 			     int disable_copy_compl_intr)
2310 {
2311 	uint32_t ctrl_addr = CE_state->ctrl_addr;
2312 	struct hif_softc *scn = CE_state->scn;
2313 
2314 	CE_state->disable_copy_compl_intr = disable_copy_compl_intr;
2315 
2316 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2317 		return;
2318 
2319 	if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
2320 		hif_err_rl("%s: target access is not allowed", __func__);
2321 		return;
2322 	}
2323 
2324 	if ((!disable_copy_compl_intr) &&
2325 	    (CE_state->send_cb || CE_state->recv_cb))
2326 		CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
2327 	else
2328 		CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
2329 
2330 	if (CE_state->watermark_cb)
2331 		CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
2332 	else
2333 		CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
2334 	Q_TARGET_ACCESS_END(scn);
2335 }
2336 
2337 /* Iterate the CE_state list and disable the copy complete interrupt
2338  * if a callback has already been registered.
2339  */
2340 void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn)
2341 {
2342 	int CE_id;
2343 
2344 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2345 		return;
2346 
2347 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
2348 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
2349 		uint32_t ctrl_addr = CE_state->ctrl_addr;
2350 
2351 		/* if the interrupt is currently enabled, disable it */
2352 		if (!CE_state->disable_copy_compl_intr
2353 		    && (CE_state->send_cb || CE_state->recv_cb))
2354 			CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
2355 
2356 		if (CE_state->watermark_cb)
2357 			CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
2358 	}
2359 	Q_TARGET_ACCESS_END(scn);
2360 }
2361 
2362 void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn)
2363 {
2364 	int CE_id;
2365 
2366 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2367 		return;
2368 
2369 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
2370 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
2371 		uint32_t ctrl_addr = CE_state->ctrl_addr;
2372 
2373 		/*
2374 		 * If the CE is supposed to have copy complete interrupts
2375 		 * enabled (i.e. there is a callback registered, and the
2376 		 * "disable" flag is not set), then re-enable the interrupt.
2377 		 */
2378 		if (!CE_state->disable_copy_compl_intr
2379 		    && (CE_state->send_cb || CE_state->recv_cb))
2380 			CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
2381 
2382 		if (CE_state->watermark_cb)
2383 			CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
2384 	}
2385 	Q_TARGET_ACCESS_END(scn);
2386 }
2387 
2388 /**
2389  * ce_send_cb_register(): register completion handler
2390  * @copyeng: CE_state representing the ce we are adding the behavior to
2391  * @fn_ptr: callback that the ce should use when processing tx completions
2392  * @disable_interrupts: if set, disable the copy complete interrupt
2393  *
2394  * Caller should guarantee that no transactions are in progress before
2395  * switching the callback function.
2396  *
2397  * Registers the send context before the fn pointer so that if the cb is valid
2398  * the context should be valid.
2399  *
2400  * Beware that currently this function will enable completion interrupts.
2401  */
2402 void
2403 ce_send_cb_register(struct CE_handle *copyeng,
2404 		    ce_send_cb fn_ptr,
2405 		    void *ce_send_context, int disable_interrupts)
2406 {
2407 	struct CE_state *CE_state = (struct CE_state *)copyeng;
2408 	struct hif_softc *scn;
2409 	struct HIF_CE_state *hif_state;
2410 
2411 	if (CE_state == NULL) {
2412 		HIF_ERROR("%s: Error CE state = NULL", __func__);
2413 		return;
2414 	}
2415 	scn = CE_state->scn;
2416 	hif_state = HIF_GET_CE_STATE(scn);
2417 	if (hif_state == NULL) {
2418 		HIF_ERROR("%s: Error HIF state = NULL", __func__);
2419 		return;
2420 	}
2421 	CE_state->send_context = ce_send_context;
2422 	CE_state->send_cb = fn_ptr;
2423 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
2424 							disable_interrupts);
2425 }
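/*
 * Usage sketch (hypothetical names, not compiled): a transport layer would
 * register its Tx completion handler roughly as follows. The callback
 * parameter list mirrors the send_cb invocations in ce_per_engine_service()
 * above; see the ce_send_cb typedef in ce_api.h for the authoritative
 * prototype.
 *
 *	static void example_tx_done(struct CE_handle *copyeng, void *ce_ctx,
 *				    void *transfer_ctx, qdf_dma_addr_t buf,
 *				    unsigned int nbytes, unsigned int id,
 *				    unsigned int sw_idx, unsigned int hw_idx,
 *				    uint32_t toeplitz_hash_result)
 *	{
 *		// recycle or free the buffer described by transfer_ctx
 *	}
 *
 *	ce_send_cb_register(ce_hdl, example_tx_done, pipe_ctx, 0);
 */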
2426 
2427 /**
2428  * ce_recv_cb_register(): register completion handler
2429  * @copyeng: CE_state representing the ce we are adding the behavior to
2430  * @fn_ptr: callback that the ce should use when processing rx completions
2431  * @disable_interrupts: if set, disable the copy complete interrupt
2432  *
2433  * Registers the send context before the fn pointer so that if the cb is valid
2434  * the context should be valid.
2435  *
2436  * Caller should guarantee that no transactions are in progress before
2437  * switching the callback function.
2438  */
2439 void
2440 ce_recv_cb_register(struct CE_handle *copyeng,
2441 		    CE_recv_cb fn_ptr,
2442 		    void *CE_recv_context, int disable_interrupts)
2443 {
2444 	struct CE_state *CE_state = (struct CE_state *)copyeng;
2445 	struct hif_softc *scn;
2446 	struct HIF_CE_state *hif_state;
2447 
2448 	if (CE_state == NULL) {
2449 		HIF_ERROR("%s: ERROR CE state = NULL", __func__);
2450 		return;
2451 	}
2452 	scn = CE_state->scn;
2453 	hif_state = HIF_GET_CE_STATE(scn);
2454 	if (hif_state == NULL) {
2455 		HIF_ERROR("%s: Error HIF state = NULL", __func__);
2456 		return;
2457 	}
2458 	CE_state->recv_context = CE_recv_context;
2459 	CE_state->recv_cb = fn_ptr;
2460 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
2461 							disable_interrupts);
2462 }
2463 
2464 /**
2465  * ce_watermark_cb_register(): register completion handler
2466  * @copyeng: CE_state representing the ce we are adding the behavior to
2467  * @fn_ptr: callback that the ce should use when processing watermark events
2468  *
2469  * Caller should guarantee that no watermark events are being processed before
2470  * switching the callback function.
2471  */
2472 void
2473 ce_watermark_cb_register(struct CE_handle *copyeng,
2474 			 CE_watermark_cb fn_ptr, void *CE_wm_context)
2475 {
2476 	struct CE_state *CE_state = (struct CE_state *)copyeng;
2477 	struct hif_softc *scn = CE_state->scn;
2478 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2479 
2480 	CE_state->watermark_cb = fn_ptr;
2481 	CE_state->wm_context = CE_wm_context;
2482 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
2483 							0);
2484 	if (fn_ptr)
2485 		CE_state->misc_cbs = 1;
2486 }
2487 
2488 bool ce_get_rx_pending(struct hif_softc *scn)
2489 {
2490 	int CE_id;
2491 
2492 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
2493 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
2494 
2495 		if (qdf_atomic_read(&CE_state->rx_pending))
2496 			return true;
2497 	}
2498 
2499 	return false;
2500 }
2501 
2502 /**
2503  * ce_check_rx_pending() - check whether rx processing is pending on a CE
2504  * @CE_state: context of the copy engine to check
2505  *
2506  * Return: true if the per_engine_service didn't process
2507  *	all the rx descriptors.
2508  */
2509 bool ce_check_rx_pending(struct CE_state *CE_state)
2510 {
2511 	if (qdf_atomic_read(&CE_state->rx_pending))
2512 		return true;
2513 	else
2514 		return false;
2515 }
2516 qdf_export_symbol(ce_check_rx_pending);
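/*
 * Usage sketch (hypothetical): scheduling code can use these helpers to
 * decide whether another service pass is needed, e.g.:
 *
 *	if (ce_check_rx_pending(CE_state))
 *		ce_per_engine_service(scn, CE_state->id);
 *
 * ce_get_rx_pending() performs the same check across every copy engine.
 */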
2517 
2518 #ifdef IPA_OFFLOAD
2519 /**
2520  * ce_ipa_get_resource() - get uc resource on copyengine
2521  * @ce: copyengine context
2522  * @ce_sr: copyengine source ring resource info
2523  * @ce_sr_ring_size: copyengine source ring size
2524  * @ce_reg_paddr: copyengine register physical address
2525  *
2526  * The copy engine should release these resources to the microcontroller.
2527  * The microcontroller needs:
2528  *  - Copy engine source descriptor base address
2529  *  - Copy engine source descriptor size
2530  *  - PCI BAR address to access the copy engine register
2531  *
2532  * Return: None
2533  */
2534 void ce_ipa_get_resource(struct CE_handle *ce,
2535 			 qdf_shared_mem_t **ce_sr,
2536 			 uint32_t *ce_sr_ring_size,
2537 			 qdf_dma_addr_t *ce_reg_paddr)
2538 {
2539 	struct CE_state *CE_state = (struct CE_state *)ce;
2540 	uint32_t ring_loop;
2541 	struct CE_src_desc *ce_desc;
2542 	qdf_dma_addr_t phy_mem_base;
2543 	struct hif_softc *scn = CE_state->scn;
2544 
2545 	if (CE_UNUSED == CE_state->state) {
2546 		*qdf_mem_get_dma_addr_ptr(scn->qdf_dev,
2547 			&CE_state->scn->ipa_ce_ring->mem_info) = 0;
2548 		*ce_sr_ring_size = 0;
2549 		return;
2550 	}
2551 
2552 	/* Update default value for descriptor */
2553 	for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
2554 	     ring_loop++) {
2555 		ce_desc = (struct CE_src_desc *)
2556 			  ((char *)CE_state->src_ring->base_addr_owner_space +
2557 			   ring_loop * (sizeof(struct CE_src_desc)));
2558 		CE_IPA_RING_INIT(ce_desc);
2559 	}
2560 
2561 	/* Get BAR address */
2562 	hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);
2563 
2564 	*ce_sr = CE_state->scn->ipa_ce_ring;
2565 	*ce_sr_ring_size = (uint32_t)(CE_state->src_ring->nentries *
2566 		sizeof(struct CE_src_desc));
2567 	*ce_reg_paddr = phy_mem_base + CE_BASE_ADDRESS(CE_state->id) +
2568 			SR_WR_INDEX_ADDRESS;
2569 }
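/*
 * Worked example (with hypothetical sizes): for a source ring with 16 entries
 * and a 16-byte struct CE_src_desc, *ce_sr_ring_size is reported as
 * 16 * 16 = 256 bytes, while *ce_reg_paddr points at the SR_WR_INDEX register
 * of this CE within the PCI BAR, i.e. BAR base + CE_BASE_ADDRESS(id) +
 * SR_WR_INDEX_ADDRESS.
 */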
2570 #endif /* IPA_OFFLOAD */
2571 
2572 static bool ce_check_int_watermark(struct CE_state *CE_state,
2573 				   unsigned int *flags)
2574 {
2575 	uint32_t ce_int_status;
2576 	uint32_t ctrl_addr = CE_state->ctrl_addr;
2577 	struct hif_softc *scn = CE_state->scn;
2578 
2579 	ce_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
2580 	if (ce_int_status & CE_WATERMARK_MASK) {
2581 		/* Convert HW IS bits to software flags */
2582 		*flags =
2583 			(ce_int_status & CE_WATERMARK_MASK) >>
2584 			CE_WM_SHFT;
2585 		return true;
2586 	}
2587 
2588 	return false;
2589 }
2590 
2591 static void ce_legacy_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
2592 			struct CE_ring_state *src_ring,
2593 			struct CE_attr *attr)
2594 {
2595 	uint32_t ctrl_addr;
2596 	uint64_t dma_addr;
2597 
2598 	QDF_ASSERT(ce_id < scn->ce_count);
2599 	ctrl_addr = CE_BASE_ADDRESS(ce_id);
2600 
2601 	src_ring->hw_index =
2602 		CE_SRC_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
2603 	src_ring->sw_index = src_ring->hw_index;
2604 	src_ring->write_index =
2605 		CE_SRC_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
2606 	dma_addr = src_ring->base_addr_CE_space;
2607 	CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
2608 			(uint32_t)(dma_addr & 0xFFFFFFFF));
2609 
2610 	/* if SR_BA_ADDRESS_HIGH register exists */
2611 	if (is_register_supported(SR_BA_ADDRESS_HIGH)) {
2612 		uint32_t tmp;
2613 
2614 		tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET(
2615 				scn, ctrl_addr);
2616 		tmp &= ~0x1F;
2617 		dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
2618 		CE_SRC_RING_BASE_ADDR_HIGH_SET(scn,
2619 				ctrl_addr, (uint32_t)dma_addr);
2620 	}
2621 	CE_SRC_RING_SZ_SET(scn, ctrl_addr, src_ring->nentries);
2622 	CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max);
2623 #ifdef BIG_ENDIAN_HOST
2624 	/* Enable source ring byte swap for big endian host */
2625 	CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
2626 #endif
2627 	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
2628 	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, src_ring->nentries);
2629 
2630 }
2631 
2632 static void ce_legacy_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id,
2633 				struct CE_ring_state *dest_ring,
2634 				struct CE_attr *attr)
2635 {
2636 	uint32_t ctrl_addr;
2637 	uint64_t dma_addr;
2638 
2639 	QDF_ASSERT(ce_id < scn->ce_count);
2640 	ctrl_addr = CE_BASE_ADDRESS(ce_id);
2641 	dest_ring->sw_index =
2642 		CE_DEST_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
2643 	dest_ring->write_index =
2644 		CE_DEST_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
2645 	dma_addr = dest_ring->base_addr_CE_space;
2646 	CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
2647 			(uint32_t)(dma_addr & 0xFFFFFFFF));
2648 
2649 	/* if DR_BA_ADDRESS_HIGH exists */
2650 	if (is_register_supported(DR_BA_ADDRESS_HIGH)) {
2651 		uint32_t tmp;
2652 
2653 		tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn,
2654 				ctrl_addr);
2655 		tmp &= ~0x1F;
2656 		dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
2657 		CE_DEST_RING_BASE_ADDR_HIGH_SET(scn,
2658 				ctrl_addr, (uint32_t)dma_addr);
2659 	}
2660 
2661 	CE_DEST_RING_SZ_SET(scn, ctrl_addr, dest_ring->nentries);
2662 #ifdef BIG_ENDIAN_HOST
2663 	/* Enable Dest ring byte swap for big endian host */
2664 	CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
2665 #endif
2666 	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
2667 	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, dest_ring->nentries);
2668 }
2669 
2670 static uint32_t ce_get_desc_size_legacy(uint8_t ring_type)
2671 {
2672 	switch (ring_type) {
2673 	case CE_RING_SRC:
2674 		return sizeof(struct CE_src_desc);
2675 	case CE_RING_DEST:
2676 		return sizeof(struct CE_dest_desc);
2677 	case CE_RING_STATUS:
2678 		qdf_assert(0);
2679 		return 0;
2680 	default:
2681 		return 0;
2682 	}
2683 
2684 	return 0;
2685 }
2686 
2687 static int ce_ring_setup_legacy(struct hif_softc *scn, uint8_t ring_type,
2688 		uint32_t ce_id, struct CE_ring_state *ring,
2689 		struct CE_attr *attr)
2690 {
2691 	int status = Q_TARGET_ACCESS_BEGIN(scn);
2692 
2693 	if (status < 0)
2694 		goto out;
2695 
2696 
2697 	switch (ring_type) {
2698 	case CE_RING_SRC:
2699 		ce_legacy_src_ring_setup(scn, ce_id, ring, attr);
2700 		break;
2701 	case CE_RING_DEST:
2702 		ce_legacy_dest_ring_setup(scn, ce_id, ring, attr);
2703 		break;
2704 	case CE_RING_STATUS:
2705 	default:
2706 		qdf_assert(0);
2707 		break;
2708 	}
2709 
2710 	Q_TARGET_ACCESS_END(scn);
2711 out:
2712 	return status;
2713 }
2714 
2715 static void ce_prepare_shadow_register_v2_cfg_legacy(struct hif_softc *scn,
2716 			    struct pld_shadow_reg_v2_cfg **shadow_config,
2717 			    int *num_shadow_registers_configured)
2718 {
2719 	*num_shadow_registers_configured = 0;
2720 	*shadow_config = NULL;
2721 }
2722 
2723 struct ce_ops ce_service_legacy = {
2724 	.ce_get_desc_size = ce_get_desc_size_legacy,
2725 	.ce_ring_setup = ce_ring_setup_legacy,
2726 	.ce_sendlist_send = ce_sendlist_send_legacy,
2727 	.ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_legacy,
2728 	.ce_revoke_recv_next = ce_revoke_recv_next_legacy,
2729 	.ce_cancel_send_next = ce_cancel_send_next_legacy,
2730 	.ce_recv_buf_enqueue = ce_recv_buf_enqueue_legacy,
2731 	.ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_legacy,
2732 	.ce_send_nolock = ce_send_nolock_legacy,
2733 	.watermark_int = ce_check_int_watermark,
2734 	.ce_completed_send_next_nolock = ce_completed_send_next_nolock_legacy,
2735 	.ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_legacy,
2736 	.ce_send_entries_done_nolock = ce_send_entries_done_nolock_legacy,
2737 	.ce_prepare_shadow_register_v2_cfg =
2738 		ce_prepare_shadow_register_v2_cfg_legacy,
2739 };
2740 
2741 
2742 struct ce_ops *ce_services_legacy(void)
2743 {
2744 	return &ce_service_legacy;
2745 }
2746 
2747 #if HIF_CE_DEBUG_DATA_BUF
2748 /**
2749  * hif_dump_desc_data_buf() - record ce descriptor events
2750  * @buf: buffer to copy to
2751  * @pos: Current position till which the buf is filled
2752  * @data: Data to be copied
2753  * @data_len: Length of the data to be copied
2754  */
2755 static uint32_t hif_dump_desc_data_buf(uint8_t *buf, ssize_t pos,
2756 					uint8_t *data, uint32_t data_len)
2757 {
2758 	pos += snprintf(buf + pos, PAGE_SIZE - pos, "Data:(Max%dBytes)\n",
2759 			CE_DEBUG_MAX_DATA_BUF_SIZE);
2760 
2761 	if ((data_len > 0) && data) {
2762 		if (data_len < 16) {
2763 			hex_dump_to_buffer(data,
2764 						CE_DEBUG_DATA_PER_ROW,
2765 						16, 1, buf + pos,
2766 						(ssize_t)PAGE_SIZE - pos,
2767 						false);
2768 			pos += CE_DEBUG_PRINT_BUF_SIZE(data_len);
2769 			pos += snprintf(buf + pos, PAGE_SIZE - pos, "\n");
2770 		} else {
2771 			uint32_t rows = (data_len / 16) + 1;
2772 			uint32_t row = 0;
2773 
2774 			for (row = 0; row < rows; row++) {
2775 				hex_dump_to_buffer(data + (row * 16),
2776 							CE_DEBUG_DATA_PER_ROW,
2777 							16, 1, buf + pos,
2778 							(ssize_t)PAGE_SIZE
2779 							- pos, false);
2780 				pos +=
2781 				CE_DEBUG_PRINT_BUF_SIZE(CE_DEBUG_DATA_PER_ROW);
2782 				pos += snprintf(buf + pos, PAGE_SIZE - pos,
2783 						"\n");
2784 			}
2785 		}
2786 	}
2787 
2788 	return pos;
2789 }
2790 #endif
2791 
2792 /*
2793  * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
2794  * for defined here
2795  */
2796 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
2797 static const char *ce_event_type_to_str(enum hif_ce_event_type type)
2798 {
2799 	switch (type) {
2800 	case HIF_RX_DESC_POST:
2801 		return "HIF_RX_DESC_POST";
2802 	case HIF_RX_DESC_COMPLETION:
2803 		return "HIF_RX_DESC_COMPLETION";
2804 	case HIF_TX_GATHER_DESC_POST:
2805 		return "HIF_TX_GATHER_DESC_POST";
2806 	case HIF_TX_DESC_POST:
2807 		return "HIF_TX_DESC_POST";
2808 	case HIF_TX_DESC_SOFTWARE_POST:
2809 		return "HIF_TX_DESC_SOFTWARE_POST";
2810 	case HIF_TX_DESC_COMPLETION:
2811 		return "HIF_TX_DESC_COMPLETION";
2812 	case FAST_RX_WRITE_INDEX_UPDATE:
2813 		return "FAST_RX_WRITE_INDEX_UPDATE";
2814 	case FAST_RX_SOFTWARE_INDEX_UPDATE:
2815 		return "FAST_RX_SOFTWARE_INDEX_UPDATE";
2816 	case FAST_TX_WRITE_INDEX_UPDATE:
2817 		return "FAST_TX_WRITE_INDEX_UPDATE";
2818 	case FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE:
2819 		return "FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE";
2820 	case FAST_TX_SOFTWARE_INDEX_UPDATE:
2821 		return "FAST_TX_SOFTWARE_INDEX_UPDATE";
2822 	case RESUME_WRITE_INDEX_UPDATE:
2823 		return "RESUME_WRITE_INDEX_UPDATE";
2824 	case HIF_IRQ_EVENT:
2825 		return "HIF_IRQ_EVENT";
2826 	case HIF_CE_TASKLET_ENTRY:
2827 		return "HIF_CE_TASKLET_ENTRY";
2828 	case HIF_CE_TASKLET_RESCHEDULE:
2829 		return "HIF_CE_TASKLET_RESCHEDULE";
2830 	case HIF_CE_TASKLET_EXIT:
2831 		return "HIF_CE_TASKLET_EXIT";
2832 	case HIF_CE_REAP_ENTRY:
2833 		return "HIF_CE_REAP_ENTRY";
2834 	case HIF_CE_REAP_EXIT:
2835 		return "HIF_CE_REAP_EXIT";
2836 	case NAPI_SCHEDULE:
2837 		return "NAPI_SCHEDULE";
2838 	case NAPI_POLL_ENTER:
2839 		return "NAPI_POLL_ENTER";
2840 	case NAPI_COMPLETE:
2841 		return "NAPI_COMPLETE";
2842 	case NAPI_POLL_EXIT:
2843 		return "NAPI_POLL_EXIT";
2844 	case HIF_RX_NBUF_ALLOC_FAILURE:
2845 		return "HIF_RX_NBUF_ALLOC_FAILURE";
2846 	case HIF_RX_NBUF_MAP_FAILURE:
2847 		return "HIF_RX_NBUF_MAP_FAILURE";
2848 	case HIF_RX_NBUF_ENQUEUE_FAILURE:
2849 		return "HIF_RX_NBUF_ENQUEUE_FAILURE";
2850 	default:
2851 		return "invalid";
2852 	}
2853 }
2854 
2855 /**
2856  * hif_dump_desc_event() - dump the selected ce descriptor event to a buffer
2857  * @scn: hif context
2858  * @buf: buffer to which the event is copied
2859  * Return: number of bytes written, or a negative errno on failure
2860  */
2861 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf)
2862 {
2863 	struct hif_ce_desc_event *event;
2864 	uint64_t secs, usecs;
2865 	ssize_t len = 0;
2866 	struct ce_desc_hist *ce_hist = NULL;
2867 	struct hif_ce_desc_event *hist_ev = NULL;
2868 
2869 	if (!scn)
2870 		return -EINVAL;
2871 
2872 	ce_hist = &scn->hif_ce_desc_hist;
2873 
2874 	hist_ev =
2875 		(struct hif_ce_desc_event *)ce_hist->hist_ev[ce_hist->hist_id];
2876 
2877 	if (!hist_ev) {
2878 		qdf_print("Low Memory");
2879 		return -EINVAL;
2880 	}
2881 
2882 	event = &hist_ev[ce_hist->hist_index];
2883 
2884 	if ((ce_hist->hist_id >= CE_COUNT_MAX) ||
2885 		(ce_hist->hist_index >= HIF_CE_HISTORY_MAX)) {
2886 		qdf_print("Invalid values");
2887 		return -EINVAL;
2888 	}
2889 
2890 	qdf_log_timestamp_to_secs(event->time, &secs, &usecs);
2891 
2892 	len += snprintf(buf, PAGE_SIZE - len,
2893 			"\nTime:%lld.%06lld, CE:%d, EventType: %s, EventIndex: %d\nDataAddr=%pK",
2894 			secs, usecs, ce_hist->hist_id,
2895 			ce_event_type_to_str(event->type),
2896 			event->index, event->memory);
2897 #if HIF_CE_DEBUG_DATA_BUF
2898 	len += snprintf(buf + len, PAGE_SIZE - len, ", Data len=%d",
2899 			event->actual_data_len);
2900 #endif
2901 
2902 	len += snprintf(buf + len, PAGE_SIZE - len, "\nCE descriptor: ");
2903 
2904 	hex_dump_to_buffer(&event->descriptor, sizeof(union ce_desc),
2905 				16, 1, buf + len,
2906 				(ssize_t)PAGE_SIZE - len, false);
2907 	len += CE_DEBUG_PRINT_BUF_SIZE(sizeof(union ce_desc));
2908 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
2909 
2910 #if HIF_CE_DEBUG_DATA_BUF
2911 	if (ce_hist->data_enable[ce_hist->hist_id])
2912 		len = hif_dump_desc_data_buf(buf, len, event->data,
2913 						(event->actual_data_len <
2914 						 CE_DEBUG_MAX_DATA_BUF_SIZE) ?
2915 						event->actual_data_len :
2916 						CE_DEBUG_MAX_DATA_BUF_SIZE);
2917 #endif /*HIF_CE_DEBUG_DATA_BUF*/
2918 
2919 	len += snprintf(buf + len, PAGE_SIZE - len, "END\n");
2920 
2921 	return len;
2922 }
2923 
2924 /*
2925  * hif_input_desc_trace_buf_index() -
2926  * API to select the CE id and CE debug storage buffer index
2927  *
2928  * @scn: hif context
2929  * @buf: data received from the user
2930  * @size: size of the user input
2931  *
2932  * Return: total length consumed, or a negative errno on invalid input
2933  */
2934 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
2935 					const char *buf, size_t size)
2936 {
2937 	struct ce_desc_hist *ce_hist = NULL;
2938 
2939 	if (!scn)
2940 		return -EINVAL;
2941 
2942 	ce_hist = &scn->hif_ce_desc_hist;
2943 
2944 	if (!size) {
2945 		pr_err("%s: Invalid input buffer.\n", __func__);
2946 		return -EINVAL;
2947 	}
2948 
2949 	if (sscanf(buf, "%d %d", &ce_hist->hist_id,
2950 			&ce_hist->hist_index) != 2) {
2951 		pr_err("%s: Invalid input value.\n", __func__);
2952 		return -EINVAL;
2953 	}
2954 	if ((ce_hist->hist_id >= CE_COUNT_MAX) ||
2955 	   (ce_hist->hist_index >= HIF_CE_HISTORY_MAX)) {
2956 		qdf_print("Invalid values");
2957 		return -EINVAL;
2958 	}
2959 
2960 	return size;
2961 }
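/*
 * Input format example: the buffer is parsed as "<CE id> <history index>",
 * so user input "2 448" selects CE 2 and entry 448 of its descriptor history
 * (subject to the CE_COUNT_MAX / HIF_CE_HISTORY_MAX bounds checked above).
 * A sysfs .store() hook would typically just forward its (buf, size) pair
 * to this function.
 */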
2962 
2963 #endif  /*For MCL,  HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */
2964 
2965 #if HIF_CE_DEBUG_DATA_BUF
2966 /*
2967  * hif_ce_en_desc_hist() -
2968  * API to enable recording the CE desc history
2969  *
2970  * @scn: hif context
2971  * @buf: user input of the form "<CE id> <1/0>"
2972  * @size: size of the user input
2973  *
2974  * Starts or stops recording of the ce desc data history
2975  *
2976  * Return: total length consumed, or a negative errno on invalid input
2977  */
2978 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn, const char *buf, size_t size)
2979 {
2980 	struct ce_desc_hist *ce_hist = NULL;
2981 	uint32_t cfg = 0;
2982 	uint32_t ce_id = 0;
2983 
2984 	if (!scn)
2985 		return -EINVAL;
2986 
2987 	ce_hist = &scn->hif_ce_desc_hist;
2988 
2989 	if (!size) {
2990 		pr_err("%s: Invalid input buffer.\n", __func__);
2991 		return -EINVAL;
2992 	}
2993 
2994 	if (sscanf(buf, "%d %d", &ce_id, &cfg) != 2) {
2995 		pr_err("%s: Invalid input: Enter CE Id<sp><1/0>.\n", __func__);
2996 		return -EINVAL;
2997 	}
2998 	if (ce_id >= CE_COUNT_MAX) {
2999 		qdf_print("Invalid value CE Id");
3000 		return -EINVAL;
3001 	}
3002 
3003 	if (cfg > 1) {
3004 		qdf_print("Invalid values: enter 0 or 1");
3005 		return -EINVAL;
3006 	}
3007 
3008 	if (!ce_hist->hist_ev[ce_id])
3009 		return -EINVAL;
3010 
3011 	qdf_mutex_acquire(&ce_dbg_datamem_lock[ce_id]);
3012 	if (cfg == 1) {
3013 		if (ce_hist->data_enable[ce_id] == 1) {
3014 			qdf_print("\nAlready Enabled");
3015 		} else {
3016 			if (alloc_mem_ce_debug_hist_data(scn, ce_id)
3017 							== QDF_STATUS_E_NOMEM){
3018 				ce_hist->data_enable[ce_id] = 0;
3019 				qdf_print("%s:Memory Alloc failed");
3020 			} else
3021 				ce_hist->data_enable[ce_id] = 1;
3022 		}
3023 	} else if (cfg == 0) {
3024 		if (ce_hist->data_enable[ce_id] == 0) {
3025 			qdf_print("\nAlready Disabled");
3026 		} else {
3027 			ce_hist->data_enable[ce_id] = 0;
3028 				free_mem_ce_debug_hist_data(scn, ce_id);
3029 		}
3030 	}
3031 	qdf_mutex_release(&ce_dbg_datamem_lock[ce_id]);
3032 
3033 	return size;
3034 }
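/*
 * Input format example: the buffer is parsed as "<CE id> <1/0>", so "5 1"
 * allocates the data-history buffers and enables data recording on CE 5,
 * while "5 0" disables recording and frees the buffers again.
 */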
3035 
3036 /*
3037  * hif_disp_ce_enable_desc_data_hist() -
3038  * API to display value of data_enable
3039  *
3040  * @dev: network device
3041  * @attr: sysfs attribute
3042  * @buf: buffer to copy the data.
3043  *
3044  * Return total length copied
3045  */
3046 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf)
3047 {
3048 	ssize_t len = 0;
3049 	uint32_t ce_id = 0;
3050 	struct ce_desc_hist *ce_hist = NULL;
3051 
3052 	if (!scn)
3053 		return -EINVAL;
3054 
3055 	ce_hist = &scn->hif_ce_desc_hist;
3056 
3057 	for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) {
3058 		len += snprintf(buf + len, PAGE_SIZE - len, " CE%d: %d\n",
3059 				ce_id, ce_hist->data_enable[ce_id]);
3060 	}
3061 
3062 	return len;
3063 }
3064 #endif /* HIF_CE_DEBUG_DATA_BUF */
3065 
3066 #ifdef OL_ATH_SMART_LOGGING
3067 #define GUARD_SPACE 10
3068 #define LOG_ID_SZ 4
3069 /*
3070  * hif_log_src_ce_dump() - Copy all the CE SRC ring to buf
3071  * @src_ring: SRC ring state
3072  * @buf_cur: Current pointer in ring buffer
3073  * @buf_init:Start of the ring buffer
3074  * @buf_sz: Size of the ring buffer
3075  * @skb_sz: Max size of the SKB buffer to be copied
3076  *
3077  * Dumps all the CE SRC ring descriptors and buffers pointed by them in to
3078  * the given buf, skb_sz is the max buffer size to be copied
3079  *
3080  * Return: Current pointer in ring buffer
3081  */
3082 static uint8_t *hif_log_src_ce_dump(struct CE_ring_state *src_ring,
3083 				    uint8_t *buf_cur, uint8_t *buf_init,
3084 				    uint32_t buf_sz, uint32_t skb_sz)
3085 {
3086 	struct CE_src_desc *src_ring_base;
3087 	uint32_t len, entry;
3088 	struct CE_src_desc  *src_desc;
3089 	qdf_nbuf_t nbuf;
3090 	uint32_t available_buf;
3091 
3092 	src_ring_base = (struct CE_src_desc *)src_ring->base_addr_owner_space;
3093 	len = sizeof(struct CE_ring_state);
3094 	available_buf = buf_sz - (buf_cur - buf_init);
3095 	if (available_buf < (len + GUARD_SPACE)) {
3096 		buf_cur = buf_init;
3097 	}
3098 
3099 	qdf_mem_copy(buf_cur, src_ring, sizeof(struct CE_ring_state));
3100 	buf_cur += sizeof(struct CE_ring_state);
3101 
3102 	for (entry = 0; entry < src_ring->nentries; entry++) {
3103 		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, entry);
3104 		nbuf = src_ring->per_transfer_context[entry];
3105 		if (nbuf) {
3106 			uint32_t skb_len  = qdf_nbuf_len(nbuf);
3107 			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);
3108 
3109 			len = sizeof(struct CE_src_desc) + skb_cp_len
3110 				+ LOG_ID_SZ + sizeof(skb_cp_len);
3111 			available_buf = buf_sz - (buf_cur - buf_init);
3112 			if (available_buf < (len + GUARD_SPACE)) {
3113 				buf_cur = buf_init;
3114 			}
3115 			qdf_mem_copy(buf_cur, src_desc,
3116 				     sizeof(struct CE_src_desc));
3117 			buf_cur += sizeof(struct CE_src_desc);
3118 
3119 			available_buf = buf_sz - (buf_cur - buf_init);
3120 			buf_cur += snprintf(buf_cur, available_buf, "SKB%d",
3121 						skb_cp_len);
3122 
3123 			if (skb_cp_len) {
3124 				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
3125 					     skb_cp_len);
3126 				buf_cur += skb_cp_len;
3127 			}
3128 		} else {
3129 			len = sizeof(struct CE_src_desc) + LOG_ID_SZ;
3130 			available_buf = buf_sz - (buf_cur - buf_init);
3131 			if (available_buf < (len + GUARD_SPACE)) {
3132 				buf_cur = buf_init;
3133 			}
3134 			qdf_mem_copy(buf_cur, src_desc,
3135 				     sizeof(struct CE_src_desc));
3136 			buf_cur += sizeof(struct CE_src_desc);
3137 			available_buf = buf_sz - (buf_cur - buf_init);
3138 			buf_cur += snprintf(buf_cur, available_buf, "NUL");
3139 		}
3140 	}
3141 
3142 	return buf_cur;
3143 }
3144 
3145 /*
3146  * hif_log_dest_ce_dump() - Copy all the CE DEST ring to buf
3147  * @dest_ring: SRC ring state
3148  * @buf_cur: Current pointer in ring buffer
3149  * @buf_init:Start of the ring buffer
3150  * @buf_sz: Size of the ring buffer
3151  * @skb_sz: Max size of the SKB buffer to be copied
3152  *
3153  * Dumps all the CE SRC ring descriptors and buffers pointed by them in to
3154  * the given buf, skb_sz is the max buffer size to be copied
3155  *
3156  * Return: Current pointer in ring buffer
3157  */
3158 static uint8_t *hif_log_dest_ce_dump(struct CE_ring_state *dest_ring,
3159 				     uint8_t *buf_cur, uint8_t *buf_init,
3160 				     uint32_t buf_sz, uint32_t skb_sz)
3161 {
3162 	struct CE_dest_desc *dest_ring_base;
3163 	uint32_t len, entry;
3164 	struct CE_dest_desc  *dest_desc;
3165 	qdf_nbuf_t nbuf;
3166 	uint32_t available_buf;
3167 
3168 	dest_ring_base =
3169 		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
3170 
3171 	len = sizeof(struct CE_ring_state);
3172 	available_buf = buf_sz - (buf_cur - buf_init);
3173 	if (available_buf < (len + GUARD_SPACE)) {
3174 		buf_cur = buf_init;
3175 	}
3176 
3177 	qdf_mem_copy(buf_cur, dest_ring, sizeof(struct CE_ring_state));
3178 	buf_cur += sizeof(struct CE_ring_state);
3179 
3180 	for (entry = 0; entry < dest_ring->nentries; entry++) {
3181 		dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base, entry);
3182 
3183 		nbuf = dest_ring->per_transfer_context[entry];
3184 		if (nbuf) {
3185 			uint32_t skb_len  = qdf_nbuf_len(nbuf);
3186 			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);
3187 
3188 			len = sizeof(struct CE_dest_desc) + skb_cp_len
3189 				+ LOG_ID_SZ + sizeof(skb_cp_len);
3190 
3191 			available_buf = buf_sz - (buf_cur - buf_init);
3192 			if (available_buf < (len + GUARD_SPACE)) {
3193 				buf_cur = buf_init;
3194 			}
3195 
3196 			qdf_mem_copy(buf_cur, dest_desc,
3197 				     sizeof(struct CE_dest_desc));
3198 			buf_cur += sizeof(struct CE_dest_desc);
3199 			available_buf = buf_sz - (buf_cur - buf_init);
3200 			buf_cur += snprintf(buf_cur, available_buf, "SKB%d",
3201 						skb_cp_len);
3202 			if (skb_cp_len) {
3203 				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
3204 					     skb_cp_len);
3205 				buf_cur += skb_cp_len;
3206 			}
3207 		} else {
3208 			len = sizeof(struct CE_dest_desc) + LOG_ID_SZ;
3209 			available_buf = buf_sz - (buf_cur - buf_init);
3210 			if (available_buf < (len + GUARD_SPACE)) {
3211 				buf_cur = buf_init;
3212 			}
3213 			qdf_mem_copy(buf_cur, dest_desc,
3214 				     sizeof(struct CE_dest_desc));
3215 			buf_cur += sizeof(struct CE_dest_desc);
3216 			available_buf = buf_sz - (buf_cur - buf_init);
3217 			buf_cur += snprintf(buf_cur, available_buf, "NUL");
3218 		}
3219 	}
3220 	return buf_cur;
3221 }
3222 
3223 /**
3224  * hif_log_dump_ce() - Copy the given CE SRC or DEST ring to buf
3225  * Calls the respective function to dump all the CE SRC/DEST ring descriptors
3226  * and the buffers pointed to by them into the given buf
3227  */
3228 uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
3229 			 uint8_t *buf_init, uint32_t buf_sz,
3230 			 uint32_t ce, uint32_t skb_sz)
3231 {
3232 	struct CE_state *ce_state;
3233 	struct CE_ring_state *src_ring;
3234 	struct CE_ring_state *dest_ring;
3235 
3236 	ce_state = scn->ce_id_to_state[ce];
3237 	src_ring = ce_state->src_ring;
3238 	dest_ring = ce_state->dest_ring;
3239 
3240 	if (src_ring) {
3241 		buf_cur = hif_log_src_ce_dump(src_ring, buf_cur,
3242 					      buf_init, buf_sz, skb_sz);
3243 	} else if (dest_ring) {
3244 		buf_cur = hif_log_dest_ce_dump(dest_ring, buf_cur,
3245 					       buf_init, buf_sz, skb_sz);
3246 	}
3247 
3248 	return buf_cur;
3249 }
3250 #endif /* OL_ATH_SMART_LOGGING */
3251 
3252