xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_service.c (revision 92d87f51612f6c3b2285266215edee8911647c2f)
1 /*
2  * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "hif.h"
20 #include "hif_io32.h"
21 #include "ce_api.h"
22 #include "ce_main.h"
23 #include "ce_internal.h"
24 #include "ce_reg.h"
25 #include "qdf_lock.h"
26 #include "regtable.h"
27 #include "hif_main.h"
28 #include "hif_debug.h"
29 #include "hif_napi.h"
30 #include "qdf_module.h"
31 
32 #ifdef IPA_OFFLOAD
33 #ifdef QCA_WIFI_3_0
34 #define CE_IPA_RING_INIT(ce_desc)                       \
35 	do {                                            \
36 		ce_desc->gather = 0;                    \
37 		ce_desc->enable_11h = 0;                \
38 		ce_desc->meta_data_low = 0;             \
39 		ce_desc->packet_result_offset = 64;     \
40 		ce_desc->toeplitz_hash_enable = 0;      \
41 		ce_desc->addr_y_search_disable = 0;     \
42 		ce_desc->addr_x_search_disable = 0;     \
43 		ce_desc->misc_int_disable = 0;          \
44 		ce_desc->target_int_disable = 0;        \
45 		ce_desc->host_int_disable = 0;          \
46 		ce_desc->dest_byte_swap = 0;            \
47 		ce_desc->byte_swap = 0;                 \
48 		ce_desc->type = 2;                      \
49 		ce_desc->tx_classify = 1;               \
50 		ce_desc->buffer_addr_hi = 0;            \
51 		ce_desc->meta_data = 0;                 \
52 		ce_desc->nbytes = 128;                  \
53 	} while (0)
54 #else
55 #define CE_IPA_RING_INIT(ce_desc)                       \
56 	do {                                            \
57 		ce_desc->byte_swap = 0;                 \
58 		ce_desc->nbytes = 60;                   \
59 		ce_desc->gather = 0;                    \
60 	} while (0)
61 #endif /* QCA_WIFI_3_0 */
62 #endif /* IPA_OFFLOAD */
63 
64 #ifndef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
65 #define DATA_CE_UPDATE_SWINDEX(x, scn, addr)				\
66 	do {                                            		\
67 		x = CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, addr); 	\
68 	} while (0)
69 #else
70 #define DATA_CE_UPDATE_SWINDEX(x, scn, addr)
71 #endif
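/*
 * Note: when DATA_CE_SW_INDEX_NO_INLINE_UPDATE is not defined, the macro
 * above refreshes the source ring sw_index inline from the DDR copy of the
 * read index; when it is defined, the macro is a no-op and the sw_index is
 * instead advanced from HTT completions via ce_update_tx_ring() below.
 */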
72 
73 static int war1_allow_sleep;
74 /* io32 write workaround */
75 static int hif_ce_war1;
76 
77 /**
78  * hif_ce_war_disable() - disable ce war globally
79  */
80 void hif_ce_war_disable(void)
81 {
82 	hif_ce_war1 = 0;
83 }
84 
85 /**
86  * hif_ce_war_enable() - enable ce war globally
87  */
88 void hif_ce_war_enable(void)
89 {
90 	hif_ce_war1 = 1;
91 }
92 
93 /*
94  * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) also needs to be
95  * checked here in addition to HIF_CE_DEBUG_DATA_BUF
96  */
97 #if HIF_CE_DEBUG_DATA_BUF
98 
99 #define CE_DEBUG_PRINT_BUF_SIZE(x) (((x) * 3) - 1)
100 #define CE_DEBUG_DATA_PER_ROW 16
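/*
 * CE_DEBUG_PRINT_BUF_SIZE(x) appears to size a hex-dump string for x bytes:
 * presumably two hex characters plus one separator per byte, minus the
 * trailing separator, with CE_DEBUG_DATA_PER_ROW bytes printed per row.
 */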
101 
102 qdf_mutex_t ce_dbg_datamem_lock[CE_COUNT_MAX];
103 static const char *ce_event_type_to_str(enum hif_ce_event_type type);
104 
105 /**
106  * get_next_record_index() - get the next record index
107  * @table_index: atomic index variable to increment
108  * @array_size: array size of the circular buffer
109  *
110  * Increment the atomic index and reserve the value.
111  * Takes care of buffer wrap.
112  * Guaranteed to be thread safe as long as fewer than array_size contexts
113  * try to access the array.  If there are more than array_size contexts
114  * trying to access the array, full locking of the recording process would
115  * be needed to have sane logging.
116  */
117 static int get_next_record_index(qdf_atomic_t *table_index, int array_size)
118 {
119 	int record_index = qdf_atomic_inc_return(table_index);
120 
121 	if (record_index == array_size)
122 		qdf_atomic_sub(array_size, table_index);
123 
124 	while (record_index >= array_size)
125 		record_index -= array_size;
126 	return record_index;
127 }
128 
129 #if HIF_CE_DEBUG_DATA_BUF
130 /**
131  * hif_ce_desc_data_record() - Record data pointed by the CE descriptor
132  * @event: structure detailing a ce event
133  * @len: length of the data
134  * Return:
135  */
136 static void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len)
137 {
138 	uint8_t *data = NULL;
139 
140 	if (!event->data)
141 		return;
142 
143 	if (event->memory && len > 0)
144 		data = qdf_nbuf_data((qdf_nbuf_t)event->memory);
145 
146 	event->actual_data_len = 0;
147 	qdf_mem_zero(event->data, CE_DEBUG_MAX_DATA_BUF_SIZE);
148 
149 	if (data && len > 0) {
150 		qdf_mem_copy(event->data, data,
151 				((len < CE_DEBUG_MAX_DATA_BUF_SIZE) ?
152 				 len : CE_DEBUG_MAX_DATA_BUF_SIZE));
153 		event->actual_data_len = len;
154 	}
155 }
156 #endif
157 
158 /**
159  * hif_record_ce_desc_event() - record ce descriptor events
160  * @scn: hif_softc
161  * @ce_id: which ce is the event occurring on
162  * @type: what happened
163  * @descriptor: pointer to the descriptor posted/completed
164  * @memory: virtual address of buffer related to the descriptor
165  * @index: index that the descriptor was/will be at.
166  */
167 void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
168 				enum hif_ce_event_type type,
169 				union ce_desc *descriptor,
170 				void *memory, int index,
171 				int len)
172 {
173 	int record_index;
174 	struct hif_ce_desc_event *event;
175 
176 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
177 	struct hif_ce_desc_event *hist_ev = NULL;
178 
179 	if (ce_id < CE_COUNT_MAX)
180 		hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id];
181 	else
182 		return;
183 
184 	if (ce_id >= CE_COUNT_MAX)
185 		return;
186 
187 	if (!ce_hist->enable[ce_id])
188 		return;
189 
190 	if (!hist_ev)
191 		return;
192 
193 	record_index = get_next_record_index(
194 			&ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX);
195 
196 	event = &hist_ev[record_index];
197 
198 	event->type = type;
199 	event->time = qdf_get_log_timestamp();
200 
201 	if (descriptor != NULL) {
202 		qdf_mem_copy(&event->descriptor, descriptor, sizeof(union ce_desc));
203 	} else {
204 		qdf_mem_zero(&event->descriptor, sizeof(union ce_desc));
205 	}
206 
207 	event->memory = memory;
208 	event->index = index;
209 
210 #if HIF_CE_DEBUG_DATA_BUF
211 	if (ce_hist->data_enable[ce_id])
212 		hif_ce_desc_data_record(event, len);
213 #endif
214 }
215 qdf_export_symbol(hif_record_ce_desc_event);
216 
217 /**
218  * ce_init_ce_desc_event_log() - initialize the ce event log
219  * @ce_id: copy engine id for which we are initializing the log
220  * @size: size of array to dedicate
221  *
222  * Currently the passed size is ignored in favor of a precompiled value.
223  */
224 void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size)
225 {
226 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
227 	qdf_atomic_init(&ce_hist->history_index[ce_id]);
228 	qdf_mutex_create(&ce_dbg_datamem_lock[ce_id]);
229 }
230 
231 /**
232  * ce_deinit_ce_desc_event_log() - deinitialize the ce event log
233  * @ce_id: copy engine id for which we are deinitializing the log
234  *
235  */
236 inline void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
237 {
238 	qdf_mutex_destroy(&ce_dbg_datamem_lock[ce_id]);
239 }
240 
241 #else /* Note: For MCL, (HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
242 void hif_record_ce_desc_event(struct hif_softc *scn,
243 		int ce_id, enum hif_ce_event_type type,
244 		union ce_desc *descriptor, void *memory,
245 		int index, int len)
246 {
247 }
248 qdf_export_symbol(hif_record_ce_desc_event);
249 
250 inline void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id,
251 					int size)
252 {
253 }
254 
255 void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
256 {
257 }
258 #endif /* Note: for MCL, HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */
259 
260 #ifdef NAPI_YIELD_BUDGET_BASED
261 bool hif_ce_service_should_yield(struct hif_softc *scn,
262 				 struct CE_state *ce_state)
263 {
264 	bool yield =  hif_max_num_receives_reached(scn, ce_state->receive_count);
265 	return yield;
266 }
267 #else
268 /**
269  * hif_ce_service_should_yield() - return true if the service is hogging the cpu
270  * @scn: hif context
271  * @ce_state: context of the copy engine being serviced
272  *
273  * Return: true if the service should yield
274  */
275 bool hif_ce_service_should_yield(struct hif_softc *scn,
276 				 struct CE_state *ce_state)
277 {
278 	bool yield, time_limit_reached, rxpkt_thresh_reached = 0;
279 
280 	time_limit_reached =
281 		sched_clock() > ce_state->ce_service_yield_time ? 1 : 0;
282 
283 	if (!time_limit_reached)
284 		rxpkt_thresh_reached = hif_max_num_receives_reached
285 					(scn, ce_state->receive_count);
286 
287 	yield =  time_limit_reached || rxpkt_thresh_reached;
288 
289 	if (yield && ce_state->htt_rx_data)
290 		hif_napi_update_yield_stats(ce_state,
291 					    time_limit_reached,
292 					    rxpkt_thresh_reached);
293 	return yield;
294 }
295 #endif
296 /*
297  * Support for Copy Engine hardware, which is mainly used for
298  * communication between Host and Target over a PCIe interconnect.
299  */
300 
301 /*
302  * A single CopyEngine (CE) comprises two "rings":
303  *   a source ring
304  *   a destination ring
305  *
306  * Each ring consists of a number of descriptors which specify
307  * an address, length, and meta-data.
308  *
309  * Typically, one side of the PCIe interconnect (Host or Target)
310  * controls one ring and the other side controls the other ring.
311  * The source side chooses when to initiate a transfer and it
312  * chooses what to send (buffer address, length). The destination
313  * side keeps a supply of "anonymous receive buffers" available and
314  * it handles incoming data as it arrives (when the destination
315  * receives an interrupt).
316  *
317  * The sender may send a simple buffer (address/length) or it may
318  * send a small list of buffers.  When a small list is sent, hardware
319  * "gathers" these and they end up in a single destination buffer
320  * with a single interrupt.
321  *
322  * There are several "contexts" managed by this layer -- more, it
323  * may seem -- than should be needed. These are provided mainly for
324  * maximum flexibility and especially to facilitate a simpler HIF
325  * implementation. There are per-CopyEngine recv, send, and watermark
326  * contexts. These are supplied by the caller when a recv, send,
327  * or watermark handler is established and they are echoed back to
328  * the caller when the respective callbacks are invoked. There is
329  * also a per-transfer context supplied by the caller when a buffer
330  * (or sendlist) is sent and when a buffer is enqueued for recv.
331  * These per-transfer contexts are echoed back to the caller when
332  * the buffer is sent/received.
333  * Target TX hash result: toeplitz_hash_result
334  */
335 
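/*
 * Illustrative (uncompiled) sketch of the host send path using the APIs in
 * this file; names such as ce_hdl, tx_context, frag_paddr and frag_len are
 * placeholders owned by the caller:
 *
 *	struct ce_sendlist sendlist;
 *
 *	ce_sendlist_init(&sendlist);
 *	ce_sendlist_buf_add(&sendlist, frag_paddr, frag_len, 0, 0);
 *	ce_sendlist_send(ce_hdl, tx_context, &sendlist, transfer_id);
 *
 * Send completions are later reaped via ce_completed_send_next(), and
 * receive buffers are replenished with ce_recv_buf_enqueue().
 */
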
336 /*
337  * Guts of ce_send, used by both ce_send and ce_sendlist_send.
338  * The caller takes responsibility for any needed locking.
339  */
340 
341 static
342 void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
343 				   u32 ctrl_addr, unsigned int write_index)
344 {
345 	if (hif_ce_war1) {
346 		void __iomem *indicator_addr;
347 
348 		indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;
349 
350 		if (!war1_allow_sleep
351 		    && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
352 			hif_write32_mb(indicator_addr,
353 				      (CDC_WAR_MAGIC_STR | write_index));
354 		} else {
355 			unsigned long irq_flags;
356 
357 			local_irq_save(irq_flags);
358 			hif_write32_mb(indicator_addr, 1);
359 
360 			/*
361 			 * PCIE write waits for ACK in IPQ8K, there is no
362 			 * need to read back value.
363 			 */
364 			(void)hif_read32_mb(indicator_addr);
365 			(void)hif_read32_mb(indicator_addr); /* conservative */
366 
367 			CE_SRC_RING_WRITE_IDX_SET(scn,
368 						  ctrl_addr, write_index);
369 
370 			hif_write32_mb(indicator_addr, 0);
371 			local_irq_restore(irq_flags);
372 		}
373 	} else {
374 		CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
375 	}
376 }
377 
378 #ifdef HIF_CONFIG_SLUB_DEBUG_ON
379 /**
380  * ce_validate_nbytes() - validate nbytes for slub builds on tx descriptors
381  * @nbytes: nbytes value being written into a send descriptor
382  * @ce_state: context of the copy engine
383 
384  *
385  *
386  * Return: none
387  */
388 static void ce_validate_nbytes(uint32_t nbytes, struct CE_state *ce_state)
389 {
390 	if (nbytes <= 0 || nbytes > ce_state->src_sz_max)
391 		QDF_BUG(0);
392 }
393 #else
394 static void ce_validate_nbytes(uint32_t nbytes, struct CE_state *ce_state)
395 {
396 }
397 #endif
398 
399 static int
400 ce_send_nolock_legacy(struct CE_handle *copyeng,
401 			   void *per_transfer_context,
402 			   qdf_dma_addr_t buffer,
403 			   uint32_t nbytes,
404 			   uint32_t transfer_id,
405 			   uint32_t flags,
406 			   uint32_t user_flags)
407 {
408 	int status;
409 	struct CE_state *CE_state = (struct CE_state *)copyeng;
410 	struct CE_ring_state *src_ring = CE_state->src_ring;
411 	uint32_t ctrl_addr = CE_state->ctrl_addr;
412 	unsigned int nentries_mask = src_ring->nentries_mask;
413 	unsigned int sw_index = src_ring->sw_index;
414 	unsigned int write_index = src_ring->write_index;
415 	uint64_t dma_addr = buffer;
416 	struct hif_softc *scn = CE_state->scn;
417 
418 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
419 		return QDF_STATUS_E_FAILURE;
420 	if (unlikely(CE_RING_DELTA(nentries_mask,
421 				write_index, sw_index - 1) <= 0)) {
422 		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
423 		Q_TARGET_ACCESS_END(scn);
424 		return QDF_STATUS_E_FAILURE;
425 	}
426 	{
427 		enum hif_ce_event_type event_type;
428 		struct CE_src_desc *src_ring_base =
429 			(struct CE_src_desc *)src_ring->base_addr_owner_space;
430 		struct CE_src_desc *shadow_base =
431 			(struct CE_src_desc *)src_ring->shadow_base;
432 		struct CE_src_desc *src_desc =
433 			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
434 		struct CE_src_desc *shadow_src_desc =
435 			CE_SRC_RING_TO_DESC(shadow_base, write_index);
436 
437 		/* Update low 32 bits source descriptor address */
438 		shadow_src_desc->buffer_addr =
439 			(uint32_t)(dma_addr & 0xFFFFFFFF);
440 #ifdef QCA_WIFI_3_0
441 		shadow_src_desc->buffer_addr_hi =
442 			(uint32_t)((dma_addr >> 32) & 0x1F);
443 		user_flags |= shadow_src_desc->buffer_addr_hi;
444 		memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
445 			   sizeof(uint32_t));
446 #endif
447 		shadow_src_desc->target_int_disable = 0;
448 		shadow_src_desc->host_int_disable = 0;
449 
450 		shadow_src_desc->meta_data = transfer_id;
451 
452 		/*
453 		 * Set the swap bit if:
454 		 * typical sends on this CE are swapped (host is big-endian)
455 		 * and this send doesn't disable the swapping
456 		 * (data is not bytestream)
457 		 */
458 		shadow_src_desc->byte_swap =
459 			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
460 			 != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
461 		shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
462 		shadow_src_desc->nbytes = nbytes;
463 		ce_validate_nbytes(nbytes, CE_state);
464 
465 		*src_desc = *shadow_src_desc;
466 
467 		src_ring->per_transfer_context[write_index] =
468 			per_transfer_context;
469 
470 		/* Update Source Ring Write Index */
471 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
472 
473 		/* WORKAROUND */
474 		if (shadow_src_desc->gather) {
475 			event_type = HIF_TX_GATHER_DESC_POST;
476 		} else if (qdf_unlikely(CE_state->state != CE_RUNNING)) {
477 			event_type = HIF_TX_DESC_SOFTWARE_POST;
478 			CE_state->state = CE_PENDING;
479 		} else {
480 			event_type = HIF_TX_DESC_POST;
481 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
482 						      write_index);
483 		}
484 
485 		/* src_ring->write_index hasn't been updated even though
486 		 * the register has already been written to.
487 		 */
488 		hif_record_ce_desc_event(scn, CE_state->id, event_type,
489 			(union ce_desc *) shadow_src_desc, per_transfer_context,
490 			src_ring->write_index, nbytes);
491 
492 		src_ring->write_index = write_index;
493 		status = QDF_STATUS_SUCCESS;
494 	}
495 	Q_TARGET_ACCESS_END(scn);
496 	return status;
497 }
498 
499 int
500 ce_send(struct CE_handle *copyeng,
501 		void *per_transfer_context,
502 		qdf_dma_addr_t buffer,
503 		uint32_t nbytes,
504 		uint32_t transfer_id,
505 		uint32_t flags,
506 		uint32_t user_flag)
507 {
508 	struct CE_state *CE_state = (struct CE_state *)copyeng;
509 	int status;
510 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
511 
512 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
513 	status = hif_state->ce_services->ce_send_nolock(copyeng,
514 			per_transfer_context, buffer, nbytes,
515 			transfer_id, flags, user_flag);
516 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
517 
518 	return status;
519 }
520 
521 unsigned int ce_sendlist_sizeof(void)
522 {
523 	return sizeof(struct ce_sendlist);
524 }
525 
526 void ce_sendlist_init(struct ce_sendlist *sendlist)
527 {
528 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
529 
530 	sl->num_items = 0;
531 }
532 
533 int
534 ce_sendlist_buf_add(struct ce_sendlist *sendlist,
535 					qdf_dma_addr_t buffer,
536 					uint32_t nbytes,
537 					uint32_t flags,
538 					uint32_t user_flags)
539 {
540 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
541 	unsigned int num_items = sl->num_items;
542 	struct ce_sendlist_item *item;
543 
544 	if (num_items >= CE_SENDLIST_ITEMS_MAX) {
545 		QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
546 		return QDF_STATUS_E_RESOURCES;
547 	}
548 
549 	item = &sl->item[num_items];
550 	item->send_type = CE_SIMPLE_BUFFER_TYPE;
551 	item->data = buffer;
552 	item->u.nbytes = nbytes;
553 	item->flags = flags;
554 	item->user_flags = user_flags;
555 	sl->num_items = num_items + 1;
556 	return QDF_STATUS_SUCCESS;
557 }
558 
559 int
560 ce_sendlist_send(struct CE_handle *copyeng,
561 		 void *per_transfer_context,
562 		 struct ce_sendlist *sendlist, unsigned int transfer_id)
563 {
564 	struct CE_state *CE_state = (struct CE_state *)copyeng;
565 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
566 
567 	return hif_state->ce_services->ce_sendlist_send(copyeng,
568 			per_transfer_context, sendlist, transfer_id);
569 }
570 
571 static int
572 ce_sendlist_send_legacy(struct CE_handle *copyeng,
573 		 void *per_transfer_context,
574 		 struct ce_sendlist *sendlist, unsigned int transfer_id)
575 {
576 	int status = -ENOMEM;
577 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
578 	struct CE_state *CE_state = (struct CE_state *)copyeng;
579 	struct CE_ring_state *src_ring = CE_state->src_ring;
580 	unsigned int nentries_mask = src_ring->nentries_mask;
581 	unsigned int num_items = sl->num_items;
582 	unsigned int sw_index;
583 	unsigned int write_index;
584 	struct hif_softc *scn = CE_state->scn;
585 
586 	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));
587 
588 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
589 
590 	if (CE_state->scn->fastpath_mode_on && CE_state->htt_tx_data &&
591 	    Q_TARGET_ACCESS_BEGIN(scn) == 0) {
592 		src_ring->sw_index = CE_SRC_RING_READ_IDX_GET_FROM_DDR(
593 					       scn, CE_state->ctrl_addr);
594 		Q_TARGET_ACCESS_END(scn);
595 	}
596 
597 	sw_index = src_ring->sw_index;
598 	write_index = src_ring->write_index;
599 
600 	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >=
601 	    num_items) {
602 		struct ce_sendlist_item *item;
603 		int i;
604 
605 		/* handle all but the last item uniformly */
606 		for (i = 0; i < num_items - 1; i++) {
607 			item = &sl->item[i];
608 			/* TBDXXX: Support extensible sendlist_types? */
609 			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
610 			status = ce_send_nolock_legacy(copyeng,
611 				CE_SENDLIST_ITEM_CTXT,
612 				(qdf_dma_addr_t) item->data,
613 				item->u.nbytes, transfer_id,
614 				item->flags | CE_SEND_FLAG_GATHER,
615 				item->user_flags);
616 			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
617 		}
618 		/* provide valid context pointer for final item */
619 		item = &sl->item[i];
620 		/* TBDXXX: Support extensible sendlist_types? */
621 		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
622 		status = ce_send_nolock_legacy(copyeng, per_transfer_context,
623 					(qdf_dma_addr_t) item->data,
624 					item->u.nbytes,
625 					transfer_id, item->flags,
626 					item->user_flags);
627 		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
628 		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
629 					QDF_NBUF_TX_PKT_CE);
630 		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
631 			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
632 			QDF_TRACE_DEFAULT_PDEV_ID,
633 			(uint8_t *)&(((qdf_nbuf_t)per_transfer_context)->data),
634 			sizeof(((qdf_nbuf_t)per_transfer_context)->data),
635 			QDF_TX));
636 	} else {
637 		/*
638 		 * Probably not worth the additional complexity to support
639 		 * partial sends with continuation or notification.  We expect
640 		 * to use large rings and small sendlists. If we can't handle
641 		 * the entire request at once, punt it back to the caller.
642 		 */
643 	}
644 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
645 
646 	return status;
647 }
648 
649 #ifdef WLAN_FEATURE_FASTPATH
650 #ifdef QCA_WIFI_3_0
651 static inline void
652 ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
653 		      uint64_t dma_addr,
654 		      uint32_t user_flags)
655 {
656 	shadow_src_desc->buffer_addr_hi =
657 			(uint32_t)((dma_addr >> 32) & 0x1F);
658 	user_flags |= shadow_src_desc->buffer_addr_hi;
659 	memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
660 			sizeof(uint32_t));
661 }
662 #else
663 static inline void
664 ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
665 		      uint64_t dma_addr,
666 		      uint32_t user_flags)
667 {
668 }
669 #endif
670 
671 #define SLOTS_PER_DATAPATH_TX 2
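/*
 * Each fastpath TX MSDU consumes two source ring slots in ce_send_fast()
 * below: one for the HTC/HTT header fragment (gather set) and one for the
 * data payload fragment, hence SLOTS_PER_DATAPATH_TX is 2.
 */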
672 
673 /**
674  * ce_send_fast() - CE layer Tx buffer posting function
675  * @copyeng: copy engine handle
676  * @msdu: msdu to be sent
677  * @transfer_id: transfer_id
678  * @download_len: packet download length
679  *
680  * Assumption: Called with an array of MSDUs
681  * Function:
682  * For each msdu in the array
683  * 1. Check no. of available entries
684  * 2. Create src ring entries (allocated in consistent memory)
685  * 3. Write index to h/w
686  *
687  * Return: No. of packets that could be sent
688  */
689 int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
690 		 unsigned int transfer_id, uint32_t download_len)
691 {
692 	struct CE_state *ce_state = (struct CE_state *)copyeng;
693 	struct hif_softc *scn = ce_state->scn;
694 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
695 	struct CE_ring_state *src_ring = ce_state->src_ring;
696 	u_int32_t ctrl_addr = ce_state->ctrl_addr;
697 	unsigned int nentries_mask = src_ring->nentries_mask;
698 	unsigned int write_index;
699 	unsigned int sw_index;
700 	unsigned int frag_len;
701 	uint64_t dma_addr;
702 	uint32_t user_flags;
703 	enum hif_ce_event_type type = FAST_TX_SOFTWARE_INDEX_UPDATE;
704 	bool ok_to_send = true;
705 
706 	/*
707 	 * Create a log assuming the call will go through, and if not, we would
708 	 * add an error trace as well.
709 	 * Please add the same failure log for any additional error paths.
710 	 */
711 	DPTRACE(qdf_dp_trace(msdu,
712 			QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
713 			QDF_TRACE_DEFAULT_PDEV_ID,
714 			qdf_nbuf_data_addr(msdu),
715 			sizeof(qdf_nbuf_data(msdu)), QDF_TX));
716 
717 	qdf_spin_lock_bh(&ce_state->ce_index_lock);
718 
719 	/*
720 	 * Request runtime PM resume if it has already suspended and make
721 	 * sure there is no PCIe link access.
722 	 */
723 	if (hif_pm_runtime_get(hif_hdl) != 0)
724 		ok_to_send = false;
725 
726 	if (ok_to_send) {
727 		Q_TARGET_ACCESS_BEGIN(scn);
728 		DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
729 	}
730 
731 	write_index = src_ring->write_index;
732 	sw_index = src_ring->sw_index;
733 	hif_record_ce_desc_event(scn, ce_state->id,
734 				FAST_TX_SOFTWARE_INDEX_UPDATE,
735 				NULL, NULL, sw_index, 0);
736 
737 	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1)
738 			 < SLOTS_PER_DATAPATH_TX)) {
739 		HIF_ERROR("Source ring full, required %d, available %d",
740 		      SLOTS_PER_DATAPATH_TX,
741 		      CE_RING_DELTA(nentries_mask, write_index, sw_index - 1));
742 		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
743 		if (ok_to_send)
744 			Q_TARGET_ACCESS_END(scn);
745 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
746 
747 		DPTRACE(qdf_dp_trace(NULL,
748 				QDF_DP_TRACE_CE_FAST_PACKET_ERR_RECORD,
749 				QDF_TRACE_DEFAULT_PDEV_ID,
750 				NULL, 0, QDF_TX));
751 
752 		return 0;
753 	}
754 
755 	{
756 		struct CE_src_desc *src_ring_base =
757 			(struct CE_src_desc *)src_ring->base_addr_owner_space;
758 		struct CE_src_desc *shadow_base =
759 			(struct CE_src_desc *)src_ring->shadow_base;
760 		struct CE_src_desc *src_desc =
761 			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
762 		struct CE_src_desc *shadow_src_desc =
763 			CE_SRC_RING_TO_DESC(shadow_base, write_index);
764 
765 		hif_pm_runtime_get_noresume(hif_hdl);
766 
767 		/*
768 		 * First fill out the ring descriptor for the HTC HTT frame
769 		 * header. These are uncached writes. Should we use a local
770 		 * structure instead?
771 		 */
772 		/* HTT/HTC header can be passed as an argument */
773 		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 0);
774 		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
775 							  0xFFFFFFFF);
776 		user_flags = qdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
777 		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
778 			shadow_src_desc->meta_data = transfer_id;
779 		shadow_src_desc->nbytes = qdf_nbuf_get_frag_len(msdu, 0);
780 		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
781 		download_len -= shadow_src_desc->nbytes;
782 		/*
783 		 * HTC HTT header is a word stream, so byte swap if CE byte
784 		 * swap enabled
785 		 */
786 		shadow_src_desc->byte_swap = ((ce_state->attr_flags &
787 					CE_ATTR_BYTE_SWAP_DATA) != 0);
788 		/* For the first one, it still does not need to write */
789 		shadow_src_desc->gather = 1;
790 		*src_desc = *shadow_src_desc;
791 		/* By default we could initialize the transfer context to this
792 		 * value
793 		 */
794 		src_ring->per_transfer_context[write_index] =
795 			CE_SENDLIST_ITEM_CTXT;
796 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
797 
798 		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
799 		shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
800 		/*
801 		 * Now fill out the ring descriptor for the actual data
802 		 * packet
803 		 */
804 		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 1);
805 		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
806 							  0xFFFFFFFF);
807 		/*
808 		 * Clear packet offset for all but the first CE desc.
809 		 */
810 		user_flags &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
811 		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
812 		shadow_src_desc->meta_data = transfer_id;
813 
814 		/* get actual packet length */
815 		frag_len = qdf_nbuf_get_frag_len(msdu, 1);
816 
817 		/* download remaining bytes of payload */
818 		shadow_src_desc->nbytes =  download_len;
819 		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
820 		if (shadow_src_desc->nbytes > frag_len)
821 			shadow_src_desc->nbytes = frag_len;
822 
823 		/*  Data packet is a byte stream, so disable byte swap */
824 		shadow_src_desc->byte_swap = 0;
825 		/* For the last one, gather is not set */
826 		shadow_src_desc->gather    = 0;
827 		*src_desc = *shadow_src_desc;
828 		src_ring->per_transfer_context[write_index] = msdu;
829 
830 		hif_record_ce_desc_event(scn, ce_state->id, type,
831 					(union ce_desc *)src_desc,
832 				src_ring->per_transfer_context[write_index],
833 					write_index, shadow_src_desc->nbytes);
834 
835 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
836 
837 		DPTRACE(qdf_dp_trace(msdu,
838 			QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
839 			QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(msdu),
840 			sizeof(qdf_nbuf_data(msdu)), QDF_TX));
841 	}
842 
843 	src_ring->write_index = write_index;
844 
845 	if (ok_to_send) {
846 		if (qdf_likely(ce_state->state == CE_RUNNING)) {
847 			type = FAST_TX_WRITE_INDEX_UPDATE;
848 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
849 				write_index);
850 			Q_TARGET_ACCESS_END(scn);
851 		} else
852 			ce_state->state = CE_PENDING;
853 		hif_pm_runtime_put(hif_hdl);
854 	}
855 
856 	qdf_spin_unlock_bh(&ce_state->ce_index_lock);
857 
858 	/* sent 1 packet */
859 	return 1;
860 }
861 
862 /**
863  * ce_is_fastpath_enabled() - returns true if fastpath mode is enabled
864  * @scn: Handle to HIF context
865  *
866  * Return: true if fastpath is enabled else false.
867  */
868 static bool ce_is_fastpath_enabled(struct hif_softc *scn)
869 {
870 	return scn->fastpath_mode_on;
871 }
872 
873 /**
874  * ce_is_fastpath_handler_registered() - return true for datapath CEs and if
875  * fastpath is enabled.
876  * @ce_state: handle to copy engine
877  *
878  * Return: true if fastpath handler is registered for datapath CE.
879  */
880 static bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
881 {
882 	if (ce_state->fastpath_handler)
883 		return true;
884 	else
885 		return false;
886 }
887 
888 
889 #else
890 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
891 {
892 	return false;
893 }
894 
895 static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
896 {
897 	return false;
898 }
899 #endif /* WLAN_FEATURE_FASTPATH */
900 
901 #ifndef AH_NEED_TX_DATA_SWAP
902 #define AH_NEED_TX_DATA_SWAP 0
903 #endif
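/*
 * AH_NEED_TX_DATA_SWAP defaults to 0 here, i.e. TX data is treated as a
 * byte stream with no byte swapping unless a platform header overrides it.
 */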
904 
905 /**
906  * ce_batch_send() - sends a bunch of msdus at once
907  * @ce_tx_hdl : pointer to CE handle
908  * @msdu : list of msdus to be sent
909  * @transfer_id : transfer id
910  * @len : Downloaded length
911  * @sendhead : sendhead
912  *
913  * Assumption: Called with an array of MSDUs
914  * Function:
915  * For each msdu in the array
916  * 1. Send each msdu
917  * 2. Increment write index accordingly.
918  *
919  * Return: list of msdus not sent
920  */
921 qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,  qdf_nbuf_t msdu,
922 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
923 {
924 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
925 	struct hif_softc *scn = ce_state->scn;
926 	struct CE_ring_state *src_ring = ce_state->src_ring;
927 	u_int32_t ctrl_addr = ce_state->ctrl_addr;
928 	/*  A_target_id_t targid = TARGID(scn);*/
929 
930 	uint32_t nentries_mask = src_ring->nentries_mask;
931 	uint32_t sw_index, write_index;
932 
933 	struct CE_src_desc *src_desc_base =
934 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
935 	uint32_t *src_desc;
936 
937 	struct CE_src_desc lsrc_desc = {0};
938 	int deltacount = 0;
939 	qdf_nbuf_t freelist = NULL, hfreelist = NULL, tempnext;
940 
941 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
942 	sw_index = src_ring->sw_index;
943 	write_index = src_ring->write_index;
944 
945 	deltacount = CE_RING_DELTA(nentries_mask, write_index, sw_index-1);
946 
947 	while (msdu) {
948 		tempnext = qdf_nbuf_next(msdu);
949 
950 		if (deltacount < 2) {
951 			if (sendhead)
952 				return msdu;
953 			HIF_ERROR("%s: Out of descriptors", __func__);
954 			src_ring->write_index = write_index;
955 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
956 					write_index);
957 
958 			sw_index = src_ring->sw_index;
959 			write_index = src_ring->write_index;
960 
961 			deltacount = CE_RING_DELTA(nentries_mask, write_index,
962 					sw_index-1);
963 			if (freelist == NULL) {
964 				freelist = msdu;
965 				hfreelist = msdu;
966 			} else {
967 				qdf_nbuf_set_next(freelist, msdu);
968 				freelist = msdu;
969 			}
970 			qdf_nbuf_set_next(msdu, NULL);
971 			msdu = tempnext;
972 			continue;
973 		}
974 
975 		src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base,
976 				write_index);
977 
978 		src_desc[0]   = qdf_nbuf_get_frag_paddr(msdu, 0);
979 
980 		lsrc_desc.meta_data = transfer_id;
981 		if (len  > msdu->len)
982 			len =  msdu->len;
983 		lsrc_desc.nbytes = len;
984 		/*  Data packet is a byte stream, so disable byte swap */
985 		lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
986 		lsrc_desc.gather    = 0; /*For the last one, gather is not set*/
987 
988 		src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
989 
990 
991 		src_ring->per_transfer_context[write_index] = msdu;
992 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
993 
994 		if (sendhead)
995 			break;
996 		qdf_nbuf_set_next(msdu, NULL);
997 		msdu = tempnext;
998 
999 	}
1000 
1001 
1002 	src_ring->write_index = write_index;
1003 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
1004 
1005 	return hfreelist;
1006 }
1007 
1008 /**
1009  * ce_update_tx_ring() - Advance sw index.
1010  * @ce_tx_hdl : pointer to CE handle
1011  * @num_htt_cmpls : htt completions received.
1012  *
1013  * Function:
1014  * Increment the value of sw index of src ring
1015  * according to number of htt completions
1016  * received.
1017  *
1018  * Return: void
1019  */
1020 #ifdef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
1021 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
1022 {
1023 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
1024 	struct CE_ring_state *src_ring = ce_state->src_ring;
1025 	uint32_t nentries_mask = src_ring->nentries_mask;
1026 	/*
1027 	 * Advance the s/w index:
1028 	 * This effectively simulates completing the CE ring descriptors
1029 	 */
1030 	src_ring->sw_index =
1031 		CE_RING_IDX_ADD(nentries_mask, src_ring->sw_index,
1032 				num_htt_cmpls);
1033 }
1034 #else
1035 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
1036 {}
1037 #endif
1038 
1039 /**
1040  * ce_send_single() - sends a single msdu
1041  * @ce_tx_hdl : pointer to CE handle
1042  * @msdu : msdu to be sent
1043  * @transfer_id : transfer id
1044  * @len : Downloaded length
1045  *
1046  * Function:
1047  * 1. Send one msdu
1048  * 2. Increment write index of src ring accordingly.
1049  *
1050  * Return: int: CE sent status
1051  */
1052 int ce_send_single(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
1053 		uint32_t transfer_id, u_int32_t len)
1054 {
1055 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
1056 	struct hif_softc *scn = ce_state->scn;
1057 	struct CE_ring_state *src_ring = ce_state->src_ring;
1058 	uint32_t ctrl_addr = ce_state->ctrl_addr;
1059 	/*A_target_id_t targid = TARGID(scn);*/
1060 
1061 	uint32_t nentries_mask = src_ring->nentries_mask;
1062 	uint32_t sw_index, write_index;
1063 
1064 	struct CE_src_desc *src_desc_base =
1065 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
1066 	uint32_t *src_desc;
1067 
1068 	struct CE_src_desc lsrc_desc = {0};
1069 	enum hif_ce_event_type event_type;
1070 
1071 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
1072 	sw_index = src_ring->sw_index;
1073 	write_index = src_ring->write_index;
1074 
1075 	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index,
1076 					sw_index-1) < 1)) {
1077 		/* ol_tx_stats_inc_ring_error(sc->scn->pdev_txrx_handle, 1); */
1078 		HIF_ERROR("%s: ce send fail %d %d %d", __func__, nentries_mask,
1079 			  write_index, sw_index);
1080 		return 1;
1081 	}
1082 
1083 	src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base, write_index);
1084 
1085 	src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0);
1086 
1087 	lsrc_desc.meta_data = transfer_id;
1088 	lsrc_desc.nbytes = len;
1089 	/*  Data packet is a byte stream, so disable byte swap */
1090 	lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
1091 	lsrc_desc.gather    = 0; /* For the last one, gather is not set */
1092 
1093 	src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
1094 
1095 
1096 	src_ring->per_transfer_context[write_index] = msdu;
1097 
1098 	if (((struct CE_src_desc *)src_desc)->gather)
1099 		event_type = HIF_TX_GATHER_DESC_POST;
1100 	else if (qdf_unlikely(ce_state->state != CE_RUNNING))
1101 		event_type = HIF_TX_DESC_SOFTWARE_POST;
1102 	else
1103 		event_type = HIF_TX_DESC_POST;
1104 
1105 	hif_record_ce_desc_event(scn, ce_state->id, event_type,
1106 				(union ce_desc *)src_desc, msdu,
1107 				write_index, len);
1108 
1109 	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
1110 
1111 	src_ring->write_index = write_index;
1112 
1113 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
1114 
1115 	return QDF_STATUS_SUCCESS;
1116 }
1117 
1118 /**
1119  * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine
1120  * @copyeng: copy engine handle
1121  * @per_recv_context: virtual address of the nbuf
1122  * @buffer: physical address of the nbuf
1123  *
1124  * Return: 0 if the buffer is enqueued
1125  */
1126 int
1127 ce_recv_buf_enqueue(struct CE_handle *copyeng,
1128 		    void *per_recv_context, qdf_dma_addr_t buffer)
1129 {
1130 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1131 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
1132 
1133 	return hif_state->ce_services->ce_recv_buf_enqueue(copyeng,
1134 			per_recv_context, buffer);
1135 }
1136 
1137 /**
1138  * ce_recv_buf_enqueue_legacy() - enqueue a recv buffer into a copy engine
1139  * @copyeng: copy engine handle
1140  * @per_recv_context: virtual address of the nbuf
1141  * @buffer: physical address of the nbuf
1142  *
1143  * Return: 0 if the buffer is enqueued
1144  */
1145 static int
1146 ce_recv_buf_enqueue_legacy(struct CE_handle *copyeng,
1147 		    void *per_recv_context, qdf_dma_addr_t buffer)
1148 {
1149 	int status;
1150 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1151 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
1152 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1153 	unsigned int nentries_mask = dest_ring->nentries_mask;
1154 	unsigned int write_index;
1155 	unsigned int sw_index;
1156 	uint64_t dma_addr = buffer;
1157 	struct hif_softc *scn = CE_state->scn;
1158 
1159 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
1160 	write_index = dest_ring->write_index;
1161 	sw_index = dest_ring->sw_index;
1162 
1163 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
1164 		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
1165 		return -EIO;
1166 	}
1167 
1168 	if ((CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) ||
1169 	    (ce_is_fastpath_enabled(scn) && CE_state->htt_rx_data)) {
1170 		struct CE_dest_desc *dest_ring_base =
1171 			(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
1172 		struct CE_dest_desc *dest_desc =
1173 			CE_DEST_RING_TO_DESC(dest_ring_base, write_index);
1174 
1175 		/* Update low 32 bit destination descriptor */
1176 		dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
1177 #ifdef QCA_WIFI_3_0
1178 		dest_desc->buffer_addr_hi =
1179 			(uint32_t)((dma_addr >> 32) & 0x1F);
1180 #endif
1181 		dest_desc->nbytes = 0;
1182 
1183 		dest_ring->per_transfer_context[write_index] =
1184 			per_recv_context;
1185 
1186 		hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_POST,
1187 				(union ce_desc *) dest_desc, per_recv_context,
1188 				write_index, 0);
1189 
1190 		/* Update Destination Ring Write Index */
1191 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
1192 		if (write_index != sw_index) {
1193 			CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
1194 			dest_ring->write_index = write_index;
1195 		}
1196 		status = QDF_STATUS_SUCCESS;
1197 	} else
1198 		status = QDF_STATUS_E_FAILURE;
1199 
1200 	Q_TARGET_ACCESS_END(scn);
1201 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
1202 	return status;
1203 }
1204 
1205 void
1206 ce_send_watermarks_set(struct CE_handle *copyeng,
1207 		       unsigned int low_alert_nentries,
1208 		       unsigned int high_alert_nentries)
1209 {
1210 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1211 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1212 	struct hif_softc *scn = CE_state->scn;
1213 
1214 	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
1215 	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
1216 }
1217 
1218 void
1219 ce_recv_watermarks_set(struct CE_handle *copyeng,
1220 		       unsigned int low_alert_nentries,
1221 		       unsigned int high_alert_nentries)
1222 {
1223 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1224 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1225 	struct hif_softc *scn = CE_state->scn;
1226 
1227 	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
1228 				low_alert_nentries);
1229 	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
1230 				high_alert_nentries);
1231 }
1232 
1233 unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
1234 {
1235 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1236 	struct CE_ring_state *src_ring = CE_state->src_ring;
1237 	unsigned int nentries_mask = src_ring->nentries_mask;
1238 	unsigned int sw_index;
1239 	unsigned int write_index;
1240 
1241 	qdf_spin_lock(&CE_state->ce_index_lock);
1242 	sw_index = src_ring->sw_index;
1243 	write_index = src_ring->write_index;
1244 	qdf_spin_unlock(&CE_state->ce_index_lock);
1245 
1246 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
1247 }
1248 
1249 unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
1250 {
1251 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1252 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
1253 	unsigned int nentries_mask = dest_ring->nentries_mask;
1254 	unsigned int sw_index;
1255 	unsigned int write_index;
1256 
1257 	qdf_spin_lock(&CE_state->ce_index_lock);
1258 	sw_index = dest_ring->sw_index;
1259 	write_index = dest_ring->write_index;
1260 	qdf_spin_unlock(&CE_state->ce_index_lock);
1261 
1262 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
1263 }
1264 
1265 /*
1266  * Guts of ce_send_entries_done.
1267  * The caller takes responsibility for any necessary locking.
1268  */
1269 static unsigned int
1270 ce_send_entries_done_nolock_legacy(struct hif_softc *scn,
1271 			    struct CE_state *CE_state)
1272 {
1273 	struct CE_ring_state *src_ring = CE_state->src_ring;
1274 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1275 	unsigned int nentries_mask = src_ring->nentries_mask;
1276 	unsigned int sw_index;
1277 	unsigned int read_index;
1278 
1279 	sw_index = src_ring->sw_index;
1280 	read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
1281 
1282 	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
1283 }
1284 
1285 unsigned int ce_send_entries_done(struct CE_handle *copyeng)
1286 {
1287 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1288 	unsigned int nentries;
1289 	struct hif_softc *scn = CE_state->scn;
1290 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1291 
1292 	qdf_spin_lock(&CE_state->ce_index_lock);
1293 	nentries = hif_state->ce_services->ce_send_entries_done_nolock(
1294 						CE_state->scn, CE_state);
1295 	qdf_spin_unlock(&CE_state->ce_index_lock);
1296 
1297 	return nentries;
1298 }
1299 
1300 /*
1301  * Guts of ce_recv_entries_done.
1302  * The caller takes responsibility for any necessary locking.
1303  */
1304 static unsigned int
1305 ce_recv_entries_done_nolock_legacy(struct hif_softc *scn,
1306 			    struct CE_state *CE_state)
1307 {
1308 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
1309 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1310 	unsigned int nentries_mask = dest_ring->nentries_mask;
1311 	unsigned int sw_index;
1312 	unsigned int read_index;
1313 
1314 	sw_index = dest_ring->sw_index;
1315 	read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
1316 
1317 	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
1318 }
1319 
1320 unsigned int ce_recv_entries_done(struct CE_handle *copyeng)
1321 {
1322 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1323 	unsigned int nentries;
1324 	struct hif_softc *scn = CE_state->scn;
1325 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1326 
1327 	qdf_spin_lock(&CE_state->ce_index_lock);
1328 	nentries = hif_state->ce_services->ce_recv_entries_done_nolock(
1329 						CE_state->scn, CE_state);
1330 	qdf_spin_unlock(&CE_state->ce_index_lock);
1331 
1332 	return nentries;
1333 }
1334 
1335 /*
1336  * Guts of ce_completed_recv_next.
1337  * The caller takes responsibility for any necessary locking.
1338  */
1339 static int
1340 ce_completed_recv_next_nolock_legacy(struct CE_state *CE_state,
1341 			      void **per_CE_contextp,
1342 			      void **per_transfer_contextp,
1343 			      qdf_dma_addr_t *bufferp,
1344 			      unsigned int *nbytesp,
1345 			      unsigned int *transfer_idp,
1346 			      unsigned int *flagsp)
1347 {
1348 	int status;
1349 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
1350 	unsigned int nentries_mask = dest_ring->nentries_mask;
1351 	unsigned int sw_index = dest_ring->sw_index;
1352 	struct hif_softc *scn = CE_state->scn;
1353 	struct CE_dest_desc *dest_ring_base =
1354 		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
1355 	struct CE_dest_desc *dest_desc =
1356 		CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
1357 	int nbytes;
1358 	struct CE_dest_desc dest_desc_info;
1359 	/*
1360 	 * By copying the dest_desc_info element to local memory, we could
1361 	 * avoid extra memory read from non-cachable memory.
1362 	 */
1363 	dest_desc_info =  *dest_desc;
1364 	nbytes = dest_desc_info.nbytes;
1365 	if (nbytes == 0) {
1366 		/*
1367 		 * This closes a relatively unusual race where the Host
1368 		 * sees the updated DRRI before the update to the
1369 		 * corresponding descriptor has completed. We treat this
1370 		 * as a descriptor that is not yet done.
1371 		 */
1372 		status = QDF_STATUS_E_FAILURE;
1373 		goto done;
1374 	}
1375 
1376 	hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_COMPLETION,
1377 			(union ce_desc *) dest_desc,
1378 			dest_ring->per_transfer_context[sw_index],
1379 			sw_index, 0);
1380 
1381 	dest_desc->nbytes = 0;
1382 
1383 	/* Return data from completed destination descriptor */
1384 	*bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info);
1385 	*nbytesp = nbytes;
1386 	*transfer_idp = dest_desc_info.meta_data;
1387 	*flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;
1388 
1389 	if (per_CE_contextp)
1390 		*per_CE_contextp = CE_state->recv_context;
1391 
1392 	if (per_transfer_contextp) {
1393 		*per_transfer_contextp =
1394 			dest_ring->per_transfer_context[sw_index];
1395 	}
1396 	dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */
1397 
1398 	/* Update sw_index */
1399 	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1400 	dest_ring->sw_index = sw_index;
1401 	status = QDF_STATUS_SUCCESS;
1402 
1403 done:
1404 	return status;
1405 }
1406 
1407 int
1408 ce_completed_recv_next(struct CE_handle *copyeng,
1409 		       void **per_CE_contextp,
1410 		       void **per_transfer_contextp,
1411 		       qdf_dma_addr_t *bufferp,
1412 		       unsigned int *nbytesp,
1413 		       unsigned int *transfer_idp, unsigned int *flagsp)
1414 {
1415 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1416 	int status;
1417 	struct hif_softc *scn = CE_state->scn;
1418 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1419 	struct ce_ops *ce_services;
1420 
1421 	ce_services = hif_state->ce_services;
1422 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
1423 	status =
1424 		ce_services->ce_completed_recv_next_nolock(CE_state,
1425 				per_CE_contextp, per_transfer_contextp, bufferp,
1426 					      nbytesp, transfer_idp, flagsp);
1427 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
1428 
1429 	return status;
1430 }
1431 
1432 QDF_STATUS
1433 ce_revoke_recv_next(struct CE_handle *copyeng,
1434 		    void **per_CE_contextp,
1435 		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
1436 {
1437 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1438 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
1439 
1440 	return hif_state->ce_services->ce_revoke_recv_next(copyeng,
1441 			per_CE_contextp, per_transfer_contextp, bufferp);
1442 }
1443 /* NB: Modeled after ce_completed_recv_next_nolock */
1444 static QDF_STATUS
1445 ce_revoke_recv_next_legacy(struct CE_handle *copyeng,
1446 		    void **per_CE_contextp,
1447 		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
1448 {
1449 	struct CE_state *CE_state;
1450 	struct CE_ring_state *dest_ring;
1451 	unsigned int nentries_mask;
1452 	unsigned int sw_index;
1453 	unsigned int write_index;
1454 	QDF_STATUS status;
1455 	struct hif_softc *scn;
1456 
1457 	CE_state = (struct CE_state *)copyeng;
1458 	dest_ring = CE_state->dest_ring;
1459 	if (!dest_ring)
1460 		return QDF_STATUS_E_FAILURE;
1461 
1462 	scn = CE_state->scn;
1463 	qdf_spin_lock(&CE_state->ce_index_lock);
1464 	nentries_mask = dest_ring->nentries_mask;
1465 	sw_index = dest_ring->sw_index;
1466 	write_index = dest_ring->write_index;
1467 	if (write_index != sw_index) {
1468 		struct CE_dest_desc *dest_ring_base =
1469 			(struct CE_dest_desc *)dest_ring->
1470 			    base_addr_owner_space;
1471 		struct CE_dest_desc *dest_desc =
1472 			CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
1473 
1474 		/* Return data from completed destination descriptor */
1475 		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc);
1476 
1477 		if (per_CE_contextp)
1478 			*per_CE_contextp = CE_state->recv_context;
1479 
1480 		if (per_transfer_contextp) {
1481 			*per_transfer_contextp =
1482 				dest_ring->per_transfer_context[sw_index];
1483 		}
1484 		dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */
1485 
1486 		/* Update sw_index */
1487 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1488 		dest_ring->sw_index = sw_index;
1489 		status = QDF_STATUS_SUCCESS;
1490 	} else {
1491 		status = QDF_STATUS_E_FAILURE;
1492 	}
1493 	qdf_spin_unlock(&CE_state->ce_index_lock);
1494 
1495 	return status;
1496 }
1497 
1498 /*
1499  * Guts of ce_completed_send_next.
1500  * The caller takes responsibility for any necessary locking.
1501  */
1502 static int
1503 ce_completed_send_next_nolock_legacy(struct CE_state *CE_state,
1504 			      void **per_CE_contextp,
1505 			      void **per_transfer_contextp,
1506 			      qdf_dma_addr_t *bufferp,
1507 			      unsigned int *nbytesp,
1508 			      unsigned int *transfer_idp,
1509 			      unsigned int *sw_idx,
1510 			      unsigned int *hw_idx,
1511 			      uint32_t *toeplitz_hash_result)
1512 {
1513 	int status = QDF_STATUS_E_FAILURE;
1514 	struct CE_ring_state *src_ring = CE_state->src_ring;
1515 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1516 	unsigned int nentries_mask = src_ring->nentries_mask;
1517 	unsigned int sw_index = src_ring->sw_index;
1518 	unsigned int read_index;
1519 	struct hif_softc *scn = CE_state->scn;
1520 
1521 	if (src_ring->hw_index == sw_index) {
1522 		/*
1523 		 * The SW completion index has caught up with the cached
1524 		 * version of the HW completion index.
1525 		 * Update the cached HW completion index to see whether
1526 		 * the SW has really caught up to the HW, or if the cached
1527 		 * value of the HW index has become stale.
1528 		 */
1529 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1530 			return QDF_STATUS_E_FAILURE;
1531 		src_ring->hw_index =
1532 			CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
1533 		if (Q_TARGET_ACCESS_END(scn) < 0)
1534 			return QDF_STATUS_E_FAILURE;
1535 	}
1536 	read_index = src_ring->hw_index;
1537 
1538 	if (sw_idx)
1539 		*sw_idx = sw_index;
1540 
1541 	if (hw_idx)
1542 		*hw_idx = read_index;
1543 
1544 	if ((read_index != sw_index) && (read_index != 0xffffffff)) {
1545 		struct CE_src_desc *shadow_base =
1546 			(struct CE_src_desc *)src_ring->shadow_base;
1547 		struct CE_src_desc *shadow_src_desc =
1548 			CE_SRC_RING_TO_DESC(shadow_base, sw_index);
1549 #ifdef QCA_WIFI_3_0
1550 		struct CE_src_desc *src_ring_base =
1551 			(struct CE_src_desc *)src_ring->base_addr_owner_space;
1552 		struct CE_src_desc *src_desc =
1553 			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
1554 #endif
1555 		hif_record_ce_desc_event(scn, CE_state->id,
1556 				HIF_TX_DESC_COMPLETION,
1557 				(union ce_desc *) shadow_src_desc,
1558 				src_ring->per_transfer_context[sw_index],
1559 				sw_index, shadow_src_desc->nbytes);
1560 
1561 		/* Return data from completed source descriptor */
1562 		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc);
1563 		*nbytesp = shadow_src_desc->nbytes;
1564 		*transfer_idp = shadow_src_desc->meta_data;
1565 #ifdef QCA_WIFI_3_0
1566 		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
1567 #else
1568 		*toeplitz_hash_result = 0;
1569 #endif
1570 		if (per_CE_contextp)
1571 			*per_CE_contextp = CE_state->send_context;
1572 
1573 		if (per_transfer_contextp) {
1574 			*per_transfer_contextp =
1575 				src_ring->per_transfer_context[sw_index];
1576 		}
1577 		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */
1578 
1579 		/* Update sw_index */
1580 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1581 		src_ring->sw_index = sw_index;
1582 		status = QDF_STATUS_SUCCESS;
1583 	}
1584 
1585 	return status;
1586 }
1587 
1588 QDF_STATUS
1589 ce_cancel_send_next(struct CE_handle *copyeng,
1590 		void **per_CE_contextp,
1591 		void **per_transfer_contextp,
1592 		qdf_dma_addr_t *bufferp,
1593 		unsigned int *nbytesp,
1594 		unsigned int *transfer_idp,
1595 		uint32_t *toeplitz_hash_result)
1596 {
1597 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1598 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
1599 
1600 	return hif_state->ce_services->ce_cancel_send_next
1601 		(copyeng, per_CE_contextp, per_transfer_contextp,
1602 		 bufferp, nbytesp, transfer_idp, toeplitz_hash_result);
1603 }
1604 
1605 /* NB: Modeled after ce_completed_send_next */
1606 static QDF_STATUS
1607 ce_cancel_send_next_legacy(struct CE_handle *copyeng,
1608 		void **per_CE_contextp,
1609 		void **per_transfer_contextp,
1610 		qdf_dma_addr_t *bufferp,
1611 		unsigned int *nbytesp,
1612 		unsigned int *transfer_idp,
1613 		uint32_t *toeplitz_hash_result)
1614 {
1615 	struct CE_state *CE_state;
1616 	struct CE_ring_state *src_ring;
1617 	unsigned int nentries_mask;
1618 	unsigned int sw_index;
1619 	unsigned int write_index;
1620 	QDF_STATUS status;
1621 	struct hif_softc *scn;
1622 
1623 	CE_state = (struct CE_state *)copyeng;
1624 	src_ring = CE_state->src_ring;
1625 	if (!src_ring)
1626 		return QDF_STATUS_E_FAILURE;
1627 
1628 	scn = CE_state->scn;
1629 	qdf_spin_lock(&CE_state->ce_index_lock);
1630 	nentries_mask = src_ring->nentries_mask;
1631 	sw_index = src_ring->sw_index;
1632 	write_index = src_ring->write_index;
1633 
1634 	if (write_index != sw_index) {
1635 		struct CE_src_desc *src_ring_base =
1636 			(struct CE_src_desc *)src_ring->base_addr_owner_space;
1637 		struct CE_src_desc *src_desc =
1638 			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
1639 
1640 		/* Return data from completed source descriptor */
1641 		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc);
1642 		*nbytesp = src_desc->nbytes;
1643 		*transfer_idp = src_desc->meta_data;
1644 #ifdef QCA_WIFI_3_0
1645 		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
1646 #else
1647 		*toeplitz_hash_result = 0;
1648 #endif
1649 
1650 		if (per_CE_contextp)
1651 			*per_CE_contextp = CE_state->send_context;
1652 
1653 		if (per_transfer_contextp) {
1654 			*per_transfer_contextp =
1655 				src_ring->per_transfer_context[sw_index];
1656 		}
1657 		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */
1658 
1659 		/* Update sw_index */
1660 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1661 		src_ring->sw_index = sw_index;
1662 		status = QDF_STATUS_SUCCESS;
1663 	} else {
1664 		status = QDF_STATUS_E_FAILURE;
1665 	}
1666 	qdf_spin_unlock(&CE_state->ce_index_lock);
1667 
1668 	return status;
1669 }
1670 
1671 /* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
1672 #define CE_WM_SHFT 1
1673 
1674 int
1675 ce_completed_send_next(struct CE_handle *copyeng,
1676 		       void **per_CE_contextp,
1677 		       void **per_transfer_contextp,
1678 		       qdf_dma_addr_t *bufferp,
1679 		       unsigned int *nbytesp,
1680 		       unsigned int *transfer_idp,
1681 		       unsigned int *sw_idx,
1682 		       unsigned int *hw_idx,
1683 		       unsigned int *toeplitz_hash_result)
1684 {
1685 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1686 	struct hif_softc *scn = CE_state->scn;
1687 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1688 	struct ce_ops *ce_services;
1689 	int status;
1690 
1691 	ce_services = hif_state->ce_services;
1692 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
1693 	status =
1694 		ce_services->ce_completed_send_next_nolock(CE_state,
1695 					per_CE_contextp, per_transfer_contextp,
1696 					bufferp, nbytesp, transfer_idp, sw_idx,
1697 					      hw_idx, toeplitz_hash_result);
1698 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
1699 
1700 	return status;
1701 }
1702 
1703 #ifdef ATH_11AC_TXCOMPACT
1704 /* CE engine descriptor reap
1705  * Similar to ce_per_engine_service(); the only difference is that
1706  * ce_per_engine_service() handles both receive and reaping of completed
1707  * descriptors, whereas this function only reaps Tx completion descriptors.
1708  * It is called from the threshold reap poll routine
1709  * hif_send_complete_check(), so it must not contain any receive
1710  * functionality.
1711  */
1712 
1713 void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
1714 {
1715 	void *CE_context;
1716 	void *transfer_context;
1717 	qdf_dma_addr_t buf;
1718 	unsigned int nbytes;
1719 	unsigned int id;
1720 	unsigned int sw_idx, hw_idx;
1721 	uint32_t toeplitz_hash_result;
1722 	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
1723 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1724 
1725 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1726 		return;
1727 
1728 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
1729 			NULL, NULL, 0, 0);
1730 
1731 	/* Since this function is called from both user context and
1732 	 * tasklet context, the spinlock has to disable bottom halves.
1733 	 * This fix assumes that the ATH_11AC_TXCOMPACT flag is always
1734 	 * enabled in TX polling mode. If this is not the case, more
1735 	 * bottom-half spinlock changes are needed. Due to data path
1736 	 * performance concerns, after internal discussion we decided
1737 	 * to make the minimum change, i.e., only address the issue seen
1738 	 * in this function. The possible downside of this minimum
1739 	 * change is that, in the future, if some other function is also
1740 	 * opened up to user context, those cases will need to be
1741 	 * addressed by changing spin_lock to spin_lock_bh as well.
1742 	 */
1743 
1744 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
1745 
1746 	if (CE_state->send_cb) {
1747 		{
1748 			struct ce_ops *ce_services = hif_state->ce_services;
1749 			/* Pop completed send buffers and call the
1750 			 * registered send callback for each
1751 			 */
1752 			while (ce_services->ce_completed_send_next_nolock
1753 				 (CE_state, &CE_context,
1754 				  &transfer_context, &buf,
1755 				  &nbytes, &id, &sw_idx, &hw_idx,
1756 				  &toeplitz_hash_result) ==
1757 				  QDF_STATUS_SUCCESS) {
1758 				if (ce_id != CE_HTT_H2T_MSG) {
1759 					qdf_spin_unlock_bh(
1760 						&CE_state->ce_index_lock);
1761 					CE_state->send_cb(
1762 						(struct CE_handle *)
1763 						CE_state, CE_context,
1764 						transfer_context, buf,
1765 						nbytes, id, sw_idx, hw_idx,
1766 						toeplitz_hash_result);
1767 					qdf_spin_lock_bh(
1768 						&CE_state->ce_index_lock);
1769 				} else {
1770 					struct HIF_CE_pipe_info *pipe_info =
1771 						(struct HIF_CE_pipe_info *)
1772 						CE_context;
1773 
1774 					qdf_spin_lock_bh(&pipe_info->
1775 						 completion_freeq_lock);
1776 					pipe_info->num_sends_allowed++;
1777 					qdf_spin_unlock_bh(&pipe_info->
1778 						   completion_freeq_lock);
1779 				}
1780 			}
1781 		}
1782 	}
1783 
1784 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
1785 
1786 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
1787 			NULL, NULL, 0, 0);
1788 	Q_TARGET_ACCESS_END(scn);
1789 }
1790 
1791 #endif /*ATH_11AC_TXCOMPACT */
1792 
1793 /*
1794  * Number of times to check for any pending tx/rx completion on
1795  * a copy engine; this count should be big enough. Once we hit
1796  * this threshold we will not check for any Tx/Rx completion in the same
1797  * interrupt handling. Note that this threshold is only used for
1798  * Rx interrupt processing; it can be used for Tx as well if we
1799  * suspect an infinite loop in checking for pending Tx completions.
1800  */
1801 #define CE_TXRX_COMP_CHECK_THRESHOLD 20
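/*
 * A minimal sketch of the bounded re-check pattern this threshold enforces;
 * the label and helpers mirror ce_per_engine_service() further below rather
 * than defining anything new:
 *
 *	unsigned int more_comp_cnt = 0;
 *
 * more_completions:
 *	(pop and dispatch all currently completed Rx descriptors)
 *	if (ce_services->ce_recv_entries_done_nolock(scn, CE_state) &&
 *	    more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD)
 *		goto more_completions;
 *	(otherwise log the potential-infinite-loop warning and stop)
 */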
1802 
1803 #ifdef WLAN_FEATURE_FASTPATH
1804 /**
1805  * ce_fastpath_rx_handle() - Updates write_index and calls fastpath msg handler
1806  * @ce_state: handle to copy engine state
1807  * @cmpl_msdus: Rx msdus
1808  * @num_cmpls: number of Rx msdus
1809  * @ctrl_addr: CE control address
1810  *
1811  * Return: None
1812  */
1813 static void ce_fastpath_rx_handle(struct CE_state *ce_state,
1814 				  qdf_nbuf_t *cmpl_msdus, uint32_t num_cmpls,
1815 				  uint32_t ctrl_addr)
1816 {
1817 	struct hif_softc *scn = ce_state->scn;
1818 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
1819 	uint32_t nentries_mask = dest_ring->nentries_mask;
1820 	uint32_t write_index;
1821 
1822 	qdf_spin_unlock(&ce_state->ce_index_lock);
1823 	(ce_state->fastpath_handler)(ce_state->context, cmpl_msdus, num_cmpls);
1824 	qdf_spin_lock(&ce_state->ce_index_lock);
1825 
1826 	/* Update Destination Ring Write Index */
1827 	write_index = dest_ring->write_index;
1828 	write_index = CE_RING_IDX_ADD(nentries_mask, write_index, num_cmpls);
1829 
1830 	hif_record_ce_desc_event(scn, ce_state->id,
1831 			FAST_RX_WRITE_INDEX_UPDATE,
1832 			NULL, NULL, write_index, 0);
1833 
1834 	CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
1835 	dest_ring->write_index = write_index;
1836 }
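/*
 * Worked example for the write-index update above, assuming the usual
 * power-of-two ring where CE_RING_IDX_ADD(mask, idx, n) evaluates to
 * ((idx + n) & mask): with a 512-entry destination ring
 * (nentries_mask = 511), write_index = 508 and num_cmpls = 8, the new
 * write_index becomes (508 + 8) & 511 = 4, i.e. the index wraps around
 * instead of running past the end of the ring.
 */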
1837 
1838 /**
1839  * ce_per_engine_service_fast() - CE handler routine to service fastpath msgs
1840  * @scn: hif_context
1841  * @ce_id: Copy engine ID
1842  * 1) Go through the CE ring, and find the completions
1843  * 2) For valid completions retrieve context (nbuf) for per_transfer_context[]
1844  * 3) Unmap buffer & accumulate in an array.
1845  * 4) Call message handler when array is full or when exiting the handler
1846  *
1847  * Return: void
1848  */
1849 
1850 static void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
1851 {
1852 	struct CE_state *ce_state = scn->ce_id_to_state[ce_id];
1853 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1854 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
1855 	struct CE_dest_desc *dest_ring_base =
1856 		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
1857 
1858 	uint32_t nentries_mask = dest_ring->nentries_mask;
1859 	uint32_t sw_index = dest_ring->sw_index;
1860 	uint32_t nbytes;
1861 	qdf_nbuf_t nbuf;
1862 	dma_addr_t paddr;
1863 	struct CE_dest_desc *dest_desc;
1864 	qdf_nbuf_t cmpl_msdus[MSG_FLUSH_NUM];
1865 	uint32_t ctrl_addr = ce_state->ctrl_addr;
1866 	uint32_t nbuf_cmpl_idx = 0;
1867 	unsigned int more_comp_cnt = 0;
1868 
1869 more_data:
1870 	for (;;) {
1871 
1872 		dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base,
1873 						 sw_index);
1874 
1875 		/*
1876 		 * The following read of the descriptor is from non-cached memory
1877 		 */
1878 		nbytes = dest_desc->nbytes;
1879 
1880 		/* If completion is invalid, break */
1881 		if (qdf_unlikely(nbytes == 0))
1882 			break;
1883 
1884 
1885 		/*
1886 		 * Build the nbuf list from valid completions
1887 		 */
1888 		nbuf = dest_ring->per_transfer_context[sw_index];
1889 
1890 		/*
1891 		 * No lock is needed here, since this is the only thread
1892 		 * that accesses the sw_index
1893 		 */
1894 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1895 
1896 		/*
1897 		 * CAREFUL : Uncached write, but still less expensive,
1898 		 * since most modern caches use "write-combining" to
1899 		 * flush multiple cache-writes all at once.
1900 		 */
1901 		dest_desc->nbytes = 0;
1902 
1903 		/*
1904 		 * Per our understanding this sync is not required on our
1905 		 * platform, since we are doing the same cache invalidation
1906 		 * operation on the same buffer twice in succession,
1907 		 * without any modification to this buffer by the CPU in
1908 		 * between.
1909 		 * However, this code with two syncs in succession has
1910 		 * been undergoing testing at a customer site, and has
1911 		 * shown no problems so far. We would like to confirm
1912 		 * with the customer that this sync is really not
1913 		 * required before we remove this line
1914 		 * completely.
1915 		 */
1916 		paddr = QDF_NBUF_CB_PADDR(nbuf);
1917 
1918 		qdf_mem_dma_sync_single_for_cpu(scn->qdf_dev, paddr,
1919 				(skb_end_pointer(nbuf) - (nbuf)->data),
1920 				DMA_FROM_DEVICE);
1921 
1922 		qdf_nbuf_put_tail(nbuf, nbytes);
1923 
1924 		qdf_assert_always(nbuf->data != NULL);
1925 
1926 		QDF_NBUF_CB_RX_CTX_ID(nbuf) =
1927 				hif_get_rx_ctx_id(ce_state->id, hif_hdl);
1928 		cmpl_msdus[nbuf_cmpl_idx++] = nbuf;
1929 
1930 		/*
1931 		 * we are not posting the buffers back; instead we are
1932 		 * reusing the buffers
1933 		 */
1934 		if (nbuf_cmpl_idx == scn->ce_service_max_rx_ind_flush) {
1935 			hif_record_ce_desc_event(scn, ce_state->id,
1936 						 FAST_RX_SOFTWARE_INDEX_UPDATE,
1937 						 NULL, NULL, sw_index, 0);
1938 			dest_ring->sw_index = sw_index;
1939 			ce_fastpath_rx_handle(ce_state, cmpl_msdus,
1940 						nbuf_cmpl_idx, ctrl_addr);
1941 
1942 			ce_state->receive_count += nbuf_cmpl_idx;
1943 			if (qdf_unlikely(hif_ce_service_should_yield(
1944 						scn, ce_state))) {
1945 				ce_state->force_break = 1;
1946 				qdf_atomic_set(&ce_state->rx_pending, 1);
1947 				return;
1948 			}
1949 
1950 			nbuf_cmpl_idx = 0;
1951 			more_comp_cnt = 0;
1952 		}
1953 	}
1954 
1955 	hif_record_ce_desc_event(scn, ce_state->id,
1956 				 FAST_RX_SOFTWARE_INDEX_UPDATE,
1957 				 NULL, NULL, sw_index, 0);
1958 
1959 	dest_ring->sw_index = sw_index;
1960 
1961 	/*
1962 	 * If there are not enough completions to fill the array,
1963 	 * just call the message handler here
1964 	 */
1965 	if (nbuf_cmpl_idx) {
1966 		ce_fastpath_rx_handle(ce_state, cmpl_msdus,
1967 				      nbuf_cmpl_idx, ctrl_addr);
1968 
1969 		ce_state->receive_count += nbuf_cmpl_idx;
1970 		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
1971 			ce_state->force_break = 1;
1972 			qdf_atomic_set(&ce_state->rx_pending, 1);
1973 			return;
1974 		}
1975 
1976 		/* check for more packets after upper layer processing */
1977 		nbuf_cmpl_idx = 0;
1978 		more_comp_cnt = 0;
1979 		goto more_data;
1980 	}
1981 
1982 	hif_update_napi_max_poll_time(ce_state, scn->napi_data.napis[ce_id],
1983 				      qdf_get_cpu());
1984 
1985 	qdf_atomic_set(&ce_state->rx_pending, 0);
1986 	if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
1987 		CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1988 					   HOST_IS_COPY_COMPLETE_MASK);
1989 	} else {
1990 		hif_err_rl("%s: target access is not allowed", __func__);
1991 		return;
1992 	}
1993 
1994 	if (ce_recv_entries_done_nolock_legacy(scn, ce_state)) {
1995 		if (more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1996 			goto more_data;
1997 		} else {
1998 			HIF_ERROR("%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1999 				  __func__, nentries_mask,
2000 				  ce_state->dest_ring->sw_index,
2001 				  CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr));
2002 		}
2003 	}
2004 #ifdef NAPI_YIELD_BUDGET_BASED
2005 	/* Caution: before modifying this code, please refer to hif_napi_poll() to
2006 	 * understand how napi_complete() gets called, and make the necessary changes.
2007 	 * Force break has to be done until WIN disables the interrupt at source. */
2008 	ce_state->force_break = 1;
2009 #endif
2010 }
2011 
2012 #else
2013 static void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
2014 {
2015 }
2016 #endif /* WLAN_FEATURE_FASTPATH */
2017 
2018 /*
2019  * Guts of interrupt handler for per-engine interrupts on a particular CE.
2020  *
2021  * Invokes registered callbacks for recv_complete,
2022  * send_complete, and watermarks.
2023  *
2024  * Returns: number of messages processed
2025  */
2026 int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
2027 {
2028 	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
2029 	uint32_t ctrl_addr = CE_state->ctrl_addr;
2030 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2031 	void *CE_context;
2032 	void *transfer_context;
2033 	qdf_dma_addr_t buf;
2034 	unsigned int nbytes;
2035 	unsigned int id;
2036 	unsigned int flags;
2037 	unsigned int more_comp_cnt = 0;
2038 	unsigned int more_snd_comp_cnt = 0;
2039 	unsigned int sw_idx, hw_idx;
2040 	uint32_t toeplitz_hash_result;
2041 	uint32_t mode = hif_get_conparam(scn);
2042 
2043 	if (hif_is_nss_wifi_enabled(scn) && (CE_state->htt_rx_data))
2044 		return CE_state->receive_count;
2045 
2046 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
2047 		HIF_ERROR("[premature rc=0]");
2048 		return 0; /* no work done */
2049 	}
2050 
2051 	/* Clear force_break flag and re-initialize receive_count to 0 */
2052 	CE_state->receive_count = 0;
2053 	CE_state->force_break = 0;
2054 	CE_state->ce_service_start_time = sched_clock();
2055 	CE_state->ce_service_yield_time =
2056 		CE_state->ce_service_start_time +
2057 		hif_get_ce_service_max_yield_time(
2058 			(struct hif_opaque_softc *)scn);
2059 
2060 	qdf_spin_lock(&CE_state->ce_index_lock);
2061 	/*
2062 	 * The check below makes sure the CE we are handling is a datapath CE
2063 	 * and that fastpath is enabled.
2064 	 */
2065 	if (ce_is_fastpath_handler_registered(CE_state)) {
2066 		/* For datapath only Rx CEs */
2067 		ce_per_engine_service_fast(scn, CE_id);
2068 		goto unlock_end;
2069 	}
2070 
2071 more_completions:
2072 	if (CE_state->recv_cb) {
2073 
2074 		/* Pop completed recv buffers and call
2075 		 * the registered recv callback for each
2076 		 */
2077 		while (hif_state->ce_services->ce_completed_recv_next_nolock
2078 				(CE_state, &CE_context, &transfer_context,
2079 				&buf, &nbytes, &id, &flags) ==
2080 				QDF_STATUS_SUCCESS) {
2081 			qdf_spin_unlock(&CE_state->ce_index_lock);
2082 			CE_state->recv_cb((struct CE_handle *)CE_state,
2083 					  CE_context, transfer_context, buf,
2084 					  nbytes, id, flags);
2085 
2086 			/*
2087 			 * EV #112693 -
2088 			 * [Peregrine][ES1][WB342][Win8x86][Performance]
2089 			 * BSoD_0x133 occurred in VHT80 UDP_DL
2090 			 * Break out of the DPC by force if the number of
2091 			 * loops in hif_pci_ce_recv_data reaches
2092 			 * MAX_NUM_OF_RECEIVES, to avoid spending too long
2093 			 * in the DPC for each interrupt handling. Schedule
2094 			 * another DPC to avoid data loss if the force-break
2095 			 * action was taken. This currently applies to
2096 			 * Windows only; Linux/MAC OS can extend it to their
2097 			 * platforms if necessary.
2098 			 */
2099 
2100 			/* Break the receive processes by
2101 			 * force if force_break set up
2102 			 */
2103 			if (qdf_unlikely(CE_state->force_break)) {
2104 				qdf_atomic_set(&CE_state->rx_pending, 1);
2105 				goto target_access_end;
2106 			}
2107 			qdf_spin_lock(&CE_state->ce_index_lock);
2108 		}
2109 	}
2110 
2111 	/*
2112 	 * Attention: we may experience a potential infinite loop in the while
2113 	 * loop below during a send stress test.
2114 	 * Resolve it the same way as the receive case (refer to EV #112693).
2115 	 */
2116 
2117 	if (CE_state->send_cb) {
2118 		/* Pop completed send buffers and call
2119 		 * the registered send callback for each
2120 		 */
2121 
2122 #ifdef ATH_11AC_TXCOMPACT
2123 		while (hif_state->ce_services->ce_completed_send_next_nolock
2124 			 (CE_state, &CE_context,
2125 			 &transfer_context, &buf, &nbytes,
2126 			 &id, &sw_idx, &hw_idx,
2127 			 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
2128 
2129 			if (CE_id != CE_HTT_H2T_MSG ||
2130 			    QDF_IS_EPPING_ENABLED(mode)) {
2131 				qdf_spin_unlock(&CE_state->ce_index_lock);
2132 				CE_state->send_cb((struct CE_handle *)CE_state,
2133 						  CE_context, transfer_context,
2134 						  buf, nbytes, id, sw_idx,
2135 						  hw_idx, toeplitz_hash_result);
2136 				qdf_spin_lock(&CE_state->ce_index_lock);
2137 			} else {
2138 				struct HIF_CE_pipe_info *pipe_info =
2139 					(struct HIF_CE_pipe_info *)CE_context;
2140 
2141 				qdf_spin_lock(&pipe_info->
2142 					      completion_freeq_lock);
2143 				pipe_info->num_sends_allowed++;
2144 				qdf_spin_unlock(&pipe_info->
2145 						completion_freeq_lock);
2146 			}
2147 		}
2148 #else                           /*ATH_11AC_TXCOMPACT */
2149 		while (hif_state->ce_services->ce_completed_send_next_nolock
2150 			 (CE_state, &CE_context,
2151 			  &transfer_context, &buf, &nbytes,
2152 			  &id, &sw_idx, &hw_idx,
2153 			  &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
2154 			qdf_spin_unlock(&CE_state->ce_index_lock);
2155 			CE_state->send_cb((struct CE_handle *)CE_state,
2156 				  CE_context, transfer_context, buf,
2157 				  nbytes, id, sw_idx, hw_idx,
2158 				  toeplitz_hash_result);
2159 			qdf_spin_lock(&CE_state->ce_index_lock);
2160 		}
2161 #endif /*ATH_11AC_TXCOMPACT */
2162 	}
2163 
2164 more_watermarks:
2165 	if (CE_state->misc_cbs) {
2166 		if (CE_state->watermark_cb &&
2167 				hif_state->ce_services->watermark_int(CE_state,
2168 					&flags)) {
2169 			qdf_spin_unlock(&CE_state->ce_index_lock);
2170 			/* Convert HW IS bits to software flags */
2171 			CE_state->watermark_cb((struct CE_handle *)CE_state,
2172 					CE_state->wm_context, flags);
2173 			qdf_spin_lock(&CE_state->ce_index_lock);
2174 		}
2175 	}
2176 
2177 	/*
2178 	 * Clear the misc interrupts (watermark) that were handled above,
2179 	 * and that will be checked again below.
2180 	 * Clear and check for copy-complete interrupts again, just in case
2181 	 * more copy completions happened while the misc interrupts were being
2182 	 * handled.
2183 	 */
2184 	if (!ce_srng_based(scn)) {
2185 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
2186 			CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
2187 					   CE_WATERMARK_MASK |
2188 					   HOST_IS_COPY_COMPLETE_MASK);
2189 		} else {
2190 			hif_err_rl("%s: target access is not allowed",
2191 				   __func__);
2192 			goto unlock_end;
2193 		}
2194 	}
2195 
2196 	/*
2197 	 * Now that per-engine interrupts are cleared, verify that
2198 	 * no recv interrupts arrive while processing send interrupts,
2199 	 * and no recv or send interrupts happened while processing
2200 	 * misc interrupts. Go back and check again. Keep checking until
2201 	 * we find no more events to process.
2202 	 */
2203 	if (CE_state->recv_cb &&
2204 		hif_state->ce_services->ce_recv_entries_done_nolock(scn,
2205 				CE_state)) {
2206 		if (QDF_IS_EPPING_ENABLED(mode) ||
2207 		    more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
2208 			goto more_completions;
2209 		} else {
2210 			if (!ce_srng_based(scn)) {
2211 				HIF_ERROR(
2212 					"%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
2213 					__func__,
2214 					CE_state->dest_ring->nentries_mask,
2215 					CE_state->dest_ring->sw_index,
2216 					CE_DEST_RING_READ_IDX_GET(scn,
2217 							  CE_state->ctrl_addr));
2218 			}
2219 		}
2220 	}
2221 
2222 	if (CE_state->send_cb &&
2223 		hif_state->ce_services->ce_send_entries_done_nolock(scn,
2224 				CE_state)) {
2225 		if (QDF_IS_EPPING_ENABLED(mode) ||
2226 		    more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
2227 			goto more_completions;
2228 		} else {
2229 			if (!ce_srng_based(scn)) {
2230 				HIF_ERROR(
2231 					"%s:Potential infinite loop detected during send completion nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
2232 					__func__,
2233 					CE_state->src_ring->nentries_mask,
2234 					CE_state->src_ring->sw_index,
2235 					CE_SRC_RING_READ_IDX_GET(scn,
2236 							 CE_state->ctrl_addr));
2237 			}
2238 		}
2239 	}
2240 
2241 	if (CE_state->misc_cbs && CE_state->watermark_cb) {
2242 		if (hif_state->ce_services->watermark_int(CE_state, &flags))
2243 			goto more_watermarks;
2244 	}
2245 
2246 	qdf_atomic_set(&CE_state->rx_pending, 0);
2247 
2248 unlock_end:
2249 	qdf_spin_unlock(&CE_state->ce_index_lock);
2250 target_access_end:
2251 	if (Q_TARGET_ACCESS_END(scn) < 0)
2252 		HIF_ERROR("<--[premature rc=%d]", CE_state->receive_count);
2253 	return CE_state->receive_count;
2254 }
2255 qdf_export_symbol(ce_per_engine_service);
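/*
 * Minimal sketch of how this service routine is typically driven from a
 * per-CE bottom half; the wrapper name below is illustrative only (the
 * real callers live in the tasklet/NAPI code), but the calling pattern is
 * the one this function expects: one call per scheduled CE, with the
 * return value reporting how many messages were processed.
 *
 *	static void example_ce_bottom_half(struct hif_softc *scn,
 *					   unsigned int ce_id)
 *	{
 *		int work_done = ce_per_engine_service(scn, ce_id);
 *
 *		if (ce_check_rx_pending(scn->ce_id_to_state[ce_id]))
 *			(reschedule: not all Rx work was drained)
 *		else
 *			(re-enable the CE interrupt / complete NAPI)
 *	}
 */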
2256 
2257 /*
2258  * Handler for per-engine interrupts on ALL active CEs.
2259  * This is used in cases where the system is sharing a
2260  * single interrupt for all CEs.
2261  */
2262 
2263 void ce_per_engine_service_any(int irq, struct hif_softc *scn)
2264 {
2265 	int CE_id;
2266 	uint32_t intr_summary;
2267 
2268 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2269 		return;
2270 
2271 	if (!qdf_atomic_read(&scn->tasklet_from_intr)) {
2272 		for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
2273 			struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
2274 
2275 			if (qdf_atomic_read(&CE_state->rx_pending)) {
2276 				qdf_atomic_set(&CE_state->rx_pending, 0);
2277 				ce_per_engine_service(scn, CE_id);
2278 			}
2279 		}
2280 
2281 		Q_TARGET_ACCESS_END(scn);
2282 		return;
2283 	}
2284 
2285 	intr_summary = CE_INTERRUPT_SUMMARY(scn);
2286 
2287 	for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
2288 		if (intr_summary & (1 << CE_id))
2289 			intr_summary &= ~(1 << CE_id);
2290 		else
2291 			continue;       /* no intr pending on this CE */
2292 
2293 		ce_per_engine_service(scn, CE_id);
2294 	}
2295 
2296 	Q_TARGET_ACCESS_END(scn);
2297 }
2298 
2299 /*
2300  * Adjust interrupts for the copy complete handler.
2301  * If it's needed for either send or recv, then unmask
2302  * this interrupt; otherwise, mask it.
2303  *
2304  * Called with target_lock held.
2305  */
2306 static void
2307 ce_per_engine_handler_adjust_legacy(struct CE_state *CE_state,
2308 			     int disable_copy_compl_intr)
2309 {
2310 	uint32_t ctrl_addr = CE_state->ctrl_addr;
2311 	struct hif_softc *scn = CE_state->scn;
2312 
2313 	CE_state->disable_copy_compl_intr = disable_copy_compl_intr;
2314 
2315 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2316 		return;
2317 
2318 	if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
2319 		hif_err_rl("%s: target access is not allowed", __func__);
2320 		return;
2321 	}
2322 
2323 	if ((!disable_copy_compl_intr) &&
2324 	    (CE_state->send_cb || CE_state->recv_cb))
2325 		CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
2326 	else
2327 		CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
2328 
2329 	if (CE_state->watermark_cb)
2330 		CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
2331 	else
2332 		CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
2333 	Q_TARGET_ACCESS_END(scn);
2334 }
2335 
2336 /* Iterate the CE_state list and disable the copy complete interrupt
2337  * if it has been registered already.
2338  */
2339 void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn)
2340 {
2341 	int CE_id;
2342 
2343 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2344 		return;
2345 
2346 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
2347 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
2348 		uint32_t ctrl_addr = CE_state->ctrl_addr;
2349 
2350 		/* if the interrupt is currently enabled, disable it */
2351 		if (!CE_state->disable_copy_compl_intr
2352 		    && (CE_state->send_cb || CE_state->recv_cb))
2353 			CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
2354 
2355 		if (CE_state->watermark_cb)
2356 			CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
2357 	}
2358 	Q_TARGET_ACCESS_END(scn);
2359 }
2360 
2361 void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn)
2362 {
2363 	int CE_id;
2364 
2365 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2366 		return;
2367 
2368 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
2369 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
2370 		uint32_t ctrl_addr = CE_state->ctrl_addr;
2371 
2372 		/*
2373 		 * If the CE is supposed to have copy complete interrupts
2374 		 * enabled (i.e. there is a callback registered, and the
2375 		 * "disable" flag is not set), then re-enable the interrupt.
2376 		 */
2377 		if (!CE_state->disable_copy_compl_intr
2378 		    && (CE_state->send_cb || CE_state->recv_cb))
2379 			CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
2380 
2381 		if (CE_state->watermark_cb)
2382 			CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
2383 	}
2384 	Q_TARGET_ACCESS_END(scn);
2385 }
2386 
2387 /**
2388  * ce_send_cb_register(): register completion handler
2389  * @copyeng: CE_state representing the ce we are adding the behavior to
2390  * @fn_ptr: callback that the ce should use when processing tx completions
2391  * @disable_interrupts: whether the copy complete interrupt should be disabled.
2392  *
2393  * Caller should guarantee that no transactions are in progress before
2394  * switching the callback function.
2395  *
2396  * Registers the send context before the fn pointer so that if the cb is valid
2397  * the context should be valid.
2398  *
2399  * Beware that currently this function will enable completion interrupts.
2400  */
2401 void
2402 ce_send_cb_register(struct CE_handle *copyeng,
2403 		    ce_send_cb fn_ptr,
2404 		    void *ce_send_context, int disable_interrupts)
2405 {
2406 	struct CE_state *CE_state = (struct CE_state *)copyeng;
2407 	struct hif_softc *scn;
2408 	struct HIF_CE_state *hif_state;
2409 
2410 	if (CE_state == NULL) {
2411 		HIF_ERROR("%s: Error CE state = NULL", __func__);
2412 		return;
2413 	}
2414 	scn = CE_state->scn;
2415 	hif_state = HIF_GET_CE_STATE(scn);
2416 	if (hif_state == NULL) {
2417 		HIF_ERROR("%s: Error HIF state = NULL", __func__);
2418 		return;
2419 	}
2420 	CE_state->send_context = ce_send_context;
2421 	CE_state->send_cb = fn_ptr;
2422 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
2423 							disable_interrupts);
2424 }
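/*
 * Illustrative registration sketch; the callback and context names are
 * hypothetical, but the parameter list mirrors how CE_state->send_cb is
 * invoked in ce_per_engine_service() above:
 *
 *	static void example_send_done(struct CE_handle *copyeng,
 *				      void *ce_context, void *transfer_ctx,
 *				      qdf_dma_addr_t buf, unsigned int nbytes,
 *				      unsigned int transfer_id,
 *				      unsigned int sw_idx, unsigned int hw_idx,
 *				      uint32_t toeplitz_hash_result)
 *	{
 *		(release transfer_ctx, e.g. free the sent nbuf)
 *	}
 *
 *	ce_send_cb_register(copyeng, example_send_done, pipe_info, 0);
 */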
2425 
2426 /**
2427  * ce_recv_cb_register(): register completion handler
2428  * @copyeng: CE_state representing the ce we are adding the behavior to
2429  * @fn_ptr: callback that the ce should use when processing rx completions
2430  * @disable_interrupts: whether the copy complete interrupt should be disabled.
2431  *
2432  * Registers the recv context before the fn pointer so that if the cb is valid
2433  * the context should be valid.
2434  *
2435  * Caller should guarantee that no transactions are in progress before
2436  * switching the callback function.
2437  */
2438 void
2439 ce_recv_cb_register(struct CE_handle *copyeng,
2440 		    CE_recv_cb fn_ptr,
2441 		    void *CE_recv_context, int disable_interrupts)
2442 {
2443 	struct CE_state *CE_state = (struct CE_state *)copyeng;
2444 	struct hif_softc *scn;
2445 	struct HIF_CE_state *hif_state;
2446 
2447 	if (CE_state == NULL) {
2448 		HIF_ERROR("%s: ERROR CE state = NULL", __func__);
2449 		return;
2450 	}
2451 	scn = CE_state->scn;
2452 	hif_state = HIF_GET_CE_STATE(scn);
2453 	if (hif_state == NULL) {
2454 		HIF_ERROR("%s: Error HIF state = NULL", __func__);
2455 		return;
2456 	}
2457 	CE_state->recv_context = CE_recv_context;
2458 	CE_state->recv_cb = fn_ptr;
2459 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
2460 							disable_interrupts);
2461 }
2462 
2463 /**
2464  * ce_watermark_cb_register(): register completion handler
2465  * @copyeng: CE_state representing the ce we are adding the behavior to
2466  * @fn_ptr: callback that the ce should use when processing watermark events
2467  *
2468  * Caller should guarantee that no watermark events are being processed before
2469  * switching the callback function.
2470  */
2471 void
2472 ce_watermark_cb_register(struct CE_handle *copyeng,
2473 			 CE_watermark_cb fn_ptr, void *CE_wm_context)
2474 {
2475 	struct CE_state *CE_state = (struct CE_state *)copyeng;
2476 	struct hif_softc *scn = CE_state->scn;
2477 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2478 
2479 	CE_state->watermark_cb = fn_ptr;
2480 	CE_state->wm_context = CE_wm_context;
2481 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
2482 							0);
2483 	if (fn_ptr)
2484 		CE_state->misc_cbs = 1;
2485 }
2486 
2487 bool ce_get_rx_pending(struct hif_softc *scn)
2488 {
2489 	int CE_id;
2490 
2491 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
2492 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
2493 
2494 		if (qdf_atomic_read(&CE_state->rx_pending))
2495 			return true;
2496 	}
2497 
2498 	return false;
2499 }
2500 
2501 /**
2502  * ce_check_rx_pending() - check whether rx is still pending on a copy engine
2503  * @CE_state: context of the copy engine to check
2504  *
2505  * Return: true if the last per_engine_service call
2506  *	didn't process all the rx descriptors.
2507  */
2508 bool ce_check_rx_pending(struct CE_state *CE_state)
2509 {
2510 	if (qdf_atomic_read(&CE_state->rx_pending))
2511 		return true;
2512 	else
2513 		return false;
2514 }
2515 qdf_export_symbol(ce_check_rx_pending);
2516 
2517 #ifdef IPA_OFFLOAD
2518 /**
2519  * ce_ipa_get_resource() - get uc resource on copyengine
2520  * @ce: copyengine context
2521  * @ce_sr: copyengine source ring resource info
2522  * @ce_sr_ring_size: copyengine source ring size
2523  * @ce_reg_paddr: copyengine register physical address
2524  *
2525  * Copy engine should release resource to micro controller
2526  * Micro controller needs
2527  *  - Copy engine source descriptor base address
2528  *  - Copy engine source descriptor size
2529  *  - PCI BAR address to access copy engine regiser
2530  *
2531  * Return: None
2532  */
2533 void ce_ipa_get_resource(struct CE_handle *ce,
2534 			 qdf_shared_mem_t **ce_sr,
2535 			 uint32_t *ce_sr_ring_size,
2536 			 qdf_dma_addr_t *ce_reg_paddr)
2537 {
2538 	struct CE_state *CE_state = (struct CE_state *)ce;
2539 	uint32_t ring_loop;
2540 	struct CE_src_desc *ce_desc;
2541 	qdf_dma_addr_t phy_mem_base;
2542 	struct hif_softc *scn = CE_state->scn;
2543 
2544 	if (CE_UNUSED == CE_state->state) {
2545 		*qdf_mem_get_dma_addr_ptr(scn->qdf_dev,
2546 			&CE_state->scn->ipa_ce_ring->mem_info) = 0;
2547 		*ce_sr_ring_size = 0;
2548 		return;
2549 	}
2550 
2551 	/* Update default value for descriptor */
2552 	for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
2553 	     ring_loop++) {
2554 		ce_desc = (struct CE_src_desc *)
2555 			  ((char *)CE_state->src_ring->base_addr_owner_space +
2556 			   ring_loop * (sizeof(struct CE_src_desc)));
2557 		CE_IPA_RING_INIT(ce_desc);
2558 	}
2559 
2560 	/* Get BAR address */
2561 	hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);
2562 
2563 	*ce_sr = CE_state->scn->ipa_ce_ring;
2564 	*ce_sr_ring_size = (uint32_t)(CE_state->src_ring->nentries *
2565 		sizeof(struct CE_src_desc));
2566 	*ce_reg_paddr = phy_mem_base + CE_BASE_ADDRESS(CE_state->id) +
2567 			SR_WR_INDEX_ADDRESS;
2568 }
2569 #endif /* IPA_OFFLOAD */
2570 
2571 static bool ce_check_int_watermark(struct CE_state *CE_state,
2572 				   unsigned int *flags)
2573 {
2574 	uint32_t ce_int_status;
2575 	uint32_t ctrl_addr = CE_state->ctrl_addr;
2576 	struct hif_softc *scn = CE_state->scn;
2577 
2578 	ce_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
2579 	if (ce_int_status & CE_WATERMARK_MASK) {
2580 		/* Convert HW IS bits to software flags */
2581 		*flags =
2582 			(ce_int_status & CE_WATERMARK_MASK) >>
2583 			CE_WM_SHFT;
2584 		return true;
2585 	}
2586 
2587 	return false;
2588 }
2589 
2590 static void ce_legacy_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
2591 			struct CE_ring_state *src_ring,
2592 			struct CE_attr *attr)
2593 {
2594 	uint32_t ctrl_addr;
2595 	uint64_t dma_addr;
2596 
2597 	QDF_ASSERT(ce_id < scn->ce_count);
2598 	ctrl_addr = CE_BASE_ADDRESS(ce_id);
2599 
2600 	src_ring->hw_index =
2601 		CE_SRC_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
2602 	src_ring->sw_index = src_ring->hw_index;
2603 	src_ring->write_index =
2604 		CE_SRC_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
2605 	dma_addr = src_ring->base_addr_CE_space;
2606 	CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
2607 			(uint32_t)(dma_addr & 0xFFFFFFFF));
2608 
2609 	/* if SR_BA_ADDRESS_HIGH register exists */
2610 	if (is_register_supported(SR_BA_ADDRESS_HIGH)) {
2611 		uint32_t tmp;
2612 
2613 		tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET(
2614 				scn, ctrl_addr);
2615 		tmp &= ~0x1F;
2616 		dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
2617 		CE_SRC_RING_BASE_ADDR_HIGH_SET(scn,
2618 				ctrl_addr, (uint32_t)dma_addr);
2619 	}
2620 	CE_SRC_RING_SZ_SET(scn, ctrl_addr, src_ring->nentries);
2621 	CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max);
2622 #ifdef BIG_ENDIAN_HOST
2623 	/* Enable source ring byte swap for big endian host */
2624 	CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
2625 #endif
2626 	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
2627 	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, src_ring->nentries);
2628 
2629 }
2630 
2631 static void ce_legacy_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id,
2632 				struct CE_ring_state *dest_ring,
2633 				struct CE_attr *attr)
2634 {
2635 	uint32_t ctrl_addr;
2636 	uint64_t dma_addr;
2637 
2638 	QDF_ASSERT(ce_id < scn->ce_count);
2639 	ctrl_addr = CE_BASE_ADDRESS(ce_id);
2640 	dest_ring->sw_index =
2641 		CE_DEST_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
2642 	dest_ring->write_index =
2643 		CE_DEST_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
2644 	dma_addr = dest_ring->base_addr_CE_space;
2645 	CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
2646 			(uint32_t)(dma_addr & 0xFFFFFFFF));
2647 
2648 	/* if DR_BA_ADDRESS_HIGH exists */
2649 	if (is_register_supported(DR_BA_ADDRESS_HIGH)) {
2650 		uint32_t tmp;
2651 
2652 		tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn,
2653 				ctrl_addr);
2654 		tmp &= ~0x1F;
2655 		dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
2656 		CE_DEST_RING_BASE_ADDR_HIGH_SET(scn,
2657 				ctrl_addr, (uint32_t)dma_addr);
2658 	}
2659 
2660 	CE_DEST_RING_SZ_SET(scn, ctrl_addr, dest_ring->nentries);
2661 #ifdef BIG_ENDIAN_HOST
2662 	/* Enable Dest ring byte swap for big endian host */
2663 	CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
2664 #endif
2665 	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
2666 	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, dest_ring->nentries);
2667 }
2668 
2669 static uint32_t ce_get_desc_size_legacy(uint8_t ring_type)
2670 {
2671 	switch (ring_type) {
2672 	case CE_RING_SRC:
2673 		return sizeof(struct CE_src_desc);
2674 	case CE_RING_DEST:
2675 		return sizeof(struct CE_dest_desc);
2676 	case CE_RING_STATUS:
2677 		qdf_assert(0);
2678 		return 0;
2679 	default:
2680 		return 0;
2681 	}
2682 
2683 	return 0;
2684 }
2685 
2686 static int ce_ring_setup_legacy(struct hif_softc *scn, uint8_t ring_type,
2687 		uint32_t ce_id, struct CE_ring_state *ring,
2688 		struct CE_attr *attr)
2689 {
2690 	int status = Q_TARGET_ACCESS_BEGIN(scn);
2691 
2692 	if (status < 0)
2693 		goto out;
2694 
2695 
2696 	switch (ring_type) {
2697 	case CE_RING_SRC:
2698 		ce_legacy_src_ring_setup(scn, ce_id, ring, attr);
2699 		break;
2700 	case CE_RING_DEST:
2701 		ce_legacy_dest_ring_setup(scn, ce_id, ring, attr);
2702 		break;
2703 	case CE_RING_STATUS:
2704 	default:
2705 		qdf_assert(0);
2706 		break;
2707 	}
2708 
2709 	Q_TARGET_ACCESS_END(scn);
2710 out:
2711 	return status;
2712 }
2713 
2714 static void ce_prepare_shadow_register_v2_cfg_legacy(struct hif_softc *scn,
2715 			    struct pld_shadow_reg_v2_cfg **shadow_config,
2716 			    int *num_shadow_registers_configured)
2717 {
2718 	*num_shadow_registers_configured = 0;
2719 	*shadow_config = NULL;
2720 }
2721 
2722 struct ce_ops ce_service_legacy = {
2723 	.ce_get_desc_size = ce_get_desc_size_legacy,
2724 	.ce_ring_setup = ce_ring_setup_legacy,
2725 	.ce_sendlist_send = ce_sendlist_send_legacy,
2726 	.ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_legacy,
2727 	.ce_revoke_recv_next = ce_revoke_recv_next_legacy,
2728 	.ce_cancel_send_next = ce_cancel_send_next_legacy,
2729 	.ce_recv_buf_enqueue = ce_recv_buf_enqueue_legacy,
2730 	.ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_legacy,
2731 	.ce_send_nolock = ce_send_nolock_legacy,
2732 	.watermark_int = ce_check_int_watermark,
2733 	.ce_completed_send_next_nolock = ce_completed_send_next_nolock_legacy,
2734 	.ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_legacy,
2735 	.ce_send_entries_done_nolock = ce_send_entries_done_nolock_legacy,
2736 	.ce_prepare_shadow_register_v2_cfg =
2737 		ce_prepare_shadow_register_v2_cfg_legacy,
2738 };
2739 
2740 
2741 struct ce_ops *ce_services_legacy(void)
2742 {
2743 	return &ce_service_legacy;
2744 }
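/*
 * Consumption sketch: the CE attach/service-selection path (outside this
 * file) is assumed to store this ops table in HIF_CE_state, and every
 * dispatcher in this file then indirects through it, in the same way
 * ce_cancel_send_next() does above:
 *
 *	hif_state->ce_services = ce_services_legacy();
 *	...
 *	hif_state->ce_services->ce_ring_setup(scn, CE_RING_SRC, ce_id,
 *					      src_ring, attr);
 */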
2745 
2746 #if HIF_CE_DEBUG_DATA_BUF
2747 /**
2748  * hif_dump_desc_data_buf() - dump the data attached to a ce descriptor event
2749  * @buf: buffer to copy to
2750  * @pos: current write position in @buf
2751  * @data: data to be copied
2752  * @data_len: length of the data to be copied
2753  */
2754 static uint32_t hif_dump_desc_data_buf(uint8_t *buf, ssize_t pos,
2755 					uint8_t *data, uint32_t data_len)
2756 {
2757 	pos += snprintf(buf + pos, PAGE_SIZE - pos, "Data:(Max%dBytes)\n",
2758 			CE_DEBUG_MAX_DATA_BUF_SIZE);
2759 
2760 	if ((data_len > 0) && data) {
2761 		if (data_len < 16) {
2762 			hex_dump_to_buffer(data,
2763 						CE_DEBUG_DATA_PER_ROW,
2764 						16, 1, buf + pos,
2765 						(ssize_t)PAGE_SIZE - pos,
2766 						false);
2767 			pos += CE_DEBUG_PRINT_BUF_SIZE(data_len);
2768 			pos += snprintf(buf + pos, PAGE_SIZE - pos, "\n");
2769 		} else {
2770 			uint32_t rows = (data_len / 16) + 1;
2771 			uint32_t row = 0;
2772 
2773 			for (row = 0; row < rows; row++) {
2774 				hex_dump_to_buffer(data + (row * 16),
2775 							CE_DEBUG_DATA_PER_ROW,
2776 							16, 1, buf + pos,
2777 							(ssize_t)PAGE_SIZE
2778 							- pos, false);
2779 				pos +=
2780 				CE_DEBUG_PRINT_BUF_SIZE(CE_DEBUG_DATA_PER_ROW);
2781 				pos += snprintf(buf + pos, PAGE_SIZE - pos,
2782 						"\n");
2783 			}
2784 		}
2785 	}
2786 
2787 	return pos;
2788 }
2789 #endif
2790 
2791 /*
2792  * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) also needs to be
2793  * checked here
2794  */
2795 #if HIF_CE_DEBUG_DATA_BUF
2796 static const char *ce_event_type_to_str(enum hif_ce_event_type type)
2797 {
2798 	switch (type) {
2799 	case HIF_RX_DESC_POST:
2800 		return "HIF_RX_DESC_POST";
2801 	case HIF_RX_DESC_COMPLETION:
2802 		return "HIF_RX_DESC_COMPLETION";
2803 	case HIF_TX_GATHER_DESC_POST:
2804 		return "HIF_TX_GATHER_DESC_POST";
2805 	case HIF_TX_DESC_POST:
2806 		return "HIF_TX_DESC_POST";
2807 	case HIF_TX_DESC_SOFTWARE_POST:
2808 		return "HIF_TX_DESC_SOFTWARE_POST";
2809 	case HIF_TX_DESC_COMPLETION:
2810 		return "HIF_TX_DESC_COMPLETION";
2811 	case FAST_RX_WRITE_INDEX_UPDATE:
2812 		return "FAST_RX_WRITE_INDEX_UPDATE";
2813 	case FAST_RX_SOFTWARE_INDEX_UPDATE:
2814 		return "FAST_RX_SOFTWARE_INDEX_UPDATE";
2815 	case FAST_TX_WRITE_INDEX_UPDATE:
2816 		return "FAST_TX_WRITE_INDEX_UPDATE";
2817 	case FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE:
2818 		return "FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE";
2819 	case FAST_TX_SOFTWARE_INDEX_UPDATE:
2820 		return "FAST_TX_SOFTWARE_INDEX_UPDATE";
2821 	case RESUME_WRITE_INDEX_UPDATE:
2822 		return "RESUME_WRITE_INDEX_UPDATE";
2823 	case HIF_IRQ_EVENT:
2824 		return "HIF_IRQ_EVENT";
2825 	case HIF_CE_TASKLET_ENTRY:
2826 		return "HIF_CE_TASKLET_ENTRY";
2827 	case HIF_CE_TASKLET_RESCHEDULE:
2828 		return "HIF_CE_TASKLET_RESCHEDULE";
2829 	case HIF_CE_TASKLET_EXIT:
2830 		return "HIF_CE_TASKLET_EXIT";
2831 	case HIF_CE_REAP_ENTRY:
2832 		return "HIF_CE_REAP_ENTRY";
2833 	case HIF_CE_REAP_EXIT:
2834 		return "HIF_CE_REAP_EXIT";
2835 	case NAPI_SCHEDULE:
2836 		return "NAPI_SCHEDULE";
2837 	case NAPI_POLL_ENTER:
2838 		return "NAPI_POLL_ENTER";
2839 	case NAPI_COMPLETE:
2840 		return "NAPI_COMPLETE";
2841 	case NAPI_POLL_EXIT:
2842 		return "NAPI_POLL_EXIT";
2843 	case HIF_RX_NBUF_ALLOC_FAILURE:
2844 		return "HIF_RX_NBUF_ALLOC_FAILURE";
2845 	case HIF_RX_NBUF_MAP_FAILURE:
2846 		return "HIF_RX_NBUF_MAP_FAILURE";
2847 	case HIF_RX_NBUF_ENQUEUE_FAILURE:
2848 		return "HIF_RX_NBUF_ENQUEUE_FAILURE";
2849 	default:
2850 		return "invalid";
2851 	}
2852 }
2853 
2854 /**
2855  * hif_dump_desc_event() - dump the currently selected ce descriptor event
2856  * @scn: hif context
2857  * @buf: buffer into which the event is dumped
2858  * Return: number of bytes written to the buffer, or a negative error
2859  */
2860 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf)
2861 {
2862 	struct hif_ce_desc_event *event;
2863 	uint64_t secs, usecs;
2864 	ssize_t len = 0;
2865 	struct ce_desc_hist *ce_hist = NULL;
2866 	struct hif_ce_desc_event *hist_ev = NULL;
2867 
2868 	if (!scn)
2869 		return -EINVAL;
2870 
2871 	ce_hist = &scn->hif_ce_desc_hist;
2872 
2873 	if ((ce_hist->hist_id >= CE_COUNT_MAX) ||
2874 	    (ce_hist->hist_index >= HIF_CE_HISTORY_MAX)) {
2875 		qdf_print("Invalid values\n");
2876 		return -EINVAL;
2877 	}
2878 
2879 	hist_ev =
2880 		(struct hif_ce_desc_event *)ce_hist->hist_ev[ce_hist->hist_id];
2881 
2882 	if (!hist_ev) {
2883 		qdf_print("Low Memory\n");
2884 		return -EINVAL;
2885 	}
2886 
2887 	event = &hist_ev[ce_hist->hist_index];
2888 
2889 	qdf_log_timestamp_to_secs(event->time, &secs, &usecs);
2890 
2891 	len += snprintf(buf, PAGE_SIZE - len,
2892 			"\nTime:%lld.%06lld, CE:%d, EventType: %s, EventIndex: %d\nDataAddr=%pK",
2893 			secs, usecs, ce_hist->hist_id,
2894 			ce_event_type_to_str(event->type),
2895 			event->index, event->memory);
2896 #if HIF_CE_DEBUG_DATA_BUF
2897 	len += snprintf(buf + len, PAGE_SIZE - len, ", Data len=%d",
2898 			event->actual_data_len);
2899 #endif
2900 
2901 	len += snprintf(buf + len, PAGE_SIZE - len, "\nCE descriptor: ");
2902 
2903 	hex_dump_to_buffer(&event->descriptor, sizeof(union ce_desc),
2904 				16, 1, buf + len,
2905 				(ssize_t)PAGE_SIZE - len, false);
2906 	len += CE_DEBUG_PRINT_BUF_SIZE(sizeof(union ce_desc));
2907 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
2908 
2909 #if HIF_CE_DEBUG_DATA_BUF
2910 	if (ce_hist->data_enable[ce_hist->hist_id])
2911 		len = hif_dump_desc_data_buf(buf, len, event->data,
2912 						(event->actual_data_len <
2913 						 CE_DEBUG_MAX_DATA_BUF_SIZE) ?
2914 						event->actual_data_len :
2915 						CE_DEBUG_MAX_DATA_BUF_SIZE);
2916 #endif /*HIF_CE_DEBUG_DATA_BUF*/
2917 
2918 	len += snprintf(buf + len, PAGE_SIZE - len, "END\n");
2919 
2920 	return len;
2921 }
2922 
2923 /*
2924  * hif_input_desc_trace_buf_index() -
2925  * API to set the CE id and CE debug storage buffer index from user input
2926  *
2927  * @scn: hif context
2928  * @buf: data received from the user
2929  * @size: size of the user input
2930  *
2931  * Return: total length consumed on success, negative error on failure
2932  */
2933 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
2934 					const char *buf, size_t size)
2935 {
2936 	struct ce_desc_hist *ce_hist = NULL;
2937 
2938 	if (!scn)
2939 		return -EINVAL;
2940 
2941 	ce_hist = &scn->hif_ce_desc_hist;
2942 
2943 	if (!size) {
2944 		pr_err("%s: Invalid input buffer.\n", __func__);
2945 		return -EINVAL;
2946 	}
2947 
2948 	if (sscanf(buf, "%d %d", &ce_hist->hist_id,
2949 			&ce_hist->hist_index) != 2) {
2950 		pr_err("%s: Invalid input value.\n", __func__);
2951 		return -EINVAL;
2952 	}
2953 	if ((ce_hist->hist_id >= CE_COUNT_MAX) ||
2954 	   (ce_hist->hist_index >= HIF_CE_HISTORY_MAX)) {
2955 		qdf_print("Invalid values\n");
2956 		return -EINVAL;
2957 	}
2958 
2959 	return size;
2960 }
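/*
 * Example input, based on the "%d %d" format parsed above: writing the
 * string "2 10" through the corresponding sysfs attribute selects CE id 2
 * and history index 10 for the next hif_dump_desc_event() read.
 */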
2961 
2962 #endif /* For MCL, HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */
2963 
2964 #if HIF_CE_DEBUG_DATA_BUF
2965 /*
2966  * hif_ce_en_desc_hist() -
2967  * API to enable recording the CE desc history
2968  *
2969  * @scn: hif context
2970  * @buf: data received from the user
2971  * @size: size of the user input
2972  *
2973  * Starts recording the ce desc history
2974  *
2975  * Return: total length consumed on success, negative error on failure
2976  */
2977 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn, const char *buf, size_t size)
2978 {
2979 	struct ce_desc_hist *ce_hist = NULL;
2980 	uint32_t cfg = 0;
2981 	uint32_t ce_id = 0;
2982 
2983 	if (!scn)
2984 		return -EINVAL;
2985 
2986 	ce_hist = &scn->hif_ce_desc_hist;
2987 
2988 	if (!size) {
2989 		pr_err("%s: Invalid input buffer.\n", __func__);
2990 		return -EINVAL;
2991 	}
2992 
2993 	if (sscanf(buf, "%d %d", &ce_id, &cfg) != 2) {
2994 		pr_err("%s: Invalid input: Enter CE Id<sp><1/0>.\n", __func__);
2995 		return -EINVAL;
2996 	}
2997 	if (ce_id >= CE_COUNT_MAX) {
2998 		qdf_print("Invalid value CE Id\n");
2999 		return -EINVAL;
3000 	}
3001 
3002 	if (cfg > 1) {
3003 		qdf_print("Invalid values: enter 0 or 1\n");
3004 		return -EINVAL;
3005 	}
3006 
3007 	if (!ce_hist->hist_ev[ce_id])
3008 		return -EINVAL;
3009 
3010 	qdf_mutex_acquire(&ce_dbg_datamem_lock[ce_id]);
3011 	if (cfg == 1) {
3012 		if (ce_hist->data_enable[ce_id] == 1) {
3013 			qdf_print("\nAlready Enabled\n");
3014 		} else {
3015 			if (alloc_mem_ce_debug_hist_data(scn, ce_id)
3016 							== QDF_STATUS_E_NOMEM){
3017 				ce_hist->data_enable[ce_id] = 0;
3018 				qdf_print("%s:Memory Alloc failed\n");
3019 				qdf_print("%s: Memory Alloc failed\n", __func__);
3020 				ce_hist->data_enable[ce_id] = 1;
3021 		}
3022 	} else if (cfg == 0) {
3023 		if (ce_hist->data_enable[ce_id] == 0) {
3024 			qdf_print("\nAlready Disabled\n");
3025 		} else {
3026 			ce_hist->data_enable[ce_id] = 0;
3027 				free_mem_ce_debug_hist_data(scn, ce_id);
3028 			free_mem_ce_debug_hist_data(scn, ce_id);
3029 	}
3030 	qdf_mutex_release(&ce_dbg_datamem_lock[ce_id]);
3031 
3032 	return size;
3033 }
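/*
 * Example input, based on the "%d %d" parse above: writing "5 1" enables
 * data-payload recording for CE 5 (allocating its history data buffers),
 * while "5 0" disables it again and frees those buffers.
 */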
3034 
3035 /*
3036  * hif_disp_ce_enable_desc_data_hist() -
3037  * API to display value of data_enable
3038  *
3039  * @scn: hif context
3040  * @buf: buffer into which the per-CE data_enable values are copied
3042  *
3043  * Return: total length copied
3044  */
3045 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf)
3046 {
3047 	ssize_t len = 0;
3048 	uint32_t ce_id = 0;
3049 	struct ce_desc_hist *ce_hist = NULL;
3050 
3051 	if (!scn)
3052 		return -EINVAL;
3053 
3054 	ce_hist = &scn->hif_ce_desc_hist;
3055 
3056 	for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) {
3057 		len += snprintf(buf + len, PAGE_SIZE - len, " CE%d: %d\n",
3058 				ce_id, ce_hist->data_enable[ce_id]);
3059 	}
3060 
3061 	return len;
3062 }
3063 #endif /* HIF_CE_DEBUG_DATA_BUF */
3064