xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_service.c (revision 6ecd284e5a94a1c96e26d571dd47419ac305990d)
1 /*
2  * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
3  *
4  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5  *
6  *
7  * Permission to use, copy, modify, and/or distribute this software for
8  * any purpose with or without fee is hereby granted, provided that the
9  * above copyright notice and this permission notice appear in all
10  * copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19  * PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*
23  * This file was originally distributed by Qualcomm Atheros, Inc.
24  * under proprietary terms before Copyright ownership was assigned
25  * to the Linux Foundation.
26  */
27 
28 #include "hif.h"
29 #include "hif_io32.h"
30 #include "ce_api.h"
31 #include "ce_main.h"
32 #include "ce_internal.h"
33 #include "ce_reg.h"
34 #include "qdf_lock.h"
35 #include "regtable.h"
36 #include "hif_main.h"
37 #include "hif_debug.h"
38 #include "hif_napi.h"
39 #include "qdf_module.h"
40 
41 #ifdef IPA_OFFLOAD
42 #ifdef QCA_WIFI_3_0
43 #define CE_IPA_RING_INIT(ce_desc)                       \
44 	do {                                            \
45 		ce_desc->gather = 0;                    \
46 		ce_desc->enable_11h = 0;                \
47 		ce_desc->meta_data_low = 0;             \
48 		ce_desc->packet_result_offset = 64;     \
49 		ce_desc->toeplitz_hash_enable = 0;      \
50 		ce_desc->addr_y_search_disable = 0;     \
51 		ce_desc->addr_x_search_disable = 0;     \
52 		ce_desc->misc_int_disable = 0;          \
53 		ce_desc->target_int_disable = 0;        \
54 		ce_desc->host_int_disable = 0;          \
55 		ce_desc->dest_byte_swap = 0;            \
56 		ce_desc->byte_swap = 0;                 \
57 		ce_desc->type = 2;                      \
58 		ce_desc->tx_classify = 1;               \
59 		ce_desc->buffer_addr_hi = 0;            \
60 		ce_desc->meta_data = 0;                 \
61 		ce_desc->nbytes = 128;                  \
62 	} while (0)
63 #else
64 #define CE_IPA_RING_INIT(ce_desc)                       \
65 	do {                                            \
66 		ce_desc->byte_swap = 0;                 \
67 		ce_desc->nbytes = 60;                   \
68 		ce_desc->gather = 0;                    \
69 	} while (0)
70 #endif /* QCA_WIFI_3_0 */
71 #endif /* IPA_OFFLOAD */
72 
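/*
 * Unless DATA_CE_SW_INDEX_NO_INLINE_UPDATE is set, the data CE's cached
 * sw_index is refreshed inline from the DDR-resident copy of the source
 * ring read index; otherwise the update is deferred to ce_update_tx_ring().
 */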
73 #ifndef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
74 #define DATA_CE_UPDATE_SWINDEX(x, scn, addr)				\
75 	do {                                            		\
76 		x = CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, addr); 	\
77 	} while (0)
78 #else
79 #define DATA_CE_UPDATE_SWINDEX(x, scn, addr)
80 #endif
81 
82 static int war1_allow_sleep;
83 /* io32 write workaround */
84 static int hif_ce_war1;
85 
86 /**
87  * hif_ce_war_disable() - disable ce war globally
88  */
89 void hif_ce_war_disable(void)
90 {
91 	hif_ce_war1 = 0;
92 }
93 
94 /**
95  * hif_ce_war_enable() - enable ce war globally
96  */
97 void hif_ce_war_enable(void)
98 {
99 	hif_ce_war1 = 1;
100 }
101 
102 /*
103  * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
104  * for defined here
105  */
106 #if HIF_CE_DEBUG_DATA_BUF
107 
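/*
 * Each dumped byte is printed as two hex digits plus a separator, so x
 * bytes need (x * 3) - 1 characters (no separator after the last byte).
 */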
108 #define CE_DEBUG_PRINT_BUF_SIZE(x) (((x) * 3) - 1)
109 #define CE_DEBUG_DATA_PER_ROW 16
110 
111 qdf_mutex_t ce_dbg_datamem_lock[CE_COUNT_MAX];
112 static const char *ce_event_type_to_str(enum hif_ce_event_type type);
113 
114 /**
115  * get_next_record_index() - get the next record index
116  * @table_index: atomic index variable to increment
117  * @array_size: array size of the circular buffer
118  *
119  * Increment the atomic index and reserve the value.
120  * Takes care of buffer wrap.
121  * Guaranteed to be thread safe as long as fewer than array_size contexts
122  * try to access the array.  If there are more than array_size contexts
123  * trying to access the array, full locking of the recording process would
124  * be needed to have sane logging.
125  */
126 static int get_next_record_index(qdf_atomic_t *table_index, int array_size)
127 {
128 	int record_index = qdf_atomic_inc_return(table_index);
129 
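	/*
	 * Pull the shared index back into range once it has stepped past
	 * the end of the array; the local copy is normalized by the loop
	 * below.
	 */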
130 	if (record_index == array_size)
131 		qdf_atomic_sub(array_size, table_index);
132 
133 	while (record_index >= array_size)
134 		record_index -= array_size;
135 	return record_index;
136 }
137 
138 #if HIF_CE_DEBUG_DATA_BUF
139 /**
140  * hif_ce_desc_data_record() - Record data pointed by the CE descriptor
141  * @event: structure detailing a ce event
142  * @len: length of the data
143  * Return: none
144  */
145 static void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len)
146 {
147 	uint8_t *data = NULL;
148 
149 	if (!event->data)
150 		return;
151 
152 	if (event->memory && len > 0)
153 		data = qdf_nbuf_data((qdf_nbuf_t)event->memory);
154 
155 	event->actual_data_len = 0;
156 	qdf_mem_zero(event->data, CE_DEBUG_MAX_DATA_BUF_SIZE);
157 
158 	if (data && len > 0) {
159 		qdf_mem_copy(event->data, data,
160 				((len < CE_DEBUG_MAX_DATA_BUF_SIZE) ?
161 				 len : CE_DEBUG_MAX_DATA_BUF_SIZE));
162 		event->actual_data_len = len;
163 	}
164 }
165 #endif
166 
167 /**
168  * hif_record_ce_desc_event() - record ce descriptor events
169  * @scn: hif_softc
170  * @ce_id: which ce is the event occurring on
171  * @type: what happened
172  * @descriptor: pointer to the descriptor posted/completed
173  * @memory: virtual address of buffer related to the descriptor
174  * @index: index that the descriptor was/will be at.
 * @len: length of the data in the buffer
175  */
176 void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
177 				enum hif_ce_event_type type,
178 				union ce_desc *descriptor,
179 				void *memory, int index,
180 				int len)
181 {
182 	int record_index;
183 	struct hif_ce_desc_event *event;
184 
185 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
186 	struct hif_ce_desc_event *hist_ev = NULL;
187 
188 	if (ce_id >= CE_COUNT_MAX)
189 		return;
190 
191 	hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id];
195 
196 	if (!ce_hist->enable[ce_id])
197 		return;
198 
199 	if (!hist_ev)
200 		return;
201 
202 	record_index = get_next_record_index(
203 			&ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX);
204 
205 	event = &hist_ev[record_index];
206 
207 	event->type = type;
208 	event->time = qdf_get_log_timestamp();
209 
210 	if (descriptor)
211 		qdf_mem_copy(&event->descriptor, descriptor,
212 			     sizeof(union ce_desc));
213 	else
214 		qdf_mem_zero(&event->descriptor, sizeof(union ce_desc));
215 
216 	event->memory = memory;
217 	event->index = index;
218 
219 #if HIF_CE_DEBUG_DATA_BUF
220 	if (ce_hist->data_enable[ce_id])
221 		hif_ce_desc_data_record(event, len);
222 #endif
223 }
224 qdf_export_symbol(hif_record_ce_desc_event);
225 
226 /**
227  * ce_init_ce_desc_event_log() - initialize the ce event log
228  * @ce_id: copy engine id for which we are initializing the log
229  * @size: size of array to dedicate
230  *
231  * Currently the passed size is ignored in favor of a precompiled value.
232  */
233 void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size)
234 {
235 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
236 	qdf_atomic_init(&ce_hist->history_index[ce_id]);
237 	qdf_mutex_create(&ce_dbg_datamem_lock[ce_id]);
238 }
239 
240 /**
241  * ce_deinit_ce_desc_event_log() - deinitialize the ce event log
242  * @ce_id: copy engine id for which we are deinitializing the log
243  *
244  */
245 inline void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
246 {
247 	qdf_mutex_destroy(&ce_dbg_datamem_lock[ce_id]);
248 }
249 
250 #else /* Note: For MCL, (HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
251 void hif_record_ce_desc_event(struct hif_softc *scn,
252 		int ce_id, enum hif_ce_event_type type,
253 		union ce_desc *descriptor, void *memory,
254 		int index, int len)
255 {
256 }
257 qdf_export_symbol(hif_record_ce_desc_event);
258 
259 inline void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id,
260 					int size)
261 {
262 }
263 
264 void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
265 {
266 }
267 #endif /* Note: for MCL, HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */
268 
269 #ifdef NAPI_YIELD_BUDGET_BASED
270 bool hif_ce_service_should_yield(struct hif_softc *scn,
271 				 struct CE_state *ce_state)
272 {
273 	bool yield =  hif_max_num_receives_reached(scn, ce_state->receive_count);
274 	return yield;
275 }
276 #else
277 /**
278  * hif_ce_service_should_yield() - return true if the service is hogging the cpu
279  * @scn: hif context
280  * @ce_state: context of the copy engine being serviced
281  *
282  * Return: true if the service should yield
283  */
284 bool hif_ce_service_should_yield(struct hif_softc *scn,
285 				 struct CE_state *ce_state)
286 {
287 	bool yield, time_limit_reached, rxpkt_thresh_reached = 0;
288 
289 	time_limit_reached =
290 		sched_clock() > ce_state->ce_service_yield_time ? 1 : 0;
291 
292 	if (!time_limit_reached)
293 		rxpkt_thresh_reached = hif_max_num_receives_reached
294 					(scn, ce_state->receive_count);
295 
296 	yield =  time_limit_reached || rxpkt_thresh_reached;
297 
298 	if (yield && ce_state->htt_rx_data)
299 		hif_napi_update_yield_stats(ce_state,
300 					    time_limit_reached,
301 					    rxpkt_thresh_reached);
302 	return yield;
303 }
304 #endif
305 /*
306  * Support for Copy Engine hardware, which is mainly used for
307  * communication between Host and Target over a PCIe interconnect.
308  */
309 
310 /*
311  * A single CopyEngine (CE) comprises two "rings":
312  *   a source ring
313  *   a destination ring
314  *
315  * Each ring consists of a number of descriptors which specify
316  * an address, length, and meta-data.
317  *
318  * Typically, one side of the PCIe interconnect (Host or Target)
319  * controls one ring and the other side controls the other ring.
320  * The source side chooses when to initiate a transfer and it
321  * chooses what to send (buffer address, length). The destination
322  * side keeps a supply of "anonymous receive buffers" available and
323  * it handles incoming data as it arrives (when the destination
324  * receives an interrupt).
325  *
326  * The sender may send a simple buffer (address/length) or it may
327  * send a small list of buffers.  When a small list is sent, hardware
328  * "gathers" these and they end up in a single destination buffer
329  * with a single interrupt.
330  *
331  * There are several "contexts" managed by this layer -- more, it
332  * may seem -- than should be needed. These are provided mainly for
333  * maximum flexibility and especially to facilitate a simpler HIF
334  * implementation. There are per-CopyEngine recv, send, and watermark
335  * contexts. These are supplied by the caller when a recv, send,
336  * or watermark handler is established and they are echoed back to
337  * the caller when the respective callbacks are invoked. There is
338  * also a per-transfer context supplied by the caller when a buffer
339  * (or sendlist) is sent and when a buffer is enqueued for recv.
340  * These per-transfer contexts are echoed back to the caller when
341  * the buffer is sent/received.
342  * Target TX hash result: toeplitz_hash_result
343  */
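
/*
 * Illustrative host-side send flow (not compiled) using the API in this
 * file. Names such as ce_hdl, frag_paddr, frag_len and transfer_id are
 * placeholders, error handling is omitted, and the CE handle is assumed
 * to come from the CE init path (e.g. ce_init() in ce_main.c):
 *
 *	struct ce_sendlist sendlist;
 *
 *	ce_sendlist_init(&sendlist);
 *	ce_sendlist_buf_add(&sendlist, frag_paddr, frag_len, 0, 0);
 *	ce_sendlist_send(ce_hdl, per_transfer_ctxt, &sendlist, transfer_id);
 *
 * On send completion the descriptor is reaped with:
 *
 *	ce_completed_send_next(ce_hdl, &ce_ctxt, &xfer_ctxt, &paddr,
 *			       &nbytes, &id, &sw_idx, &hw_idx, &hash);
 */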
344 
345 /*
346  * Guts of ce_send, used by both ce_send and ce_sendlist_send.
347  * The caller takes responsibility for any needed locking.
348  */
349 
350 static
351 void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
352 				   u32 ctrl_addr, unsigned int write_index)
353 {
354 	if (hif_ce_war1) {
355 		void __iomem *indicator_addr;
356 
357 		indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;
358 
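		/*
		 * hif_ce_war1: guard the write-index update with an
		 * indicator handshake through the destination watermark
		 * register; for the CDC data CE, when sleep is not
		 * allowed, the index is instead conveyed in a single
		 * magic write.
		 */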
359 		if (!war1_allow_sleep
360 		    && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
361 			hif_write32_mb(indicator_addr,
362 				      (CDC_WAR_MAGIC_STR | write_index));
363 		} else {
364 			unsigned long irq_flags;
365 
366 			local_irq_save(irq_flags);
367 			hif_write32_mb(indicator_addr, 1);
368 
369 			/*
370 			 * PCIE write waits for ACK in IPQ8K, there is no
371 			 * need to read back value.
372 			 */
373 			(void)hif_read32_mb(indicator_addr);
374 			(void)hif_read32_mb(indicator_addr); /* conservative */
375 
376 			CE_SRC_RING_WRITE_IDX_SET(scn,
377 						  ctrl_addr, write_index);
378 
379 			hif_write32_mb(indicator_addr, 0);
380 			local_irq_restore(irq_flags);
381 		}
382 	} else {
383 		CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
384 	}
385 }
386 
387 #ifdef HIF_CONFIG_SLUB_DEBUG_ON
388 /**
389  * ce_validate_nbytes() - validate nbytes for slub builds on tx descriptors
390  * @nbytes: nbytes value being written into a send descriptor
391  * @ce_state: context of the copy engine
392  *
393  * nbytes should be non-zero and less than max configured for the copy engine
394  *
395  * Return: none
396  */
397 static void ce_validate_nbytes(uint32_t nbytes, struct CE_state *ce_state)
398 {
399 	if (nbytes <= 0 || nbytes > ce_state->src_sz_max)
400 		QDF_BUG(0);
401 }
402 #else
403 static void ce_validate_nbytes(uint32_t nbytes, struct CE_state *ce_state)
404 {
405 }
406 #endif
407 
408 static int
409 ce_send_nolock_legacy(struct CE_handle *copyeng,
410 			   void *per_transfer_context,
411 			   qdf_dma_addr_t buffer,
412 			   uint32_t nbytes,
413 			   uint32_t transfer_id,
414 			   uint32_t flags,
415 			   uint32_t user_flags)
416 {
417 	int status;
418 	struct CE_state *CE_state = (struct CE_state *)copyeng;
419 	struct CE_ring_state *src_ring = CE_state->src_ring;
420 	uint32_t ctrl_addr = CE_state->ctrl_addr;
421 	unsigned int nentries_mask = src_ring->nentries_mask;
422 	unsigned int sw_index = src_ring->sw_index;
423 	unsigned int write_index = src_ring->write_index;
424 	uint64_t dma_addr = buffer;
425 	struct hif_softc *scn = CE_state->scn;
426 
427 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
428 		return QDF_STATUS_E_FAILURE;
429 	if (unlikely(CE_RING_DELTA(nentries_mask,
430 				write_index, sw_index - 1) <= 0)) {
431 		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
432 		Q_TARGET_ACCESS_END(scn);
433 		return QDF_STATUS_E_FAILURE;
434 	}
435 	{
436 		enum hif_ce_event_type event_type;
437 		struct CE_src_desc *src_ring_base =
438 			(struct CE_src_desc *)src_ring->base_addr_owner_space;
439 		struct CE_src_desc *shadow_base =
440 			(struct CE_src_desc *)src_ring->shadow_base;
441 		struct CE_src_desc *src_desc =
442 			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
443 		struct CE_src_desc *shadow_src_desc =
444 			CE_SRC_RING_TO_DESC(shadow_base, write_index);
445 
446 		/* Update low 32 bits source descriptor address */
447 		shadow_src_desc->buffer_addr =
448 			(uint32_t)(dma_addr & 0xFFFFFFFF);
449 #ifdef QCA_WIFI_3_0
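		/*
		 * Program the upper 5 address bits and fold them, together
		 * with the caller-supplied flags, into word 1 of the
		 * descriptor.
		 */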
450 		shadow_src_desc->buffer_addr_hi =
451 			(uint32_t)((dma_addr >> 32) & 0x1F);
452 		user_flags |= shadow_src_desc->buffer_addr_hi;
453 		memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
454 			   sizeof(uint32_t));
455 #endif
456 		shadow_src_desc->target_int_disable = 0;
457 		shadow_src_desc->host_int_disable = 0;
458 
459 		shadow_src_desc->meta_data = transfer_id;
460 
461 		/*
462 		 * Set the swap bit if:
463 		 * typical sends on this CE are swapped (host is big-endian)
464 		 * and this send doesn't disable the swapping
465 		 * (data is not bytestream)
466 		 */
467 		shadow_src_desc->byte_swap =
468 			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
469 			 != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
470 		shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
471 		shadow_src_desc->nbytes = nbytes;
472 		ce_validate_nbytes(nbytes, CE_state);
473 
474 		*src_desc = *shadow_src_desc;
475 
476 		src_ring->per_transfer_context[write_index] =
477 			per_transfer_context;
478 
479 		/* Update Source Ring Write Index */
480 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
481 
482 		/*
		 * WORKAROUND: the write-index register is only updated for
		 * a non-gather send on a running CE; gather fragments and
		 * sends on a non-running CE (marked CE_PENDING) skip the
		 * register write.
		 */
483 		if (shadow_src_desc->gather) {
484 			event_type = HIF_TX_GATHER_DESC_POST;
485 		} else if (qdf_unlikely(CE_state->state != CE_RUNNING)) {
486 			event_type = HIF_TX_DESC_SOFTWARE_POST;
487 			CE_state->state = CE_PENDING;
488 		} else {
489 			event_type = HIF_TX_DESC_POST;
490 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
491 						      write_index);
492 		}
493 
494 		/* src_ring->write_index hasn't been updated even though
495 		 * the register has already been written to.
496 		 */
497 		hif_record_ce_desc_event(scn, CE_state->id, event_type,
498 			(union ce_desc *) shadow_src_desc, per_transfer_context,
499 			src_ring->write_index, nbytes);
500 
501 		src_ring->write_index = write_index;
502 		status = QDF_STATUS_SUCCESS;
503 	}
504 	Q_TARGET_ACCESS_END(scn);
505 	return status;
506 }
507 
508 int
509 ce_send(struct CE_handle *copyeng,
510 		void *per_transfer_context,
511 		qdf_dma_addr_t buffer,
512 		uint32_t nbytes,
513 		uint32_t transfer_id,
514 		uint32_t flags,
515 		uint32_t user_flag)
516 {
517 	struct CE_state *CE_state = (struct CE_state *)copyeng;
518 	int status;
519 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
520 
521 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
522 	status = hif_state->ce_services->ce_send_nolock(copyeng,
523 			per_transfer_context, buffer, nbytes,
524 			transfer_id, flags, user_flag);
525 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
526 
527 	return status;
528 }
529 
530 unsigned int ce_sendlist_sizeof(void)
531 {
532 	return sizeof(struct ce_sendlist);
533 }
534 
535 void ce_sendlist_init(struct ce_sendlist *sendlist)
536 {
537 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
538 
539 	sl->num_items = 0;
540 }
541 
542 int
543 ce_sendlist_buf_add(struct ce_sendlist *sendlist,
544 					qdf_dma_addr_t buffer,
545 					uint32_t nbytes,
546 					uint32_t flags,
547 					uint32_t user_flags)
548 {
549 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
550 	unsigned int num_items = sl->num_items;
551 	struct ce_sendlist_item *item;
552 
553 	if (num_items >= CE_SENDLIST_ITEMS_MAX) {
554 		QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
555 		return QDF_STATUS_E_RESOURCES;
556 	}
557 
558 	item = &sl->item[num_items];
559 	item->send_type = CE_SIMPLE_BUFFER_TYPE;
560 	item->data = buffer;
561 	item->u.nbytes = nbytes;
562 	item->flags = flags;
563 	item->user_flags = user_flags;
564 	sl->num_items = num_items + 1;
565 	return QDF_STATUS_SUCCESS;
566 }
567 
568 int
569 ce_sendlist_send(struct CE_handle *copyeng,
570 		 void *per_transfer_context,
571 		 struct ce_sendlist *sendlist, unsigned int transfer_id)
572 {
573 	struct CE_state *CE_state = (struct CE_state *)copyeng;
574 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
575 
576 	return hif_state->ce_services->ce_sendlist_send(copyeng,
577 			per_transfer_context, sendlist, transfer_id);
578 }
579 
580 static int
581 ce_sendlist_send_legacy(struct CE_handle *copyeng,
582 		 void *per_transfer_context,
583 		 struct ce_sendlist *sendlist, unsigned int transfer_id)
584 {
585 	int status = -ENOMEM;
586 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
587 	struct CE_state *CE_state = (struct CE_state *)copyeng;
588 	struct CE_ring_state *src_ring = CE_state->src_ring;
589 	unsigned int nentries_mask = src_ring->nentries_mask;
590 	unsigned int num_items = sl->num_items;
591 	unsigned int sw_index;
592 	unsigned int write_index;
593 	struct hif_softc *scn = CE_state->scn;
594 
595 	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));
596 
597 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
598 
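	/*
	 * For fastpath HTT tx CEs, refresh the cached sw_index from its
	 * DDR-resident copy before checking for ring space.
	 */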
599 	if (CE_state->scn->fastpath_mode_on && CE_state->htt_tx_data &&
600 	    Q_TARGET_ACCESS_BEGIN(scn) == 0) {
601 		src_ring->sw_index = CE_SRC_RING_READ_IDX_GET_FROM_DDR(
602 					       scn, CE_state->ctrl_addr);
603 		Q_TARGET_ACCESS_END(scn);
604 	}
605 
606 	sw_index = src_ring->sw_index;
607 	write_index = src_ring->write_index;
608 
609 	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >=
610 	    num_items) {
611 		struct ce_sendlist_item *item;
612 		int i;
613 
614 		/* handle all but the last item uniformly */
615 		for (i = 0; i < num_items - 1; i++) {
616 			item = &sl->item[i];
617 			/* TBDXXX: Support extensible sendlist_types? */
618 			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
619 			status = ce_send_nolock_legacy(copyeng,
620 				CE_SENDLIST_ITEM_CTXT,
621 				(qdf_dma_addr_t) item->data,
622 				item->u.nbytes, transfer_id,
623 				item->flags | CE_SEND_FLAG_GATHER,
624 				item->user_flags);
625 			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
626 		}
627 		/* provide valid context pointer for final item */
628 		item = &sl->item[i];
629 		/* TBDXXX: Support extensible sendlist_types? */
630 		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
631 		status = ce_send_nolock_legacy(copyeng, per_transfer_context,
632 					(qdf_dma_addr_t) item->data,
633 					item->u.nbytes,
634 					transfer_id, item->flags,
635 					item->user_flags);
636 		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
637 		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
638 					QDF_NBUF_TX_PKT_CE);
639 		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
640 			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
641 			QDF_TRACE_DEFAULT_PDEV_ID,
642 			(uint8_t *)&(((qdf_nbuf_t)per_transfer_context)->data),
643 			sizeof(((qdf_nbuf_t)per_transfer_context)->data),
644 			QDF_TX));
645 	} else {
646 		/*
647 		 * Probably not worth the additional complexity to support
648 		 * partial sends with continuation or notification.  We expect
649 		 * to use large rings and small sendlists. If we can't handle
650 		 * the entire request at once, punt it back to the caller.
651 		 */
652 	}
653 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
654 
655 	return status;
656 }
657 
658 #ifdef WLAN_FEATURE_FASTPATH
659 #ifdef QCA_WIFI_3_0
660 static inline void
661 ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
662 		      uint64_t dma_addr,
663 		      uint32_t user_flags)
664 {
665 	shadow_src_desc->buffer_addr_hi =
666 			(uint32_t)((dma_addr >> 32) & 0x1F);
667 	user_flags |= shadow_src_desc->buffer_addr_hi;
668 	memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
669 			sizeof(uint32_t));
670 }
671 #else
672 static inline void
673 ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
674 		      uint64_t dma_addr,
675 		      uint32_t user_flags)
676 {
677 }
678 #endif
679 
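/*
 * Each fastpath tx consumes two source-ring slots: one descriptor for the
 * HTC/HTT header fragment and one for the payload fragment (see
 * ce_send_fast() below).
 */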
680 #define SLOTS_PER_DATAPATH_TX 2
681 
682 /**
683  * ce_send_fast() - CE layer Tx buffer posting function
684  * @copyeng: copy engine handle
685  * @msdu: msdu to be sent
686  * @transfer_id: transfer_id
687  * @download_len: packet download length
688  *
689  * Assumption: Called with an array of MSDUs
690  * Function:
691  * For each msdu in the array
692  * 1. Check no. of available entries
693  * 2. Create src ring entries (allocated in consistent memory)
694  * 3. Write index to h/w
695  *
696  * Return: No. of packets that could be sent
697  */
698 int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
699 		 unsigned int transfer_id, uint32_t download_len)
700 {
701 	struct CE_state *ce_state = (struct CE_state *)copyeng;
702 	struct hif_softc *scn = ce_state->scn;
703 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
704 	struct CE_ring_state *src_ring = ce_state->src_ring;
705 	u_int32_t ctrl_addr = ce_state->ctrl_addr;
706 	unsigned int nentries_mask = src_ring->nentries_mask;
707 	unsigned int write_index;
708 	unsigned int sw_index;
709 	unsigned int frag_len;
710 	uint64_t dma_addr;
711 	uint32_t user_flags;
712 	enum hif_ce_event_type type = FAST_TX_SOFTWARE_INDEX_UPDATE;
713 	bool ok_to_send = true;
714 
715 	/*
716 	 * Create a log assuming the call will go through; if it does not,
717 	 * an error trace is added as well.
718 	 * Please add the same failure log for any additional error paths.
719 	 */
720 	DPTRACE(qdf_dp_trace(msdu,
721 			QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
722 			QDF_TRACE_DEFAULT_PDEV_ID,
723 			qdf_nbuf_data_addr(msdu),
724 			sizeof(qdf_nbuf_data(msdu)), QDF_TX));
725 
726 	qdf_spin_lock_bh(&ce_state->ce_index_lock);
727 
728 	/*
729 	 * Request runtime PM resume if it has already suspended and make
730 	 * sure there is no PCIe link access.
731 	 */
732 	if (hif_pm_runtime_get(hif_hdl) != 0)
733 		ok_to_send = false;
734 
735 	if (ok_to_send) {
736 		Q_TARGET_ACCESS_BEGIN(scn);
737 		DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
738 	}
739 
740 	write_index = src_ring->write_index;
741 	sw_index = src_ring->sw_index;
742 	hif_record_ce_desc_event(scn, ce_state->id,
743 				FAST_TX_SOFTWARE_INDEX_UPDATE,
744 				NULL, NULL, sw_index, 0);
745 
746 	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1)
747 			 < SLOTS_PER_DATAPATH_TX)) {
748 		HIF_ERROR("Source ring full, required %d, available %d",
749 		      SLOTS_PER_DATAPATH_TX,
750 		      CE_RING_DELTA(nentries_mask, write_index, sw_index - 1));
751 		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
752 		if (ok_to_send)
753 			Q_TARGET_ACCESS_END(scn);
754 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
755 
756 		DPTRACE(qdf_dp_trace(NULL,
757 				QDF_DP_TRACE_CE_FAST_PACKET_ERR_RECORD,
758 				QDF_TRACE_DEFAULT_PDEV_ID,
759 				NULL, 0, QDF_TX));
760 
761 		return 0;
762 	}
763 
764 	{
765 		struct CE_src_desc *src_ring_base =
766 			(struct CE_src_desc *)src_ring->base_addr_owner_space;
767 		struct CE_src_desc *shadow_base =
768 			(struct CE_src_desc *)src_ring->shadow_base;
769 		struct CE_src_desc *src_desc =
770 			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
771 		struct CE_src_desc *shadow_src_desc =
772 			CE_SRC_RING_TO_DESC(shadow_base, write_index);
773 
774 		hif_pm_runtime_get_noresume(hif_hdl);
775 
776 		/*
777 		 * First fill out the ring descriptor for the HTC HTT frame
778 		 * header. These are uncached writes. Should we use a local
779 		 * structure instead?
780 		 */
781 		/* HTT/HTC header can be passed as an argument */
782 		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 0);
783 		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
784 							  0xFFFFFFFF);
785 		user_flags = qdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
786 		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
787 		shadow_src_desc->meta_data = transfer_id;
788 		shadow_src_desc->nbytes = qdf_nbuf_get_frag_len(msdu, 0);
789 		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
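		/*
		 * The header fragment counts against the download length;
		 * the remainder is downloaded via the payload descriptor
		 * below.
		 */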
790 		download_len -= shadow_src_desc->nbytes;
791 		/*
792 		 * HTC HTT header is a word stream, so byte swap if CE byte
793 		 * swap enabled
794 		 */
795 		shadow_src_desc->byte_swap = ((ce_state->attr_flags &
796 					CE_ATTR_BYTE_SWAP_DATA) != 0);
797 		/* No write index update needed yet for this gather fragment */
798 		shadow_src_desc->gather = 1;
799 		*src_desc = *shadow_src_desc;
800 		/* By default we could initialize the transfer context to this
801 		 * value
802 		 */
803 		src_ring->per_transfer_context[write_index] =
804 			CE_SENDLIST_ITEM_CTXT;
805 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
806 
807 		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
808 		shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
809 		/*
810 		 * Now fill out the ring descriptor for the actual data
811 		 * packet
812 		 */
813 		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 1);
814 		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
815 							  0xFFFFFFFF);
816 		/*
817 		 * Clear packet offset for all but the first CE desc.
818 		 */
819 		user_flags &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
820 		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
821 		shadow_src_desc->meta_data = transfer_id;
822 
823 		/* get actual packet length */
824 		frag_len = qdf_nbuf_get_frag_len(msdu, 1);
825 
826 		/* download remaining bytes of payload */
827 		shadow_src_desc->nbytes =  download_len;
828 		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
829 		if (shadow_src_desc->nbytes > frag_len)
830 			shadow_src_desc->nbytes = frag_len;
831 
832 		/*  Data packet is a byte stream, so disable byte swap */
833 		shadow_src_desc->byte_swap = 0;
834 		/* For the last one, gather is not set */
835 		shadow_src_desc->gather    = 0;
836 		*src_desc = *shadow_src_desc;
837 		src_ring->per_transfer_context[write_index] = msdu;
838 
839 		hif_record_ce_desc_event(scn, ce_state->id, type,
840 					(union ce_desc *)src_desc,
841 				src_ring->per_transfer_context[write_index],
842 					write_index, shadow_src_desc->nbytes);
843 
844 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
845 
846 		DPTRACE(qdf_dp_trace(msdu,
847 			QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
848 			QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(msdu),
849 			sizeof(qdf_nbuf_data(msdu)), QDF_TX));
850 	}
851 
852 	src_ring->write_index = write_index;
853 
854 	if (ok_to_send) {
855 		if (qdf_likely(ce_state->state == CE_RUNNING)) {
856 			type = FAST_TX_WRITE_INDEX_UPDATE;
857 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
858 				write_index);
859 			Q_TARGET_ACCESS_END(scn);
860 		} else {
861 			ce_state->state = CE_PENDING;
		}
862 		hif_pm_runtime_put(hif_hdl);
863 	}
864 
865 	qdf_spin_unlock_bh(&ce_state->ce_index_lock);
866 
867 	/* sent 1 packet */
868 	return 1;
869 }
870 
871 /**
872  * ce_is_fastpath_enabled() - returns true if fastpath mode is enabled
873  * @scn: Handle to HIF context
874  *
875  * Return: true if fastpath is enabled else false.
876  */
877 static bool ce_is_fastpath_enabled(struct hif_softc *scn)
878 {
879 	return scn->fastpath_mode_on;
880 }
881 
882 /**
883  * ce_is_fastpath_handler_registered() - return true for datapath CEs and if
884  * fastpath is enabled.
885  * @ce_state: handle to copy engine
886  *
887  * Return: true if fastpath handler is registered for datapath CE.
888  */
889 static bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
890 {
891 	if (ce_state->fastpath_handler)
892 		return true;
893 	else
894 		return false;
895 }
896 
897 
898 #else
899 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
900 {
901 	return false;
902 }
903 
904 static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
905 {
906 	return false;
907 }
908 #endif /* WLAN_FEATURE_FASTPATH */
909 
910 #ifndef AH_NEED_TX_DATA_SWAP
911 #define AH_NEED_TX_DATA_SWAP 0
912 #endif
913 
914 /**
915  * ce_batch_send() - sends a bunch of msdus at once
916  * @ce_tx_hdl : pointer to CE handle
917  * @msdu : list of msdus to be sent
918  * @transfer_id : transfer id
919  * @len : Downloaded length
920  * @sendhead : sendhead
921  *
922  * Assumption: Called with an array of MSDUs
923  * Function:
924  * For each msdu in the array
925  * 1. Send each msdu
926  * 2. Increment write index accordingly.
927  *
928  * Return: list of msdus not sent
929  */
930 qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,  qdf_nbuf_t msdu,
931 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
932 {
933 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
934 	struct hif_softc *scn = ce_state->scn;
935 	struct CE_ring_state *src_ring = ce_state->src_ring;
936 	u_int32_t ctrl_addr = ce_state->ctrl_addr;
937 	/*  A_target_id_t targid = TARGID(scn);*/
938 
939 	uint32_t nentries_mask = src_ring->nentries_mask;
940 	uint32_t sw_index, write_index;
941 
942 	struct CE_src_desc *src_desc_base =
943 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
944 	uint32_t *src_desc;
945 
946 	struct CE_src_desc lsrc_desc = {0};
947 	int deltacount = 0;
948 	qdf_nbuf_t freelist = NULL, hfreelist = NULL, tempnext;
949 
950 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
951 	sw_index = src_ring->sw_index;
952 	write_index = src_ring->write_index;
953 
954 	deltacount = CE_RING_DELTA(nentries_mask, write_index, sw_index-1);
955 
956 	while (msdu) {
957 		tempnext = qdf_nbuf_next(msdu);
958 
959 		if (deltacount < 2) {
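			/*
			 * Out of descriptors: when sendhead is set the
			 * remaining list is simply handed back; otherwise
			 * kick the hardware with what has been queued so
			 * far, refresh the cached indices and move this
			 * msdu onto the not-sent list returned to the
			 * caller.
			 */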
960 			if (sendhead)
961 				return msdu;
962 			HIF_ERROR("%s: Out of descriptors", __func__);
963 			src_ring->write_index = write_index;
964 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
965 					write_index);
966 
967 			sw_index = src_ring->sw_index;
968 			write_index = src_ring->write_index;
969 
970 			deltacount = CE_RING_DELTA(nentries_mask, write_index,
971 					sw_index-1);
972 			if (freelist == NULL) {
973 				freelist = msdu;
974 				hfreelist = msdu;
975 			} else {
976 				qdf_nbuf_set_next(freelist, msdu);
977 				freelist = msdu;
978 			}
979 			qdf_nbuf_set_next(msdu, NULL);
980 			msdu = tempnext;
981 			continue;
982 		}
983 
984 		src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base,
985 				write_index);
986 
987 		src_desc[0]   = qdf_nbuf_get_frag_paddr(msdu, 0);
988 
989 		lsrc_desc.meta_data = transfer_id;
990 		if (len  > msdu->len)
991 			len =  msdu->len;
992 		lsrc_desc.nbytes = len;
993 		/*  Data packet is a byte stream, so disable byte swap */
994 		lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
995 		lsrc_desc.gather    = 0; /*For the last one, gather is not set*/
996 
997 		src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
998 
999 
1000 		src_ring->per_transfer_context[write_index] = msdu;
1001 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
1002 
1003 		if (sendhead)
1004 			break;
1005 		qdf_nbuf_set_next(msdu, NULL);
1006 		msdu = tempnext;
1007 
1008 	}
1009 
1010 
1011 	src_ring->write_index = write_index;
1012 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
1013 
1014 	return hfreelist;
1015 }
1016 
1017 /**
1018  * ce_update_tx_ring() - Advance sw index.
1019  * @ce_tx_hdl : pointer to CE handle
1020  * @num_htt_cmpls : htt completions received.
1021  *
1022  * Function:
1023  * Increment the value of sw index of src ring
1024  * according to number of htt completions
1025  * received.
1026  *
1027  * Return: void
1028  */
1029 #ifdef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
1030 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
1031 {
1032 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
1033 	struct CE_ring_state *src_ring = ce_state->src_ring;
1034 	uint32_t nentries_mask = src_ring->nentries_mask;
1035 	/*
1036 	 * Advance the s/w index:
1037 	 * This effectively simulates completing the CE ring descriptors
1038 	 */
1039 	src_ring->sw_index =
1040 		CE_RING_IDX_ADD(nentries_mask, src_ring->sw_index,
1041 				num_htt_cmpls);
1042 }
1043 #else
1044 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
1045 {}
1046 #endif
1047 
1048 /**
1049  * ce_send_single() - sends a single msdu
1050  * @ce_tx_hdl : pointer to CE handle
1051  * @msdu : msdu to be sent
1052  * @transfer_id : transfer id
1053  * @len : Downloaded length
1054  *
1055  * Function:
1056  * 1. Send one msdu
1057  * 2. Increment write index of src ring accordingly.
1058  *
1059  * Return: int: CE sent status
1060  */
1061 int ce_send_single(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
1062 		uint32_t transfer_id, u_int32_t len)
1063 {
1064 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
1065 	struct hif_softc *scn = ce_state->scn;
1066 	struct CE_ring_state *src_ring = ce_state->src_ring;
1067 	uint32_t ctrl_addr = ce_state->ctrl_addr;
1068 	/*A_target_id_t targid = TARGID(scn);*/
1069 
1070 	uint32_t nentries_mask = src_ring->nentries_mask;
1071 	uint32_t sw_index, write_index;
1072 
1073 	struct CE_src_desc *src_desc_base =
1074 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
1075 	uint32_t *src_desc;
1076 
1077 	struct CE_src_desc lsrc_desc = {0};
1078 	enum hif_ce_event_type event_type;
1079 
1080 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
1081 	sw_index = src_ring->sw_index;
1082 	write_index = src_ring->write_index;
1083 
1084 	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index,
1085 					sw_index-1) < 1)) {
1086 		/* ol_tx_stats_inc_ring_error(sc->scn->pdev_txrx_handle, 1); */
1087 		HIF_ERROR("%s: ce send fail %d %d %d", __func__, nentries_mask,
1088 			  write_index, sw_index);
1089 		return 1;
1090 	}
1091 
1092 	src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base, write_index);
1093 
1094 	src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0);
1095 
1096 	lsrc_desc.meta_data = transfer_id;
1097 	lsrc_desc.nbytes = len;
1098 	/*  Data packet is a byte stream, so disable byte swap */
1099 	lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
1100 	lsrc_desc.gather    = 0; /* For the last one, gather is not set */
1101 
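	/*
	 * Commit the second 32-bit word of the descriptor (nbytes, byte_swap,
	 * gather and meta_data as set up in lsrc_desc above); word 0 was
	 * already written with the buffer address.
	 */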
1102 	src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
1103 
1104 
1105 	src_ring->per_transfer_context[write_index] = msdu;
1106 
1107 	if (((struct CE_src_desc *)src_desc)->gather)
1108 		event_type = HIF_TX_GATHER_DESC_POST;
1109 	else if (qdf_unlikely(ce_state->state != CE_RUNNING))
1110 		event_type = HIF_TX_DESC_SOFTWARE_POST;
1111 	else
1112 		event_type = HIF_TX_DESC_POST;
1113 
1114 	hif_record_ce_desc_event(scn, ce_state->id, event_type,
1115 				(union ce_desc *)src_desc, msdu,
1116 				write_index, len);
1117 
1118 	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
1119 
1120 	src_ring->write_index = write_index;
1121 
1122 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
1123 
1124 	return QDF_STATUS_SUCCESS;
1125 }
1126 
1127 /**
1128  * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine
1129  * @copyeng: copy engine handle
1130  * @per_recv_context: virtual address of the nbuf
1131  * @buffer: physical address of the nbuf
1132  *
1133  * Return: 0 if the buffer is enqueued
1134  */
1135 int
1136 ce_recv_buf_enqueue(struct CE_handle *copyeng,
1137 		    void *per_recv_context, qdf_dma_addr_t buffer)
1138 {
1139 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1140 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
1141 
1142 	return hif_state->ce_services->ce_recv_buf_enqueue(copyeng,
1143 			per_recv_context, buffer);
1144 }
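
/*
 * Illustrative receive-side flow (not compiled): receive buffers are
 * posted up front and then reaped as the target fills them. The names
 * ce_hdl, nbuf and nbuf_paddr are placeholders, and nbuf allocation and
 * DMA mapping are elided:
 *
 *	ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, nbuf_paddr);
 *	...
 *	while (ce_completed_recv_next(ce_hdl, &ce_ctxt, &xfer_ctxt, &paddr,
 *				      &nbytes, &id, &flags) ==
 *	       QDF_STATUS_SUCCESS)
 *		process the completed nbuf and post a replacement buffer;
 */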
1145 
1146 /**
1147  * ce_recv_buf_enqueue_legacy() - enqueue a recv buffer into a copy engine
1148  * @copyeng: copy engine handle
1149  * @per_recv_context: virtual address of the nbuf
1150  * @buffer: physical address of the nbuf
1151  *
1152  * Return: 0 if the buffer is enqueued
1153  */
1154 static int
1155 ce_recv_buf_enqueue_legacy(struct CE_handle *copyeng,
1156 		    void *per_recv_context, qdf_dma_addr_t buffer)
1157 {
1158 	int status;
1159 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1160 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
1161 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1162 	unsigned int nentries_mask = dest_ring->nentries_mask;
1163 	unsigned int write_index;
1164 	unsigned int sw_index;
1165 	uint64_t dma_addr = buffer;
1166 	struct hif_softc *scn = CE_state->scn;
1167 
1168 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
1169 	write_index = dest_ring->write_index;
1170 	sw_index = dest_ring->sw_index;
1171 
1172 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
1173 		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
1174 		return -EIO;
1175 	}
1176 
1177 	if ((CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) ||
1178 	    (ce_is_fastpath_enabled(scn) && CE_state->htt_rx_data)) {
1179 		struct CE_dest_desc *dest_ring_base =
1180 			(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
1181 		struct CE_dest_desc *dest_desc =
1182 			CE_DEST_RING_TO_DESC(dest_ring_base, write_index);
1183 
1184 		/* Update low 32 bit destination descriptor */
1185 		dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
1186 #ifdef QCA_WIFI_3_0
1187 		dest_desc->buffer_addr_hi =
1188 			(uint32_t)((dma_addr >> 32) & 0x1F);
1189 #endif
1190 		dest_desc->nbytes = 0;
1191 
1192 		dest_ring->per_transfer_context[write_index] =
1193 			per_recv_context;
1194 
1195 		hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_POST,
1196 				(union ce_desc *) dest_desc, per_recv_context,
1197 				write_index, 0);
1198 
1199 		/* Update Destination Ring Write Index */
1200 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
1201 		if (write_index != sw_index) {
1202 			CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
1203 			dest_ring->write_index = write_index;
1204 		}
1205 		status = QDF_STATUS_SUCCESS;
1206 	} else {
1207 		status = QDF_STATUS_E_FAILURE;
	}
1208 
1209 	Q_TARGET_ACCESS_END(scn);
1210 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
1211 	return status;
1212 }
1213 
1214 void
1215 ce_send_watermarks_set(struct CE_handle *copyeng,
1216 		       unsigned int low_alert_nentries,
1217 		       unsigned int high_alert_nentries)
1218 {
1219 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1220 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1221 	struct hif_softc *scn = CE_state->scn;
1222 
1223 	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
1224 	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
1225 }
1226 
1227 void
1228 ce_recv_watermarks_set(struct CE_handle *copyeng,
1229 		       unsigned int low_alert_nentries,
1230 		       unsigned int high_alert_nentries)
1231 {
1232 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1233 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1234 	struct hif_softc *scn = CE_state->scn;
1235 
1236 	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
1237 				low_alert_nentries);
1238 	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
1239 				high_alert_nentries);
1240 }
1241 
1242 unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
1243 {
1244 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1245 	struct CE_ring_state *src_ring = CE_state->src_ring;
1246 	unsigned int nentries_mask = src_ring->nentries_mask;
1247 	unsigned int sw_index;
1248 	unsigned int write_index;
1249 
1250 	qdf_spin_lock(&CE_state->ce_index_lock);
1251 	sw_index = src_ring->sw_index;
1252 	write_index = src_ring->write_index;
1253 	qdf_spin_unlock(&CE_state->ce_index_lock);
1254 
1255 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
1256 }
1257 
1258 unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
1259 {
1260 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1261 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
1262 	unsigned int nentries_mask = dest_ring->nentries_mask;
1263 	unsigned int sw_index;
1264 	unsigned int write_index;
1265 
1266 	qdf_spin_lock(&CE_state->ce_index_lock);
1267 	sw_index = dest_ring->sw_index;
1268 	write_index = dest_ring->write_index;
1269 	qdf_spin_unlock(&CE_state->ce_index_lock);
1270 
1271 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
1272 }
1273 
1274 /*
1275  * Guts of ce_send_entries_done.
1276  * The caller takes responsibility for any necessary locking.
1277  */
1278 static unsigned int
1279 ce_send_entries_done_nolock_legacy(struct hif_softc *scn,
1280 			    struct CE_state *CE_state)
1281 {
1282 	struct CE_ring_state *src_ring = CE_state->src_ring;
1283 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1284 	unsigned int nentries_mask = src_ring->nentries_mask;
1285 	unsigned int sw_index;
1286 	unsigned int read_index;
1287 
1288 	sw_index = src_ring->sw_index;
1289 	read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
1290 
1291 	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
1292 }
1293 
1294 unsigned int ce_send_entries_done(struct CE_handle *copyeng)
1295 {
1296 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1297 	unsigned int nentries;
1298 	struct hif_softc *scn = CE_state->scn;
1299 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1300 
1301 	qdf_spin_lock(&CE_state->ce_index_lock);
1302 	nentries = hif_state->ce_services->ce_send_entries_done_nolock(
1303 						CE_state->scn, CE_state);
1304 	qdf_spin_unlock(&CE_state->ce_index_lock);
1305 
1306 	return nentries;
1307 }
1308 
1309 /*
1310  * Guts of ce_recv_entries_done.
1311  * The caller takes responsibility for any necessary locking.
1312  */
1313 static unsigned int
1314 ce_recv_entries_done_nolock_legacy(struct hif_softc *scn,
1315 			    struct CE_state *CE_state)
1316 {
1317 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
1318 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1319 	unsigned int nentries_mask = dest_ring->nentries_mask;
1320 	unsigned int sw_index;
1321 	unsigned int read_index;
1322 
1323 	sw_index = dest_ring->sw_index;
1324 	read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
1325 
1326 	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
1327 }
1328 
1329 unsigned int ce_recv_entries_done(struct CE_handle *copyeng)
1330 {
1331 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1332 	unsigned int nentries;
1333 	struct hif_softc *scn = CE_state->scn;
1334 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1335 
1336 	qdf_spin_lock(&CE_state->ce_index_lock);
1337 	nentries = hif_state->ce_services->ce_recv_entries_done_nolock(
1338 						CE_state->scn, CE_state);
1339 	qdf_spin_unlock(&CE_state->ce_index_lock);
1340 
1341 	return nentries;
1342 }
1343 
1344 /*
1345  * Guts of ce_completed_recv_next.
1346  * The caller takes responsibility for any necessary locking.
1347  */
1348 static int
1349 ce_completed_recv_next_nolock_legacy(struct CE_state *CE_state,
1350 			      void **per_CE_contextp,
1351 			      void **per_transfer_contextp,
1352 			      qdf_dma_addr_t *bufferp,
1353 			      unsigned int *nbytesp,
1354 			      unsigned int *transfer_idp,
1355 			      unsigned int *flagsp)
1356 {
1357 	int status;
1358 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
1359 	unsigned int nentries_mask = dest_ring->nentries_mask;
1360 	unsigned int sw_index = dest_ring->sw_index;
1361 	struct hif_softc *scn = CE_state->scn;
1362 	struct CE_dest_desc *dest_ring_base =
1363 		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
1364 	struct CE_dest_desc *dest_desc =
1365 		CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
1366 	int nbytes;
1367 	struct CE_dest_desc dest_desc_info;
1368 	/*
1369 	 * By copying the dest_desc_info element to local memory, we could
1370 	 * avoid an extra memory read from non-cacheable memory.
1371 	 */
1372 	dest_desc_info =  *dest_desc;
1373 	nbytes = dest_desc_info.nbytes;
1374 	if (nbytes == 0) {
1375 		/*
1376 		 * This closes a relatively unusual race where the Host
1377 		 * sees the updated DRRI before the update to the
1378 		 * corresponding descriptor has completed. We treat this
1379 		 * as a descriptor that is not yet done.
1380 		 */
1381 		status = QDF_STATUS_E_FAILURE;
1382 		goto done;
1383 	}
1384 
1385 	hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_COMPLETION,
1386 			(union ce_desc *) dest_desc,
1387 			dest_ring->per_transfer_context[sw_index],
1388 			sw_index, 0);
1389 
1390 	dest_desc->nbytes = 0;
1391 
1392 	/* Return data from completed destination descriptor */
1393 	*bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info);
1394 	*nbytesp = nbytes;
1395 	*transfer_idp = dest_desc_info.meta_data;
1396 	*flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;
1397 
1398 	if (per_CE_contextp)
1399 		*per_CE_contextp = CE_state->recv_context;
1400 
1401 	if (per_transfer_contextp) {
1402 		*per_transfer_contextp =
1403 			dest_ring->per_transfer_context[sw_index];
1404 	}
1405 	dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */
1406 
1407 	/* Update sw_index */
1408 	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1409 	dest_ring->sw_index = sw_index;
1410 	status = QDF_STATUS_SUCCESS;
1411 
1412 done:
1413 	return status;
1414 }
1415 
1416 int
1417 ce_completed_recv_next(struct CE_handle *copyeng,
1418 		       void **per_CE_contextp,
1419 		       void **per_transfer_contextp,
1420 		       qdf_dma_addr_t *bufferp,
1421 		       unsigned int *nbytesp,
1422 		       unsigned int *transfer_idp, unsigned int *flagsp)
1423 {
1424 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1425 	int status;
1426 	struct hif_softc *scn = CE_state->scn;
1427 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1428 	struct ce_ops *ce_services;
1429 
1430 	ce_services = hif_state->ce_services;
1431 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
1432 	status =
1433 		ce_services->ce_completed_recv_next_nolock(CE_state,
1434 				per_CE_contextp, per_transfer_contextp, bufferp,
1435 					      nbytesp, transfer_idp, flagsp);
1436 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
1437 
1438 	return status;
1439 }
1440 
1441 QDF_STATUS
1442 ce_revoke_recv_next(struct CE_handle *copyeng,
1443 		    void **per_CE_contextp,
1444 		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
1445 {
1446 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1447 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
1448 
1449 	return hif_state->ce_services->ce_revoke_recv_next(copyeng,
1450 			per_CE_contextp, per_transfer_contextp, bufferp);
1451 }

1452 /* NB: Modeled after ce_completed_recv_next_nolock */
1453 static QDF_STATUS
1454 ce_revoke_recv_next_legacy(struct CE_handle *copyeng,
1455 		    void **per_CE_contextp,
1456 		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
1457 {
1458 	struct CE_state *CE_state;
1459 	struct CE_ring_state *dest_ring;
1460 	unsigned int nentries_mask;
1461 	unsigned int sw_index;
1462 	unsigned int write_index;
1463 	QDF_STATUS status;
1464 	struct hif_softc *scn;
1465 
1466 	CE_state = (struct CE_state *)copyeng;
1467 	dest_ring = CE_state->dest_ring;
1468 	if (!dest_ring)
1469 		return QDF_STATUS_E_FAILURE;
1470 
1471 	scn = CE_state->scn;
1472 	qdf_spin_lock(&CE_state->ce_index_lock);
1473 	nentries_mask = dest_ring->nentries_mask;
1474 	sw_index = dest_ring->sw_index;
1475 	write_index = dest_ring->write_index;
1476 	if (write_index != sw_index) {
1477 		struct CE_dest_desc *dest_ring_base =
1478 			(struct CE_dest_desc *)dest_ring->
1479 			    base_addr_owner_space;
1480 		struct CE_dest_desc *dest_desc =
1481 			CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
1482 
1483 		/* Return data from completed destination descriptor */
1484 		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc);
1485 
1486 		if (per_CE_contextp)
1487 			*per_CE_contextp = CE_state->recv_context;
1488 
1489 		if (per_transfer_contextp) {
1490 			*per_transfer_contextp =
1491 				dest_ring->per_transfer_context[sw_index];
1492 		}
1493 		dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */
1494 
1495 		/* Update sw_index */
1496 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1497 		dest_ring->sw_index = sw_index;
1498 		status = QDF_STATUS_SUCCESS;
1499 	} else {
1500 		status = QDF_STATUS_E_FAILURE;
1501 	}
1502 	qdf_spin_unlock(&CE_state->ce_index_lock);
1503 
1504 	return status;
1505 }
1506 
1507 /*
1508  * Guts of ce_completed_send_next.
1509  * The caller takes responsibility for any necessary locking.
1510  */
1511 static int
1512 ce_completed_send_next_nolock_legacy(struct CE_state *CE_state,
1513 			      void **per_CE_contextp,
1514 			      void **per_transfer_contextp,
1515 			      qdf_dma_addr_t *bufferp,
1516 			      unsigned int *nbytesp,
1517 			      unsigned int *transfer_idp,
1518 			      unsigned int *sw_idx,
1519 			      unsigned int *hw_idx,
1520 			      uint32_t *toeplitz_hash_result)
1521 {
1522 	int status = QDF_STATUS_E_FAILURE;
1523 	struct CE_ring_state *src_ring = CE_state->src_ring;
1524 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1525 	unsigned int nentries_mask = src_ring->nentries_mask;
1526 	unsigned int sw_index = src_ring->sw_index;
1527 	unsigned int read_index;
1528 	struct hif_softc *scn = CE_state->scn;
1529 
1530 	if (src_ring->hw_index == sw_index) {
1531 		/*
1532 		 * The SW completion index has caught up with the cached
1533 		 * version of the HW completion index.
1534 		 * Update the cached HW completion index to see whether
1535 		 * the SW has really caught up to the HW, or if the cached
1536 		 * value of the HW index has become stale.
1537 		 */
1538 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1539 			return QDF_STATUS_E_FAILURE;
1540 		src_ring->hw_index =
1541 			CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
1542 		if (Q_TARGET_ACCESS_END(scn) < 0)
1543 			return QDF_STATUS_E_FAILURE;
1544 	}
1545 	read_index = src_ring->hw_index;
1546 
1547 	if (sw_idx)
1548 		*sw_idx = sw_index;
1549 
1550 	if (hw_idx)
1551 		*hw_idx = read_index;
1552 
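	/*
	 * An all-ones read index is treated as invalid rather than as a
	 * completion (a failed read of the index typically returns
	 * 0xffffffff).
	 */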
1553 	if ((read_index != sw_index) && (read_index != 0xffffffff)) {
1554 		struct CE_src_desc *shadow_base =
1555 			(struct CE_src_desc *)src_ring->shadow_base;
1556 		struct CE_src_desc *shadow_src_desc =
1557 			CE_SRC_RING_TO_DESC(shadow_base, sw_index);
1558 #ifdef QCA_WIFI_3_0
1559 		struct CE_src_desc *src_ring_base =
1560 			(struct CE_src_desc *)src_ring->base_addr_owner_space;
1561 		struct CE_src_desc *src_desc =
1562 			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
1563 #endif
1564 		hif_record_ce_desc_event(scn, CE_state->id,
1565 				HIF_TX_DESC_COMPLETION,
1566 				(union ce_desc *) shadow_src_desc,
1567 				src_ring->per_transfer_context[sw_index],
1568 				sw_index, shadow_src_desc->nbytes);
1569 
1570 		/* Return data from completed source descriptor */
1571 		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc);
1572 		*nbytesp = shadow_src_desc->nbytes;
1573 		*transfer_idp = shadow_src_desc->meta_data;
1574 #ifdef QCA_WIFI_3_0
1575 		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
1576 #else
1577 		*toeplitz_hash_result = 0;
1578 #endif
1579 		if (per_CE_contextp)
1580 			*per_CE_contextp = CE_state->send_context;
1581 
1582 		if (per_transfer_contextp) {
1583 			*per_transfer_contextp =
1584 				src_ring->per_transfer_context[sw_index];
1585 		}
1586 		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */
1587 
1588 		/* Update sw_index */
1589 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1590 		src_ring->sw_index = sw_index;
1591 		status = QDF_STATUS_SUCCESS;
1592 	}
1593 
1594 	return status;
1595 }
1596 
1597 QDF_STATUS
1598 ce_cancel_send_next(struct CE_handle *copyeng,
1599 		void **per_CE_contextp,
1600 		void **per_transfer_contextp,
1601 		qdf_dma_addr_t *bufferp,
1602 		unsigned int *nbytesp,
1603 		unsigned int *transfer_idp,
1604 		uint32_t *toeplitz_hash_result)
1605 {
1606 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1607 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
1608 
1609 	return hif_state->ce_services->ce_cancel_send_next
1610 		(copyeng, per_CE_contextp, per_transfer_contextp,
1611 		 bufferp, nbytesp, transfer_idp, toeplitz_hash_result);
1612 }
1613 
1614 /* NB: Modeled after ce_completed_send_next */
1615 static QDF_STATUS
1616 ce_cancel_send_next_legacy(struct CE_handle *copyeng,
1617 		void **per_CE_contextp,
1618 		void **per_transfer_contextp,
1619 		qdf_dma_addr_t *bufferp,
1620 		unsigned int *nbytesp,
1621 		unsigned int *transfer_idp,
1622 		uint32_t *toeplitz_hash_result)
1623 {
1624 	struct CE_state *CE_state;
1625 	struct CE_ring_state *src_ring;
1626 	unsigned int nentries_mask;
1627 	unsigned int sw_index;
1628 	unsigned int write_index;
1629 	QDF_STATUS status;
1630 	struct hif_softc *scn;
1631 
1632 	CE_state = (struct CE_state *)copyeng;
1633 	src_ring = CE_state->src_ring;
1634 	if (!src_ring)
1635 		return QDF_STATUS_E_FAILURE;
1636 
1637 	scn = CE_state->scn;
1638 	qdf_spin_lock(&CE_state->ce_index_lock);
1639 	nentries_mask = src_ring->nentries_mask;
1640 	sw_index = src_ring->sw_index;
1641 	write_index = src_ring->write_index;
1642 
1643 	if (write_index != sw_index) {
1644 		struct CE_src_desc *src_ring_base =
1645 			(struct CE_src_desc *)src_ring->base_addr_owner_space;
1646 		struct CE_src_desc *src_desc =
1647 			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
1648 
1649 		/* Return data from completed source descriptor */
1650 		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc);
1651 		*nbytesp = src_desc->nbytes;
1652 		*transfer_idp = src_desc->meta_data;
1653 #ifdef QCA_WIFI_3_0
1654 		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
1655 #else
1656 		*toeplitz_hash_result = 0;
1657 #endif
1658 
1659 		if (per_CE_contextp)
1660 			*per_CE_contextp = CE_state->send_context;
1661 
1662 		if (per_transfer_contextp) {
1663 			*per_transfer_contextp =
1664 				src_ring->per_transfer_context[sw_index];
1665 		}
1666 		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */
1667 
1668 		/* Update sw_index */
1669 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1670 		src_ring->sw_index = sw_index;
1671 		status = QDF_STATUS_SUCCESS;
1672 	} else {
1673 		status = QDF_STATUS_E_FAILURE;
1674 	}
1675 	qdf_spin_unlock(&CE_state->ce_index_lock);
1676 
1677 	return status;
1678 }
1679 
1680 /* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
1681 #define CE_WM_SHFT 1
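
/*
 * Illustrative note (derived from ce_check_int_watermark() later in this
 * file): the hardware watermark interrupt-status bits are converted to
 * CE_WM_FLAG_* software flags with a simple right shift:
 *
 *	ce_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
 *	if (ce_int_status & CE_WATERMARK_MASK)
 *		*flags = (ce_int_status & CE_WATERMARK_MASK) >> CE_WM_SHFT;
 */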
1682 
1683 int
1684 ce_completed_send_next(struct CE_handle *copyeng,
1685 		       void **per_CE_contextp,
1686 		       void **per_transfer_contextp,
1687 		       qdf_dma_addr_t *bufferp,
1688 		       unsigned int *nbytesp,
1689 		       unsigned int *transfer_idp,
1690 		       unsigned int *sw_idx,
1691 		       unsigned int *hw_idx,
1692 		       unsigned int *toeplitz_hash_result)
1693 {
1694 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1695 	struct hif_softc *scn = CE_state->scn;
1696 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1697 	struct ce_ops *ce_services;
1698 	int status;
1699 
1700 	ce_services = hif_state->ce_services;
1701 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
1702 	status =
1703 		ce_services->ce_completed_send_next_nolock(CE_state,
1704 					per_CE_contextp, per_transfer_contextp,
1705 					bufferp, nbytesp, transfer_idp, sw_idx,
1706 					      hw_idx, toeplitz_hash_result);
1707 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
1708 
1709 	return status;
1710 }
1711 
1712 #ifdef ATH_11AC_TXCOMPACT
1713 /* CE engine descriptor reap
1714  * Similar to ce_per_engine_service(); the only difference is that
1715  * ce_per_engine_service() does receive and reaping of completed descriptors,
1716  * while this function only handles reaping of Tx complete descriptors.
1717  * It is called from the threshold reap poll routine
1718  * hif_send_complete_check(), so it must not contain any receive
1719  * functionality.
1720  */
1721 
1722 void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
1723 {
1724 	void *CE_context;
1725 	void *transfer_context;
1726 	qdf_dma_addr_t buf;
1727 	unsigned int nbytes;
1728 	unsigned int id;
1729 	unsigned int sw_idx, hw_idx;
1730 	uint32_t toeplitz_hash_result;
1731 	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
1732 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1733 
1734 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1735 		return;
1736 
1737 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
1738 			NULL, NULL, 0, 0);
1739 
1740 	/* Since this function is called from both user context and
1741 	 * tasklet context, the spinlock has to disable bottom halves.
1742 	 * This fix assumes that the ATH_11AC_TXCOMPACT flag is always
1743 	 * enabled in TX polling mode. If this is not the case, more
1744 	 * bottom-half spin lock changes are needed. Due to data path
1745 	 * performance concerns, after internal discussion we decided
1746 	 * to make the minimum change, i.e., only address the issue that
1747 	 * occurred in this function. The possible downside of this minimal
1748 	 * change is that, in the future, if some other function is also
1749 	 * opened up to user context, those cases will need to be addressed
1750 	 * by changing spin_lock to spin_lock_bh as well.
1751 	 */
1752 
1753 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
1754 
1755 	if (CE_state->send_cb) {
1756 		{
1757 			struct ce_ops *ce_services = hif_state->ce_services;
1758 			/* Pop completed send buffers and call the
1759 			 * registered send callback for each
1760 			 */
1761 			while (ce_services->ce_completed_send_next_nolock
1762 				 (CE_state, &CE_context,
1763 				  &transfer_context, &buf,
1764 				  &nbytes, &id, &sw_idx, &hw_idx,
1765 				  &toeplitz_hash_result) ==
1766 				  QDF_STATUS_SUCCESS) {
1767 				if (ce_id != CE_HTT_H2T_MSG) {
1768 					qdf_spin_unlock_bh(
1769 						&CE_state->ce_index_lock);
1770 					CE_state->send_cb(
1771 						(struct CE_handle *)
1772 						CE_state, CE_context,
1773 						transfer_context, buf,
1774 						nbytes, id, sw_idx, hw_idx,
1775 						toeplitz_hash_result);
1776 					qdf_spin_lock_bh(
1777 						&CE_state->ce_index_lock);
1778 				} else {
1779 					struct HIF_CE_pipe_info *pipe_info =
1780 						(struct HIF_CE_pipe_info *)
1781 						CE_context;
1782 
1783 					qdf_spin_lock_bh(&pipe_info->
1784 						 completion_freeq_lock);
1785 					pipe_info->num_sends_allowed++;
1786 					qdf_spin_unlock_bh(&pipe_info->
1787 						   completion_freeq_lock);
1788 				}
1789 			}
1790 		}
1791 	}
1792 
1793 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
1794 
1795 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
1796 			NULL, NULL, 0, 0);
1797 	Q_TARGET_ACCESS_END(scn);
1798 }
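
/*
 * Illustrative caller sketch (an assumption, not code from this file):
 * the reap routine above is intended to be driven from the Tx-completion
 * poll path, hif_send_complete_check(), roughly as
 *
 *	ce_per_engine_servicereap(scn, ce_id);
 *
 * for a ce_id whose CE has a send callback registered. The routine takes
 * care of Q_TARGET_ACCESS_BEGIN/END and of its own locking.
 */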
1799 
1800 #endif /*ATH_11AC_TXCOMPACT */
1801 
1802 /*
1803  * Number of times to check for any pending tx/rx completion on
1804  * a copy engine; this count should be big enough. Once we hit
1805  * this threshold we will not check for any Tx/Rx completion in the
1806  * same interrupt handling pass. Note that this threshold is only
1807  * used for Rx interrupt processing; it can be used for Tx as well
1808  * if we suspect an infinite loop in checking for pending Tx completions.
1809  */
1810 #define CE_TXRX_COMP_CHECK_THRESHOLD 20
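
/*
 * Usage pattern (as in the service routines below): the re-check loops are
 * bounded by this threshold so that a continuously refilling ring cannot
 * keep the handler spinning forever, e.g.
 *
 *	if (more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD)
 *		goto more_completions;
 *	else
 *		HIF_ERROR("Potential infinite loop detected ...");
 */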
1811 
1812 #ifdef WLAN_FEATURE_FASTPATH
1813 /**
1814  * ce_fastpath_rx_handle() - Updates write_index and calls fastpath msg handler
1815  * @ce_state: handle to copy engine state
1816  * @cmpl_msdus: Rx msdus
1817  * @num_cmpls: number of Rx msdus
1818  * @ctrl_addr: CE control address
1819  *
1820  * Return: None
1821  */
1822 static void ce_fastpath_rx_handle(struct CE_state *ce_state,
1823 				  qdf_nbuf_t *cmpl_msdus, uint32_t num_cmpls,
1824 				  uint32_t ctrl_addr)
1825 {
1826 	struct hif_softc *scn = ce_state->scn;
1827 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
1828 	uint32_t nentries_mask = dest_ring->nentries_mask;
1829 	uint32_t write_index;
1830 
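	/*
	 * Drop the CE index lock across the (potentially long-running)
	 * fastpath message handler; it is re-acquired before the ring
	 * write index is updated below.
	 */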
1831 	qdf_spin_unlock(&ce_state->ce_index_lock);
1832 	(ce_state->fastpath_handler)(ce_state->context,	cmpl_msdus, num_cmpls);
1833 	qdf_spin_lock(&ce_state->ce_index_lock);
1834 
1835 	/* Update Destination Ring Write Index */
1836 	write_index = dest_ring->write_index;
1837 	write_index = CE_RING_IDX_ADD(nentries_mask, write_index, num_cmpls);
1838 
1839 	hif_record_ce_desc_event(scn, ce_state->id,
1840 			FAST_RX_WRITE_INDEX_UPDATE,
1841 			NULL, NULL, write_index, 0);
1842 
1843 	CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
1844 	dest_ring->write_index = write_index;
1845 }
1846 
1847 #ifdef CONFIG_SLUB_DEBUG_ON
1848 #define MSG_FLUSH_NUM 16
1849 #else /* PERF build */
1850 #define MSG_FLUSH_NUM 32
1851 #endif /* SLUB_DEBUG_ON */
1852 /**
1853  * ce_per_engine_service_fast() - CE handler routine to service fastpath msgs
1854  * @scn: hif_context
1855  * @ce_id: Copy engine ID
1856  * 1) Go through the CE ring, and find the completions
1857  * 2) For valid completions retrieve context (nbuf) for per_transfer_context[]
1858  * 3) Unmap buffer & accumulate in an array.
1859  * 4) Call message handler when array is full or when exiting the handler
1860  *
1861  * Return: void
1862  */
1863 
1864 static void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
1865 {
1866 	struct CE_state *ce_state = scn->ce_id_to_state[ce_id];
1867 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1868 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
1869 	struct CE_dest_desc *dest_ring_base =
1870 		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
1871 
1872 	uint32_t nentries_mask = dest_ring->nentries_mask;
1873 	uint32_t sw_index = dest_ring->sw_index;
1874 	uint32_t nbytes;
1875 	qdf_nbuf_t nbuf;
1876 	dma_addr_t paddr;
1877 	struct CE_dest_desc *dest_desc;
1878 	qdf_nbuf_t cmpl_msdus[MSG_FLUSH_NUM];
1879 	uint32_t ctrl_addr = ce_state->ctrl_addr;
1880 	uint32_t nbuf_cmpl_idx = 0;
1881 	unsigned int more_comp_cnt = 0;
1882 
1883 more_data:
1884 	for (;;) {
1885 
1886 		dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base,
1887 						 sw_index);
1888 
1889 		/*
1890 		 * The following 2 reads are from non-cached memory
1891 		 */
1892 		nbytes = dest_desc->nbytes;
1893 
1894 		/* If completion is invalid, break */
1895 		if (qdf_unlikely(nbytes == 0))
1896 			break;
1897 
1898 
1899 		/*
1900 		 * Build the nbuf list from valid completions
1901 		 */
1902 		nbuf = dest_ring->per_transfer_context[sw_index];
1903 
1904 		/*
1905 		 * No lock is needed here, since this is the only thread
1906 		 * that accesses the sw_index
1907 		 */
1908 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1909 
1910 		/*
1911 		 * CAREFUL : Uncached write, but still less expensive,
1912 		 * since most modern caches use "write-combining" to
1913 		 * flush multiple cache-writes all at once.
1914 		 */
1915 		dest_desc->nbytes = 0;
1916 
1917 		/*
1918 		 * Per our understanding this sync should not be
1919 		 * required on our platform, since we perform the same
1920 		 * cache invalidation operation on the same buffer
1921 		 * twice in succession, without any modification to
1922 		 * this buffer by the CPU in between.
1923 		 * However, this code with 2 syncs in succession has
1924 		 * been undergoing some testing at a customer site,
1925 		 * and so far has shown no problems. We would like to
1926 		 * confirm with the customer that this line is really
1927 		 * not required, before we remove this line
1928 		 * completely.
1929 		 */
1930 		paddr = QDF_NBUF_CB_PADDR(nbuf);
1931 
1932 		qdf_mem_dma_sync_single_for_cpu(scn->qdf_dev, paddr,
1933 				(skb_end_pointer(nbuf) - (nbuf)->data),
1934 				DMA_FROM_DEVICE);
1935 
1936 		qdf_nbuf_put_tail(nbuf, nbytes);
1937 
1938 		qdf_assert_always(nbuf->data != NULL);
1939 
1940 		QDF_NBUF_CB_RX_CTX_ID(nbuf) =
1941 				hif_get_rx_ctx_id(ce_state->id, hif_hdl);
1942 		cmpl_msdus[nbuf_cmpl_idx++] = nbuf;
1943 
1944 		/*
1945 		 * We are not posting the buffers back; instead we
1946 		 * are reusing the buffers.
1947 		 */
1948 		if (nbuf_cmpl_idx == MSG_FLUSH_NUM) {
1949 			hif_record_ce_desc_event(scn, ce_state->id,
1950 						 FAST_RX_SOFTWARE_INDEX_UPDATE,
1951 						 NULL, NULL, sw_index, 0);
1952 			dest_ring->sw_index = sw_index;
1953 			ce_fastpath_rx_handle(ce_state, cmpl_msdus,
1954 					      MSG_FLUSH_NUM, ctrl_addr);
1955 
1956 			ce_state->receive_count += MSG_FLUSH_NUM;
1957 			if (qdf_unlikely(hif_ce_service_should_yield(
1958 						scn, ce_state))) {
1959 				ce_state->force_break = 1;
1960 				qdf_atomic_set(&ce_state->rx_pending, 1);
1961 				return;
1962 			}
1963 
1964 			nbuf_cmpl_idx = 0;
1965 			more_comp_cnt = 0;
1966 		}
1967 	}
1968 
1969 	hif_record_ce_desc_event(scn, ce_state->id,
1970 				 FAST_RX_SOFTWARE_INDEX_UPDATE,
1971 				 NULL, NULL, sw_index, 0);
1972 
1973 	dest_ring->sw_index = sw_index;
1974 
1975 	/*
1976 	 * If there are not enough completions to fill the array,
1977 	 * just call the message handler here
1978 	 */
1979 	if (nbuf_cmpl_idx) {
1980 		ce_fastpath_rx_handle(ce_state, cmpl_msdus,
1981 				      nbuf_cmpl_idx, ctrl_addr);
1982 
1983 		ce_state->receive_count += nbuf_cmpl_idx;
1984 		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
1985 			ce_state->force_break = 1;
1986 			qdf_atomic_set(&ce_state->rx_pending, 1);
1987 			return;
1988 		}
1989 
1990 		/* check for more packets after upper layer processing */
1991 		nbuf_cmpl_idx = 0;
1992 		more_comp_cnt = 0;
1993 		goto more_data;
1994 	}
1995 	qdf_atomic_set(&ce_state->rx_pending, 0);
1996 	if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
1997 		CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1998 					   HOST_IS_COPY_COMPLETE_MASK);
1999 	} else {
2000 		hif_err_rl("%s: target access is not allowed", __func__);
2001 		return;
2002 	}
2003 
2004 	if (ce_recv_entries_done_nolock_legacy(scn, ce_state)) {
2005 		if (more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
2006 			goto more_data;
2007 		} else {
2008 			HIF_ERROR("%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
2009 				  __func__, nentries_mask,
2010 				  ce_state->dest_ring->sw_index,
2011 				  CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr));
2012 		}
2013 	}
2014 #ifdef NAPI_YIELD_BUDGET_BASED
2015 	/* Caution: before you modify this code, please refer to the hif_napi_poll
2016 	function to understand how napi_complete gets called and make the necessary
2017 	changes. Force break has to be done until WIN disables the interrupt at source */
2018 	ce_state->force_break = 1;
2019 #endif
2020 }
2021 
2022 #else
2023 static void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
2024 {
2025 }
2026 #endif /* WLAN_FEATURE_FASTPATH */
2027 
2028 /* Maximum amount of time in nanoseconds that the CE per-engine service
2029  * may run before it should yield. Roughly one jiffy.
2030  */
2031 #define CE_PER_ENGINE_SERVICE_MAX_YIELD_TIME_NS (10 * 1000 * 1000)
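
/*
 * Usage (as in ce_per_engine_service() below): a yield deadline is computed
 * once per invocation and later consulted via hif_ce_service_should_yield():
 *
 *	CE_state->ce_service_yield_time = sched_clock() +
 *		(unsigned long long)CE_PER_ENGINE_SERVICE_MAX_YIELD_TIME_NS;
 */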
2032 
2033 /*
2034  * Guts of interrupt handler for per-engine interrupts on a particular CE.
2035  *
2036  * Invokes registered callbacks for recv_complete,
2037  * send_complete, and watermarks.
2038  *
2039  * Returns: number of messages processed
2040  */
2041 int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
2042 {
2043 	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
2044 	uint32_t ctrl_addr = CE_state->ctrl_addr;
2045 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2046 	void *CE_context;
2047 	void *transfer_context;
2048 	qdf_dma_addr_t buf;
2049 	unsigned int nbytes;
2050 	unsigned int id;
2051 	unsigned int flags;
2052 	unsigned int more_comp_cnt = 0;
2053 	unsigned int more_snd_comp_cnt = 0;
2054 	unsigned int sw_idx, hw_idx;
2055 	uint32_t toeplitz_hash_result;
2056 	uint32_t mode = hif_get_conparam(scn);
2057 
2058 	if (hif_is_nss_wifi_enabled(scn) && (CE_state->htt_rx_data))
2059 		return CE_state->receive_count;
2060 
2061 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
2062 		HIF_ERROR("[premature rc=0]");
2063 		return 0; /* no work done */
2064 	}
2065 
2066 	/* Clear force_break flag and re-initialize receive_count to 0 */
2067 	CE_state->receive_count = 0;
2068 	CE_state->force_break = 0;
2069 	CE_state->ce_service_yield_time =
2070 		sched_clock() +
2071 		(unsigned long long)CE_PER_ENGINE_SERVICE_MAX_YIELD_TIME_NS;
2072 
2073 
2074 	qdf_spin_lock(&CE_state->ce_index_lock);
2075 	/*
2076 	 * With the check below we make sure the CE we are handling is a
2077 	 * datapath CE and that fastpath is enabled.
2078 	 */
2079 	if (ce_is_fastpath_handler_registered(CE_state)) {
2080 		/* For datapath only Rx CEs */
2081 		ce_per_engine_service_fast(scn, CE_id);
2082 		goto unlock_end;
2083 	}
2084 
2085 more_completions:
2086 	if (CE_state->recv_cb) {
2087 
2088 		/* Pop completed recv buffers and call
2089 		 * the registered recv callback for each
2090 		 */
2091 		while (hif_state->ce_services->ce_completed_recv_next_nolock
2092 				(CE_state, &CE_context, &transfer_context,
2093 				&buf, &nbytes, &id, &flags) ==
2094 				QDF_STATUS_SUCCESS) {
2095 			qdf_spin_unlock(&CE_state->ce_index_lock);
2096 			CE_state->recv_cb((struct CE_handle *)CE_state,
2097 					  CE_context, transfer_context, buf,
2098 					  nbytes, id, flags);
2099 
2100 			/*
2101 			 * EV #112693 -
2102 			 * [Peregrine][ES1][WB342][Win8x86][Performance]
2103 			 * BSoD_0x133 occurred in VHT80 UDP_DL.
2104 			 * Break out of the DPC by force if the number of loops
2105 			 * in hif_pci_ce_recv_data reaches MAX_NUM_OF_RECEIVES,
2106 			 * to avoid spending too long in the DPC for each
2107 			 * interrupt handling. Schedule another DPC to avoid
2108 			 * data loss if we had taken force-break action before.
2109 			 * This currently applies to Windows OS only;
2110 			 * Linux/MAC OS can be expanded to do the same
2111 			 * if necessary.
2112 			 */
2113 
2114 			/* Break the receive processes by
2115 			 * force if force_break set up
2116 			 */
2117 			if (qdf_unlikely(CE_state->force_break)) {
2118 				qdf_atomic_set(&CE_state->rx_pending, 1);
2119 				goto target_access_end;
2120 			}
2121 			qdf_spin_lock(&CE_state->ce_index_lock);
2122 		}
2123 	}
2124 
2125 	/*
2126 	 * Attention: we may experience a potential infinite loop in the
2127 	 * while loop below during a send stress test.
2128 	 * Resolve it the same way as the receive case (refer to EV #112693).
2129 	 */
2130 
2131 	if (CE_state->send_cb) {
2132 		/* Pop completed send buffers and call
2133 		 * the registered send callback for each
2134 		 */
2135 
2136 #ifdef ATH_11AC_TXCOMPACT
2137 		while (hif_state->ce_services->ce_completed_send_next_nolock
2138 			 (CE_state, &CE_context,
2139 			 &transfer_context, &buf, &nbytes,
2140 			 &id, &sw_idx, &hw_idx,
2141 			 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
2142 
2143 			if (CE_id != CE_HTT_H2T_MSG ||
2144 			    QDF_IS_EPPING_ENABLED(mode)) {
2145 				qdf_spin_unlock(&CE_state->ce_index_lock);
2146 				CE_state->send_cb((struct CE_handle *)CE_state,
2147 						  CE_context, transfer_context,
2148 						  buf, nbytes, id, sw_idx,
2149 						  hw_idx, toeplitz_hash_result);
2150 				qdf_spin_lock(&CE_state->ce_index_lock);
2151 			} else {
2152 				struct HIF_CE_pipe_info *pipe_info =
2153 					(struct HIF_CE_pipe_info *)CE_context;
2154 
2155 				qdf_spin_lock(&pipe_info->
2156 					      completion_freeq_lock);
2157 				pipe_info->num_sends_allowed++;
2158 				qdf_spin_unlock(&pipe_info->
2159 						completion_freeq_lock);
2160 			}
2161 		}
2162 #else                           /*ATH_11AC_TXCOMPACT */
2163 		while (hif_state->ce_services->ce_completed_send_next_nolock
2164 			 (CE_state, &CE_context,
2165 			  &transfer_context, &buf, &nbytes,
2166 			  &id, &sw_idx, &hw_idx,
2167 			  &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
2168 			qdf_spin_unlock(&CE_state->ce_index_lock);
2169 			CE_state->send_cb((struct CE_handle *)CE_state,
2170 				  CE_context, transfer_context, buf,
2171 				  nbytes, id, sw_idx, hw_idx,
2172 				  toeplitz_hash_result);
2173 			qdf_spin_lock(&CE_state->ce_index_lock);
2174 		}
2175 #endif /*ATH_11AC_TXCOMPACT */
2176 	}
2177 
2178 more_watermarks:
2179 	if (CE_state->misc_cbs) {
2180 		if (CE_state->watermark_cb &&
2181 				hif_state->ce_services->watermark_int(CE_state,
2182 					&flags)) {
2183 			qdf_spin_unlock(&CE_state->ce_index_lock);
2184 			/* Convert HW IS bits to software flags */
2185 			CE_state->watermark_cb((struct CE_handle *)CE_state,
2186 					CE_state->wm_context, flags);
2187 			qdf_spin_lock(&CE_state->ce_index_lock);
2188 		}
2189 	}
2190 
2191 	/*
2192 	 * Clear the misc interrupts (watermark) that were handled above,
2193 	 * and that will be checked again below.
2194 	 * Clear and check for copy-complete interrupts again, just in case
2195 	 * more copy completions happened while the misc interrupts were being
2196 	 * handled.
2197 	 */
2198 	if (!ce_srng_based(scn)) {
2199 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
2200 			CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
2201 					   CE_WATERMARK_MASK |
2202 					   HOST_IS_COPY_COMPLETE_MASK);
2203 		} else {
2204 			hif_err_rl("%s: target access is not allowed",
2205 				   __func__);
2206 			goto unlock_end;
2207 		}
2208 	}
2209 
2210 	/*
2211 	 * Now that per-engine interrupts are cleared, verify that
2212 	 * no recv interrupts arrive while processing send interrupts,
2213 	 * and no recv or send interrupts happened while processing
2214 	 * misc interrupts. Go back and check again. Keep checking until
2215 	 * we find no more events to process.
2216 	 */
2217 	if (CE_state->recv_cb &&
2218 		hif_state->ce_services->ce_recv_entries_done_nolock(scn,
2219 				CE_state)) {
2220 		if (QDF_IS_EPPING_ENABLED(mode) ||
2221 		    more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
2222 			goto more_completions;
2223 		} else {
2224 			if (!ce_srng_based(scn)) {
2225 				HIF_ERROR(
2226 					"%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
2227 					__func__,
2228 					CE_state->dest_ring->nentries_mask,
2229 					CE_state->dest_ring->sw_index,
2230 					CE_DEST_RING_READ_IDX_GET(scn,
2231 							  CE_state->ctrl_addr));
2232 			}
2233 		}
2234 	}
2235 
2236 	if (CE_state->send_cb &&
2237 		hif_state->ce_services->ce_send_entries_done_nolock(scn,
2238 				CE_state)) {
2239 		if (QDF_IS_EPPING_ENABLED(mode) ||
2240 		    more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
2241 			goto more_completions;
2242 		} else {
2243 			if (!ce_srng_based(scn)) {
2244 				HIF_ERROR(
2245 					"%s:Potential infinite loop detected during send completion nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
2246 					__func__,
2247 					CE_state->src_ring->nentries_mask,
2248 					CE_state->src_ring->sw_index,
2249 					CE_SRC_RING_READ_IDX_GET(scn,
2250 							 CE_state->ctrl_addr));
2251 			}
2252 		}
2253 	}
2254 
2255 	if (CE_state->misc_cbs && CE_state->watermark_cb) {
2256 		if (hif_state->ce_services->watermark_int(CE_state, &flags))
2257 			goto more_watermarks;
2258 	}
2259 
2260 	qdf_atomic_set(&CE_state->rx_pending, 0);
2261 
2262 unlock_end:
2263 	qdf_spin_unlock(&CE_state->ce_index_lock);
2264 target_access_end:
2265 	if (Q_TARGET_ACCESS_END(scn) < 0)
2266 		HIF_ERROR("<--[premature rc=%d]", CE_state->receive_count);
2267 	return CE_state->receive_count;
2268 }
2269 qdf_export_symbol(ce_per_engine_service);
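
/*
 * Illustrative caller sketch (an assumption; the real tasklet/NAPI glue
 * lives outside this file): a per-CE bottom half typically just forwards
 * to this routine, e.g.
 *
 *	static void my_ce_bottom_half(unsigned long data)
 *	{
 *		struct my_ce_ctx *ctx = (struct my_ce_ctx *)data;
 *
 *		ce_per_engine_service(ctx->scn, ctx->ce_id);
 *	}
 *
 * my_ce_bottom_half and my_ce_ctx are placeholder names.
 */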
2270 
2271 /*
2272  * Handler for per-engine interrupts on ALL active CEs.
2273  * This is used in cases where the system is sharing a
2274  * single interrupt for all CEs.
2275  */
2276 
2277 void ce_per_engine_service_any(int irq, struct hif_softc *scn)
2278 {
2279 	int CE_id;
2280 	uint32_t intr_summary;
2281 
2282 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2283 		return;
2284 
2285 	if (!qdf_atomic_read(&scn->tasklet_from_intr)) {
2286 		for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
2287 			struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
2288 
2289 			if (qdf_atomic_read(&CE_state->rx_pending)) {
2290 				qdf_atomic_set(&CE_state->rx_pending, 0);
2291 				ce_per_engine_service(scn, CE_id);
2292 			}
2293 		}
2294 
2295 		Q_TARGET_ACCESS_END(scn);
2296 		return;
2297 	}
2298 
2299 	intr_summary = CE_INTERRUPT_SUMMARY(scn);
2300 
2301 	for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
2302 		if (intr_summary & (1 << CE_id))
2303 			intr_summary &= ~(1 << CE_id);
2304 		else
2305 			continue;       /* no intr pending on this CE */
2306 
2307 		ce_per_engine_service(scn, CE_id);
2308 	}
2309 
2310 	Q_TARGET_ACCESS_END(scn);
2311 }
2312 
2313 /*
2314  * Adjust interrupts for the copy complete handler.
2315  * If it's needed for either send or recv, then unmask
2316  * this interrupt; otherwise, mask it.
2317  *
2318  * Called with target_lock held.
2319  */
2320 static void
2321 ce_per_engine_handler_adjust_legacy(struct CE_state *CE_state,
2322 			     int disable_copy_compl_intr)
2323 {
2324 	uint32_t ctrl_addr = CE_state->ctrl_addr;
2325 	struct hif_softc *scn = CE_state->scn;
2326 
2327 	CE_state->disable_copy_compl_intr = disable_copy_compl_intr;
2328 
2329 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2330 		return;
2331 
2332 	if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
2333 		hif_err_rl("%s: target access is not allowed", __func__);
2334 		return;
2335 	}
2336 
2337 	if ((!disable_copy_compl_intr) &&
2338 	    (CE_state->send_cb || CE_state->recv_cb))
2339 		CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
2340 	else
2341 		CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
2342 
2343 	if (CE_state->watermark_cb)
2344 		CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
2345 	 else
2346 		CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
2347 	Q_TARGET_ACCESS_END(scn);
2348 }
2349 
2350 /* Iterate the CE_state list and disable the compl interrupt
2351  * if it has been registered already.
2352  */
2353 void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn)
2354 {
2355 	int CE_id;
2356 
2357 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2358 		return;
2359 
2360 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
2361 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
2362 		uint32_t ctrl_addr = CE_state->ctrl_addr;
2363 
2364 		/* if the interrupt is currently enabled, disable it */
2365 		if (!CE_state->disable_copy_compl_intr
2366 		    && (CE_state->send_cb || CE_state->recv_cb))
2367 			CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
2368 
2369 		if (CE_state->watermark_cb)
2370 			CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
2371 	}
2372 	Q_TARGET_ACCESS_END(scn);
2373 }
2374 
2375 void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn)
2376 {
2377 	int CE_id;
2378 
2379 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2380 		return;
2381 
2382 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
2383 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
2384 		uint32_t ctrl_addr = CE_state->ctrl_addr;
2385 
2386 		/*
2387 		 * If the CE is supposed to have copy complete interrupts
2388 		 * enabled (i.e. there is a callback registered, and the
2389 		 * "disable" flag is not set), then re-enable the interrupt.
2390 		 */
2391 		if (!CE_state->disable_copy_compl_intr
2392 		    && (CE_state->send_cb || CE_state->recv_cb))
2393 			CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
2394 
2395 		if (CE_state->watermark_cb)
2396 			CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
2397 	}
2398 	Q_TARGET_ACCESS_END(scn);
2399 }
2400 
2401 /**
2402  * ce_send_cb_register(): register completion handler
2403  * @copyeng: CE_state representing the ce we are adding the behavior to
2404  * @fn_ptr: callback that the ce should use when processing tx completions
2405  * @disable_interrupts: if set, the copy complete interrupt is disabled
2406  *
2407  * Caller should guarantee that no transactions are in progress before
2408  * switching the callback function.
2409  *
2410  * Registers the send context before the fn pointer so that if the cb is valid
2411  * the context should be valid.
2412  *
2413  * Beware that currently this function will enable completion interrupts.
2414  */
2415 void
2416 ce_send_cb_register(struct CE_handle *copyeng,
2417 		    ce_send_cb fn_ptr,
2418 		    void *ce_send_context, int disable_interrupts)
2419 {
2420 	struct CE_state *CE_state = (struct CE_state *)copyeng;
2421 	struct hif_softc *scn;
2422 	struct HIF_CE_state *hif_state;
2423 
2424 	if (CE_state == NULL) {
2425 		HIF_ERROR("%s: Error CE state = NULL", __func__);
2426 		return;
2427 	}
2428 	scn = CE_state->scn;
2429 	hif_state = HIF_GET_CE_STATE(scn);
2430 	if (hif_state == NULL) {
2431 		HIF_ERROR("%s: Error HIF state = NULL", __func__);
2432 		return;
2433 	}
2434 	CE_state->send_context = ce_send_context;
2435 	CE_state->send_cb = fn_ptr;
2436 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
2437 							disable_interrupts);
2438 }
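
/*
 * Illustrative registration sketch (an assumption, not code from this
 * file). The callback signature below matches how send_cb is invoked in
 * ce_per_engine_service(); my_tx_complete, ce_tx_handle and pipe_info are
 * placeholder names:
 *
 *	static void my_tx_complete(struct CE_handle *ce, void *ce_ctx,
 *				   void *transfer_ctx, qdf_dma_addr_t buf,
 *				   unsigned int nbytes, unsigned int id,
 *				   unsigned int sw_idx, unsigned int hw_idx,
 *				   uint32_t toeplitz_hash_result)
 *	{
 *	}
 *
 *	ce_send_cb_register(ce_tx_handle, my_tx_complete, pipe_info, 0);
 */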
2439 
2440 /**
2441  * ce_recv_cb_register(): register completion handler
2442  * @copyeng: CE_state representing the ce we are adding the behavior to
2443  * @fn_ptr: callback that the ce should use when processing rx completions
2444  * @disable_interrupts: if set, the copy complete interrupt is disabled
2445  *
2446  * Registers the send context before the fn pointer so that if the cb is valid
2447  * the context should be valid.
2448  *
2449  * Caller should guarantee that no transactions are in progress before
2450  * switching the callback function.
2451  */
2452 void
2453 ce_recv_cb_register(struct CE_handle *copyeng,
2454 		    CE_recv_cb fn_ptr,
2455 		    void *CE_recv_context, int disable_interrupts)
2456 {
2457 	struct CE_state *CE_state = (struct CE_state *)copyeng;
2458 	struct hif_softc *scn;
2459 	struct HIF_CE_state *hif_state;
2460 
2461 	if (CE_state == NULL) {
2462 		HIF_ERROR("%s: ERROR CE state = NULL", __func__);
2463 		return;
2464 	}
2465 	scn = CE_state->scn;
2466 	hif_state = HIF_GET_CE_STATE(scn);
2467 	if (hif_state == NULL) {
2468 		HIF_ERROR("%s: Error HIF state = NULL", __func__);
2469 		return;
2470 	}
2471 	CE_state->recv_context = CE_recv_context;
2472 	CE_state->recv_cb = fn_ptr;
2473 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
2474 							disable_interrupts);
2475 }
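
/*
 * Illustrative sketch (an assumption): a receive callback registered here
 * is invoked as in ce_per_engine_service(), i.e. with the signature
 *
 *	void my_rx_complete(struct CE_handle *ce, void *ce_ctx,
 *			    void *transfer_ctx, qdf_dma_addr_t buf,
 *			    unsigned int nbytes, unsigned int id,
 *			    unsigned int flags);
 *
 * so a typical call is ce_recv_cb_register(ce_rx_handle, my_rx_complete,
 * pipe_info, 1), with my_rx_complete, ce_rx_handle and pipe_info being
 * placeholder names.
 */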
2476 
2477 /**
2478  * ce_watermark_cb_register(): register completion handler
2479  * @copyeng: CE_state representing the ce we are adding the behavior to
2480  * @fn_ptr: callback that the ce should use when processing watermark events
2481  *
2482  * Caller should guarantee that no watermark events are being processed before
2483  * switching the callback function.
2484  */
2485 void
2486 ce_watermark_cb_register(struct CE_handle *copyeng,
2487 			 CE_watermark_cb fn_ptr, void *CE_wm_context)
2488 {
2489 	struct CE_state *CE_state = (struct CE_state *)copyeng;
2490 	struct hif_softc *scn = CE_state->scn;
2491 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2492 
2493 	CE_state->watermark_cb = fn_ptr;
2494 	CE_state->wm_context = CE_wm_context;
2495 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
2496 							0);
2497 	if (fn_ptr)
2498 		CE_state->misc_cbs = 1;
2499 }
2500 
2501 bool ce_get_rx_pending(struct hif_softc *scn)
2502 {
2503 	int CE_id;
2504 
2505 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
2506 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
2507 
2508 		if (qdf_atomic_read(&CE_state->rx_pending))
2509 			return true;
2510 	}
2511 
2512 	return false;
2513 }
2514 
2515 /**
2516  * ce_check_rx_pending() - ce_check_rx_pending
2517  * @CE_state: context of the copy engine to check
2518  *
2519  * Return: true if the per_engine_service
2520  *	didn't process all the rx descriptors.
2521  */
2522 bool ce_check_rx_pending(struct CE_state *CE_state)
2523 {
2524 	if (qdf_atomic_read(&CE_state->rx_pending))
2525 		return true;
2526 	else
2527 		return false;
2528 }
2529 qdf_export_symbol(ce_check_rx_pending);
2530 
2531 #ifdef IPA_OFFLOAD
2532 /**
2533  * ce_ipa_get_resource() - get uc resource on copyengine
2534  * @ce: copyengine context
2535  * @ce_sr: copyengine source ring resource info
2536  * @ce_sr_ring_size: copyengine source ring size
2537  * @ce_reg_paddr: copyengine register physical address
2538  *
2539  * The copy engine should release its resources to the micro controller.
2540  * The micro controller needs:
2541  *  - Copy engine source descriptor base address
2542  *  - Copy engine source descriptor size
2543  *  - PCI BAR address to access the copy engine register
2544  *
2545  * Return: None
2546  */
2547 void ce_ipa_get_resource(struct CE_handle *ce,
2548 			 qdf_shared_mem_t **ce_sr,
2549 			 uint32_t *ce_sr_ring_size,
2550 			 qdf_dma_addr_t *ce_reg_paddr)
2551 {
2552 	struct CE_state *CE_state = (struct CE_state *)ce;
2553 	uint32_t ring_loop;
2554 	struct CE_src_desc *ce_desc;
2555 	qdf_dma_addr_t phy_mem_base;
2556 	struct hif_softc *scn = CE_state->scn;
2557 
2558 	if (CE_UNUSED == CE_state->state) {
2559 		*qdf_mem_get_dma_addr_ptr(scn->qdf_dev,
2560 			&CE_state->scn->ipa_ce_ring->mem_info) = 0;
2561 		*ce_sr_ring_size = 0;
2562 		return;
2563 	}
2564 
2565 	/* Update default value for descriptor */
2566 	for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
2567 	     ring_loop++) {
2568 		ce_desc = (struct CE_src_desc *)
2569 			  ((char *)CE_state->src_ring->base_addr_owner_space +
2570 			   ring_loop * (sizeof(struct CE_src_desc)));
2571 		CE_IPA_RING_INIT(ce_desc);
2572 	}
2573 
2574 	/* Get BAR address */
2575 	hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);
2576 
2577 	*ce_sr = CE_state->scn->ipa_ce_ring;
2578 	*ce_sr_ring_size = (uint32_t)(CE_state->src_ring->nentries *
2579 		sizeof(struct CE_src_desc));
2580 	*ce_reg_paddr = phy_mem_base + CE_BASE_ADDRESS(CE_state->id) +
2581 			SR_WR_INDEX_ADDRESS;
2582 }
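
/*
 * Illustrative caller sketch (an assumption, not code from this file):
 * the IPA glue is expected to fetch the source-ring resources roughly as
 *
 *	qdf_shared_mem_t *ce_sr;
 *	uint32_t ce_sr_ring_size;
 *	qdf_dma_addr_t ce_reg_paddr;
 *
 *	ce_ipa_get_resource(ce_tx_handle, &ce_sr, &ce_sr_ring_size,
 *			    &ce_reg_paddr);
 *
 * where ce_tx_handle is a placeholder for the uplink data CE handle.
 */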
2583 #endif /* IPA_OFFLOAD */
2584 
2585 static bool ce_check_int_watermark(struct CE_state *CE_state,
2586 				   unsigned int *flags)
2587 {
2588 	uint32_t ce_int_status;
2589 	uint32_t ctrl_addr = CE_state->ctrl_addr;
2590 	struct hif_softc *scn = CE_state->scn;
2591 
2592 	ce_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
2593 	if (ce_int_status & CE_WATERMARK_MASK) {
2594 		/* Convert HW IS bits to software flags */
2595 		*flags =
2596 			(ce_int_status & CE_WATERMARK_MASK) >>
2597 			CE_WM_SHFT;
2598 		return true;
2599 	}
2600 
2601 	return false;
2602 }
2603 
2604 static void ce_legacy_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
2605 			struct CE_ring_state *src_ring,
2606 			struct CE_attr *attr)
2607 {
2608 	uint32_t ctrl_addr;
2609 	uint64_t dma_addr;
2610 
2611 	QDF_ASSERT(ce_id < scn->ce_count);
2612 	ctrl_addr = CE_BASE_ADDRESS(ce_id);
2613 
2614 	src_ring->hw_index =
2615 		CE_SRC_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
2616 	src_ring->sw_index = src_ring->hw_index;
2617 	src_ring->write_index =
2618 		CE_SRC_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
2619 	dma_addr = src_ring->base_addr_CE_space;
2620 	CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
2621 			(uint32_t)(dma_addr & 0xFFFFFFFF));
2622 
2623 	/* if SR_BA_ADDRESS_HIGH register exists */
2624 	if (is_register_supported(SR_BA_ADDRESS_HIGH)) {
2625 		uint32_t tmp;
2626 
2627 		tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET(
2628 				scn, ctrl_addr);
2629 		tmp &= ~0x1F;
2630 		dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
2631 		CE_SRC_RING_BASE_ADDR_HIGH_SET(scn,
2632 				ctrl_addr, (uint32_t)dma_addr);
2633 	}
2634 	CE_SRC_RING_SZ_SET(scn, ctrl_addr, src_ring->nentries);
2635 	CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max);
2636 #ifdef BIG_ENDIAN_HOST
2637 	/* Enable source ring byte swap for big endian host */
2638 	CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
2639 #endif
2640 	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
2641 	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, src_ring->nentries);
2642 
2643 }
2644 
2645 static void ce_legacy_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id,
2646 				struct CE_ring_state *dest_ring,
2647 				struct CE_attr *attr)
2648 {
2649 	uint32_t ctrl_addr;
2650 	uint64_t dma_addr;
2651 
2652 	QDF_ASSERT(ce_id < scn->ce_count);
2653 	ctrl_addr = CE_BASE_ADDRESS(ce_id);
2654 	dest_ring->sw_index =
2655 		CE_DEST_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
2656 	dest_ring->write_index =
2657 		CE_DEST_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
2658 	dma_addr = dest_ring->base_addr_CE_space;
2659 	CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
2660 			(uint32_t)(dma_addr & 0xFFFFFFFF));
2661 
2662 	/* if DR_BA_ADDRESS_HIGH exists */
2663 	if (is_register_supported(DR_BA_ADDRESS_HIGH)) {
2664 		uint32_t tmp;
2665 
2666 		tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn,
2667 				ctrl_addr);
2668 		tmp &= ~0x1F;
2669 		dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
2670 		CE_DEST_RING_BASE_ADDR_HIGH_SET(scn,
2671 				ctrl_addr, (uint32_t)dma_addr);
2672 	}
2673 
2674 	CE_DEST_RING_SZ_SET(scn, ctrl_addr, dest_ring->nentries);
2675 #ifdef BIG_ENDIAN_HOST
2676 	/* Enable Dest ring byte swap for big endian host */
2677 	CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
2678 #endif
2679 	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
2680 	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, dest_ring->nentries);
2681 }
2682 
2683 static uint32_t ce_get_desc_size_legacy(uint8_t ring_type)
2684 {
2685 	switch (ring_type) {
2686 	case CE_RING_SRC:
2687 		return sizeof(struct CE_src_desc);
2688 	case CE_RING_DEST:
2689 		return sizeof(struct CE_dest_desc);
2690 	case CE_RING_STATUS:
2691 		qdf_assert(0);
2692 		return 0;
2693 	default:
2694 		return 0;
2695 	}
2696 
2697 	return 0;
2698 }
2699 
2700 static int ce_ring_setup_legacy(struct hif_softc *scn, uint8_t ring_type,
2701 		uint32_t ce_id, struct CE_ring_state *ring,
2702 		struct CE_attr *attr)
2703 {
2704 	int status = Q_TARGET_ACCESS_BEGIN(scn);
2705 
2706 	if (status < 0)
2707 		goto out;
2708 
2709 
2710 	switch (ring_type) {
2711 	case CE_RING_SRC:
2712 		ce_legacy_src_ring_setup(scn, ce_id, ring, attr);
2713 		break;
2714 	case CE_RING_DEST:
2715 		ce_legacy_dest_ring_setup(scn, ce_id, ring, attr);
2716 		break;
2717 	case CE_RING_STATUS:
2718 	default:
2719 		qdf_assert(0);
2720 		break;
2721 	}
2722 
2723 	Q_TARGET_ACCESS_END(scn);
2724 out:
2725 	return status;
2726 }
2727 
2728 static void ce_prepare_shadow_register_v2_cfg_legacy(struct hif_softc *scn,
2729 			    struct pld_shadow_reg_v2_cfg **shadow_config,
2730 			    int *num_shadow_registers_configured)
2731 {
2732 	*num_shadow_registers_configured = 0;
2733 	*shadow_config = NULL;
2734 }
2735 
2736 struct ce_ops ce_service_legacy = {
2737 	.ce_get_desc_size = ce_get_desc_size_legacy,
2738 	.ce_ring_setup = ce_ring_setup_legacy,
2739 	.ce_sendlist_send = ce_sendlist_send_legacy,
2740 	.ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_legacy,
2741 	.ce_revoke_recv_next = ce_revoke_recv_next_legacy,
2742 	.ce_cancel_send_next = ce_cancel_send_next_legacy,
2743 	.ce_recv_buf_enqueue = ce_recv_buf_enqueue_legacy,
2744 	.ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_legacy,
2745 	.ce_send_nolock = ce_send_nolock_legacy,
2746 	.watermark_int = ce_check_int_watermark,
2747 	.ce_completed_send_next_nolock = ce_completed_send_next_nolock_legacy,
2748 	.ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_legacy,
2749 	.ce_send_entries_done_nolock = ce_send_entries_done_nolock_legacy,
2750 	.ce_prepare_shadow_register_v2_cfg =
2751 		ce_prepare_shadow_register_v2_cfg_legacy,
2752 };
2753 
2754 
2755 struct ce_ops *ce_services_legacy()
2756 {
2757 	return &ce_service_legacy;
2758 }
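
/*
 * Hook-up sketch (an assumption; the attach code is outside this file):
 * during HIF/CE attach the service table returned above is stored in the
 * per-HIF state, e.g.
 *
 *	hif_state->ce_services = ce_services_legacy();
 *
 * after which the generic wrappers in this file (ce_cancel_send_next(),
 * ce_completed_send_next(), ...) dispatch through hif_state->ce_services.
 */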
2759 
2760 #if HIF_CE_DEBUG_DATA_BUF
2761 /**
2762  * hif_dump_desc_data_buf() - record ce descriptor events
2763  * @buf: buffer to copy to
2764  * @pos: Current position till which the buf is filled
2765  * @data: Data to be copied
2766  * @data_len: Length of the data to be copied
2767  */
2768 static uint32_t hif_dump_desc_data_buf(uint8_t *buf, ssize_t pos,
2769 					uint8_t *data, uint32_t data_len)
2770 {
2771 	pos += snprintf(buf + pos, PAGE_SIZE - pos, "Data:(Max%dBytes)\n",
2772 			CE_DEBUG_MAX_DATA_BUF_SIZE);
2773 
2774 	if ((data_len > 0) && data) {
2775 		if (data_len < 16) {
2776 			hex_dump_to_buffer(data,
2777 						CE_DEBUG_DATA_PER_ROW,
2778 						16, 1, buf + pos,
2779 						(ssize_t)PAGE_SIZE - pos,
2780 						false);
2781 			pos += CE_DEBUG_PRINT_BUF_SIZE(data_len);
2782 			pos += snprintf(buf + pos, PAGE_SIZE - pos, "\n");
2783 		} else {
2784 			uint32_t rows = (data_len / 16) + 1;
2785 			uint32_t row = 0;
2786 
2787 			for (row = 0; row < rows; row++) {
2788 				hex_dump_to_buffer(data + (row * 16),
2789 							CE_DEBUG_DATA_PER_ROW,
2790 							16, 1, buf + pos,
2791 							(ssize_t)PAGE_SIZE
2792 							- pos, false);
2793 				pos +=
2794 				CE_DEBUG_PRINT_BUF_SIZE(CE_DEBUG_DATA_PER_ROW);
2795 				pos += snprintf(buf + pos, PAGE_SIZE - pos,
2796 						"\n");
2797 			}
2798 		}
2799 	}
2800 
2801 	return pos;
2802 }
2803 #endif
2804 
2805 /*
2806  * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) also needs to be
2807  * checked here
2808  */
2809 #if HIF_CE_DEBUG_DATA_BUF
2810 static const char *ce_event_type_to_str(enum hif_ce_event_type type)
2811 {
2812 	switch (type) {
2813 	case HIF_RX_DESC_POST:
2814 		return "HIF_RX_DESC_POST";
2815 	case HIF_RX_DESC_COMPLETION:
2816 		return "HIF_RX_DESC_COMPLETION";
2817 	case HIF_TX_GATHER_DESC_POST:
2818 		return "HIF_TX_GATHER_DESC_POST";
2819 	case HIF_TX_DESC_POST:
2820 		return "HIF_TX_DESC_POST";
2821 	case HIF_TX_DESC_SOFTWARE_POST:
2822 		return "HIF_TX_DESC_SOFTWARE_POST";
2823 	case HIF_TX_DESC_COMPLETION:
2824 		return "HIF_TX_DESC_COMPLETION";
2825 	case FAST_RX_WRITE_INDEX_UPDATE:
2826 		return "FAST_RX_WRITE_INDEX_UPDATE";
2827 	case FAST_RX_SOFTWARE_INDEX_UPDATE:
2828 		return "FAST_RX_SOFTWARE_INDEX_UPDATE";
2829 	case FAST_TX_WRITE_INDEX_UPDATE:
2830 		return "FAST_TX_WRITE_INDEX_UPDATE";
2831 	case FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE:
2832 		return "FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE";
2833 	case FAST_TX_SOFTWARE_INDEX_UPDATE:
2834 		return "FAST_TX_SOFTWARE_INDEX_UPDATE";
2835 	case RESUME_WRITE_INDEX_UPDATE:
2836 		return "RESUME_WRITE_INDEX_UPDATE";
2837 	case HIF_IRQ_EVENT:
2838 		return "HIF_IRQ_EVENT";
2839 	case HIF_CE_TASKLET_ENTRY:
2840 		return "HIF_CE_TASKLET_ENTRY";
2841 	case HIF_CE_TASKLET_RESCHEDULE:
2842 		return "HIF_CE_TASKLET_RESCHEDULE";
2843 	case HIF_CE_TASKLET_EXIT:
2844 		return "HIF_CE_TASKLET_EXIT";
2845 	case HIF_CE_REAP_ENTRY:
2846 		return "HIF_CE_REAP_ENTRY";
2847 	case HIF_CE_REAP_EXIT:
2848 		return "HIF_CE_REAP_EXIT";
2849 	case NAPI_SCHEDULE:
2850 		return "NAPI_SCHEDULE";
2851 	case NAPI_POLL_ENTER:
2852 		return "NAPI_POLL_ENTER";
2853 	case NAPI_COMPLETE:
2854 		return "NAPI_COMPLETE";
2855 	case NAPI_POLL_EXIT:
2856 		return "NAPI_POLL_EXIT";
2857 	case HIF_RX_NBUF_ALLOC_FAILURE:
2858 		return "HIF_RX_NBUF_ALLOC_FAILURE";
2859 	case HIF_RX_NBUF_MAP_FAILURE:
2860 		return "HIF_RX_NBUF_MAP_FAILURE";
2861 	case HIF_RX_NBUF_ENQUEUE_FAILURE:
2862 		return "HIF_RX_NBUF_ENQUEUE_FAILURE";
2863 	default:
2864 		return "invalid";
2865 	}
2866 }
2867 
2868 /**
2869  * hif_dump_desc_event() - record ce descriptor events
2870  * @buf: Buffer to which to be copied
2871  * @ce_id: which ce is the event occurring on
2872  * @index: index that the descriptor was/will be at.
2873  */
2874 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf)
2875 {
2876 	struct hif_ce_desc_event *event;
2877 	uint64_t secs, usecs;
2878 	ssize_t len = 0;
2879 	struct ce_desc_hist *ce_hist = NULL;
2880 	struct hif_ce_desc_event *hist_ev = NULL;
2881 
2882 	if (!scn)
2883 		return -EINVAL;
2884 
2885 	ce_hist = &scn->hif_ce_desc_hist;
2886 
2887 	hist_ev =
2888 		(struct hif_ce_desc_event *)ce_hist->hist_ev[ce_hist->hist_id];
2889 
2890 	if (!hist_ev) {
2891 		qdf_print("Low Memory\n");
2892 		return -EINVAL;
2893 	}
2894 
2895 	event = &hist_ev[ce_hist->hist_index];
2896 
2897 	if ((ce_hist->hist_id >= CE_COUNT_MAX) ||
2898 		(ce_hist->hist_index >= HIF_CE_HISTORY_MAX)) {
2899 		qdf_print("Invalid values\n");
2900 		return -EINVAL;
2901 	}
2902 
2903 	qdf_log_timestamp_to_secs(event->time, &secs, &usecs);
2904 
2905 	len += snprintf(buf, PAGE_SIZE - len,
2906 			"\nTime:%lld.%06lld, CE:%d, EventType: %s, EventIndex: %d\nDataAddr=%p",
2907 			secs, usecs, ce_hist->hist_id,
2908 			ce_event_type_to_str(event->type),
2909 			event->index, event->memory);
2910 #if HIF_CE_DEBUG_DATA_BUF
2911 	len += snprintf(buf + len, PAGE_SIZE - len, ", Data len=%d",
2912 			event->actual_data_len);
2913 #endif
2914 
2915 	len += snprintf(buf + len, PAGE_SIZE - len, "\nCE descriptor: ");
2916 
2917 	hex_dump_to_buffer(&event->descriptor, sizeof(union ce_desc),
2918 				16, 1, buf + len,
2919 				(ssize_t)PAGE_SIZE - len, false);
2920 	len += CE_DEBUG_PRINT_BUF_SIZE(sizeof(union ce_desc));
2921 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
2922 
2923 #if HIF_CE_DEBUG_DATA_BUF
2924 	if (ce_hist->data_enable[ce_hist->hist_id])
2925 		len = hif_dump_desc_data_buf(buf, len, event->data,
2926 						(event->actual_data_len <
2927 						 CE_DEBUG_MAX_DATA_BUF_SIZE) ?
2928 						event->actual_data_len :
2929 						CE_DEBUG_MAX_DATA_BUF_SIZE);
2930 #endif /*HIF_CE_DEBUG_DATA_BUF*/
2931 
2932 	len += snprintf(buf + len, PAGE_SIZE - len, "END\n");
2933 
2934 	return len;
2935 }
2936 
2937 /*
2938  * hif_input_desc_trace_buf_index() -
2939  * API to set the CE id and CE debug storage buffer index
2940  *
2941  * @scn: hif context
2942  * @buf: data got from the user
2943  * @size: size of the input data
2944  *
2945  * Return: total length consumed
2946  */
2947 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
2948 					const char *buf, size_t size)
2949 {
2950 	struct ce_desc_hist *ce_hist = NULL;
2951 
2952 	if (!scn)
2953 		return -EINVAL;
2954 
2955 	ce_hist = &scn->hif_ce_desc_hist;
2956 
2957 	if (!size) {
2958 		pr_err("%s: Invalid input buffer.\n", __func__);
2959 		return -EINVAL;
2960 	}
2961 
2962 	if (sscanf(buf, "%d %d", &ce_hist->hist_id,
2963 			&ce_hist->hist_index) != 2) {
2964 		pr_err("%s: Invalid input value.\n", __func__);
2965 		return -EINVAL;
2966 	}
2967 	if ((ce_hist->hist_id >= CE_COUNT_MAX) ||
2968 	   (ce_hist->hist_index >= HIF_CE_HISTORY_MAX)) {
2969 		qdf_print("Invalid values\n");
2970 		return -EINVAL;
2971 	}
2972 
2973 	return size;
2974 }
2975 
2976 #endif  /*For MCL,  HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */
2977 
2978 #if HIF_CE_DEBUG_DATA_BUF
2979 /*
2980  * hif_ce_en_desc_hist() -
2981  * API to enable recording the CE desc history
2982  *
2983  * @scn: hif context
2984  * @buf: data got from the user
2985  * @size: size of the input data
2986  *
2987  * Starts or stops recording of the ce desc data history
2988  *
2989  * Return: total length consumed
2990  */
2991 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn, const char *buf, size_t size)
2992 {
2993 	struct ce_desc_hist *ce_hist = NULL;
2994 	uint32_t cfg = 0;
2995 	uint32_t ce_id = 0;
2996 
2997 	if (!scn)
2998 		return -EINVAL;
2999 
3000 	ce_hist = &scn->hif_ce_desc_hist;
3001 
3002 	if (!size) {
3003 		pr_err("%s: Invalid input buffer.\n", __func__);
3004 		return -EINVAL;
3005 	}
3006 
3007 	if (sscanf(buf, "%d %d", &ce_id, &cfg) != 2) {
3008 		pr_err("%s: Invalid input: Enter CE Id<sp><1/0>.\n", __func__);
3009 		return -EINVAL;
3010 	}
3011 	if (ce_id >= CE_COUNT_MAX) {
3012 		qdf_print("Invalid value CE Id\n");
3013 		return -EINVAL;
3014 	}
3015 
3016 	if ((cfg > 1 || cfg < 0)) {
3017 		qdf_print("Invalid values: enter 0 or 1\n");
3018 		return -EINVAL;
3019 	}
3020 
3021 	if (!ce_hist->hist_ev[ce_id])
3022 		return -EINVAL;
3023 
3024 	qdf_mutex_acquire(&ce_dbg_datamem_lock[ce_id]);
3025 	if (cfg == 1) {
3026 		if (ce_hist->data_enable[ce_id] == 1) {
3027 			qdf_print("\nAlready Enabled\n");
3028 		} else {
3029 			if (alloc_mem_ce_debug_hist_data(scn, ce_id)
3030 							== QDF_STATUS_E_NOMEM){
3031 				ce_hist->data_enable[ce_id] = 0;
3032 				qdf_print("%s:Memory Alloc failed\n", __func__);
3033 			} else
3034 				ce_hist->data_enable[ce_id] = 1;
3035 		}
3036 	} else if (cfg == 0) {
3037 		if (ce_hist->data_enable[ce_id] == 0) {
3038 			qdf_print("\nAlready Disabled\n");
3039 		} else {
3040 			ce_hist->data_enable[ce_id] = 0;
3041 			free_mem_ce_debug_hist_data(scn, ce_id);
3042 		}
3043 	}
3044 	qdf_mutex_release(&ce_dbg_datamem_lock[ce_id]);
3045 
3046 	return size;
3047 }
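
/*
 * Illustrative usage (an assumption; the sysfs node name is defined
 * outside this file and is hypothetical here). The input format is
 * "<CE id> <1|0>", as the error message above states, e.g. to enable
 * data history recording on CE 5:
 *
 *	echo "5 1" > /sys/.../ce_desc_data_history
 */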
3048 
3049 /*
3050  * hif_disp_ce_enable_desc_data_hist() -
3051  * API to display value of data_enable
3052  *
3053  * @scn: hif context
3054  * @buf: buffer to which the per-CE data_enable values are
3055  *       copied
3056  *
3057  * Return: total length copied
3058  */
3059 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf)
3060 {
3061 	ssize_t len = 0;
3062 	uint32_t ce_id = 0;
3063 	struct ce_desc_hist *ce_hist = NULL;
3064 
3065 	if (!scn)
3066 		return -EINVAL;
3067 
3068 	ce_hist = &scn->hif_ce_desc_hist;
3069 
3070 	for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) {
3071 		len += snprintf(buf + len, PAGE_SIZE - len, " CE%d: %d\n",
3072 				ce_id, ce_hist->data_enable[ce_id]);
3073 	}
3074 
3075 	return len;
3076 }
3077 #endif /* HIF_CE_DEBUG_DATA_BUF */
3078