xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_service.c (revision 3149adf58a329e17232a4c0e58d460d025edd55a)
1 /*
2  * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
3  *
4  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5  *
6  *
7  * Permission to use, copy, modify, and/or distribute this software for
8  * any purpose with or without fee is hereby granted, provided that the
9  * above copyright notice and this permission notice appear in all
10  * copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19  * PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*
23  * This file was originally distributed by Qualcomm Atheros, Inc.
24  * under proprietary terms before Copyright ownership was assigned
25  * to the Linux Foundation.
26  */
27 
28 #include "hif.h"
29 #include "hif_io32.h"
30 #include "ce_api.h"
31 #include "ce_main.h"
32 #include "ce_internal.h"
33 #include "ce_reg.h"
34 #include "qdf_lock.h"
35 #include "regtable.h"
36 #include "hif_main.h"
37 #include "hif_debug.h"
38 #include "hif_napi.h"
39 #include "qdf_module.h"
40 
41 #ifdef IPA_OFFLOAD
42 #ifdef QCA_WIFI_3_0
43 #define CE_IPA_RING_INIT(ce_desc)                       \
44 	do {                                            \
45 		ce_desc->gather = 0;                    \
46 		ce_desc->enable_11h = 0;                \
47 		ce_desc->meta_data_low = 0;             \
48 		ce_desc->packet_result_offset = 64;     \
49 		ce_desc->toeplitz_hash_enable = 0;      \
50 		ce_desc->addr_y_search_disable = 0;     \
51 		ce_desc->addr_x_search_disable = 0;     \
52 		ce_desc->misc_int_disable = 0;          \
53 		ce_desc->target_int_disable = 0;        \
54 		ce_desc->host_int_disable = 0;          \
55 		ce_desc->dest_byte_swap = 0;            \
56 		ce_desc->byte_swap = 0;                 \
57 		ce_desc->type = 2;                      \
58 		ce_desc->tx_classify = 1;               \
59 		ce_desc->buffer_addr_hi = 0;            \
60 		ce_desc->meta_data = 0;                 \
61 		ce_desc->nbytes = 128;                  \
62 	} while (0)
63 #else
64 #define CE_IPA_RING_INIT(ce_desc)                       \
65 	do {                                            \
66 		ce_desc->byte_swap = 0;                 \
67 		ce_desc->nbytes = 60;                   \
68 		ce_desc->gather = 0;                    \
69 	} while (0)
70 #endif /* QCA_WIFI_3_0 */
71 #endif /* IPA_OFFLOAD */
72 
73 #ifndef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
74 #define DATA_CE_UPDATE_SWINDEX(x, scn, addr)				\
75 	do {                                            		\
76 		x = CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, addr); 	\
77 	} while (0)
78 #else
79 #define DATA_CE_UPDATE_SWINDEX(x, scn, addr)
80 #endif
81 
82 static int war1_allow_sleep;
83 /* io32 write workaround */
84 static int hif_ce_war1;
85 
86 /**
87  * hif_ce_war_disable() - disable ce war globally
88  */
89 void hif_ce_war_disable(void)
90 {
91 	hif_ce_war1 = 0;
92 }
93 
94 /**
95  * hif_ce_war_enable() - enable ce war globally
96  */
97 void hif_ce_war_enable(void)
98 {
99 	hif_ce_war1 = 1;
100 }
101 
102 /*
103  * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) also needs to be
104  * checked here
105  */
106 #if HIF_CE_DEBUG_DATA_BUF
107 
108 #define CE_DEBUG_PRINT_BUF_SIZE(x) (((x) * 3) - 1)
109 #define CE_DEBUG_DATA_PER_ROW 16
110 
111 qdf_mutex_t ce_dbg_datamem_lock[CE_COUNT_MAX];
112 static const char *ce_event_type_to_str(enum hif_ce_event_type type);
113 
114 /**
115  * get_next_record_index() - get the next record index
116  * @table_index: atomic index variable to increment
117  * @array_size: array size of the circular buffer
118  *
119  * Increment the atomic index and reserve the value.
120  * Takes care of buffer wrap.
121  * Guaranteed to be thread safe as long as fewer than array_size contexts
122  * try to access the array.  If there are more than array_size contexts
123  * trying to access the array, full locking of the recording process would
124  * be needed to have sane logging.
125  */
126 static int get_next_record_index(qdf_atomic_t *table_index, int array_size)
127 {
128 	int record_index = qdf_atomic_inc_return(table_index);
129 
130 	if (record_index == array_size)
131 		qdf_atomic_sub(array_size, table_index);
132 
133 	while (record_index >= array_size)
134 		record_index -= array_size;
135 	return record_index;
136 }
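
/*
 * Worked example (illustrative only): with array_size = 4 and the atomic
 * table_index currently at 3, qdf_atomic_inc_return() yields 4, the atomic
 * is wound back by array_size, and the returned record_index wraps to 0.
 * Concurrent callers each reserve a distinct slot as long as fewer than
 * array_size of them race at once, per the note above.
 */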
137 
138 #if HIF_CE_DEBUG_DATA_BUF
139 /**
140  * hif_ce_desc_data_record() - Record data pointed to by the CE descriptor
141  * @event: structure detailing a ce event
142  * @len: length of the data
143  * Return: none
144  */
145 static void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len)
146 {
147 	uint8_t *data = NULL;
148 
149 	if (!event->data)
150 		return;
151 
152 	if (event->memory && len > 0)
153 		data = qdf_nbuf_data((qdf_nbuf_t)event->memory);
154 
155 	event->actual_data_len = 0;
156 	qdf_mem_zero(event->data, CE_DEBUG_MAX_DATA_BUF_SIZE);
157 
158 	if (data && len > 0) {
159 		qdf_mem_copy(event->data, data,
160 				((len < CE_DEBUG_MAX_DATA_BUF_SIZE) ?
161 				 len : CE_DEBUG_MAX_DATA_BUF_SIZE));
162 		event->actual_data_len = len;
163 	}
164 }
165 #endif
166 
167 /**
168  * hif_record_ce_desc_event() - record ce descriptor events
169  * @scn: hif_softc
170  * @ce_id: which ce is the event occurring on
171  * @type: what happened
172  * @descriptor: pointer to the descriptor posted/completed
173  * @memory: virtual address of buffer related to the descriptor
174  * @index: index that the descriptor was/will be at.
 * @len: length of the data pointed to by the descriptor
175  */
176 void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
177 				enum hif_ce_event_type type,
178 				union ce_desc *descriptor,
179 				void *memory, int index,
180 				int len)
181 {
182 	int record_index;
183 	struct hif_ce_desc_event *event;
184 
185 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
186 	struct hif_ce_desc_event *hist_ev =
187 			(struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id];
188 
189 	if (ce_id >= CE_COUNT_MAX)
190 		return;
191 
192 	if (!ce_hist->enable[ce_id])
193 		return;
194 
195 	if (!hist_ev)
196 		return;
197 
198 	record_index = get_next_record_index(
199 			&ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX);
200 
201 	event = &hist_ev[record_index];
202 
203 	event->type = type;
204 	event->time = qdf_get_log_timestamp();
205 
206 	if (descriptor != NULL) {
207 		qdf_mem_copy(&event->descriptor, descriptor, sizeof(union ce_desc));
208 	} else {
209 		qdf_mem_zero(&event->descriptor, sizeof(union ce_desc));
210 	}
211 
212 	event->memory = memory;
213 	event->index = index;
214 
215 #if HIF_CE_DEBUG_DATA_BUF
216 	if (ce_hist->data_enable[ce_id])
217 		hif_ce_desc_data_record(event, len);
218 #endif
219 }
220 qdf_export_symbol(hif_record_ce_desc_event);
221 
222 /**
223  * ce_init_ce_desc_event_log() - initialize the ce event log
 * @scn: hif context
224  * @ce_id: copy engine id for which we are initializing the log
225  * @size: size of array to dedicate
226  *
227  * Currently the passed size is ignored in favor of a precompiled value.
228  */
229 void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size)
230 {
231 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
232 	qdf_atomic_init(&ce_hist->history_index[ce_id]);
233 	qdf_mutex_create(&ce_dbg_datamem_lock[ce_id]);
234 }
235 
236 /**
237  * ce_deinit_ce_desc_event_log() - deinitialize the ce event log
 * @scn: hif context
238  * @ce_id: copy engine id for which we are deinitializing the log
239  *
240  */
241 inline void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
242 {
243 	qdf_mutex_destroy(&ce_dbg_datamem_lock[ce_id]);
244 }
245 
246 #else /* Note: For MCL, (HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
247 void hif_record_ce_desc_event(struct hif_softc *scn,
248 		int ce_id, enum hif_ce_event_type type,
249 		union ce_desc *descriptor, void *memory,
250 		int index, int len)
251 {
252 }
253 qdf_export_symbol(hif_record_ce_desc_event);
254 
255 inline void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id,
256 					int size)
257 {
258 }
259 
260 void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
261 {
262 }
263 #endif /* Note: for MCL, HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */
264 
265 #ifdef NAPI_YIELD_BUDGET_BASED
266 bool hif_ce_service_should_yield(struct hif_softc *scn,
267 				 struct CE_state *ce_state)
268 {
269 	bool yield =  hif_max_num_receives_reached(scn, ce_state->receive_count);
270 	return yield;
271 }
272 #else
273 /**
274  * hif_ce_service_should_yield() - return true if the service is hogging the cpu
275  * @scn: hif context
276  * @ce_state: context of the copy engine being serviced
277  *
278  * Return: true if the service should yield
279  */
280 bool hif_ce_service_should_yield(struct hif_softc *scn,
281 				 struct CE_state *ce_state)
282 {
283 	bool yield, time_limit_reached, rxpkt_thresh_reached = 0;
284 
285 	time_limit_reached =
286 		sched_clock() > ce_state->ce_service_yield_time ? 1 : 0;
287 
288 	if (!time_limit_reached)
289 		rxpkt_thresh_reached = hif_max_num_receives_reached
290 					(scn, ce_state->receive_count);
291 
292 	yield =  time_limit_reached || rxpkt_thresh_reached;
293 
294 	if (yield && ce_state->htt_rx_data)
295 		hif_napi_update_yield_stats(ce_state,
296 					    time_limit_reached,
297 					    rxpkt_thresh_reached);
298 	return yield;
299 }
300 #endif
301 /*
302  * Support for Copy Engine hardware, which is mainly used for
303  * communication between Host and Target over a PCIe interconnect.
304  */
305 
306 /*
307  * A single CopyEngine (CE) comprises two "rings":
308  *   a source ring
309  *   a destination ring
310  *
311  * Each ring consists of a number of descriptors which specify
312  * an address, length, and meta-data.
313  *
314  * Typically, one side of the PCIe interconnect (Host or Target)
315  * controls one ring and the other side controls the other ring.
316  * The source side chooses when to initiate a transfer and it
317  * chooses what to send (buffer address, length). The destination
318  * side keeps a supply of "anonymous receive buffers" available and
319  * it handles incoming data as it arrives (when the destination
320  * receives an interrupt).
321  *
322  * The sender may send a simple buffer (address/length) or it may
323  * send a small list of buffers.  When a small list is sent, hardware
324  * "gathers" these and they end up in a single destination buffer
325  * with a single interrupt.
326  *
327  * There are several "contexts" managed by this layer, perhaps more
328  * than strictly needed. These are provided mainly for
329  * maximum flexibility and especially to facilitate a simpler HIF
330  * implementation. There are per-CopyEngine recv, send, and watermark
331  * contexts. These are supplied by the caller when a recv, send,
332  * or watermark handler is established and they are echoed back to
333  * the caller when the respective callbacks are invoked. There is
334  * also a per-transfer context supplied by the caller when a buffer
335  * (or sendlist) is sent and when a buffer is enqueued for recv.
336  * These per-transfer contexts are echoed back to the caller when
337  * the buffer is sent/received.
338  * Target TX hash result: toeplitz_hash_result
339  */
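
/*
 * Illustrative sketch (compiled out): how a caller might use the send and
 * receive APIs described above. The function, handle and buffer names are
 * hypothetical placeholders, not part of the driver.
 */
#if 0
static void example_ce_usage(struct CE_handle *send_ce,
			     struct CE_handle *recv_ce,
			     qdf_nbuf_t tx_nbuf, qdf_dma_addr_t tx_paddr,
			     uint32_t tx_len,
			     qdf_nbuf_t rx_nbuf, qdf_dma_addr_t rx_paddr)
{
	/* Post one source buffer; tx_nbuf is the per-transfer context that
	 * is echoed back to the send completion callback.
	 */
	if (ce_send(send_ce, tx_nbuf, tx_paddr, tx_len,
		    0 /* transfer_id */, 0 /* flags */,
		    0 /* user_flags */) != QDF_STATUS_SUCCESS)
		HIF_ERROR("%s: ce_send failed", __func__);

	/* Keep the destination ring supplied with anonymous receive
	 * buffers; rx_nbuf is echoed back when data lands in the buffer.
	 */
	if (ce_recv_buf_enqueue(recv_ce, rx_nbuf, rx_paddr) !=
	    QDF_STATUS_SUCCESS)
		HIF_ERROR("%s: ce_recv_buf_enqueue failed", __func__);
}
#endif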
340 
341 /*
342  * Guts of ce_send, used by both ce_send and ce_sendlist_send.
343  * The caller takes responsibility for any needed locking.
344  */
345 
346 static
347 void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
348 				   u32 ctrl_addr, unsigned int write_index)
349 {
350 	if (hif_ce_war1) {
351 		void __iomem *indicator_addr;
352 
353 		indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;
354 
355 		if (!war1_allow_sleep
356 		    && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
357 			hif_write32_mb(indicator_addr,
358 				      (CDC_WAR_MAGIC_STR | write_index));
359 		} else {
360 			unsigned long irq_flags;
361 
362 			local_irq_save(irq_flags);
363 			hif_write32_mb(indicator_addr, 1);
364 
365 			/*
366 			 * PCIE write waits for ACK in IPQ8K, there is no
367 			 * need to read back value.
368 			 */
369 			(void)hif_read32_mb(indicator_addr);
370 			(void)hif_read32_mb(indicator_addr); /* conservative */
371 
372 			CE_SRC_RING_WRITE_IDX_SET(scn,
373 						  ctrl_addr, write_index);
374 
375 			hif_write32_mb(indicator_addr, 0);
376 			local_irq_restore(irq_flags);
377 		}
378 	} else {
379 		CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
380 	}
381 }
382 
383 #ifdef HIF_CONFIG_SLUB_DEBUG_ON
384 /**
385  * ce_validate_nbytes() - validate nbytes for slub builds on tx descriptors
386  * @nbytes: nbytes value being written into a send descriptor
387  * @ce_state: context of the copy engine
388  *
389  * nbytes should be non-zero and less than max configured for the copy engine
390  *
391  * Return: none
392  */
393 static void ce_validate_nbytes(uint32_t nbytes, struct CE_state *ce_state)
394 {
395 	if (nbytes <= 0 || nbytes > ce_state->src_sz_max)
396 		QDF_BUG(0);
397 }
398 #else
399 static void ce_validate_nbytes(uint32_t nbytes, struct CE_state *ce_state)
400 {
401 }
402 #endif
403 
404 static int
405 ce_send_nolock_legacy(struct CE_handle *copyeng,
406 			   void *per_transfer_context,
407 			   qdf_dma_addr_t buffer,
408 			   uint32_t nbytes,
409 			   uint32_t transfer_id,
410 			   uint32_t flags,
411 			   uint32_t user_flags)
412 {
413 	int status;
414 	struct CE_state *CE_state = (struct CE_state *)copyeng;
415 	struct CE_ring_state *src_ring = CE_state->src_ring;
416 	uint32_t ctrl_addr = CE_state->ctrl_addr;
417 	unsigned int nentries_mask = src_ring->nentries_mask;
418 	unsigned int sw_index = src_ring->sw_index;
419 	unsigned int write_index = src_ring->write_index;
420 	uint64_t dma_addr = buffer;
421 	struct hif_softc *scn = CE_state->scn;
422 
423 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
424 		return QDF_STATUS_E_FAILURE;
425 	if (unlikely(CE_RING_DELTA(nentries_mask,
426 				write_index, sw_index - 1) <= 0)) {
427 		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
428 		Q_TARGET_ACCESS_END(scn);
429 		return QDF_STATUS_E_FAILURE;
430 	}
431 	{
432 		enum hif_ce_event_type event_type;
433 		struct CE_src_desc *src_ring_base =
434 			(struct CE_src_desc *)src_ring->base_addr_owner_space;
435 		struct CE_src_desc *shadow_base =
436 			(struct CE_src_desc *)src_ring->shadow_base;
437 		struct CE_src_desc *src_desc =
438 			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
439 		struct CE_src_desc *shadow_src_desc =
440 			CE_SRC_RING_TO_DESC(shadow_base, write_index);
441 
442 		/* Update the low 32 bits of the source descriptor address */
443 		shadow_src_desc->buffer_addr =
444 			(uint32_t)(dma_addr & 0xFFFFFFFF);
445 #ifdef QCA_WIFI_3_0
446 		shadow_src_desc->buffer_addr_hi =
447 			(uint32_t)((dma_addr >> 32) & 0x1F);
448 		user_flags |= shadow_src_desc->buffer_addr_hi;
449 		memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
450 			   sizeof(uint32_t));
451 #endif
452 		shadow_src_desc->target_int_disable = 0;
453 		shadow_src_desc->host_int_disable = 0;
454 
455 		shadow_src_desc->meta_data = transfer_id;
456 
457 		/*
458 		 * Set the swap bit if:
459 		 * typical sends on this CE are swapped (host is big-endian)
460 		 * and this send doesn't disable the swapping
461 		 * (data is not bytestream)
462 		 */
463 		shadow_src_desc->byte_swap =
464 			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
465 			 != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
466 		shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
467 		shadow_src_desc->nbytes = nbytes;
468 		ce_validate_nbytes(nbytes, CE_state);
469 
470 		*src_desc = *shadow_src_desc;
471 
472 		src_ring->per_transfer_context[write_index] =
473 			per_transfer_context;
474 
475 		/* Update Source Ring Write Index */
476 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
477 
478 		/* WORKAROUND */
479 		if (shadow_src_desc->gather) {
480 			event_type = HIF_TX_GATHER_DESC_POST;
481 		} else if (qdf_unlikely(CE_state->state != CE_RUNNING)) {
482 			event_type = HIF_TX_DESC_SOFTWARE_POST;
483 			CE_state->state = CE_PENDING;
484 		} else {
485 			event_type = HIF_TX_DESC_POST;
486 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
487 						      write_index);
488 		}
489 
490 		/* src_ring->write_index hasn't been updated even though
491 		 * the register has already been written to.
492 		 */
493 		hif_record_ce_desc_event(scn, CE_state->id, event_type,
494 			(union ce_desc *) shadow_src_desc, per_transfer_context,
495 			src_ring->write_index, nbytes);
496 
497 		src_ring->write_index = write_index;
498 		status = QDF_STATUS_SUCCESS;
499 	}
500 	Q_TARGET_ACCESS_END(scn);
501 	return status;
502 }
503 
504 int
505 ce_send(struct CE_handle *copyeng,
506 		void *per_transfer_context,
507 		qdf_dma_addr_t buffer,
508 		uint32_t nbytes,
509 		uint32_t transfer_id,
510 		uint32_t flags,
511 		uint32_t user_flag)
512 {
513 	struct CE_state *CE_state = (struct CE_state *)copyeng;
514 	int status;
515 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
516 
517 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
518 	status = hif_state->ce_services->ce_send_nolock(copyeng,
519 			per_transfer_context, buffer, nbytes,
520 			transfer_id, flags, user_flag);
521 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
522 
523 	return status;
524 }
525 
526 unsigned int ce_sendlist_sizeof(void)
527 {
528 	return sizeof(struct ce_sendlist);
529 }
530 
531 void ce_sendlist_init(struct ce_sendlist *sendlist)
532 {
533 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
534 
535 	sl->num_items = 0;
536 }
537 
538 int
539 ce_sendlist_buf_add(struct ce_sendlist *sendlist,
540 					qdf_dma_addr_t buffer,
541 					uint32_t nbytes,
542 					uint32_t flags,
543 					uint32_t user_flags)
544 {
545 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
546 	unsigned int num_items = sl->num_items;
547 	struct ce_sendlist_item *item;
548 
549 	if (num_items >= CE_SENDLIST_ITEMS_MAX) {
550 		QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
551 		return QDF_STATUS_E_RESOURCES;
552 	}
553 
554 	item = &sl->item[num_items];
555 	item->send_type = CE_SIMPLE_BUFFER_TYPE;
556 	item->data = buffer;
557 	item->u.nbytes = nbytes;
558 	item->flags = flags;
559 	item->user_flags = user_flags;
560 	sl->num_items = num_items + 1;
561 	return QDF_STATUS_SUCCESS;
562 }
563 
564 int
565 ce_sendlist_send(struct CE_handle *copyeng,
566 		 void *per_transfer_context,
567 		 struct ce_sendlist *sendlist, unsigned int transfer_id)
568 {
569 	struct CE_state *CE_state = (struct CE_state *)copyeng;
570 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
571 
572 	return hif_state->ce_services->ce_sendlist_send(copyeng,
573 			per_transfer_context, sendlist, transfer_id);
574 }
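
/*
 * Illustrative sketch (compiled out): building a two-fragment sendlist and
 * handing it to the copy engine. The function name, addresses and lengths
 * below are hypothetical placeholders.
 */
#if 0
static int example_sendlist_usage(struct CE_handle *send_ce,
				  void *per_transfer_ctx,
				  qdf_dma_addr_t hdr_paddr, uint32_t hdr_len,
				  qdf_dma_addr_t data_paddr, uint32_t data_len)
{
	struct ce_sendlist sendlist;
	int status;

	ce_sendlist_init(&sendlist);

	/* The CE layer marks all but the last item for hardware gather;
	 * callers only add the items.
	 */
	status = ce_sendlist_buf_add(&sendlist, hdr_paddr, hdr_len, 0, 0);
	if (status != QDF_STATUS_SUCCESS)
		return status;
	status = ce_sendlist_buf_add(&sendlist, data_paddr, data_len, 0, 0);
	if (status != QDF_STATUS_SUCCESS)
		return status;

	/* per_transfer_ctx is echoed back when the final item completes */
	return ce_sendlist_send(send_ce, per_transfer_ctx, &sendlist,
				0 /* transfer_id */);
}
#endif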
575 
576 static int
577 ce_sendlist_send_legacy(struct CE_handle *copyeng,
578 		 void *per_transfer_context,
579 		 struct ce_sendlist *sendlist, unsigned int transfer_id)
580 {
581 	int status = -ENOMEM;
582 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
583 	struct CE_state *CE_state = (struct CE_state *)copyeng;
584 	struct CE_ring_state *src_ring = CE_state->src_ring;
585 	unsigned int nentries_mask = src_ring->nentries_mask;
586 	unsigned int num_items = sl->num_items;
587 	unsigned int sw_index;
588 	unsigned int write_index;
589 	struct hif_softc *scn = CE_state->scn;
590 
591 	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));
592 
593 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
594 
595 	if (CE_state->scn->fastpath_mode_on && CE_state->htt_tx_data &&
596 	    Q_TARGET_ACCESS_BEGIN(scn) == 0) {
597 		src_ring->sw_index = CE_SRC_RING_READ_IDX_GET_FROM_DDR(
598 					       scn, CE_state->ctrl_addr);
599 		Q_TARGET_ACCESS_END(scn);
600 	}
601 
602 	sw_index = src_ring->sw_index;
603 	write_index = src_ring->write_index;
604 
605 	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >=
606 	    num_items) {
607 		struct ce_sendlist_item *item;
608 		int i;
609 
610 		/* handle all but the last item uniformly */
611 		for (i = 0; i < num_items - 1; i++) {
612 			item = &sl->item[i];
613 			/* TBDXXX: Support extensible sendlist_types? */
614 			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
615 			status = ce_send_nolock_legacy(copyeng,
616 				CE_SENDLIST_ITEM_CTXT,
617 				(qdf_dma_addr_t) item->data,
618 				item->u.nbytes, transfer_id,
619 				item->flags | CE_SEND_FLAG_GATHER,
620 				item->user_flags);
621 			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
622 		}
623 		/* provide valid context pointer for final item */
624 		item = &sl->item[i];
625 		/* TBDXXX: Support extensible sendlist_types? */
626 		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
627 		status = ce_send_nolock_legacy(copyeng, per_transfer_context,
628 					(qdf_dma_addr_t) item->data,
629 					item->u.nbytes,
630 					transfer_id, item->flags,
631 					item->user_flags);
632 		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
633 		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
634 					QDF_NBUF_TX_PKT_CE);
635 		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
636 			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
637 			QDF_TRACE_DEFAULT_PDEV_ID,
638 			(uint8_t *)&(((qdf_nbuf_t)per_transfer_context)->data),
639 			sizeof(((qdf_nbuf_t)per_transfer_context)->data),
640 			QDF_TX));
641 	} else {
642 		/*
643 		 * Probably not worth the additional complexity to support
644 		 * partial sends with continuation or notification.  We expect
645 		 * to use large rings and small sendlists. If we can't handle
646 		 * the entire request at once, punt it back to the caller.
647 		 */
648 	}
649 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
650 
651 	return status;
652 }
653 
654 #ifdef WLAN_FEATURE_FASTPATH
655 #ifdef QCA_WIFI_3_0
656 static inline void
657 ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
658 		      uint64_t dma_addr,
659 		      uint32_t user_flags)
660 {
661 	shadow_src_desc->buffer_addr_hi =
662 			(uint32_t)((dma_addr >> 32) & 0x1F);
663 	user_flags |= shadow_src_desc->buffer_addr_hi;
664 	memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
665 			sizeof(uint32_t));
666 }
667 #else
668 static inline void
669 ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
670 		      uint64_t dma_addr,
671 		      uint32_t user_flags)
672 {
673 }
674 #endif
675 
676 #define SLOTS_PER_DATAPATH_TX 2
677 
678 /**
679  * ce_send_fast() - CE layer Tx buffer posting function
680  * @copyeng: copy engine handle
681  * @msdu: msdu to be sent
682  * @transfer_id: transfer_id
683  * @download_len: packet download length
684  *
685  * Assumption: Called with an array of MSDUs
686  * Function:
687  * For each msdu in the array
688  * 1. Check no. of available entries
689  * 2. Create src ring entries (allocated in consistent memory)
690  * 3. Write index to h/w
691  *
692  * Return: No. of packets that could be sent
693  */
694 int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
695 		 unsigned int transfer_id, uint32_t download_len)
696 {
697 	struct CE_state *ce_state = (struct CE_state *)copyeng;
698 	struct hif_softc *scn = ce_state->scn;
699 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
700 	struct CE_ring_state *src_ring = ce_state->src_ring;
701 	u_int32_t ctrl_addr = ce_state->ctrl_addr;
702 	unsigned int nentries_mask = src_ring->nentries_mask;
703 	unsigned int write_index;
704 	unsigned int sw_index;
705 	unsigned int frag_len;
706 	uint64_t dma_addr;
707 	uint32_t user_flags;
708 	enum hif_ce_event_type type = FAST_TX_SOFTWARE_INDEX_UPDATE;
709 	bool ok_to_send = true;
710 
711 	qdf_spin_lock_bh(&ce_state->ce_index_lock);
712 
713 	/*
714 	 * Request runtime PM resume if it has already been suspended and make
715 	 * sure there is no PCIe link access.
716 	 */
717 	if (hif_pm_runtime_get(hif_hdl) != 0)
718 		ok_to_send = false;
719 
720 	if (ok_to_send) {
721 		Q_TARGET_ACCESS_BEGIN(scn);
722 		DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
723 	}
724 
725 	write_index = src_ring->write_index;
726 	sw_index = src_ring->sw_index;
727 	hif_record_ce_desc_event(scn, ce_state->id,
728 				FAST_TX_SOFTWARE_INDEX_UPDATE,
729 				NULL, NULL, sw_index, 0);
730 
731 	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1)
732 			 < SLOTS_PER_DATAPATH_TX)) {
733 		HIF_ERROR("Source ring full, required %d, available %d",
734 		      SLOTS_PER_DATAPATH_TX,
735 		      CE_RING_DELTA(nentries_mask, write_index, sw_index - 1));
736 		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
737 		if (ok_to_send)
738 			Q_TARGET_ACCESS_END(scn);
739 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
740 		return 0;
741 	}
742 
743 	{
744 		struct CE_src_desc *src_ring_base =
745 			(struct CE_src_desc *)src_ring->base_addr_owner_space;
746 		struct CE_src_desc *shadow_base =
747 			(struct CE_src_desc *)src_ring->shadow_base;
748 		struct CE_src_desc *src_desc =
749 			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
750 		struct CE_src_desc *shadow_src_desc =
751 			CE_SRC_RING_TO_DESC(shadow_base, write_index);
752 
753 		hif_pm_runtime_get_noresume(hif_hdl);
754 
755 		/*
756 		 * First fill out the ring descriptor for the HTC HTT frame
757 		 * header. These are uncached writes. Should we use a local
758 		 * structure instead?
759 		 */
760 		/* HTT/HTC header can be passed as an argument */
761 		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 0);
762 		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
763 							  0xFFFFFFFF);
764 		user_flags = qdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
765 		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
766 		shadow_src_desc->meta_data = transfer_id;
767 		shadow_src_desc->nbytes = qdf_nbuf_get_frag_len(msdu, 0);
768 		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
769 		download_len -= shadow_src_desc->nbytes;
770 		/*
771 		 * HTC HTT header is a word stream, so byte swap if CE byte
772 		 * swap enabled
773 		 */
774 		shadow_src_desc->byte_swap = ((ce_state->attr_flags &
775 					CE_ATTR_BYTE_SWAP_DATA) != 0);
776 		/* No need to update the HW write index yet (gather desc) */
777 		shadow_src_desc->gather = 1;
778 		*src_desc = *shadow_src_desc;
779 		/* By default we could initialize the transfer context to this
780 		 * value
781 		 */
782 		src_ring->per_transfer_context[write_index] =
783 			CE_SENDLIST_ITEM_CTXT;
784 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
785 
786 		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
787 		shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
788 		/*
789 		 * Now fill out the ring descriptor for the actual data
790 		 * packet
791 		 */
792 		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 1);
793 		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
794 							  0xFFFFFFFF);
795 		/*
796 		 * Clear packet offset for all but the first CE desc.
797 		 */
798 		user_flags &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
799 		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
800 		shadow_src_desc->meta_data = transfer_id;
801 
802 		/* get actual packet length */
803 		frag_len = qdf_nbuf_get_frag_len(msdu, 1);
804 
805 		/* download remaining bytes of payload */
806 		shadow_src_desc->nbytes =  download_len;
807 		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
808 		if (shadow_src_desc->nbytes > frag_len)
809 			shadow_src_desc->nbytes = frag_len;
810 
811 		/*  Data packet is a byte stream, so disable byte swap */
812 		shadow_src_desc->byte_swap = 0;
813 		/* For the last one, gather is not set */
814 		shadow_src_desc->gather    = 0;
815 		*src_desc = *shadow_src_desc;
816 		src_ring->per_transfer_context[write_index] = msdu;
817 
818 		hif_record_ce_desc_event(scn, ce_state->id, type,
819 					(union ce_desc *)src_desc,
820 				src_ring->per_transfer_context[write_index],
821 					write_index, shadow_src_desc->nbytes);
822 
823 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
824 
825 		DPTRACE(qdf_dp_trace(msdu,
826 			QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
827 			QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(msdu),
828 			sizeof(qdf_nbuf_data(msdu)), QDF_TX));
829 	}
830 
831 	src_ring->write_index = write_index;
832 
833 	if (ok_to_send) {
834 		if (qdf_likely(ce_state->state == CE_RUNNING)) {
835 			type = FAST_TX_WRITE_INDEX_UPDATE;
836 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
837 				write_index);
838 			Q_TARGET_ACCESS_END(scn);
839 		} else
840 			ce_state->state = CE_PENDING;
841 		hif_pm_runtime_put(hif_hdl);
842 	}
843 
844 
845 	qdf_spin_unlock_bh(&ce_state->ce_index_lock);
846 
847 	/* sent 1 packet */
848 	return 1;
849 }
850 
851 /**
852  * ce_is_fastpath_enabled() - returns true if fastpath mode is enabled
853  * @scn: Handle to HIF context
854  *
855  * Return: true if fastpath is enabled else false.
856  */
857 static bool ce_is_fastpath_enabled(struct hif_softc *scn)
858 {
859 	return scn->fastpath_mode_on;
860 }
861 
862 /**
863  * ce_is_fastpath_handler_registered() - return true if a fastpath handler
864  * is registered for the copy engine.
865  * @ce_state: handle to copy engine
866  *
867  * Return: true if fastpath handler is registered for datapath CE.
868  */
869 static bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
870 {
871 	if (ce_state->fastpath_handler)
872 		return true;
873 	else
874 		return false;
875 }
876 
877 
878 #else
879 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
880 {
881 	return false;
882 }
883 
884 static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
885 {
886 	return false;
887 }
888 #endif /* WLAN_FEATURE_FASTPATH */
889 
890 #ifndef AH_NEED_TX_DATA_SWAP
891 #define AH_NEED_TX_DATA_SWAP 0
892 #endif
893 
894 /**
895  * ce_batch_send() - send a bunch of msdus at once
896  * @ce_tx_hdl : pointer to CE handle
897  * @msdu : list of msdus to be sent
898  * @transfer_id : transfer id
899  * @len : Downloaded length
900  * @sendhead : sendhead
901  *
902  * Assumption: Called with an array of MSDUs
903  * Function:
904  * For each msdu in the array
905  * 1. Send each msdu
906  * 2. Increment write index accordingly.
907  *
908  * Return: list of msdus not sent
909  */
910 qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,  qdf_nbuf_t msdu,
911 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
912 {
913 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
914 	struct hif_softc *scn = ce_state->scn;
915 	struct CE_ring_state *src_ring = ce_state->src_ring;
916 	u_int32_t ctrl_addr = ce_state->ctrl_addr;
917 	/*  A_target_id_t targid = TARGID(scn);*/
918 
919 	uint32_t nentries_mask = src_ring->nentries_mask;
920 	uint32_t sw_index, write_index;
921 
922 	struct CE_src_desc *src_desc_base =
923 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
924 	uint32_t *src_desc;
925 
926 	struct CE_src_desc lsrc_desc = {0};
927 	int deltacount = 0;
928 	qdf_nbuf_t freelist = NULL, hfreelist = NULL, tempnext;
929 
930 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
931 	sw_index = src_ring->sw_index;
932 	write_index = src_ring->write_index;
933 
934 	deltacount = CE_RING_DELTA(nentries_mask, write_index, sw_index-1);
935 
936 	while (msdu) {
937 		tempnext = qdf_nbuf_next(msdu);
938 
939 		if (deltacount < 2) {
940 			if (sendhead)
941 				return msdu;
942 			HIF_ERROR("%s: Out of descriptors", __func__);
943 			src_ring->write_index = write_index;
944 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
945 					write_index);
946 
947 			sw_index = src_ring->sw_index;
948 			write_index = src_ring->write_index;
949 
950 			deltacount = CE_RING_DELTA(nentries_mask, write_index,
951 					sw_index-1);
952 			if (freelist == NULL) {
953 				freelist = msdu;
954 				hfreelist = msdu;
955 			} else {
956 				qdf_nbuf_set_next(freelist, msdu);
957 				freelist = msdu;
958 			}
959 			qdf_nbuf_set_next(msdu, NULL);
960 			msdu = tempnext;
961 			continue;
962 		}
963 
964 		src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base,
965 				write_index);
966 
967 		src_desc[0]   = qdf_nbuf_get_frag_paddr(msdu, 0);
968 
969 		lsrc_desc.meta_data = transfer_id;
970 		if (len  > msdu->len)
971 			len =  msdu->len;
972 		lsrc_desc.nbytes = len;
973 		/*  Data packet is a byte stream, so disable byte swap */
974 		lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
975 		lsrc_desc.gather    = 0; /*For the last one, gather is not set*/
976 
977 		src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
978 
979 
980 		src_ring->per_transfer_context[write_index] = msdu;
981 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
982 
983 		if (sendhead)
984 			break;
985 		qdf_nbuf_set_next(msdu, NULL);
986 		msdu = tempnext;
987 
988 	}
989 
990 
991 	src_ring->write_index = write_index;
992 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
993 
994 	return hfreelist;
995 }
996 
997 /**
998  * ce_update_tx_ring() - Advance sw index.
999  * @ce_tx_hdl : pointer to CE handle
1000  * @num_htt_cmpls : htt completions received.
1001  *
1002  * Function:
1003  * Increment the value of sw index of src ring
1004  * according to number of htt completions
1005  * received.
1006  *
1007  * Return: void
1008  */
1009 #ifdef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
1010 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
1011 {
1012 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
1013 	struct CE_ring_state *src_ring = ce_state->src_ring;
1014 	uint32_t nentries_mask = src_ring->nentries_mask;
1015 	/*
1016 	 * Advance the s/w index:
1017 	 * This effectively simulates completing the CE ring descriptors
1018 	 */
1019 	src_ring->sw_index =
1020 		CE_RING_IDX_ADD(nentries_mask, src_ring->sw_index,
1021 				num_htt_cmpls);
1022 }
1023 #else
1024 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
1025 {}
1026 #endif
1027 
1028 /**
1029  * ce_send_single() - send a single msdu
1030  * @ce_tx_hdl : pointer to CE handle
1031  * @msdu : msdu to be sent
1032  * @transfer_id : transfer id
1033  * @len : Downloaded length
1034  *
1035  * Function:
1036  * 1. Send one msdu
1037  * 2. Increment write index of src ring accordingly.
1038  *
1039  * Return: int: CE sent status
1040  */
1041 int ce_send_single(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
1042 		uint32_t transfer_id, u_int32_t len)
1043 {
1044 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
1045 	struct hif_softc *scn = ce_state->scn;
1046 	struct CE_ring_state *src_ring = ce_state->src_ring;
1047 	uint32_t ctrl_addr = ce_state->ctrl_addr;
1048 	/*A_target_id_t targid = TARGID(scn);*/
1049 
1050 	uint32_t nentries_mask = src_ring->nentries_mask;
1051 	uint32_t sw_index, write_index;
1052 
1053 	struct CE_src_desc *src_desc_base =
1054 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
1055 	uint32_t *src_desc;
1056 
1057 	struct CE_src_desc lsrc_desc = {0};
1058 	enum hif_ce_event_type event_type;
1059 
1060 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
1061 	sw_index = src_ring->sw_index;
1062 	write_index = src_ring->write_index;
1063 
1064 	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index,
1065 					sw_index-1) < 1)) {
1066 		/* ol_tx_stats_inc_ring_error(sc->scn->pdev_txrx_handle, 1); */
1067 		HIF_ERROR("%s: ce send fail %d %d %d", __func__, nentries_mask,
1068 			  write_index, sw_index);
1069 		return 1;
1070 	}
1071 
1072 	src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base, write_index);
1073 
1074 	src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0);
1075 
1076 	lsrc_desc.meta_data = transfer_id;
1077 	lsrc_desc.nbytes = len;
1078 	/*  Data packet is a byte stream, so disable byte swap */
1079 	lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
1080 	lsrc_desc.gather    = 0; /* For the last one, gather is not set */
1081 
1082 	src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
1083 
1084 
1085 	src_ring->per_transfer_context[write_index] = msdu;
1086 
1087 	if (((struct CE_src_desc *)src_desc)->gather)
1088 		event_type = HIF_TX_GATHER_DESC_POST;
1089 	else if (qdf_unlikely(ce_state->state != CE_RUNNING))
1090 		event_type = HIF_TX_DESC_SOFTWARE_POST;
1091 	else
1092 		event_type = HIF_TX_DESC_POST;
1093 
1094 	hif_record_ce_desc_event(scn, ce_state->id, event_type,
1095 				(union ce_desc *)src_desc, msdu,
1096 				write_index, len);
1097 
1098 	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
1099 
1100 	src_ring->write_index = write_index;
1101 
1102 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
1103 
1104 	return QDF_STATUS_SUCCESS;
1105 }
1106 
1107 /**
1108  * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine
1109  * @copyeng: copy engine handle
1110  * @per_recv_context: virtual address of the nbuf
1111  * @buffer: physical address of the nbuf
1112  *
1113  * Return: 0 if the buffer is enqueued
1114  */
1115 int
1116 ce_recv_buf_enqueue(struct CE_handle *copyeng,
1117 		    void *per_recv_context, qdf_dma_addr_t buffer)
1118 {
1119 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1120 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
1121 
1122 	return hif_state->ce_services->ce_recv_buf_enqueue(copyeng,
1123 			per_recv_context, buffer);
1124 }
1125 
1126 /**
1127  * ce_recv_buf_enqueue_legacy() - enqueue a recv buffer into a copy engine
1128  * @copyeng: copy engine handle
1129  * @per_recv_context: virtual address of the nbuf
1130  * @buffer: physical address of the nbuf
1131  *
1132  * Return: 0 if the buffer is enqueued
1133  */
1134 static int
1135 ce_recv_buf_enqueue_legacy(struct CE_handle *copyeng,
1136 		    void *per_recv_context, qdf_dma_addr_t buffer)
1137 {
1138 	int status;
1139 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1140 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
1141 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1142 	unsigned int nentries_mask = dest_ring->nentries_mask;
1143 	unsigned int write_index;
1144 	unsigned int sw_index;
1145 	uint64_t dma_addr = buffer;
1146 	struct hif_softc *scn = CE_state->scn;
1147 
1148 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
1149 	write_index = dest_ring->write_index;
1150 	sw_index = dest_ring->sw_index;
1151 
1152 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
1153 		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
1154 		return -EIO;
1155 	}
1156 
1157 	if ((CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) ||
1158 	    (ce_is_fastpath_enabled(scn) && CE_state->htt_rx_data)) {
1159 		struct CE_dest_desc *dest_ring_base =
1160 			(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
1161 		struct CE_dest_desc *dest_desc =
1162 			CE_DEST_RING_TO_DESC(dest_ring_base, write_index);
1163 
1164 		/* Update the low 32 bits of the destination descriptor */
1165 		dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
1166 #ifdef QCA_WIFI_3_0
1167 		dest_desc->buffer_addr_hi =
1168 			(uint32_t)((dma_addr >> 32) & 0x1F);
1169 #endif
1170 		dest_desc->nbytes = 0;
1171 
1172 		dest_ring->per_transfer_context[write_index] =
1173 			per_recv_context;
1174 
1175 		hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_POST,
1176 				(union ce_desc *) dest_desc, per_recv_context,
1177 				write_index, 0);
1178 
1179 		/* Update Destination Ring Write Index */
1180 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
1181 		if (write_index != sw_index) {
1182 			CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
1183 			dest_ring->write_index = write_index;
1184 		}
1185 		status = QDF_STATUS_SUCCESS;
1186 	} else
1187 		status = QDF_STATUS_E_FAILURE;
1188 
1189 	Q_TARGET_ACCESS_END(scn);
1190 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
1191 	return status;
1192 }
1193 
1194 void
1195 ce_send_watermarks_set(struct CE_handle *copyeng,
1196 		       unsigned int low_alert_nentries,
1197 		       unsigned int high_alert_nentries)
1198 {
1199 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1200 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1201 	struct hif_softc *scn = CE_state->scn;
1202 
1203 	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
1204 	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
1205 }
1206 
1207 void
1208 ce_recv_watermarks_set(struct CE_handle *copyeng,
1209 		       unsigned int low_alert_nentries,
1210 		       unsigned int high_alert_nentries)
1211 {
1212 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1213 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1214 	struct hif_softc *scn = CE_state->scn;
1215 
1216 	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
1217 				low_alert_nentries);
1218 	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
1219 				high_alert_nentries);
1220 }
1221 
1222 unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
1223 {
1224 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1225 	struct CE_ring_state *src_ring = CE_state->src_ring;
1226 	unsigned int nentries_mask = src_ring->nentries_mask;
1227 	unsigned int sw_index;
1228 	unsigned int write_index;
1229 
1230 	qdf_spin_lock(&CE_state->ce_index_lock);
1231 	sw_index = src_ring->sw_index;
1232 	write_index = src_ring->write_index;
1233 	qdf_spin_unlock(&CE_state->ce_index_lock);
1234 
1235 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
1236 }
1237 
1238 unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
1239 {
1240 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1241 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
1242 	unsigned int nentries_mask = dest_ring->nentries_mask;
1243 	unsigned int sw_index;
1244 	unsigned int write_index;
1245 
1246 	qdf_spin_lock(&CE_state->ce_index_lock);
1247 	sw_index = dest_ring->sw_index;
1248 	write_index = dest_ring->write_index;
1249 	qdf_spin_unlock(&CE_state->ce_index_lock);
1250 
1251 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
1252 }
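
/*
 * Worked example (assuming CE_RING_DELTA(mask, from, to) computes
 * ((to) - (from)) & (mask), which matches how it is used above): with an
 * 8-entry ring (nentries_mask = 7), write_index = 6 and sw_index = 2, the
 * slots available for new posts are (2 - 1 - 6) & 7 = 3; one entry is
 * always left unused so a full ring can be told apart from an empty one.
 */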
1253 
1254 /*
1255  * Guts of ce_send_entries_done.
1256  * The caller takes responsibility for any necessary locking.
1257  */
1258 static unsigned int
1259 ce_send_entries_done_nolock_legacy(struct hif_softc *scn,
1260 			    struct CE_state *CE_state)
1261 {
1262 	struct CE_ring_state *src_ring = CE_state->src_ring;
1263 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1264 	unsigned int nentries_mask = src_ring->nentries_mask;
1265 	unsigned int sw_index;
1266 	unsigned int read_index;
1267 
1268 	sw_index = src_ring->sw_index;
1269 	read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
1270 
1271 	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
1272 }
1273 
1274 unsigned int ce_send_entries_done(struct CE_handle *copyeng)
1275 {
1276 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1277 	unsigned int nentries;
1278 	struct hif_softc *scn = CE_state->scn;
1279 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1280 
1281 	qdf_spin_lock(&CE_state->ce_index_lock);
1282 	nentries = hif_state->ce_services->ce_send_entries_done_nolock(
1283 						CE_state->scn, CE_state);
1284 	qdf_spin_unlock(&CE_state->ce_index_lock);
1285 
1286 	return nentries;
1287 }
1288 
1289 /*
1290  * Guts of ce_recv_entries_done.
1291  * The caller takes responsibility for any necessary locking.
1292  */
1293 static unsigned int
1294 ce_recv_entries_done_nolock_legacy(struct hif_softc *scn,
1295 			    struct CE_state *CE_state)
1296 {
1297 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
1298 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1299 	unsigned int nentries_mask = dest_ring->nentries_mask;
1300 	unsigned int sw_index;
1301 	unsigned int read_index;
1302 
1303 	sw_index = dest_ring->sw_index;
1304 	read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
1305 
1306 	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
1307 }
1308 
1309 unsigned int ce_recv_entries_done(struct CE_handle *copyeng)
1310 {
1311 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1312 	unsigned int nentries;
1313 	struct hif_softc *scn = CE_state->scn;
1314 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1315 
1316 	qdf_spin_lock(&CE_state->ce_index_lock);
1317 	nentries = hif_state->ce_services->ce_recv_entries_done_nolock(
1318 						CE_state->scn, CE_state);
1319 	qdf_spin_unlock(&CE_state->ce_index_lock);
1320 
1321 	return nentries;
1322 }
1323 
1324 /*
1325  * Guts of ce_completed_recv_next.
1326  * The caller takes responsibility for any necessary locking.
1327  */
1328 static int
1329 ce_completed_recv_next_nolock_legacy(struct CE_state *CE_state,
1330 			      void **per_CE_contextp,
1331 			      void **per_transfer_contextp,
1332 			      qdf_dma_addr_t *bufferp,
1333 			      unsigned int *nbytesp,
1334 			      unsigned int *transfer_idp,
1335 			      unsigned int *flagsp)
1336 {
1337 	int status;
1338 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
1339 	unsigned int nentries_mask = dest_ring->nentries_mask;
1340 	unsigned int sw_index = dest_ring->sw_index;
1341 	struct hif_softc *scn = CE_state->scn;
1342 	struct CE_dest_desc *dest_ring_base =
1343 		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
1344 	struct CE_dest_desc *dest_desc =
1345 		CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
1346 	int nbytes;
1347 	struct CE_dest_desc dest_desc_info;
1348 	/*
1349 	 * By copying the dest_desc_info element to local memory, we avoid
1350 	 * an extra memory read from non-cacheable memory.
1351 	 */
1352 	dest_desc_info =  *dest_desc;
1353 	nbytes = dest_desc_info.nbytes;
1354 	if (nbytes == 0) {
1355 		/*
1356 		 * This closes a relatively unusual race where the Host
1357 		 * sees the updated DRRI before the update to the
1358 		 * corresponding descriptor has completed. We treat this
1359 		 * as a descriptor that is not yet done.
1360 		 */
1361 		status = QDF_STATUS_E_FAILURE;
1362 		goto done;
1363 	}
1364 
1365 	hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_COMPLETION,
1366 			(union ce_desc *) dest_desc,
1367 			dest_ring->per_transfer_context[sw_index],
1368 			sw_index, 0);
1369 
1370 	dest_desc->nbytes = 0;
1371 
1372 	/* Return data from completed destination descriptor */
1373 	*bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info);
1374 	*nbytesp = nbytes;
1375 	*transfer_idp = dest_desc_info.meta_data;
1376 	*flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;
1377 
1378 	if (per_CE_contextp)
1379 		*per_CE_contextp = CE_state->recv_context;
1380 
1381 	if (per_transfer_contextp) {
1382 		*per_transfer_contextp =
1383 			dest_ring->per_transfer_context[sw_index];
1384 	}
1385 	dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */
1386 
1387 	/* Update sw_index */
1388 	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1389 	dest_ring->sw_index = sw_index;
1390 	status = QDF_STATUS_SUCCESS;
1391 
1392 done:
1393 	return status;
1394 }
1395 
1396 int
1397 ce_completed_recv_next(struct CE_handle *copyeng,
1398 		       void **per_CE_contextp,
1399 		       void **per_transfer_contextp,
1400 		       qdf_dma_addr_t *bufferp,
1401 		       unsigned int *nbytesp,
1402 		       unsigned int *transfer_idp, unsigned int *flagsp)
1403 {
1404 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1405 	int status;
1406 	struct hif_softc *scn = CE_state->scn;
1407 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1408 	struct ce_ops *ce_services;
1409 
1410 	ce_services = hif_state->ce_services;
1411 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
1412 	status =
1413 		ce_services->ce_completed_recv_next_nolock(CE_state,
1414 				per_CE_contextp, per_transfer_contextp, bufferp,
1415 					      nbytesp, transfer_idp, flagsp);
1416 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
1417 
1418 	return status;
1419 }
1420 
1421 QDF_STATUS
1422 ce_revoke_recv_next(struct CE_handle *copyeng,
1423 		    void **per_CE_contextp,
1424 		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
1425 {
1426 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1427 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
1428 
1429 	return hif_state->ce_services->ce_revoke_recv_next(copyeng,
1430 			per_CE_contextp, per_transfer_contextp, bufferp);
1431 }
1432 /* NB: Modeled after ce_completed_recv_next_nolock */
1433 static QDF_STATUS
1434 ce_revoke_recv_next_legacy(struct CE_handle *copyeng,
1435 		    void **per_CE_contextp,
1436 		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
1437 {
1438 	struct CE_state *CE_state;
1439 	struct CE_ring_state *dest_ring;
1440 	unsigned int nentries_mask;
1441 	unsigned int sw_index;
1442 	unsigned int write_index;
1443 	QDF_STATUS status;
1444 	struct hif_softc *scn;
1445 
1446 	CE_state = (struct CE_state *)copyeng;
1447 	dest_ring = CE_state->dest_ring;
1448 	if (!dest_ring)
1449 		return QDF_STATUS_E_FAILURE;
1450 
1451 	scn = CE_state->scn;
1452 	qdf_spin_lock(&CE_state->ce_index_lock);
1453 	nentries_mask = dest_ring->nentries_mask;
1454 	sw_index = dest_ring->sw_index;
1455 	write_index = dest_ring->write_index;
1456 	if (write_index != sw_index) {
1457 		struct CE_dest_desc *dest_ring_base =
1458 			(struct CE_dest_desc *)dest_ring->
1459 			    base_addr_owner_space;
1460 		struct CE_dest_desc *dest_desc =
1461 			CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
1462 
1463 		/* Return data from completed destination descriptor */
1464 		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc);
1465 
1466 		if (per_CE_contextp)
1467 			*per_CE_contextp = CE_state->recv_context;
1468 
1469 		if (per_transfer_contextp) {
1470 			*per_transfer_contextp =
1471 				dest_ring->per_transfer_context[sw_index];
1472 		}
1473 		dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */
1474 
1475 		/* Update sw_index */
1476 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1477 		dest_ring->sw_index = sw_index;
1478 		status = QDF_STATUS_SUCCESS;
1479 	} else {
1480 		status = QDF_STATUS_E_FAILURE;
1481 	}
1482 	qdf_spin_unlock(&CE_state->ce_index_lock);
1483 
1484 	return status;
1485 }
1486 
1487 /*
1488  * Guts of ce_completed_send_next.
1489  * The caller takes responsibility for any necessary locking.
1490  */
1491 static int
1492 ce_completed_send_next_nolock_legacy(struct CE_state *CE_state,
1493 			      void **per_CE_contextp,
1494 			      void **per_transfer_contextp,
1495 			      qdf_dma_addr_t *bufferp,
1496 			      unsigned int *nbytesp,
1497 			      unsigned int *transfer_idp,
1498 			      unsigned int *sw_idx,
1499 			      unsigned int *hw_idx,
1500 			      uint32_t *toeplitz_hash_result)
1501 {
1502 	int status = QDF_STATUS_E_FAILURE;
1503 	struct CE_ring_state *src_ring = CE_state->src_ring;
1504 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1505 	unsigned int nentries_mask = src_ring->nentries_mask;
1506 	unsigned int sw_index = src_ring->sw_index;
1507 	unsigned int read_index;
1508 	struct hif_softc *scn = CE_state->scn;
1509 
1510 	if (src_ring->hw_index == sw_index) {
1511 		/*
1512 		 * The SW completion index has caught up with the cached
1513 		 * version of the HW completion index.
1514 		 * Update the cached HW completion index to see whether
1515 		 * the SW has really caught up to the HW, or if the cached
1516 		 * value of the HW index has become stale.
1517 		 */
1518 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1519 			return QDF_STATUS_E_FAILURE;
1520 		src_ring->hw_index =
1521 			CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
1522 		if (Q_TARGET_ACCESS_END(scn) < 0)
1523 			return QDF_STATUS_E_FAILURE;
1524 	}
1525 	read_index = src_ring->hw_index;
1526 
1527 	if (sw_idx)
1528 		*sw_idx = sw_index;
1529 
1530 	if (hw_idx)
1531 		*hw_idx = read_index;
1532 
1533 	if ((read_index != sw_index) && (read_index != 0xffffffff)) {
1534 		struct CE_src_desc *shadow_base =
1535 			(struct CE_src_desc *)src_ring->shadow_base;
1536 		struct CE_src_desc *shadow_src_desc =
1537 			CE_SRC_RING_TO_DESC(shadow_base, sw_index);
1538 #ifdef QCA_WIFI_3_0
1539 		struct CE_src_desc *src_ring_base =
1540 			(struct CE_src_desc *)src_ring->base_addr_owner_space;
1541 		struct CE_src_desc *src_desc =
1542 			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
1543 #endif
1544 		hif_record_ce_desc_event(scn, CE_state->id,
1545 				HIF_TX_DESC_COMPLETION,
1546 				(union ce_desc *) shadow_src_desc,
1547 				src_ring->per_transfer_context[sw_index],
1548 				sw_index, shadow_src_desc->nbytes);
1549 
1550 		/* Return data from completed source descriptor */
1551 		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc);
1552 		*nbytesp = shadow_src_desc->nbytes;
1553 		*transfer_idp = shadow_src_desc->meta_data;
1554 #ifdef QCA_WIFI_3_0
1555 		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
1556 #else
1557 		*toeplitz_hash_result = 0;
1558 #endif
1559 		if (per_CE_contextp)
1560 			*per_CE_contextp = CE_state->send_context;
1561 
1562 		if (per_transfer_contextp) {
1563 			*per_transfer_contextp =
1564 				src_ring->per_transfer_context[sw_index];
1565 		}
1566 		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */
1567 
1568 		/* Update sw_index */
1569 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1570 		src_ring->sw_index = sw_index;
1571 		status = QDF_STATUS_SUCCESS;
1572 	}
1573 
1574 	return status;
1575 }
1576 
1577 QDF_STATUS
1578 ce_cancel_send_next(struct CE_handle *copyeng,
1579 		void **per_CE_contextp,
1580 		void **per_transfer_contextp,
1581 		qdf_dma_addr_t *bufferp,
1582 		unsigned int *nbytesp,
1583 		unsigned int *transfer_idp,
1584 		uint32_t *toeplitz_hash_result)
1585 {
1586 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1587 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
1588 
1589 	return hif_state->ce_services->ce_cancel_send_next
1590 		(copyeng, per_CE_contextp, per_transfer_contextp,
1591 		 bufferp, nbytesp, transfer_idp, toeplitz_hash_result);
1592 }
1593 
1594 /* NB: Modeled after ce_completed_send_next */
1595 static QDF_STATUS
1596 ce_cancel_send_next_legacy(struct CE_handle *copyeng,
1597 		void **per_CE_contextp,
1598 		void **per_transfer_contextp,
1599 		qdf_dma_addr_t *bufferp,
1600 		unsigned int *nbytesp,
1601 		unsigned int *transfer_idp,
1602 		uint32_t *toeplitz_hash_result)
1603 {
1604 	struct CE_state *CE_state;
1605 	struct CE_ring_state *src_ring;
1606 	unsigned int nentries_mask;
1607 	unsigned int sw_index;
1608 	unsigned int write_index;
1609 	QDF_STATUS status;
1610 	struct hif_softc *scn;
1611 
1612 	CE_state = (struct CE_state *)copyeng;
1613 	src_ring = CE_state->src_ring;
1614 	if (!src_ring)
1615 		return QDF_STATUS_E_FAILURE;
1616 
1617 	scn = CE_state->scn;
1618 	qdf_spin_lock(&CE_state->ce_index_lock);
1619 	nentries_mask = src_ring->nentries_mask;
1620 	sw_index = src_ring->sw_index;
1621 	write_index = src_ring->write_index;
1622 
1623 	if (write_index != sw_index) {
1624 		struct CE_src_desc *src_ring_base =
1625 			(struct CE_src_desc *)src_ring->base_addr_owner_space;
1626 		struct CE_src_desc *src_desc =
1627 			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
1628 
1629 		/* Return data from completed source descriptor */
1630 		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc);
1631 		*nbytesp = src_desc->nbytes;
1632 		*transfer_idp = src_desc->meta_data;
1633 #ifdef QCA_WIFI_3_0
1634 		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
1635 #else
1636 		*toeplitz_hash_result = 0;
1637 #endif
1638 
1639 		if (per_CE_contextp)
1640 			*per_CE_contextp = CE_state->send_context;
1641 
1642 		if (per_transfer_contextp) {
1643 			*per_transfer_contextp =
1644 				src_ring->per_transfer_context[sw_index];
1645 		}
1646 		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */
1647 
1648 		/* Update sw_index */
1649 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1650 		src_ring->sw_index = sw_index;
1651 		status = QDF_STATUS_SUCCESS;
1652 	} else {
1653 		status = QDF_STATUS_E_FAILURE;
1654 	}
1655 	qdf_spin_unlock(&CE_state->ce_index_lock);
1656 
1657 	return status;
1658 }
1659 
1660 /* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
1661 #define CE_WM_SHFT 1
1662 
1663 int
1664 ce_completed_send_next(struct CE_handle *copyeng,
1665 		       void **per_CE_contextp,
1666 		       void **per_transfer_contextp,
1667 		       qdf_dma_addr_t *bufferp,
1668 		       unsigned int *nbytesp,
1669 		       unsigned int *transfer_idp,
1670 		       unsigned int *sw_idx,
1671 		       unsigned int *hw_idx,
1672 		       unsigned int *toeplitz_hash_result)
1673 {
1674 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1675 	struct hif_softc *scn = CE_state->scn;
1676 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1677 	struct ce_ops *ce_services;
1678 	int status;
1679 
1680 	ce_services = hif_state->ce_services;
1681 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
1682 	status =
1683 		ce_services->ce_completed_send_next_nolock(CE_state,
1684 					per_CE_contextp, per_transfer_contextp,
1685 					bufferp, nbytesp, transfer_idp, sw_idx,
1686 					      hw_idx, toeplitz_hash_result);
1687 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
1688 
1689 	return status;
1690 }
1691 
1692 #ifdef ATH_11AC_TXCOMPACT
1693 /* CE engine descriptor reap
1694  * Similar to ce_per_engine_service; the only difference is that
1695  * ce_per_engine_service does receive processing and reaping of completed
1696  * descriptors, while this function only handles reaping of Tx complete
1697  * descriptors. It is called from the threshold reap poll routine
1698  * hif_send_complete_check, so it must not contain any receive
1699  * functionality.
1700  */
1701 
1702 void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
1703 {
1704 	void *CE_context;
1705 	void *transfer_context;
1706 	qdf_dma_addr_t buf;
1707 	unsigned int nbytes;
1708 	unsigned int id;
1709 	unsigned int sw_idx, hw_idx;
1710 	uint32_t toeplitz_hash_result;
1711 	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
1712 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1713 
1714 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1715 		return;
1716 
1717 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
1718 			NULL, NULL, 0, 0);
1719 
1720 	/* Since this function is called from both user context and
1721 	 * tasklet context, the spinlock has to disable bottom halves.
1722 	 * This fix assumes that the ATH_11AC_TXCOMPACT flag is always
1723 	 * enabled in TX polling mode. If this is not the case, more
1724 	 * bottom-half spinlock changes are needed. Due to data path
1725 	 * performance concerns, after internal discussion we've decided
1726 	 * to make the minimum change, i.e., only address the issue that
1727 	 * occurred in this function. The possible negative effect of this
1728 	 * minimum change is that, if some other function is later opened
1729 	 * up to user context as well, those cases will also need to be
1730 	 * addressed by changing spin_lock to spin_lock_bh.
1731 	 */
1732 
1733 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
1734 
1735 	if (CE_state->send_cb) {
1736 		{
1737 			struct ce_ops *ce_services = hif_state->ce_services;
1738 			/* Pop completed send buffers and call the
1739 			 * registered send callback for each
1740 			 */
1741 			while (ce_services->ce_completed_send_next_nolock
1742 				 (CE_state, &CE_context,
1743 				  &transfer_context, &buf,
1744 				  &nbytes, &id, &sw_idx, &hw_idx,
1745 				  &toeplitz_hash_result) ==
1746 				  QDF_STATUS_SUCCESS) {
1747 				if (ce_id != CE_HTT_H2T_MSG) {
1748 					qdf_spin_unlock_bh(
1749 						&CE_state->ce_index_lock);
1750 					CE_state->send_cb(
1751 						(struct CE_handle *)
1752 						CE_state, CE_context,
1753 						transfer_context, buf,
1754 						nbytes, id, sw_idx, hw_idx,
1755 						toeplitz_hash_result);
1756 					qdf_spin_lock_bh(
1757 						&CE_state->ce_index_lock);
1758 				} else {
1759 					struct HIF_CE_pipe_info *pipe_info =
1760 						(struct HIF_CE_pipe_info *)
1761 						CE_context;
1762 
1763 					qdf_spin_lock_bh(&pipe_info->
1764 						 completion_freeq_lock);
1765 					pipe_info->num_sends_allowed++;
1766 					qdf_spin_unlock_bh(&pipe_info->
1767 						   completion_freeq_lock);
1768 				}
1769 			}
1770 		}
1771 	}
1772 
1773 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
1774 
1775 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
1776 			NULL, NULL, 0, 0);
1777 	Q_TARGET_ACCESS_END(scn);
1778 }
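/*
 * Editorial sketch (not compiled): one way the threshold reap poll path
 * described above could drive this reaper. The helper name below is
 * hypothetical; the real caller is hif_send_complete_check(), whose
 * signature lives elsewhere in HIF.
 */
#if 0
static void example_tx_credit_poll(struct hif_softc *scn, unsigned int ce_id)
{
	/* Reap only Tx completions; Rx servicing stays in
	 * ce_per_engine_service().
	 */
	ce_per_engine_servicereap(scn, ce_id);
}
#endif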
1779 
1780 #endif /*ATH_11AC_TXCOMPACT */
1781 
1782 /*
1783  * Number of times to check for any pending tx/rx completion on
1784  * a copy engine; this count should be big enough. Once we hit this
1785  * threshold we will not check for any Tx/Rx completion in the same
1786  * interrupt handling pass. Note that this threshold is only used for
1787  * Rx interrupt processing; it can be used for Tx as well if we
1788  * suspect an infinite loop in checking for pending Tx completions.
1789  */
1790 #define CE_TXRX_COMP_CHECK_THRESHOLD 20
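/*
 * Editorial sketch (not compiled): the bounded re-check pattern that this
 * threshold enables, reduced to its skeleton. The real loops are in
 * ce_per_engine_service() and ce_per_engine_service_fast() below; the
 * helper name here is hypothetical.
 */
#if 0
static void example_bounded_recheck(struct hif_softc *scn,
				    struct CE_state *ce_state)
{
	unsigned int more_comp_cnt = 0;

more_data:
	/* ... drain whatever completions are currently visible ... */

	if (ce_recv_entries_done_nolock_legacy(scn, ce_state) &&
	    more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD)
		goto more_data;
	/* give up after the threshold to bound time spent in one pass */
}
#endif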
1791 
1792 #ifdef WLAN_FEATURE_FASTPATH
1793 /**
1794  * ce_fastpath_rx_handle() - Updates write_index and calls fastpath msg handler
1795  * @ce_state: handle to copy engine state
1796  * @cmpl_msdus: Rx msdus
1797  * @num_cmpls: number of Rx msdus
1798  * @ctrl_addr: CE control address
1799  *
1800  * Return: None
1801  */
1802 static void ce_fastpath_rx_handle(struct CE_state *ce_state,
1803 				  qdf_nbuf_t *cmpl_msdus, uint32_t num_cmpls,
1804 				  uint32_t ctrl_addr)
1805 {
1806 	struct hif_softc *scn = ce_state->scn;
1807 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
1808 	uint32_t nentries_mask = dest_ring->nentries_mask;
1809 	uint32_t write_index;
1810 
1811 	qdf_spin_unlock(&ce_state->ce_index_lock);
1812 	(ce_state->fastpath_handler)(ce_state->context, cmpl_msdus, num_cmpls);
1813 	qdf_spin_lock(&ce_state->ce_index_lock);
1814 
1815 	/* Update Destination Ring Write Index */
1816 	write_index = dest_ring->write_index;
1817 	write_index = CE_RING_IDX_ADD(nentries_mask, write_index, num_cmpls);
1818 
1819 	hif_record_ce_desc_event(scn, ce_state->id,
1820 			FAST_RX_WRITE_INDEX_UPDATE,
1821 			NULL, NULL, write_index, 0);
1822 
1823 	CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
1824 	dest_ring->write_index = write_index;
1825 }
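/*
 * Editorial note: the index math above assumes ring sizes are powers of two,
 * so CE_RING_IDX_ADD()/CE_RING_IDX_INCR() can wrap with nentries_mask. A
 * minimal sketch of the assumed arithmetic (the authoritative macro
 * definitions live in the CE headers):
 */
#if 0
static inline uint32_t example_ring_idx_add(uint32_t nentries_mask,
					    uint32_t idx, uint32_t num)
{
	/* e.g. with mask 0x1FF (512 entries): 510 + 4 wraps to 2 */
	return (idx + num) & nentries_mask;
}
#endif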
1826 
1827 #ifdef CONFIG_SLUB_DEBUG_ON
1828 #define MSG_FLUSH_NUM 16
1829 #else /* PERF build */
1830 #define MSG_FLUSH_NUM 32
1831 #endif /* CONFIG_SLUB_DEBUG_ON */
1832 /**
1833  * ce_per_engine_service_fast() - CE handler routine to service fastpath msgs
1834  * @scn: hif_context
1835  * @ce_id: Copy engine ID
1836  * 1) Go through the CE ring and find the completions
1837  * 2) For valid completions, retrieve the context (nbuf) from per_transfer_context[]
1838  * 3) Unmap the buffer and accumulate it in an array
1839  * 4) Call the message handler when the array is full or when exiting the handler
1840  * (A stripped-down sketch of this flow follows the function body below.)
1841  * Return: void
1842  */
1843 
1844 static void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
1845 {
1846 	struct CE_state *ce_state = scn->ce_id_to_state[ce_id];
1847 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1848 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
1849 	struct CE_dest_desc *dest_ring_base =
1850 		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
1851 
1852 	uint32_t nentries_mask = dest_ring->nentries_mask;
1853 	uint32_t sw_index = dest_ring->sw_index;
1854 	uint32_t nbytes;
1855 	qdf_nbuf_t nbuf;
1856 	dma_addr_t paddr;
1857 	struct CE_dest_desc *dest_desc;
1858 	qdf_nbuf_t cmpl_msdus[MSG_FLUSH_NUM];
1859 	uint32_t ctrl_addr = ce_state->ctrl_addr;
1860 	uint32_t nbuf_cmpl_idx = 0;
1861 	unsigned int more_comp_cnt = 0;
1862 
1863 more_data:
1864 	for (;;) {
1865 
1866 		dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base,
1867 						 sw_index);
1868 
1869 		/*
1870 		 * The following 2 reads are from non-cached memory
1871 		 */
1872 		nbytes = dest_desc->nbytes;
1873 
1874 		/* If completion is invalid, break */
1875 		if (qdf_unlikely(nbytes == 0))
1876 			break;
1877 
1878 
1879 		/*
1880 		 * Build the nbuf list from valid completions
1881 		 */
1882 		nbuf = dest_ring->per_transfer_context[sw_index];
1883 
1884 		/*
1885 		 * No lock is needed here, since this is the only thread
1886 		 * that accesses the sw_index
1887 		 */
1888 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1889 
1890 		/*
1891 		 * CAREFUL : Uncached write, but still less expensive,
1892 		 * since most modern caches use "write-combining" to
1893 		 * flush multiple cache-writes all at once.
1894 		 */
1895 		dest_desc->nbytes = 0;
1896 
1897 		/*
1898 		 * Per our understanding this sync is not required on our
1899 		 * platform, since we are doing the same cache
1900 		 * invalidation operation on the same buffer twice in
1901 		 * succession, without any modification to this buffer
1902 		 * by the CPU in between.
1903 		 * However, this code with two syncs in succession has
1904 		 * been undergoing some testing at a customer site,
1905 		 * and has shown no problems so far. We would like to
1906 		 * confirm with the customer that this line really is
1907 		 * not required, before we remove this line
1908 		 * completely.
1909 		 */
1910 		paddr = QDF_NBUF_CB_PADDR(nbuf);
1911 
1912 		qdf_mem_dma_sync_single_for_cpu(scn->qdf_dev, paddr,
1913 				(skb_end_pointer(nbuf) - (nbuf)->data),
1914 				DMA_FROM_DEVICE);
1915 
1916 		qdf_nbuf_put_tail(nbuf, nbytes);
1917 
1918 		qdf_assert_always(nbuf->data != NULL);
1919 
1920 		QDF_NBUF_CB_RX_CTX_ID(nbuf) =
1921 				hif_get_rx_ctx_id(ce_state->id, hif_hdl);
1922 		cmpl_msdus[nbuf_cmpl_idx++] = nbuf;
1923 
1924 		/*
1925 		 * We are not posting the buffers back; instead we
1926 		 * reuse them.
1927 		 */
1928 		if (nbuf_cmpl_idx == MSG_FLUSH_NUM) {
1929 			hif_record_ce_desc_event(scn, ce_state->id,
1930 						 FAST_RX_SOFTWARE_INDEX_UPDATE,
1931 						 NULL, NULL, sw_index, 0);
1932 			dest_ring->sw_index = sw_index;
1933 			ce_fastpath_rx_handle(ce_state, cmpl_msdus,
1934 					      MSG_FLUSH_NUM, ctrl_addr);
1935 
1936 			ce_state->receive_count += MSG_FLUSH_NUM;
1937 			if (qdf_unlikely(hif_ce_service_should_yield(
1938 						scn, ce_state))) {
1939 				ce_state->force_break = 1;
1940 				qdf_atomic_set(&ce_state->rx_pending, 1);
1941 				return;
1942 			}
1943 
1944 			nbuf_cmpl_idx = 0;
1945 			more_comp_cnt = 0;
1946 		}
1947 	}
1948 
1949 	hif_record_ce_desc_event(scn, ce_state->id,
1950 				 FAST_RX_SOFTWARE_INDEX_UPDATE,
1951 				 NULL, NULL, sw_index, 0);
1952 
1953 	dest_ring->sw_index = sw_index;
1954 
1955 	/*
1956 	 * If there are not enough completions to fill the array,
1957 	 * just call the message handler here
1958 	 */
1959 	if (nbuf_cmpl_idx) {
1960 		ce_fastpath_rx_handle(ce_state, cmpl_msdus,
1961 				      nbuf_cmpl_idx, ctrl_addr);
1962 
1963 		ce_state->receive_count += nbuf_cmpl_idx;
1964 		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
1965 			ce_state->force_break = 1;
1966 			qdf_atomic_set(&ce_state->rx_pending, 1);
1967 			return;
1968 		}
1969 
1970 		/* check for more packets after upper layer processing */
1971 		nbuf_cmpl_idx = 0;
1972 		more_comp_cnt = 0;
1973 		goto more_data;
1974 	}
1975 	qdf_atomic_set(&ce_state->rx_pending, 0);
1976 	if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
1977 		CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1978 					   HOST_IS_COPY_COMPLETE_MASK);
1979 	} else {
1980 		HIF_ERROR("%s: target access is not allowed", __func__);
1981 		return;
1982 	}
1983 
1984 	if (ce_recv_entries_done_nolock_legacy(scn, ce_state)) {
1985 		if (more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1986 			goto more_data;
1987 		} else {
1988 			HIF_ERROR("%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1989 				  __func__, nentries_mask,
1990 				  ce_state->dest_ring->sw_index,
1991 				  CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr));
1992 		}
1993 	}
1994 #ifdef NAPI_YIELD_BUDGET_BASED
1995 	/* Caution: before modifying this code, refer to the hif_napi_poll function
1996 	 * to see how napi_complete gets called, and make the necessary changes.
1997 	 * Force break has to be done until WIN disables the interrupt at source. */
1998 	ce_state->force_break = 1;
1999 #endif
2000 }
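/*
 * Editorial sketch (not compiled): the batch-then-flush shape of
 * ce_per_engine_service_fast() above, stripped of DMA syncs, ring index
 * updates and yield checks. The helper name is hypothetical.
 */
#if 0
static void example_fastpath_shape(struct CE_state *ce_state)
{
	qdf_nbuf_t batch[MSG_FLUSH_NUM];
	uint32_t n = 0;

	for (;;) {
		qdf_nbuf_t nbuf = NULL;		/* next completed nbuf, if any */

		if (!nbuf)			/* 1) no more valid completions */
			break;
		batch[n++] = nbuf;		/* 2)+3) unmap and accumulate */
		if (n == MSG_FLUSH_NUM) {	/* 4) array full: flush */
			ce_fastpath_rx_handle(ce_state, batch, n,
					      ce_state->ctrl_addr);
			n = 0;
		}
	}
	if (n)					/* 4) flush the remainder on exit */
		ce_fastpath_rx_handle(ce_state, batch, n,
				      ce_state->ctrl_addr);
}
#endif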
2001 
2002 #else
2003 static void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
2004 {
2005 }
2006 #endif /* WLAN_FEATURE_FASTPATH */
2007 
2008 /* Maximum amount of time, in nanoseconds, that the CE per-engine service may
2009  * run before it should yield. Roughly one jiffy.
2010  */
2011 #define CE_PER_ENGINE_SERVICE_MAX_YIELD_TIME_NS (10 * 1000 * 1000)
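/*
 * Editorial sketch (not compiled): how the yield deadline derived from this
 * constant is used. ce_per_engine_service() below arms the deadline exactly
 * this way; the comparison is assumed to be the essence of
 * hif_ce_service_should_yield(), whose real implementation lives elsewhere.
 */
#if 0
static void example_yield_deadline(struct CE_state *ce_state)
{
	/* arm the deadline on entry to the service routine */
	ce_state->ce_service_yield_time =
		sched_clock() + CE_PER_ENGINE_SERVICE_MAX_YIELD_TIME_NS;

	/* ... later, inside the service loop (assumed form of the check) ... */
	if (sched_clock() > ce_state->ce_service_yield_time)
		ce_state->force_break = 1;
}
#endif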
2012 
2013 /*
2014  * Guts of interrupt handler for per-engine interrupts on a particular CE.
2015  *
2016  * Invokes registered callbacks for recv_complete,
2017  * send_complete, and watermarks.
2018  *
2019  * Returns: number of messages processed
2020  */
2021 int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
2022 {
2023 	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
2024 	uint32_t ctrl_addr = CE_state->ctrl_addr;
2025 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2026 	void *CE_context;
2027 	void *transfer_context;
2028 	qdf_dma_addr_t buf;
2029 	unsigned int nbytes;
2030 	unsigned int id;
2031 	unsigned int flags;
2032 	unsigned int more_comp_cnt = 0;
2033 	unsigned int more_snd_comp_cnt = 0;
2034 	unsigned int sw_idx, hw_idx;
2035 	uint32_t toeplitz_hash_result;
2036 	uint32_t mode = hif_get_conparam(scn);
2037 
2038 	if (hif_is_nss_wifi_enabled(scn) && (CE_state->htt_rx_data))
2039 		return CE_state->receive_count;
2040 
2041 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
2042 		HIF_ERROR("[premature rc=0]");
2043 		return 0; /* no work done */
2044 	}
2045 
2046 	/* Clear force_break flag and re-initialize receive_count to 0 */
2047 	CE_state->receive_count = 0;
2048 	CE_state->force_break = 0;
2049 	CE_state->ce_service_yield_time =
2050 		sched_clock() +
2051 		(unsigned long long)CE_PER_ENGINE_SERVICE_MAX_YIELD_TIME_NS;
2052 
2053 
2054 	qdf_spin_lock(&CE_state->ce_index_lock);
2055 	/*
2056 	 * The check below makes sure the CE we are handling is a datapath CE
2057 	 * and that fastpath is enabled.
2058 	 */
2059 	if (ce_is_fastpath_handler_registered(CE_state)) {
2060 		/* For datapath only Rx CEs */
2061 		ce_per_engine_service_fast(scn, CE_id);
2062 		goto unlock_end;
2063 	}
2064 
2065 more_completions:
2066 	if (CE_state->recv_cb) {
2067 
2068 		/* Pop completed recv buffers and call
2069 		 * the registered recv callback for each
2070 		 */
2071 		while (hif_state->ce_services->ce_completed_recv_next_nolock
2072 				(CE_state, &CE_context, &transfer_context,
2073 				&buf, &nbytes, &id, &flags) ==
2074 				QDF_STATUS_SUCCESS) {
2075 			qdf_spin_unlock(&CE_state->ce_index_lock);
2076 			CE_state->recv_cb((struct CE_handle *)CE_state,
2077 					  CE_context, transfer_context, buf,
2078 					  nbytes, id, flags);
2079 
2080 			/*
2081 			 * EV #112693 -
2082 			 * [Peregrine][ES1][WB342][Win8x86][Performance]
2083 			 * BSoD_0x133 occurred in VHT80 UDP_DL.
2084 			 * Break out of the DPC by force if the number of
2085 			 * loops in hif_pci_ce_recv_data reaches
2086 			 * MAX_NUM_OF_RECEIVES, to avoid spending too much
2087 			 * time in the DPC for each interrupt. Schedule
2088 			 * another DPC to avoid data loss if we had taken
2089 			 * the force-break action. This currently applies
2090 			 * to Windows only; Linux/Mac OS can extend it to
2091 			 * their platforms if necessary.
2092 			 */
2093 
2094 			/* Break out of the receive processing by
2095 			 * force if force_break is set
2096 			 */
2097 			if (qdf_unlikely(CE_state->force_break)) {
2098 				qdf_atomic_set(&CE_state->rx_pending, 1);
2099 				goto target_access_end;
2100 			}
2101 			qdf_spin_lock(&CE_state->ce_index_lock);
2102 		}
2103 	}
2104 
2105 	/*
2106 	 * Attention: we may experience a potential infinite loop in the
2107 	 * while loop below during a send stress test.
2108 	 * Resolve it the same way as the receive case (refer to EV #112693).
2109 	 */
2110 
2111 	if (CE_state->send_cb) {
2112 		/* Pop completed send buffers and call
2113 		 * the registered send callback for each
2114 		 */
2115 
2116 #ifdef ATH_11AC_TXCOMPACT
2117 		while (hif_state->ce_services->ce_completed_send_next_nolock
2118 			 (CE_state, &CE_context,
2119 			 &transfer_context, &buf, &nbytes,
2120 			 &id, &sw_idx, &hw_idx,
2121 			 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
2122 
2123 			if (CE_id != CE_HTT_H2T_MSG ||
2124 			    QDF_IS_EPPING_ENABLED(mode)) {
2125 				qdf_spin_unlock(&CE_state->ce_index_lock);
2126 				CE_state->send_cb((struct CE_handle *)CE_state,
2127 						  CE_context, transfer_context,
2128 						  buf, nbytes, id, sw_idx,
2129 						  hw_idx, toeplitz_hash_result);
2130 				qdf_spin_lock(&CE_state->ce_index_lock);
2131 			} else {
2132 				struct HIF_CE_pipe_info *pipe_info =
2133 					(struct HIF_CE_pipe_info *)CE_context;
2134 
2135 				qdf_spin_lock(&pipe_info->
2136 					      completion_freeq_lock);
2137 				pipe_info->num_sends_allowed++;
2138 				qdf_spin_unlock(&pipe_info->
2139 						completion_freeq_lock);
2140 			}
2141 		}
2142 #else                           /*ATH_11AC_TXCOMPACT */
2143 		while (hif_state->ce_services->ce_completed_send_next_nolock
2144 			 (CE_state, &CE_context,
2145 			  &transfer_context, &buf, &nbytes,
2146 			  &id, &sw_idx, &hw_idx,
2147 			  &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
2148 			qdf_spin_unlock(&CE_state->ce_index_lock);
2149 			CE_state->send_cb((struct CE_handle *)CE_state,
2150 				  CE_context, transfer_context, buf,
2151 				  nbytes, id, sw_idx, hw_idx,
2152 				  toeplitz_hash_result);
2153 			qdf_spin_lock(&CE_state->ce_index_lock);
2154 		}
2155 #endif /*ATH_11AC_TXCOMPACT */
2156 	}
2157 
2158 more_watermarks:
2159 	if (CE_state->misc_cbs) {
2160 		if (CE_state->watermark_cb &&
2161 				hif_state->ce_services->watermark_int(CE_state,
2162 					&flags)) {
2163 			qdf_spin_unlock(&CE_state->ce_index_lock);
2164 			/* Convert HW IS bits to software flags */
2165 			CE_state->watermark_cb((struct CE_handle *)CE_state,
2166 					CE_state->wm_context, flags);
2167 			qdf_spin_lock(&CE_state->ce_index_lock);
2168 		}
2169 	}
2170 
2171 	/*
2172 	 * Clear the misc interrupts (watermark) that were handled above,
2173 	 * and that will be checked again below.
2174 	 * Clear and check for copy-complete interrupts again, just in case
2175 	 * more copy completions happened while the misc interrupts were being
2176 	 * handled.
2177 	 */
2178 	if (!ce_srng_based(scn)) {
2179 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
2180 			CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
2181 					   CE_WATERMARK_MASK |
2182 					   HOST_IS_COPY_COMPLETE_MASK);
2183 		} else {
2184 			HIF_ERROR("%s: target access is not allowed", __func__);
2185 			goto unlock_end;
2186 		}
2187 	}
2188 
2189 	/*
2190 	 * Now that per-engine interrupts are cleared, verify that
2191 	 * no recv interrupts arrive while processing send interrupts,
2192 	 * and no recv or send interrupts happened while processing
2193 	 * misc interrupts. Go back and check again. Keep checking until
2194 	 * we find no more events to process.
2195 	 */
2196 	if (CE_state->recv_cb &&
2197 		hif_state->ce_services->ce_recv_entries_done_nolock(scn,
2198 				CE_state)) {
2199 		if (QDF_IS_EPPING_ENABLED(mode) ||
2200 		    more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
2201 			goto more_completions;
2202 		} else {
2203 			if (!ce_srng_based(scn)) {
2204 				HIF_ERROR(
2205 					"%s:Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
2206 					__func__,
2207 					CE_state->dest_ring->nentries_mask,
2208 					CE_state->dest_ring->sw_index,
2209 					CE_DEST_RING_READ_IDX_GET(scn,
2210 							  CE_state->ctrl_addr));
2211 			}
2212 		}
2213 	}
2214 
2215 	if (CE_state->send_cb &&
2216 		hif_state->ce_services->ce_send_entries_done_nolock(scn,
2217 				CE_state)) {
2218 		if (QDF_IS_EPPING_ENABLED(mode) ||
2219 		    more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
2220 			goto more_completions;
2221 		} else {
2222 			if (!ce_srng_based(scn)) {
2223 				HIF_ERROR(
2224 					"%s:Potential infinite loop detected during send completion nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
2225 					__func__,
2226 					CE_state->src_ring->nentries_mask,
2227 					CE_state->src_ring->sw_index,
2228 					CE_SRC_RING_READ_IDX_GET(scn,
2229 							 CE_state->ctrl_addr));
2230 			}
2231 		}
2232 	}
2233 
2234 	if (CE_state->misc_cbs && CE_state->watermark_cb) {
2235 		if (hif_state->ce_services->watermark_int(CE_state, &flags))
2236 			goto more_watermarks;
2237 	}
2238 
2239 	qdf_atomic_set(&CE_state->rx_pending, 0);
2240 
2241 unlock_end:
2242 	qdf_spin_unlock(&CE_state->ce_index_lock);
2243 target_access_end:
2244 	if (Q_TARGET_ACCESS_END(scn) < 0)
2245 		HIF_ERROR("<--[premature rc=%d]", CE_state->receive_count);
2246 	return CE_state->receive_count;
2247 }
2248 qdf_export_symbol(ce_per_engine_service);
2249 
2250 /*
2251  * Handler for per-engine interrupts on ALL active CEs.
2252  * This is used in cases where the system is sharing a
2253  * single interrupt for all CEs.
2254  */
2255 
2256 void ce_per_engine_service_any(int irq, struct hif_softc *scn)
2257 {
2258 	int CE_id;
2259 	uint32_t intr_summary;
2260 
2261 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2262 		return;
2263 
2264 	if (!qdf_atomic_read(&scn->tasklet_from_intr)) {
2265 		for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
2266 			struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
2267 
2268 			if (qdf_atomic_read(&CE_state->rx_pending)) {
2269 				qdf_atomic_set(&CE_state->rx_pending, 0);
2270 				ce_per_engine_service(scn, CE_id);
2271 			}
2272 		}
2273 
2274 		Q_TARGET_ACCESS_END(scn);
2275 		return;
2276 	}
2277 
2278 	intr_summary = CE_INTERRUPT_SUMMARY(scn);
2279 
2280 	for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
2281 		if (intr_summary & (1 << CE_id))
2282 			intr_summary &= ~(1 << CE_id);
2283 		else
2284 			continue;       /* no intr pending on this CE */
2285 
2286 		ce_per_engine_service(scn, CE_id);
2287 	}
2288 
2289 	Q_TARGET_ACCESS_END(scn);
2290 }
2291 
2292 /*
2293  * Adjust interrupts for the copy complete handler.
2294  * If it's needed for either send or recv, then unmask
2295  * this interrupt; otherwise, mask it.
2296  *
2297  * Called with target_lock held.
2298  */
2299 static void
2300 ce_per_engine_handler_adjust_legacy(struct CE_state *CE_state,
2301 			     int disable_copy_compl_intr)
2302 {
2303 	uint32_t ctrl_addr = CE_state->ctrl_addr;
2304 	struct hif_softc *scn = CE_state->scn;
2305 
2306 	CE_state->disable_copy_compl_intr = disable_copy_compl_intr;
2307 
2308 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2309 		return;
2310 
2311 	if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
2312 		HIF_ERROR("%s: target access is not allowed", __func__);
2313 		return;
2314 	}
2315 
2316 	if ((!disable_copy_compl_intr) &&
2317 	    (CE_state->send_cb || CE_state->recv_cb))
2318 		CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
2319 	else
2320 		CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
2321 
2322 	if (CE_state->watermark_cb)
2323 		CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
2324 	else
2325 		CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
2326 	Q_TARGET_ACCESS_END(scn);
2327 }
2328 
2329 /* Iterate over the CE_state list and disable the copy-complete interrupt
2330  * if it has already been registered.
2331  */
2332 void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn)
2333 {
2334 	int CE_id;
2335 
2336 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2337 		return;
2338 
2339 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
2340 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
2341 		uint32_t ctrl_addr = CE_state->ctrl_addr;
2342 
2343 		/* if the interrupt is currently enabled, disable it */
2344 		if (!CE_state->disable_copy_compl_intr
2345 		    && (CE_state->send_cb || CE_state->recv_cb))
2346 			CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
2347 
2348 		if (CE_state->watermark_cb)
2349 			CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
2350 	}
2351 	Q_TARGET_ACCESS_END(scn);
2352 }
2353 
2354 void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn)
2355 {
2356 	int CE_id;
2357 
2358 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2359 		return;
2360 
2361 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
2362 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
2363 		uint32_t ctrl_addr = CE_state->ctrl_addr;
2364 
2365 		/*
2366 		 * If the CE is supposed to have copy complete interrupts
2367 		 * enabled (i.e. there is a callback registered, and the
2368 		 * "disable" flag is not set), then re-enable the interrupt.
2369 		 */
2370 		if (!CE_state->disable_copy_compl_intr
2371 		    && (CE_state->send_cb || CE_state->recv_cb))
2372 			CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
2373 
2374 		if (CE_state->watermark_cb)
2375 			CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
2376 	}
2377 	Q_TARGET_ACCESS_END(scn);
2378 }
2379 
2380 /**
2381  * ce_send_cb_register(): register completion handler
2382  * @copyeng: CE_state representing the ce we are adding the behavior to
2383  * @fn_ptr: callback that the ce should use when processing tx completions
2384  * @disable_interrupts: whether copy-complete interrupts should be disabled
2385  *
2386  * Caller should guarantee that no transactions are in progress before
2387  * switching the callback function.
2388  *
2389  * Registers the send context before the fn pointer so that if the cb is valid
2390  * the context should be valid.
2391  *
2392  * Beware that currently this function will enable completion interrupts.
2393  */
2394 void
2395 ce_send_cb_register(struct CE_handle *copyeng,
2396 		    ce_send_cb fn_ptr,
2397 		    void *ce_send_context, int disable_interrupts)
2398 {
2399 	struct CE_state *CE_state = (struct CE_state *)copyeng;
2400 	struct hif_softc *scn;
2401 	struct HIF_CE_state *hif_state;
2402 
2403 	if (CE_state == NULL) {
2404 		HIF_ERROR("%s: Error CE state = NULL", __func__);
2405 		return;
2406 	}
2407 	scn = CE_state->scn;
2408 	hif_state = HIF_GET_CE_STATE(scn);
2409 	if (hif_state == NULL) {
2410 		HIF_ERROR("%s: Error HIF state = NULL", __func__);
2411 		return;
2412 	}
2413 	CE_state->send_context = ce_send_context;
2414 	CE_state->send_cb = fn_ptr;
2415 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
2416 							disable_interrupts);
2417 }
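/*
 * Editorial sketch (not compiled): registering a Tx-completion callback.
 * The callback parameter list below mirrors how send_cb is invoked in
 * ce_per_engine_service() above; the authoritative ce_send_cb typedef is in
 * the CE API header, and the helper names here are hypothetical.
 */
#if 0
static void example_send_done(struct CE_handle *copyeng, void *ce_context,
			      void *transfer_context, qdf_dma_addr_t buf,
			      unsigned int nbytes, unsigned int transfer_id,
			      unsigned int sw_idx, unsigned int hw_idx,
			      uint32_t toeplitz_hash_result)
{
	/* recycle or free transfer_context (typically an nbuf) here */
}

static void example_register_send_cb(struct CE_handle *copyeng, void *pipe)
{
	/* final argument 0: leave completion interrupts enabled */
	ce_send_cb_register(copyeng, example_send_done, pipe, 0);
}
#endif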
2418 
2419 /**
2420  * ce_recv_cb_register(): register completion handler
2421  * @copyeng: CE_state representing the ce we are adding the behavior to
2422  * @fn_ptr: callback that the ce should use when processing rx completions
2423  * @disable_interrupts: whether copy-complete interrupts should be disabled
2424  *
2425  * Registers the recv context before the fn pointer so that if the cb is valid
2426  * the context should be valid.
2427  *
2428  * Caller should guarantee that no transactions are in progress before
2429  * switching the callback function.
2430  */
2431 void
2432 ce_recv_cb_register(struct CE_handle *copyeng,
2433 		    CE_recv_cb fn_ptr,
2434 		    void *CE_recv_context, int disable_interrupts)
2435 {
2436 	struct CE_state *CE_state = (struct CE_state *)copyeng;
2437 	struct hif_softc *scn;
2438 	struct HIF_CE_state *hif_state;
2439 
2440 	if (CE_state == NULL) {
2441 		HIF_ERROR("%s: ERROR CE state = NULL", __func__);
2442 		return;
2443 	}
2444 	scn = CE_state->scn;
2445 	hif_state = HIF_GET_CE_STATE(scn);
2446 	if (hif_state == NULL) {
2447 		HIF_ERROR("%s: Error HIF state = NULL", __func__);
2448 		return;
2449 	}
2450 	CE_state->recv_context = CE_recv_context;
2451 	CE_state->recv_cb = fn_ptr;
2452 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
2453 							disable_interrupts);
2454 }
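/*
 * Editorial sketch (not compiled): the Rx counterpart. The parameter list
 * mirrors how recv_cb is invoked in ce_per_engine_service() above; the
 * authoritative CE_recv_cb typedef is in the CE API header, and the helper
 * names here are hypothetical.
 */
#if 0
static void example_recv_done(struct CE_handle *copyeng, void *ce_context,
			      void *transfer_context, qdf_dma_addr_t buf,
			      unsigned int nbytes, unsigned int transfer_id,
			      unsigned int flags)
{
	/* hand the received buffer (transfer_context) up the stack here */
}

static void example_register_recv_cb(struct CE_handle *copyeng, void *pipe)
{
	ce_recv_cb_register(copyeng, example_recv_done, pipe, 0);
}
#endif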
2455 
2456 /**
2457  * ce_watermark_cb_register(): register completion handler
2458  * @copyeng: CE_state representing the ce we are adding the behavior to
2459  * @fn_ptr: callback that the ce should use when processing watermark events
2460  *
2461  * Caller should guarantee that no watermark events are being processed before
2462  * switching the callback function.
2463  */
2464 void
2465 ce_watermark_cb_register(struct CE_handle *copyeng,
2466 			 CE_watermark_cb fn_ptr, void *CE_wm_context)
2467 {
2468 	struct CE_state *CE_state = (struct CE_state *)copyeng;
2469 	struct hif_softc *scn = CE_state->scn;
2470 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2471 
2472 	CE_state->watermark_cb = fn_ptr;
2473 	CE_state->wm_context = CE_wm_context;
2474 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
2475 							0);
2476 	if (fn_ptr)
2477 		CE_state->misc_cbs = 1;
2478 }
2479 
2480 bool ce_get_rx_pending(struct hif_softc *scn)
2481 {
2482 	int CE_id;
2483 
2484 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
2485 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
2486 
2487 		if (qdf_atomic_read(&CE_state->rx_pending))
2488 			return true;
2489 	}
2490 
2491 	return false;
2492 }
2493 
2494 /**
2495  * ce_check_rx_pending() - ce_check_rx_pending
2496  * @CE_state: context of the copy engine to check
2497  *
2498  * Return: true if the per_engine_service
2499  *	didn't process all the rx descriptors.
2500  */
2501 bool ce_check_rx_pending(struct CE_state *CE_state)
2502 {
2503 	if (qdf_atomic_read(&CE_state->rx_pending))
2504 		return true;
2505 	else
2506 		return false;
2507 }
2508 qdf_export_symbol(ce_check_rx_pending);
2509 
2510 #ifdef IPA_OFFLOAD
2511 /**
2512  * ce_ipa_get_resource() - get uc resource on copyengine
2513  * @ce: copyengine context
2514  * @ce_sr_base_paddr: copyengine source ring base physical address
2515  * @ce_sr_ring_size: copyengine source ring size
2516  * @ce_reg_paddr: copyengine register physical address
2517  *
2518  * The copy engine should release these resources to the micro controller.
2519  * The micro controller needs:
2520  *  - Copy engine source descriptor base address
2521  *  - Copy engine source descriptor size
2522  *  - PCI BAR address to access the copy engine register
2523  *
2524  * Return: None
2525  */
2526 void ce_ipa_get_resource(struct CE_handle *ce,
2527 			 qdf_dma_addr_t *ce_sr_base_paddr,
2528 			 uint32_t *ce_sr_ring_size,
2529 			 qdf_dma_addr_t *ce_reg_paddr)
2530 {
2531 	struct CE_state *CE_state = (struct CE_state *)ce;
2532 	uint32_t ring_loop;
2533 	struct CE_src_desc *ce_desc;
2534 	qdf_dma_addr_t phy_mem_base;
2535 	struct hif_softc *scn = CE_state->scn;
2536 
2537 	if (CE_UNUSED == CE_state->state) {
2538 		*ce_sr_base_paddr = 0;
2539 		*ce_sr_ring_size = 0;
2540 		return;
2541 	}
2542 
2543 	/* Update default value for descriptor */
2544 	for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
2545 	     ring_loop++) {
2546 		ce_desc = (struct CE_src_desc *)
2547 			  ((char *)CE_state->src_ring->base_addr_owner_space +
2548 			   ring_loop * (sizeof(struct CE_src_desc)));
2549 		CE_IPA_RING_INIT(ce_desc);
2550 	}
2551 
2552 	/* Get BAR address */
2553 	hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);
2554 
2555 	*ce_sr_base_paddr = CE_state->src_ring->base_addr_CE_space;
2556 	*ce_sr_ring_size = (uint32_t) (CE_state->src_ring->nentries *
2557 		sizeof(struct CE_src_desc));
2558 	*ce_reg_paddr = phy_mem_base + CE_BASE_ADDRESS(CE_state->id) +
2559 			SR_WR_INDEX_ADDRESS;
2560 }
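/*
 * Editorial sketch (not compiled): how a caller collects the three values
 * this helper exports for the IPA micro controller. The helper name is
 * hypothetical; the out-parameters match the prototype above.
 */
#if 0
static void example_export_ce_to_ipa(struct CE_handle *ce)
{
	qdf_dma_addr_t sr_base_paddr = 0;
	uint32_t sr_ring_size = 0;
	qdf_dma_addr_t reg_paddr = 0;

	ce_ipa_get_resource(ce, &sr_base_paddr, &sr_ring_size, &reg_paddr);
	/* pass sr_base_paddr / sr_ring_size / reg_paddr on to the IPA uC */
}
#endif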
2561 #endif /* IPA_OFFLOAD */
2562 
2563 static bool ce_check_int_watermark(struct CE_state *CE_state,
2564 				   unsigned int *flags)
2565 {
2566 	uint32_t ce_int_status;
2567 	uint32_t ctrl_addr = CE_state->ctrl_addr;
2568 	struct hif_softc *scn = CE_state->scn;
2569 
2570 	ce_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
2571 	if (ce_int_status & CE_WATERMARK_MASK) {
2572 		/* Convert HW IS bits to software flags */
2573 		*flags =
2574 			(ce_int_status & CE_WATERMARK_MASK) >>
2575 			CE_WM_SHFT;
2576 		return true;
2577 	}
2578 
2579 	return false;
2580 }
2581 
2582 static void ce_legacy_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
2583 			struct CE_ring_state *src_ring,
2584 			struct CE_attr *attr)
2585 {
2586 	uint32_t ctrl_addr;
2587 	uint64_t dma_addr;
2588 
2589 	QDF_ASSERT(ce_id < scn->ce_count);
2590 	ctrl_addr = CE_BASE_ADDRESS(ce_id);
2591 
2592 	src_ring->hw_index =
2593 		CE_SRC_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
2594 	src_ring->sw_index = src_ring->hw_index;
2595 	src_ring->write_index =
2596 		CE_SRC_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
2597 	dma_addr = src_ring->base_addr_CE_space;
2598 	CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
2599 			(uint32_t)(dma_addr & 0xFFFFFFFF));
2600 
2601 	/* if SR_BA_ADDRESS_HIGH register exists */
2602 	if (is_register_supported(SR_BA_ADDRESS_HIGH)) {
2603 		uint32_t tmp;
2604 
2605 		tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET(
2606 				scn, ctrl_addr);
2607 		tmp &= ~0x1F;
2608 		dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
2609 		CE_SRC_RING_BASE_ADDR_HIGH_SET(scn,
2610 				ctrl_addr, (uint32_t)dma_addr);
2611 	}
2612 	CE_SRC_RING_SZ_SET(scn, ctrl_addr, src_ring->nentries);
2613 	CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max);
2614 #ifdef BIG_ENDIAN_HOST
2615 	/* Enable source ring byte swap for big endian host */
2616 	CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
2617 #endif
2618 	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
2619 	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, src_ring->nentries);
2620 
2621 }
2622 
2623 static void ce_legacy_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id,
2624 				struct CE_ring_state *dest_ring,
2625 				struct CE_attr *attr)
2626 {
2627 	uint32_t ctrl_addr;
2628 	uint64_t dma_addr;
2629 
2630 	QDF_ASSERT(ce_id < scn->ce_count);
2631 	ctrl_addr = CE_BASE_ADDRESS(ce_id);
2632 	dest_ring->sw_index =
2633 		CE_DEST_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
2634 	dest_ring->write_index =
2635 		CE_DEST_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
2636 	dma_addr = dest_ring->base_addr_CE_space;
2637 	CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
2638 			(uint32_t)(dma_addr & 0xFFFFFFFF));
2639 
2640 	/* if DR_BA_ADDRESS_HIGH exists */
2641 	if (is_register_supported(DR_BA_ADDRESS_HIGH)) {
2642 		uint32_t tmp;
2643 
2644 		tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn,
2645 				ctrl_addr);
2646 		tmp &= ~0x1F;
2647 		dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
2648 		CE_DEST_RING_BASE_ADDR_HIGH_SET(scn,
2649 				ctrl_addr, (uint32_t)dma_addr);
2650 	}
2651 
2652 	CE_DEST_RING_SZ_SET(scn, ctrl_addr, dest_ring->nentries);
2653 #ifdef BIG_ENDIAN_HOST
2654 	/* Enable Dest ring byte swap for big endian host */
2655 	CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
2656 #endif
2657 	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
2658 	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, dest_ring->nentries);
2659 }
2660 
2661 static uint32_t ce_get_desc_size_legacy(uint8_t ring_type)
2662 {
2663 	switch (ring_type) {
2664 	case CE_RING_SRC:
2665 		return sizeof(struct CE_src_desc);
2666 	case CE_RING_DEST:
2667 		return sizeof(struct CE_dest_desc);
2668 	case CE_RING_STATUS:
2669 		qdf_assert(0);
2670 		return 0;
2671 	default:
2672 		return 0;
2673 	}
2674 
2675 	return 0;
2676 }
2677 
2678 static int ce_ring_setup_legacy(struct hif_softc *scn, uint8_t ring_type,
2679 		uint32_t ce_id, struct CE_ring_state *ring,
2680 		struct CE_attr *attr)
2681 {
2682 	int status = Q_TARGET_ACCESS_BEGIN(scn);
2683 
2684 	if (status < 0)
2685 		goto out;
2686 
2687 
2688 	switch (ring_type) {
2689 	case CE_RING_SRC:
2690 		ce_legacy_src_ring_setup(scn, ce_id, ring, attr);
2691 		break;
2692 	case CE_RING_DEST:
2693 		ce_legacy_dest_ring_setup(scn, ce_id, ring, attr);
2694 		break;
2695 	case CE_RING_STATUS:
2696 	default:
2697 		qdf_assert(0);
2698 		break;
2699 	}
2700 
2701 	Q_TARGET_ACCESS_END(scn);
2702 out:
2703 	return status;
2704 }
2705 
2706 static void ce_prepare_shadow_register_v2_cfg_legacy(struct hif_softc *scn,
2707 			    struct pld_shadow_reg_v2_cfg **shadow_config,
2708 			    int *num_shadow_registers_configured)
2709 {
2710 	*num_shadow_registers_configured = 0;
2711 	*shadow_config = NULL;
2712 }
2713 
2714 struct ce_ops ce_service_legacy = {
2715 	.ce_get_desc_size = ce_get_desc_size_legacy,
2716 	.ce_ring_setup = ce_ring_setup_legacy,
2717 	.ce_sendlist_send = ce_sendlist_send_legacy,
2718 	.ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_legacy,
2719 	.ce_revoke_recv_next = ce_revoke_recv_next_legacy,
2720 	.ce_cancel_send_next = ce_cancel_send_next_legacy,
2721 	.ce_recv_buf_enqueue = ce_recv_buf_enqueue_legacy,
2722 	.ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_legacy,
2723 	.ce_send_nolock = ce_send_nolock_legacy,
2724 	.watermark_int = ce_check_int_watermark,
2725 	.ce_completed_send_next_nolock = ce_completed_send_next_nolock_legacy,
2726 	.ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_legacy,
2727 	.ce_send_entries_done_nolock = ce_send_entries_done_nolock_legacy,
2728 	.ce_prepare_shadow_register_v2_cfg =
2729 		ce_prepare_shadow_register_v2_cfg_legacy,
2730 };
2731 
2732 
2733 struct ce_ops *ce_services_legacy(void)
2734 {
2735 	return &ce_service_legacy;
2736 }
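/*
 * Editorial sketch (not compiled): this ops table is how the front-end
 * wrappers earlier in the file (ce_cancel_send_next(), ce_per_engine_service()
 * and friends) reach the legacy implementations. The helper name is
 * hypothetical; the dispatch shape matches the wrappers above.
 */
#if 0
static uint32_t example_desc_size_via_ops(struct hif_softc *scn,
					  uint8_t ring_type)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	/* resolves to ce_get_desc_size_legacy() when ce_services_legacy()
	 * has been selected for this target
	 */
	return hif_state->ce_services->ce_get_desc_size(ring_type);
}
#endif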
2737 
2738 #if HIF_CE_DEBUG_DATA_BUF
2739 /**
2740  * hif_dump_desc_data_buf() - dump the data attached to a ce descriptor event
2741  * @buf: buffer to copy to
2742  * @pos: current position up to which buf is already filled
2743  * @data: data to be copied
2744  * @data_len: length of the data to be copied
2745  */
2746 static uint32_t hif_dump_desc_data_buf(uint8_t *buf, ssize_t pos,
2747 					uint8_t *data, uint32_t data_len)
2748 {
2749 	pos += snprintf(buf + pos, PAGE_SIZE - pos, "Data:(Max%dBytes)\n",
2750 			CE_DEBUG_MAX_DATA_BUF_SIZE);
2751 
2752 	if ((data_len > 0) && data) {
2753 		if (data_len < 16) {
2754 			hex_dump_to_buffer(data,
2755 						CE_DEBUG_DATA_PER_ROW,
2756 						16, 1, buf + pos,
2757 						(ssize_t)PAGE_SIZE - pos,
2758 						false);
2759 			pos += CE_DEBUG_PRINT_BUF_SIZE(data_len);
2760 			pos += snprintf(buf + pos, PAGE_SIZE - pos, "\n");
2761 		} else {
2762 			uint32_t rows = (data_len / 16) + 1;
2763 			uint32_t row = 0;
2764 
2765 			for (row = 0; row < rows; row++) {
2766 				hex_dump_to_buffer(data + (row * 16),
2767 							CE_DEBUG_DATA_PER_ROW,
2768 							16, 1, buf + pos,
2769 							(ssize_t)PAGE_SIZE
2770 							- pos, false);
2771 				pos +=
2772 				CE_DEBUG_PRINT_BUF_SIZE(CE_DEBUG_DATA_PER_ROW);
2773 				pos += snprintf(buf + pos, PAGE_SIZE - pos,
2774 						"\n");
2775 			}
2776 		}
2777 	}
2778 
2779 	return pos;
2780 }
2781 #endif
2782 
2783 /*
2784  * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
2785  * here as well
2786  */
2787 #if HIF_CE_DEBUG_DATA_BUF
2788 static const char *ce_event_type_to_str(enum hif_ce_event_type type)
2789 {
2790 	switch (type) {
2791 	case HIF_RX_DESC_POST:
2792 		return "HIF_RX_DESC_POST";
2793 	case HIF_RX_DESC_COMPLETION:
2794 		return "HIF_RX_DESC_COMPLETION";
2795 	case HIF_TX_GATHER_DESC_POST:
2796 		return "HIF_TX_GATHER_DESC_POST";
2797 	case HIF_TX_DESC_POST:
2798 		return "HIF_TX_DESC_POST";
2799 	case HIF_TX_DESC_SOFTWARE_POST:
2800 		return "HIF_TX_DESC_SOFTWARE_POST";
2801 	case HIF_TX_DESC_COMPLETION:
2802 		return "HIF_TX_DESC_COMPLETION";
2803 	case FAST_RX_WRITE_INDEX_UPDATE:
2804 		return "FAST_RX_WRITE_INDEX_UPDATE";
2805 	case FAST_RX_SOFTWARE_INDEX_UPDATE:
2806 		return "FAST_RX_SOFTWARE_INDEX_UPDATE";
2807 	case FAST_TX_WRITE_INDEX_UPDATE:
2808 		return "FAST_TX_WRITE_INDEX_UPDATE";
2809 	case FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE:
2810 		return "FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE";
2811 	case FAST_TX_SOFTWARE_INDEX_UPDATE:
2812 		return "FAST_TX_SOFTWARE_INDEX_UPDATE";
2813 	case RESUME_WRITE_INDEX_UPDATE:
2814 		return "RESUME_WRITE_INDEX_UPDATE";
2815 	case HIF_IRQ_EVENT:
2816 		return "HIF_IRQ_EVENT";
2817 	case HIF_CE_TASKLET_ENTRY:
2818 		return "HIF_CE_TASKLET_ENTRY";
2819 	case HIF_CE_TASKLET_RESCHEDULE:
2820 		return "HIF_CE_TASKLET_RESCHEDULE";
2821 	case HIF_CE_TASKLET_EXIT:
2822 		return "HIF_CE_TASKLET_EXIT";
2823 	case HIF_CE_REAP_ENTRY:
2824 		return "HIF_CE_REAP_ENTRY";
2825 	case HIF_CE_REAP_EXIT:
2826 		return "HIF_CE_REAP_EXIT";
2827 	case NAPI_SCHEDULE:
2828 		return "NAPI_SCHEDULE";
2829 	case NAPI_POLL_ENTER:
2830 		return "NAPI_POLL_ENTER";
2831 	case NAPI_COMPLETE:
2832 		return "NAPI_COMPLETE";
2833 	case NAPI_POLL_EXIT:
2834 		return "NAPI_POLL_EXIT";
2835 	case HIF_RX_NBUF_ALLOC_FAILURE:
2836 		return "HIF_RX_NBUF_ALLOC_FAILURE";
2837 	case HIF_RX_NBUF_MAP_FAILURE:
2838 		return "HIF_RX_NBUF_MAP_FAILURE";
2839 	case HIF_RX_NBUF_ENQUEUE_FAILURE:
2840 		return "HIF_RX_NBUF_ENQUEUE_FAILURE";
2841 	default:
2842 		return "invalid";
2843 	}
2844 }
2845 
2846 /**
2847  * hif_dump_desc_event() - dump one recorded ce descriptor event into a buffer
2848  * @scn: hif context
2849  * @buf: buffer into which the event is formatted
2850  * The CE id and event index dumped are ce_hist->hist_id and hist_index.
2851  */
2852 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf)
2853 {
2854 	struct hif_ce_desc_event *event;
2855 	uint64_t secs, usecs;
2856 	ssize_t len = 0;
2857 	struct ce_desc_hist *ce_hist = NULL;
2858 	struct hif_ce_desc_event *hist_ev = NULL;
2859 
2860 	if (!scn)
2861 		return -EINVAL;
2862 
2863 	ce_hist = &scn->hif_ce_desc_hist;
2864 
2865 	hist_ev =
2866 		(struct hif_ce_desc_event *)ce_hist->hist_ev[ce_hist->hist_id];
2867 
2868 	if (!hist_ev) {
2869 		qdf_print("Low Memory\n");
2870 		return -EINVAL;
2871 	}
2872 
2873 	event = &hist_ev[ce_hist->hist_index];
2874 
2875 	if ((ce_hist->hist_id >= CE_COUNT_MAX) ||
2876 		(ce_hist->hist_index >= HIF_CE_HISTORY_MAX)) {
2877 		qdf_print("Invalid values\n");
2878 		return -EINVAL;
2879 	}
2880 
2881 	qdf_log_timestamp_to_secs(event->time, &secs, &usecs);
2882 
2883 	len += snprintf(buf, PAGE_SIZE - len,
2884 			"\nTime:%lld.%06lld, CE:%d, EventType: %s, EventIndex: %d\nDataAddr=%p",
2885 			secs, usecs, ce_hist->hist_id,
2886 			ce_event_type_to_str(event->type),
2887 			event->index, event->memory);
2888 #if HIF_CE_DEBUG_DATA_BUF
2889 	len += snprintf(buf + len, PAGE_SIZE - len, ", Data len=%d",
2890 			event->actual_data_len);
2891 #endif
2892 
2893 	len += snprintf(buf + len, PAGE_SIZE - len, "\nCE descriptor: ");
2894 
2895 	hex_dump_to_buffer(&event->descriptor, sizeof(union ce_desc),
2896 				16, 1, buf + len,
2897 				(ssize_t)PAGE_SIZE - len, false);
2898 	len += CE_DEBUG_PRINT_BUF_SIZE(sizeof(union ce_desc));
2899 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
2900 
2901 #if HIF_CE_DEBUG_DATA_BUF
2902 	if (ce_hist->data_enable[ce_hist->hist_id])
2903 		len = hif_dump_desc_data_buf(buf, len, event->data,
2904 						(event->actual_data_len <
2905 						 CE_DEBUG_MAX_DATA_BUF_SIZE) ?
2906 						event->actual_data_len :
2907 						CE_DEBUG_MAX_DATA_BUF_SIZE);
2908 #endif /*HIF_CE_DEBUG_DATA_BUF*/
2909 
2910 	len += snprintf(buf + len, PAGE_SIZE - len, "END\n");
2911 
2912 	return len;
2913 }
2914 
2915 /*
2916  * hif_input_desc_trace_buf_index() -
2917  * API to get the CE id and CE debug storage buffer index from user input
2918  *
2919  * @scn: hif context
2920  * @buf: data received from the user
2921  * @size: size of the user input
2922  *
2923  * Return: total length consumed
2924  */
2925 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
2926 					const char *buf, size_t size)
2927 {
2928 	struct ce_desc_hist *ce_hist = NULL;
2929 
2930 	if (!scn)
2931 		return -EINVAL;
2932 
2933 	ce_hist = &scn->hif_ce_desc_hist;
2934 
2935 	if (!size) {
2936 		pr_err("%s: Invalid input buffer.\n", __func__);
2937 		return -EINVAL;
2938 	}
2939 
2940 	if (sscanf(buf, "%d %d", &ce_hist->hist_id,
2941 			&ce_hist->hist_index) != 2) {
2942 		pr_err("%s: Invalid input value.\n", __func__);
2943 		return -EINVAL;
2944 	}
2945 	if ((ce_hist->hist_id >= CE_COUNT_MAX) ||
2946 	   (ce_hist->hist_index >= HIF_CE_HISTORY_MAX)) {
2947 		qdf_print("Invalid values\n");
2948 		return -EINVAL;
2949 	}
2950 
2951 	return size;
2952 }
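/*
 * Editorial example (illustrative only): the handler above expects two
 * decimal values, "<ce_id> <hist_index>", written through whatever sysfs
 * node the platform layer binds to it, e.g.:
 *
 *	echo "5 100" > <sysfs-node-for-ce-desc-history>
 *
 * Both values are range-checked against CE_COUNT_MAX and HIF_CE_HISTORY_MAX.
 */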
2953 
2954 #endif /* For MCL, HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */
2955 
2956 #if HIF_CE_DEBUG_DATA_BUF
2957 /*
2958  * hif_ce_en_desc_hist() -
2959  * API to enable recording the CE desc history
2960  *
2961  * @scn: hif context
2962  * @buf: user input of the form "<ce_id> <1/0>"
2963  * @size: size of the user input
2964  *
2965  * Starts or stops recording of the ce descriptor data history
2966  *
2967  * Return: total length consumed
2968  */
2969 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn, const char *buf, size_t size)
2970 {
2971 	struct ce_desc_hist *ce_hist = NULL;
2972 	uint32_t cfg = 0;
2973 	uint32_t ce_id = 0;
2974 
2975 	if (!scn)
2976 		return -EINVAL;
2977 
2978 	ce_hist = &scn->hif_ce_desc_hist;
2979 
2980 	if (!size) {
2981 		pr_err("%s: Invalid input buffer.\n", __func__);
2982 		return -EINVAL;
2983 	}
2984 
2985 	if (sscanf(buf, "%d %d", &ce_id, &cfg) != 2) {
2986 		pr_err("%s: Invalid input: Enter CE Id<sp><1/0>.\n", __func__);
2987 		return -EINVAL;
2988 	}
2989 	if (ce_id >= CE_COUNT_MAX) {
2990 		qdf_print("Invalid value CE Id\n");
2991 		return -EINVAL;
2992 	}
2993 
2994 	if (cfg > 1) {
2995 		qdf_print("Invalid values: enter 0 or 1\n");
2996 		return -EINVAL;
2997 	}
2998 
2999 	if (!ce_hist->hist_ev[ce_id])
3000 		return -EINVAL;
3001 
3002 	qdf_mutex_acquire(&ce_dbg_datamem_lock[ce_id]);
3003 	if (cfg == 1) {
3004 		if (ce_hist->data_enable[ce_id] == 1) {
3005 			qdf_print("\nAlready Enabled\n");
3006 		} else {
3007 			if (alloc_mem_ce_debug_hist_data(scn, ce_id)
3008 							== QDF_STATUS_E_NOMEM){
3009 				ce_hist->data_enable[ce_id] = 0;
3010 				qdf_print("%s: Memory Alloc failed\n", __func__);
3011 			} else
3012 				ce_hist->data_enable[ce_id] = 1;
3013 		}
3014 	} else if (cfg == 0) {
3015 		if (ce_hist->data_enable[ce_id] == 0) {
3016 			qdf_print("\nAlready Disabled\n");
3017 		} else {
3018 			ce_hist->data_enable[ce_id] = 0;
3019 			free_mem_ce_debug_hist_data(scn, ce_id);
3020 		}
3021 	}
3022 	qdf_mutex_release(&ce_dbg_datamem_lock[ce_id]);
3023 
3024 	return size;
3025 }
3026 
3027 /*
3028  * hif_disp_ce_enable_desc_data_hist() -
3029  * API to display value of data_enable
3030  *
3031  * @scn: hif context
3032  * @buf: buffer into which the per-CE data_enable values are printed
3033  * (one line per copy engine)
3034  *
3035  * Return: total length copied
3036  */
3037 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf)
3038 {
3039 	ssize_t len = 0;
3040 	uint32_t ce_id = 0;
3041 	struct ce_desc_hist *ce_hist = NULL;
3042 
3043 	if (!scn)
3044 		return -EINVAL;
3045 
3046 	ce_hist = &scn->hif_ce_desc_hist;
3047 
3048 	for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) {
3049 		len += snprintf(buf + len, PAGE_SIZE - len, " CE%d: %d\n",
3050 				ce_id, ce_hist->data_enable[ce_id]);
3051 	}
3052 
3053 	return len;
3054 }
3055 #endif /* HIF_CE_DEBUG_DATA_BUF */
3056